git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author    David S. Miller <davem@davemloft.net>
Sun, 19 Feb 2017 16:18:46 +0000 (11:18 -0500)
committer David S. Miller <davem@davemloft.net>
Sun, 19 Feb 2017 16:18:46 +0000 (11:18 -0500)
1698 files changed:
Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
Documentation/devicetree/bindings/net/brcm,systemport.txt
Documentation/devicetree/bindings/net/cpsw.txt
Documentation/devicetree/bindings/net/dsa/dsa.txt
Documentation/devicetree/bindings/net/dsa/marvell.txt
Documentation/devicetree/bindings/net/ethernet.txt
Documentation/devicetree/bindings/net/meson-dwmac.txt
Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt
Documentation/devicetree/bindings/net/phy.txt
Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
Documentation/devicetree/bindings/net/stmmac.txt
Documentation/devicetree/bindings/net/wireless/ieee80211.txt [new file with mode: 0644]
Documentation/driver-api/80211/cfg80211.rst
Documentation/filesystems/afs.txt
Documentation/networking/dsa/dsa.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/netfilter-sysctl.txt [new file with mode: 0644]
Documentation/networking/packet_mmap.txt
Documentation/networking/regulatory.txt
Documentation/networking/vrf.txt
Documentation/siphash.txt [new file with mode: 0644]
Documentation/sysctl/net.txt
MAINTAINERS
arch/Kconfig
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/dm814x.dtsi
arch/arm/boot/dts/dra7.dtsi
arch/arm/configs/multi_v7_defconfig
arch/arm/mach-orion5x/common.c
arch/arm/mach-orion5x/common.h
arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
arch/arm/mach-orion5x/wnr854t-setup.c
arch/arm/mach-orion5x/wrt350n-v2-setup.c
arch/arm/plat-orion/common.c
arch/arm/plat-orion/include/plat/common.h
arch/arm64/net/bpf_jit_comp.c
arch/m68k/emu/nfeth.c
arch/mips/cavium-octeon/octeon-platform.c
arch/powerpc/net/bpf_jit_comp64.c
arch/s390/net/bpf_jit_comp.c
arch/sparc/Kconfig
arch/x86/net/bpf_jit_comp.c
drivers/atm/eni.c
drivers/atm/idt77252.c
drivers/atm/midway.h
drivers/bcma/main.c
drivers/infiniband/core/cma.c
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mem.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/isdn/hardware/eicon/message.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/bonding/bond_main.c
drivers/net/can/Makefile
drivers/net/can/at91_can.c
drivers/net/can/c_can/c_can.c
drivers/net/can/dev.c
drivers/net/can/flexcan.c
drivers/net/can/ifi_canfd/ifi_canfd.c
drivers/net/can/janz-ican3.c
drivers/net/can/m_can/m_can.c
drivers/net/can/rcar/rcar_can.c
drivers/net/can/rcar/rcar_canfd.c
drivers/net/can/rx-offload.c [new file with mode: 0644]
drivers/net/can/softing/softing_cs.c
drivers/net/can/xilinx_can.c
drivers/net/dsa/Makefile
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/b53/b53_mdio.c
drivers/net/dsa/b53/b53_priv.h
drivers/net/dsa/b53/b53_regs.h
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/bcm_sf2.h
drivers/net/dsa/bcm_sf2_cfp.c [new file with mode: 0644]
drivers/net/dsa/bcm_sf2_regs.h
drivers/net/dsa/mv88e6060.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/global2.c
drivers/net/dsa/mv88e6xxx/global2.h
drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
drivers/net/dsa/mv88e6xxx/port.c
drivers/net/dsa/mv88e6xxx/port.h
drivers/net/dsa/qca8k.c
drivers/net/dsa/qca8k.h
drivers/net/dummy.c
drivers/net/ethernet/3com/typhoon.c
drivers/net/ethernet/Kconfig
drivers/net/ethernet/Makefile
drivers/net/ethernet/adi/bfin_mac.c
drivers/net/ethernet/aeroflex/greth.c
drivers/net/ethernet/agere/et131x.c
drivers/net/ethernet/alacritech/slicoss.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/amazon/ena/ena_admin_defs.h
drivers/net/ethernet/amazon/ena/ena_com.c
drivers/net/ethernet/amazon/ena/ena_com.h
drivers/net/ethernet/amazon/ena/ena_eth_com.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amazon/ena/ena_netdev.h
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/aquantia/Kconfig [new file with mode: 0644]
drivers/net/ethernet/aquantia/Makefile [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/Makefile [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_cfg.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_common.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_hw.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_main.c [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_main.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_nic.c [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_nic.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_ring.c [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_ring.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_rss.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_utils.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_vec.c [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/aq_vec.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h [new file with mode: 0644]
drivers/net/ethernet/aquantia/atlantic/ver.h [new file with mode: 0644]
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
drivers/net/ethernet/atheros/atlx/atl1.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bcm63xx_enet.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
drivers/net/ethernet/broadcom/bgmac-bcma.c
drivers/net/ethernet/broadcom/bgmac-platform.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bgmac.h
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnxt/Makefile
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c [new file with mode: 0644]
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h [new file with mode: 0644]
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/sb1250-mac.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/calxeda/xgmac.c
drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
drivers/net/ethernet/cavium/liquidio/liquidio_common.h
drivers/net/ethernet/cavium/liquidio/octeon_config.h
drivers/net/ethernet/cavium/liquidio/octeon_console.c
drivers/net/ethernet/cavium/liquidio/octeon_device.c
drivers/net/ethernet/cavium/liquidio/octeon_device.h
drivers/net/ethernet/cavium/liquidio/octeon_iq.h
drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
drivers/net/ethernet/cavium/liquidio/octeon_nic.c
drivers/net/ethernet/cavium/liquidio/request_manager.c
drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.h
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/chelsio/cxgb/sge.c
drivers/net/ethernet/chelsio/cxgb3/l2t.c
drivers/net/ethernet/chelsio/cxgb3/sge.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/chelsio/cxgb4/sched.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/cirrus/ep93xx_eth.c
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cisco/enic/vnic_dev.c
drivers/net/ethernet/cisco/enic/vnic_dev.h
drivers/net/ethernet/cisco/enic/vnic_devcmd.h
drivers/net/ethernet/cisco/enic/vnic_enet.h
drivers/net/ethernet/cisco/enic/vnic_rq.h
drivers/net/ethernet/dec/tulip/de2104x.c
drivers/net/ethernet/dec/tulip/interrupt.c
drivers/net/ethernet/dec/tulip/uli526x.c
drivers/net/ethernet/dec/tulip/winbond-840.c
drivers/net/ethernet/dlink/dl2k.c
drivers/net/ethernet/dlink/sundance.c
drivers/net/ethernet/dnet.c
drivers/net/ethernet/ec_bhf.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/ezchip/nps_enet.c
drivers/net/ethernet/faraday/ftmac100.c
drivers/net/ethernet/fealnx.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fman/fman_memac.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/gianfar.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/hisilicon/hisi_femac.c
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/mal.c
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/fm10k/fm10k.h
drivers/net/ethernet/intel/fm10k/fm10k_common.c
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
drivers/net/ethernet/intel/fm10k/fm10k_pf.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_client.c
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_osdep.h
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40e_common.c
drivers/net/ethernet/intel/i40evf/i40e_devids.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
drivers/net/ethernet/intel/i40evf/i40evf.h
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_i210.c
drivers/net/ethernet/intel/igb/e1000_mac.c
drivers/net/ethernet/intel/igb/e1000_phy.c
drivers/net/ethernet/intel/igb/e1000_regs.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/mbx.h
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/jme.h
drivers/net/ethernet/korina.c
drivers/net/ethernet/lantiq_etop.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/marvell/sky2.h
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx4/cq.c
drivers/net/ethernet/mellanox/mlx4/en_clock.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_port.h
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/cq.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/qp.c
drivers/net/ethernet/mellanox/mlx5/core/uar.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlxsw/Kconfig
drivers/net/ethernet/mellanox/mlxsw/Makefile
drivers/net/ethernet/mellanox/mlxsw/cmd.h
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/core.h
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/i2c.c
drivers/net/ethernet/mellanox/mlxsw/item.h
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/resources.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
drivers/net/ethernet/mellanox/mlxsw/trap.h
drivers/net/ethernet/micrel/ks8695net.c
drivers/net/ethernet/micrel/ks8851.c
drivers/net/ethernet/micrel/ks8851_mll.c
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/microchip/enc28j60.c
drivers/net/ethernet/microchip/encx24j600.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/natsemi/natsemi.c
drivers/net/ethernet/natsemi/ns83820.c
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/netronome/Kconfig
drivers/net/ethernet/netronome/Makefile
drivers/net/ethernet/netronome/nfp/Makefile
drivers/net/ethernet/netronome/nfp/nfp_bpf.h
drivers/net/ethernet/netronome/nfp/nfp_main.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfp_main.h [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfp_net.h
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
drivers/net/ethernet/netronome/nfp/nfp_net_main.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c [new file with mode: 0644]
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c [new file with mode: 0644]
drivers/net/ethernet/nuvoton/w90p910_ether.c
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/packetengines/hamachi.c
drivers/net/ethernet/pasemi/pasemi_mac.c
drivers/net/ethernet/qlogic/Kconfig
drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qed/Makefile
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_cxt.c
drivers/net/ethernet/qlogic/qed/qed_cxt.h
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.h
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_dev_api.h
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_hw.c
drivers/net/ethernet/qlogic/qed/qed_hw.h
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
drivers/net/ethernet/qlogic/qed/qed_init_ops.c
drivers/net/ethernet/qlogic/qed/qed_init_ops.h
drivers/net/ethernet/qlogic/qed/qed_int.c
drivers/net/ethernet/qlogic/qed/qed_int.h
drivers/net/ethernet/qlogic/qed/qed_iscsi.c
drivers/net/ethernet/qlogic/qed/qed_iscsi.h
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_l2.h
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_ll2.h
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_mcp.h
drivers/net/ethernet/qlogic/qed/qed_ooo.c
drivers/net/ethernet/qlogic/qed/qed_ooo.h
drivers/net/ethernet/qlogic/qed/qed_ptp.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_ptp.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/qlogic/qed/qed_roce.h
drivers/net/ethernet/qlogic/qed/qed_selftest.c
drivers/net/ethernet/qlogic/qed/qed_sp.h
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
drivers/net/ethernet/qlogic/qed/qed_spq.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qed/qed_sriov.h
drivers/net/ethernet/qlogic/qed/qed_vf.c
drivers/net/ethernet/qlogic/qed/qed_vf.h
drivers/net/ethernet/qlogic/qede/Makefile
drivers/net/ethernet/qlogic/qede/qede.h
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
drivers/net/ethernet/qlogic/qede/qede_filter.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/qede_fp.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qede/qede_ptp.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/qede_ptp.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qede/qede_roce.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/qualcomm/emac/Makefile
drivers/net/ethernet/qualcomm/emac/emac-ethtool.c [new file with mode: 0644]
drivers/net/ethernet/qualcomm/emac/emac-mac.c
drivers/net/ethernet/qualcomm/emac/emac-mac.h
drivers/net/ethernet/qualcomm/emac/emac-phy.c
drivers/net/ethernet/qualcomm/emac/emac-phy.h
drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c
drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c
drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
drivers/net/ethernet/qualcomm/emac/emac.c
drivers/net/ethernet/qualcomm/emac/emac.h
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/realtek/8139too.c
drivers/net/ethernet/realtek/atp.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/ravb.h
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/rocker/rocker_main.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
drivers/net/ethernet/sfc/bitfield.h
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/ef10_sriov.c
drivers/net/ethernet/sfc/ef10_sriov.h
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/falcon/efx.c
drivers/net/ethernet/sfc/falcon/ethtool.c
drivers/net/ethernet/sfc/falcon/mdio_10g.c
drivers/net/ethernet/sfc/falcon/mdio_10g.h
drivers/net/ethernet/sfc/falcon/net_driver.h
drivers/net/ethernet/sfc/falcon/qt202x_phy.c
drivers/net/ethernet/sfc/falcon/rx.c
drivers/net/ethernet/sfc/falcon/tenxpress.c
drivers/net/ethernet/sfc/falcon/txc43128_phy.c
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/sfc/filter.h
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/sfc/mcdi.h
drivers/net/ethernet/sfc/mcdi_pcol.h
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/sfc/selftest.c
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/sfc/sriov.c
drivers/net/ethernet/sfc/sriov.h
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/smsc/epic100.c
drivers/net/ethernet/smsc/smc91c92_cs.c
drivers/net/ethernet/smsc/smsc9420.c
drivers/net/ethernet/stmicro/Kconfig
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/Makefile
drivers/net/ethernet/stmicro/stmmac/chain_mode.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/descs.h
drivers/net/ethernet/stmicro/stmmac/descs_com.h
drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c [new file with mode: 0644]
drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/dwmac100.h
drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac4.h
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/stmicro/stmmac/mmc.h
drivers/net/ethernet/stmicro/stmmac/mmc_core.c
drivers/net/ethernet/stmicro/stmmac/norm_desc.c
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
drivers/net/ethernet/sun/Kconfig
drivers/net/ethernet/sun/ldmvsw.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ethernet/sun/sunvnet_common.c
drivers/net/ethernet/synopsys/Kconfig [deleted file]
drivers/net/ethernet/synopsys/Makefile [deleted file]
drivers/net/ethernet/synopsys/dwc_eth_qos.c [deleted file]
drivers/net/ethernet/tehuti/tehuti.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/ti/cpsw_ale.h
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/ti/davinci_cpdma.h
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/ti/netcp.h
drivers/net/ethernet/ti/netcp_core.c
drivers/net/ethernet/ti/netcp_ethss.c
drivers/net/ethernet/tile/tilegx.c
drivers/net/ethernet/tile/tilepro.c
drivers/net/ethernet/toshiba/ps3_gelic_net.c
drivers/net/ethernet/toshiba/spider_net.c
drivers/net/ethernet/toshiba/tc35815.c
drivers/net/ethernet/tundra/tsi108_eth.c
drivers/net/ethernet/via/via-rhine.c
drivers/net/ethernet/via/via-velocity.c
drivers/net/ethernet/wiznet/w5100.c
drivers/net/ethernet/wiznet/w5300.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/fddi/skfp/cfm.c
drivers/net/fddi/skfp/drvfbi.c
drivers/net/fddi/skfp/ecm.c
drivers/net/fddi/skfp/ess.c
drivers/net/fddi/skfp/fplustm.c
drivers/net/fddi/skfp/h/cmtdef.h
drivers/net/fddi/skfp/h/hwmtm.h
drivers/net/fddi/skfp/hwmtm.c
drivers/net/fddi/skfp/pcmplc.c
drivers/net/fddi/skfp/pmf.c
drivers/net/fddi/skfp/rmt.c
drivers/net/fddi/skfp/smt.c
drivers/net/fddi/skfp/srf.c
drivers/net/fjes/fjes_main.c
drivers/net/gtp.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ifb.c
drivers/net/ipvlan/Makefile
drivers/net/ipvlan/ipvlan.h
drivers/net/ipvlan/ipvlan_core.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/ipvlan/ipvtap.c [new file with mode: 0644]
drivers/net/irda/au1k_ir.c
drivers/net/irda/bfin_sir.c
drivers/net/irda/sh_sir.c
drivers/net/loopback.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/macvtap.c
drivers/net/mdio.c
drivers/net/nlmon.c
drivers/net/phy/Makefile
drivers/net/phy/bcm7xxx.c
drivers/net/phy/broadcom.c
drivers/net/phy/dp83867.c
drivers/net/phy/marvell.c
drivers/net/phy/mdio-boardinfo.c [new file with mode: 0644]
drivers/net/phy/mdio-boardinfo.h [new file with mode: 0644]
drivers/net/phy/mdio-gpio.c
drivers/net/phy/mdio-xgene.c
drivers/net/phy/mdio-xgene.h
drivers/net/phy/mdio_bus.c
drivers/net/phy/mdio_device.c
drivers/net/phy/mscc.c
drivers/net/ppp/ppp_generic.c
drivers/net/slip/slip.c
drivers/net/tap.c [new file with mode: 0644]
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/lan78xx.c
drivers/net/usb/r8152.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wan/fsl_ucc_hdlc.c
drivers/net/wan/hd64572.c
drivers/net/wan/slic_ds26522.c
drivers/net/wireless/admtek/adm8211.c
drivers/net/wireless/ath/ath10k/Kconfig
drivers/net/wireless/ath/ath10k/ahb.c
drivers/net/wireless/ath/ath10k/ce.c
drivers/net/wireless/ath/ath10k/ce.h
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/core.h
drivers/net/wireless/ath/ath10k/debug.c
drivers/net/wireless/ath/ath10k/debug.h
drivers/net/wireless/ath/ath10k/debugfs_sta.c
drivers/net/wireless/ath/ath10k/htc.c
drivers/net/wireless/ath/ath10k/htc.h
drivers/net/wireless/ath/ath10k/htt.h
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath10k/htt_tx.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/mac.h
drivers/net/wireless/ath/ath10k/p2p.c
drivers/net/wireless/ath/ath10k/pci.c
drivers/net/wireless/ath/ath10k/pci.h
drivers/net/wireless/ath/ath10k/spectral.c
drivers/net/wireless/ath/ath10k/testmode.c
drivers/net/wireless/ath/ath10k/wmi-tlv.c
drivers/net/wireless/ath/ath10k/wmi-tlv.h
drivers/net/wireless/ath/ath10k/wmi.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/ath5k/ahb.c
drivers/net/wireless/ath/ath5k/mac80211-ops.c
drivers/net/wireless/ath/ath6kl/core.h
drivers/net/wireless/ath/ath6kl/main.c
drivers/net/wireless/ath/ath6kl/sdio.c
drivers/net/wireless/ath/ath6kl/txrx.c
drivers/net/wireless/ath/ath9k/Kconfig
drivers/net/wireless/ath/ath9k/Makefile
drivers/net/wireless/ath/ath9k/ar5008_phy.c
drivers/net/wireless/ath/ath9k/ar9002_hw.c
drivers/net/wireless/ath/ath9k/ar9002_mac.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
drivers/net/wireless/ath/ath9k/ar9003_mac.c
drivers/net/wireless/ath/ath9k/ath9k.h
drivers/net/wireless/ath/ath9k/channel.c
drivers/net/wireless/ath/ath9k/common-debug.h
drivers/net/wireless/ath/ath9k/common-spectral.c
drivers/net/wireless/ath/ath9k/common-spectral.h
drivers/net/wireless/ath/ath9k/debug.c
drivers/net/wireless/ath/ath9k/debug.h
drivers/net/wireless/ath/ath9k/debug_sta.c
drivers/net/wireless/ath/ath9k/eeprom.c
drivers/net/wireless/ath/ath9k/eeprom.h
drivers/net/wireless/ath/ath9k/eeprom_4k.c
drivers/net/wireless/ath/ath9k/eeprom_9287.c
drivers/net/wireless/ath/ath9k/eeprom_def.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/ath/ath9k/link.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/mac.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/ath/wcn36xx/Kconfig
drivers/net/wireless/ath/wcn36xx/dxe.c
drivers/net/wireless/ath/wcn36xx/hal.h
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/ath/wcn36xx/smd.c
drivers/net/wireless/ath/wcn36xx/smd.h
drivers/net/wireless/ath/wcn36xx/txrx.c
drivers/net/wireless/ath/wcn36xx/wcn36xx.h
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/ath/wil6210/debugfs.c
drivers/net/wireless/ath/wil6210/ethtool.c
drivers/net/wireless/ath/wil6210/fw.c
drivers/net/wireless/ath/wil6210/fw_inc.c
drivers/net/wireless/ath/wil6210/interrupt.c
drivers/net/wireless/ath/wil6210/main.c
drivers/net/wireless/ath/wil6210/netdev.c
drivers/net/wireless/ath/wil6210/p2p.c
drivers/net/wireless/ath/wil6210/pcie_bus.c
drivers/net/wireless/ath/wil6210/pm.c
drivers/net/wireless/ath/wil6210/pmc.c
drivers/net/wireless/ath/wil6210/rx_reorder.c
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/ath/wil6210/wil6210.h
drivers/net/wireless/ath/wil6210/wil_crash_dump.c
drivers/net/wireless/ath/wil6210/wmi.c
drivers/net/wireless/ath/wil6210/wmi.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
drivers/net/wireless/intel/iwlegacy/3945-mac.c
drivers/net/wireless/intel/iwlwifi/Kconfig
drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/dvm/rs.c
drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
drivers/net/wireless/intel/iwlwifi/iwl-6000.c
drivers/net/wireless/intel/iwlwifi/iwl-7000.c
drivers/net/wireless/intel/iwlwifi/iwl-8000.c
drivers/net/wireless/intel/iwlwifi/iwl-9000.c
drivers/net/wireless/intel/iwlwifi/iwl-a000.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.h
drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
drivers/net/wireless/intel/iwlwifi/iwl-fw.h
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/power.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rx.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/intersil/orinoco/main.c
drivers/net/wireless/intersil/orinoco/orinoco.h
drivers/net/wireless/intersil/orinoco/orinoco_usb.c
drivers/net/wireless/marvell/libertas/cfg.c
drivers/net/wireless/marvell/libertas/cmd.c
drivers/net/wireless/marvell/mwifiex/11n_aggr.c
drivers/net/wireless/marvell/mwifiex/cfg80211.c
drivers/net/wireless/marvell/mwifiex/debugfs.c
drivers/net/wireless/marvell/mwifiex/decl.h
drivers/net/wireless/marvell/mwifiex/fw.h
drivers/net/wireless/marvell/mwifiex/init.c
drivers/net/wireless/marvell/mwifiex/main.c
drivers/net/wireless/marvell/mwifiex/main.h
drivers/net/wireless/marvell/mwifiex/pcie.c
drivers/net/wireless/marvell/mwifiex/pcie.h
drivers/net/wireless/marvell/mwifiex/sdio.c
drivers/net/wireless/marvell/mwifiex/sdio.h
drivers/net/wireless/marvell/mwifiex/sta_cmd.c
drivers/net/wireless/marvell/mwifiex/sta_event.c
drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
drivers/net/wireless/marvell/mwifiex/usb.c
drivers/net/wireless/marvell/mwifiex/util.c
drivers/net/wireless/ralink/rt2x00/rt2400pci.c
drivers/net/wireless/ralink/rt2x00/rt2500pci.c
drivers/net/wireless/ralink/rt2x00/rt2500usb.c
drivers/net/wireless/ralink/rt2x00/rt2800.h
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
drivers/net/wireless/ralink/rt2x00/rt2800lib.h
drivers/net/wireless/ralink/rt2x00/rt2800usb.c
drivers/net/wireless/ralink/rt2x00/rt2x00.h
drivers/net/wireless/ralink/rt2x00/rt2x00config.c
drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
drivers/net/wireless/ralink/rt2x00/rt2x00lib.h
drivers/net/wireless/ralink/rt2x00/rt2x00link.c
drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
drivers/net/wireless/ralink/rt2x00/rt2x00soc.c
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
drivers/net/wireless/ralink/rt2x00/rt61pci.c
drivers/net/wireless/ralink/rt2x00/rt73usb.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
drivers/net/wireless/realtek/rtlwifi/base.c
drivers/net/wireless/realtek/rtlwifi/base.h
drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h
drivers/net/wireless/realtek/rtlwifi/cam.c
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/net/wireless/realtek/rtlwifi/debug.c
drivers/net/wireless/realtek/rtlwifi/debug.h
drivers/net/wireless/realtek/rtlwifi/efuse.c
drivers/net/wireless/realtek/rtlwifi/efuse.h
drivers/net/wireless/realtek/rtlwifi/pci.c
drivers/net/wireless/realtek/rtlwifi/pci.h
drivers/net/wireless/realtek/rtlwifi/ps.c
drivers/net/wireless/realtek/rtlwifi/rc.c
drivers/net/wireless/realtek/rtlwifi/regd.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h
drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h
drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
drivers/net/wireless/realtek/rtlwifi/usb.c
drivers/net/wireless/realtek/rtlwifi/usb.h
drivers/net/wireless/realtek/rtlwifi/wifi.h
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/rsi/rsi_91x_mac80211.c
drivers/net/wireless/st/cw1200/sta.c
drivers/net/wireless/ti/wl1251/event.c
drivers/net/wireless/ti/wlcore/debugfs.c
drivers/net/wireless/ti/wlcore/event.c
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/sdio.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netfront.c
drivers/pci/pci-driver.c
drivers/ptp/ptp_clock.c
drivers/ptp/ptp_private.h
drivers/ptp/ptp_sysfs.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.h
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/fcoe/fcoe.c
drivers/staging/ks7010/ks_hostif.c
drivers/staging/netlogic/xlr_net.c
drivers/staging/octeon/ethernet-rx.c
drivers/staging/octeon/ethernet-tx.c
drivers/staging/rtl8192e/rtllib_rx.c
drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
drivers/staging/unisys/visornic/visornic_main.c
drivers/staging/wlan-ng/hfa384x_usb.c
drivers/staging/wlan-ng/p80211netdev.c
drivers/target/iscsi/iscsi_target_login.c
drivers/vhost/Kconfig
drivers/vhost/net.c
drivers/vhost/vhost.c
drivers/virtio/virtio.c
fs/afs/callback.c
fs/afs/cmservice.c
fs/afs/file.c
fs/afs/fsclient.c
fs/afs/internal.h
fs/afs/main.c
fs/afs/netdevices.c
fs/afs/rxrpc.c
fs/afs/vlclient.c
fs/afs/vlocation.c
fs/afs/vnode.c
fs/afs/volume.c
fs/afs/write.c
fs/gfs2/glock.c
include/dt-bindings/net/mscc-phy-vsc8531.h [new file with mode: 0644]
include/linux/bitfield.h
include/linux/bpf.h
include/linux/bpf_trace.h [new file with mode: 0644]
include/linux/brcmphy.h
include/linux/can/dev.h
include/linux/can/rx-offload.h [new file with mode: 0644]
include/linux/device.h
include/linux/etherdevice.h
include/linux/filter.h
include/linux/ieee80211.h
include/linux/if_bridge.h
include/linux/if_frad.h
include/linux/if_macvlan.h
include/linux/if_tap.h [new file with mode: 0644]
include/linux/ipv6.h
include/linux/list.h
include/linux/marvell_phy.h
include/linux/mdio.h
include/linux/mlx4/device.h
include/linux/mlx5/cq.h
include/linux/mlx5/device.h
include/linux/mlx5/doorbell.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/qp.h
include/linux/mlx5/vport.h
include/linux/mmc/sdio_ids.h
include/linux/mod_devicetable.h
include/linux/mroute.h
include/linux/mroute6.h
include/linux/netdev_features.h
include/linux/netdevice.h
include/linux/netfilter/nfnetlink.h
include/linux/netfilter/x_tables.h
include/linux/parman.h [new file with mode: 0644]
include/linux/pci.h
include/linux/phy.h
include/linux/qed/common_hsi.h
include/linux/qed/eth_common.h
include/linux/qed/iscsi_common.h
include/linux/qed/qed_chain.h
include/linux/qed/qed_eth_if.h
include/linux/qed/qed_if.h
include/linux/qed/qed_iov_if.h
include/linux/qed/qed_iscsi_if.h
include/linux/qed/qed_ll2_if.h
include/linux/qed/qed_roce_if.h
include/linux/qed/qede_roce.h
include/linux/qed/rdma_common.h
include/linux/qed/roce_common.h
include/linux/qed/storage_common.h
include/linux/qed/tcp_common.h
include/linux/rfkill-regulator.h [deleted file]
include/linux/rhashtable.h
include/linux/sctp.h
include/linux/siphash.h [new file with mode: 0644]
include/linux/skbuff.h
include/linux/soc/qcom/smem_state.h
include/linux/soc/ti/knav_dma.h
include/linux/socket.h
include/linux/stmmac.h
include/linux/tcp.h
include/linux/trace_events.h
include/linux/uuid.h
include/linux/virtio.h
include/net/act_api.h
include/net/addrconf.h
include/net/arp.h
include/net/busy_poll.h
include/net/cfg80211.h
include/net/checksum.h
include/net/dsa.h
include/net/dst.h
include/net/dst_ops.h
include/net/flow_dissector.h
include/net/gro_cells.h
include/net/ieee80211_radiotap.h
include/net/if_inet6.h
include/net/ife.h [new file with mode: 0644]
include/net/inet6_connection_sock.h
include/net/inet_common.h
include/net/inet_connection_sock.h
include/net/inet_frag.h
include/net/inet_hashtables.h
include/net/inet_sock.h
include/net/inet_timewait_sock.h
include/net/ip.h
include/net/ip6_fib.h
include/net/ip_fib.h
include/net/ip_tunnels.h
include/net/ip_vs.h
include/net/iw_handler.h
include/net/lwtunnel.h
include/net/mac80211.h
include/net/ndisc.h
include/net/neighbour.h
include/net/netfilter/ipv4/nf_conntrack_ipv4.h
include/net/netfilter/ipv6/nf_conntrack_ipv6.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_core.h
include/net/netfilter/nf_conntrack_l4proto.h
include/net/netfilter/nf_log.h
include/net/netfilter/nf_tables.h
include/net/netlink.h
include/net/netns/conntrack.h
include/net/netns/ipv4.h
include/net/netns/sctp.h
include/net/pkt_cls.h
include/net/psample.h [new file with mode: 0644]
include/net/request_sock.h
include/net/rtnetlink.h
include/net/sch_generic.h
include/net/sctp/constants.h
include/net/sctp/sctp.h
include/net/sctp/sm.h
include/net/sctp/structs.h
include/net/smc.h [new file with mode: 0644]
include/net/sock.h
include/net/switchdev.h
include/net/tc_act/tc_ife.h
include/net/tc_act/tc_pedit.h
include/net/tc_act/tc_sample.h [new file with mode: 0644]
include/net/tcp.h
include/net/udp.h
include/net/xfrm.h
include/rdma/ib_addr.h
include/trace/events/afs.h [new file with mode: 0644]
include/trace/events/bpf.h [new file with mode: 0644]
include/trace/events/rxrpc.h
include/trace/events/xdp.h [new file with mode: 0644]
include/trace/trace_events.h
include/uapi/linux/Kbuild
include/uapi/linux/batman_adv.h
include/uapi/linux/bpf.h
include/uapi/linux/can/netlink.h
include/uapi/linux/devlink.h
include/uapi/linux/if_bridge.h
include/uapi/linux/if_link.h
include/uapi/linux/ife.h [new file with mode: 0644]
include/uapi/linux/igmp.h
include/uapi/linux/ipv6.h
include/uapi/linux/mpls.h
include/uapi/linux/neighbour.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/netfilter/nfnetlink.h
include/uapi/linux/netlink.h
include/uapi/linux/nl80211.h
include/uapi/linux/openvswitch.h
include/uapi/linux/pkt_cls.h
include/uapi/linux/psample.h [new file with mode: 0644]
include/uapi/linux/rds.h
include/uapi/linux/rtnetlink.h
include/uapi/linux/sctp.h
include/uapi/linux/seg6.h
include/uapi/linux/seg6_hmac.h
include/uapi/linux/seg6_iptunnel.h
include/uapi/linux/smc.h [new file with mode: 0644]
include/uapi/linux/smc_diag.h [new file with mode: 0644]
include/uapi/linux/snmp.h
include/uapi/linux/tc_act/Kbuild
include/uapi/linux/tc_act/tc_csum.h
include/uapi/linux/tc_act/tc_ife.h
include/uapi/linux/tc_act/tc_pedit.h
include/uapi/linux/tc_act/tc_sample.h [new file with mode: 0644]
include/uapi/linux/tcp.h
include/uapi/linux/tipc.h
include/uapi/linux/un.h
include/uapi/rdma/mlx5-abi.h
kernel/bpf/Makefile
kernel/bpf/arraymap.c
kernel/bpf/bpf_lru_list.c
kernel/bpf/core.c
kernel/bpf/hashtab.c
kernel/bpf/helpers.c
kernel/bpf/inode.c
kernel/bpf/lpm_trie.c [new file with mode: 0644]
kernel/bpf/stackmap.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/extable.c
kernel/kallsyms.c
kernel/trace/bpf_trace.c
kernel/trace/trace_output.c
lib/Kconfig
lib/Kconfig.debug
lib/Makefile
lib/parman.c [new file with mode: 0644]
lib/rhashtable.c
lib/siphash.c [new file with mode: 0644]
lib/test_parman.c [new file with mode: 0644]
lib/test_siphash.c [new file with mode: 0644]
net/6lowpan/nhc.c
net/8021q/vlan_dev.c
net/Kconfig
net/Makefile
net/batman-adv/Makefile
net/batman-adv/bat_algo.c
net/batman-adv/bat_algo.h
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_iv_ogm.h
net/batman-adv/bat_v.c
net/batman-adv/bat_v.h
net/batman-adv/bat_v_elp.c
net/batman-adv/bat_v_elp.h
net/batman-adv/bat_v_ogm.c
net/batman-adv/bat_v_ogm.h
net/batman-adv/bitarray.c
net/batman-adv/bitarray.h
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/debugfs.c
net/batman-adv/debugfs.h
net/batman-adv/distributed-arp-table.c
net/batman-adv/distributed-arp-table.h
net/batman-adv/fragmentation.c
net/batman-adv/fragmentation.h
net/batman-adv/gateway_client.c
net/batman-adv/gateway_client.h
net/batman-adv/gateway_common.c
net/batman-adv/gateway_common.h
net/batman-adv/hard-interface.c
net/batman-adv/hard-interface.h
net/batman-adv/hash.c
net/batman-adv/hash.h
net/batman-adv/icmp_socket.c
net/batman-adv/icmp_socket.h
net/batman-adv/log.c
net/batman-adv/log.h
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/multicast.c
net/batman-adv/multicast.h
net/batman-adv/netlink.c
net/batman-adv/netlink.h
net/batman-adv/network-coding.c
net/batman-adv/network-coding.h
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/packet.h
net/batman-adv/routing.c
net/batman-adv/routing.h
net/batman-adv/send.c
net/batman-adv/send.h
net/batman-adv/soft-interface.c
net/batman-adv/soft-interface.h
net/batman-adv/sysfs.c
net/batman-adv/sysfs.h
net/batman-adv/tp_meter.c
net/batman-adv/tp_meter.h
net/batman-adv/translation-table.c
net/batman-adv/translation-table.h
net/batman-adv/tvlv.c
net/batman-adv/tvlv.h
net/batman-adv/types.h
net/bridge/Makefile
net/bridge/br_device.c
net/bridge/br_fdb.c
net/bridge/br_forward.c
net/bridge/br_if.c
net/bridge/br_input.c
net/bridge/br_ioctl.c
net/bridge/br_mdb.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/br_netlink_tunnel.c [new file with mode: 0644]
net/bridge/br_private.h
net/bridge/br_private_tunnel.h [new file with mode: 0644]
net/bridge/br_stp.c
net/bridge/br_stp_if.c
net/bridge/br_stp_timer.c
net/bridge/br_sysfs_br.c
net/bridge/br_sysfs_if.c
net/bridge/br_vlan.c
net/bridge/br_vlan_tunnel.c [new file with mode: 0644]
net/bridge/netfilter/ebt_limit.c
net/bridge/netfilter/ebt_log.c
net/bridge/netfilter/ebtables.c
net/caif/chnl_net.c
net/compat.c
net/core/Makefile
net/core/dev.c
net/core/devlink.c
net/core/dst.c
net/core/ethtool.c
net/core/filter.c
net/core/flow_dissector.c
net/core/gro_cells.c [new file with mode: 0644]
net/core/lwt_bpf.c
net/core/lwtunnel.c
net/core/netprio_cgroup.c
net/core/pktgen.c
net/core/request_sock.c
net/core/rtnetlink.c
net/core/scm.c
net/core/secure_seq.c
net/core/skbuff.c
net/core/sock.c
net/core/sysctl_net_core.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dsa/Kconfig
net/dsa/Makefile
net/dsa/dsa.c
net/dsa/dsa2.c
net/dsa/dsa_priv.h
net/dsa/slave.c
net/dsa/switch.c [new file with mode: 0644]
net/dsa/tag_brcm.c
net/dsa/tag_dsa.c
net/dsa/tag_edsa.c
net/dsa/tag_qca.c
net/dsa/tag_trailer.c
net/ethernet/eth.c
net/hsr/hsr_slave.c
net/ife/Kconfig [new file with mode: 0644]
net/ife/Makefile [new file with mode: 0644]
net/ife/ife.c [new file with mode: 0644]
net/ipv4/Kconfig
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/ah4.c
net/ipv4/devinet.c
net/ipv4/esp4.c
net/ipv4/esp4_offload.c [new file with mode: 0644]
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/icmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/inet_hashtables.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ipmr.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/netfilter/nf_conntrack_proto_icmp.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/netfilter/nf_dup_ipv4.c
net/ipv4/netfilter/nf_log_arp.c
net/ipv4/netfilter/nf_log_ipv4.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_recovery.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv4/xfrm4_input.c
net/ipv4/xfrm4_mode_transport.c
net/ipv4/xfrm4_policy.c
net/ipv4/xfrm4_protocol.c
net/ipv4/xfrm4_state.c
net/ipv6/Kconfig
net/ipv6/Makefile
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/ah6.c
net/ipv6/esp6.c
net/ipv6/esp6_offload.c [new file with mode: 0644]
net/ipv6/icmp.c
net/ipv6/ila/ila_lwt.c
net/ipv6/inet6_connection_sock.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_vti.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_NPT.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
net/ipv6/netfilter/nf_dup_ipv6.c
net/ipv6/netfilter/nf_log_ipv6.c
net/ipv6/ping.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/seg6_hmac.c
net/ipv6/seg6_iptunnel.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/xfrm6_input.c
net/ipv6/xfrm6_mode_transport.c
net/ipv6/xfrm6_policy.c
net/ipv6/xfrm6_protocol.c
net/l2tp/l2tp_eth.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/mac80211/Kconfig
net/mac80211/aes_cmac.c
net/mac80211/aes_cmac.h
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs.c
net/mac80211/debugfs_netdev.c
net/mac80211/debugfs_sta.c
net/mac80211/fils_aead.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.h
net/mac80211/mesh.c
net/mac80211/mesh.h
net/mac80211/mesh_plink.c
net/mac80211/mesh_sync.c
net/mac80211/mlme.c
net/mac80211/rc80211_minstrel.c
net/mac80211/rc80211_minstrel.h
net/mac80211/rc80211_minstrel_debugfs.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rc80211_minstrel_ht.h
net/mac80211/rc80211_minstrel_ht_debugfs.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/status.c
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/vht.c
net/mac80211/wep.c
net/mac80211/wpa.c
net/mpls/af_mpls.c
net/mpls/internal.h
net/mpls/mpls_iptunnel.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_expect.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_conntrack_proto_udplite.c [deleted file]
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_log.c
net/netfilter/nf_nat_helper.c
net/netfilter/nf_nat_proto_udp.c
net/netfilter/nf_nat_proto_udplite.c [deleted file]
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink.c
net/netfilter/nft_ct.c
net/netfilter/nft_exthdr.c
net/netfilter/nft_meta.c
net/netfilter/nft_set_bitmap.c [new file with mode: 0644]
net/netfilter/nft_set_hash.c
net/netfilter/nft_set_rbtree.c
net/netfilter/x_tables.c
net/netfilter/xt_CT.c
net/netfilter/xt_RATEEST.c
net/netfilter/xt_TEE.c
net/netfilter/xt_bpf.c
net/netfilter/xt_cgroup.c
net/netfilter/xt_connlimit.c
net/netfilter/xt_hashlimit.c
net/netfilter/xt_limit.c
net/netfilter/xt_pkttype.c
net/netfilter/xt_quota.c
net/netfilter/xt_rateest.c
net/netfilter/xt_string.c
net/netlink/af_netlink.c
net/openvswitch/actions.c
net/openvswitch/conntrack.c
net/openvswitch/conntrack.h
net/openvswitch/flow.c
net/openvswitch/flow.h
net/openvswitch/flow_netlink.c
net/openvswitch/flow_netlink.h
net/openvswitch/vport-internal_dev.c
net/packet/af_packet.c
net/packet/diag.c
net/psample/Kconfig [new file with mode: 0644]
net/psample/Makefile [new file with mode: 0644]
net/psample/psample.c [new file with mode: 0644]
net/rds/af_rds.c
net/rds/bind.c
net/rds/connection.c
net/rds/ib.c
net/rds/ib.h
net/rds/ib_cm.c
net/rds/ib_frmr.c
net/rds/ib_recv.c
net/rds/ib_send.c
net/rds/ib_stats.c
net/rds/rdma.c
net/rds/rdma_transport.c
net/rds/rds.h
net/rds/recv.c
net/rds/send.c
net/rds/tcp_listen.c
net/rds/tcp_recv.c
net/rfkill/Kconfig
net/rfkill/Makefile
net/rfkill/core.c
net/rfkill/rfkill-regulator.c [deleted file]
net/rxrpc/Makefile
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/conn_object.c
net/rxrpc/input.c
net/rxrpc/misc.c
net/rxrpc/proc.c
net/rxrpc/sendmsg.c
net/sched/Kconfig
net/sched/Makefile
net/sched/act_api.c
net/sched/act_csum.c
net/sched/act_ife.c
net/sched/act_mirred.c
net/sched/act_pedit.c
net/sched/act_sample.c [new file with mode: 0644]
net/sched/cls_api.c
net/sched/cls_bpf.c
net/sched/cls_flow.c
net/sched/cls_flower.c
net/sched/cls_matchall.c
net/sched/cls_u32.c
net/sched/sch_api.c
net/sched/sch_atm.c
net/sched/sch_cbq.c
net/sched/sch_choke.c
net/sched/sch_dsmark.c
net/sched/sch_fq_codel.c
net/sched/sch_generic.c
net/sched/sch_hhf.c
net/sched/sch_htb.c
net/sched/sch_ingress.c
net/sched/sch_mq.c
net/sched/sch_mqprio.c
net/sched/sch_multiq.c
net/sched/sch_netem.c
net/sched/sch_prio.c
net/sched/sch_sfb.c
net/sched/sch_sfq.c
net/sched/sch_teql.c
net/sctp/Makefile
net/sctp/associola.c
net/sctp/chunk.c
net/sctp/debug.c
net/sctp/endpointola.c
net/sctp/input.c
net/sctp/ipv6.c
net/sctp/objcnt.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/primitive.c
net/sctp/protocol.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/sm_statetable.c
net/sctp/socket.c
net/sctp/ssnmap.c [deleted file]
net/sctp/stream.c [new file with mode: 0644]
net/sctp/transport.c
net/sctp/ulpqueue.c
net/smc/Kconfig [new file with mode: 0644]
net/smc/Makefile [new file with mode: 0644]
net/smc/af_smc.c [new file with mode: 0644]
net/smc/smc.h [new file with mode: 0644]
net/smc/smc_cdc.c [new file with mode: 0644]
net/smc/smc_cdc.h [new file with mode: 0644]
net/smc/smc_clc.c [new file with mode: 0644]
net/smc/smc_clc.h [new file with mode: 0644]
net/smc/smc_close.c [new file with mode: 0644]
net/smc/smc_close.h [new file with mode: 0644]
net/smc/smc_core.c [new file with mode: 0644]
net/smc/smc_core.h [new file with mode: 0644]
net/smc/smc_diag.c [new file with mode: 0644]
net/smc/smc_ib.c [new file with mode: 0644]
net/smc/smc_ib.h [new file with mode: 0644]
net/smc/smc_llc.c [new file with mode: 0644]
net/smc/smc_llc.h [new file with mode: 0644]
net/smc/smc_pnet.c [new file with mode: 0644]
net/smc/smc_pnet.h [new file with mode: 0644]
net/smc/smc_rx.c [new file with mode: 0644]
net/smc/smc_rx.h [new file with mode: 0644]
net/smc/smc_tx.c [new file with mode: 0644]
net/smc/smc_tx.h [new file with mode: 0644]
net/smc/smc_wr.c [new file with mode: 0644]
net/smc/smc_wr.h [new file with mode: 0644]
net/socket.c
net/sunrpc/xprtrdma/svc_rdma_backchannel.c
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/link.c
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_table.c
net/tipc/name_table.h
net/tipc/net.c
net/tipc/node.c
net/tipc/node.h
net/tipc/socket.c
net/tipc/udp_media.c
net/unix/af_unix.c
net/wireless/Makefile
net/wireless/core.c
net/wireless/core.h
net/wireless/debugfs.c
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/nl80211.h
net/wireless/of.c [new file with mode: 0644]
net/wireless/reg.c
net/wireless/scan.c
net/wireless/sme.c
net/wireless/sysfs.c
net/wireless/trace.h
net/wireless/util.c
net/wireless/wext-core.c
net/wireless/wext-sme.c
net/xfrm/Kconfig
net/xfrm/xfrm_input.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
samples/bpf/map_perf_test_kern.c
samples/bpf/map_perf_test_user.c
security/selinux/hooks.c
tools/include/uapi/linux/bpf.h
tools/lib/bpf/bpf.c
tools/lib/bpf/bpf.h
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/event-parse.h
tools/perf/util/scripting-engines/trace-event-perl.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/testing/selftests/bpf/.gitignore
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/bpf_sys.h [deleted file]
tools/testing/selftests/bpf/test_lpm_map.c [new file with mode: 0644]
tools/testing/selftests/bpf/test_lru_map.c
tools/testing/selftests/bpf/test_maps.c
tools/testing/selftests/bpf/test_tag.c [new file with mode: 0644]
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/net/psock_lib.h
tools/testing/selftests/net/psock_tpacket.c

index fb40891ee606ca5330d3c07844ff1fb07101efba..9a734d808aa7eb2919c87d4bc9dd7440bdd7d2c3 100644 (file)
@@ -2,7 +2,7 @@
 
 Required properties:
 
-- compatible: should be "brcm,bcm7445-switch-v4.0"
+- compatible: should be "brcm,bcm7445-switch-v4.0" or "brcm,bcm7278-switch-v4.0"
 - reg: addresses and length of the register sets for the device, must be 6
   pairs of register addresses and lengths
 - interrupts: interrupts for the devices, must be two interrupts
@@ -41,6 +41,13 @@ Optional properties:
   Admission Control Block supports reporting the number of packets in-flight in a
   switch queue
 
+Port subnodes:
+
+Optional properties:
+
+- brcm,use-bcm-hdr: boolean property, if present, indicates that the switch
+  port has Broadcom tags enabled (per-packet metadata)
+
 Example:
 
 switch_top@f0b00000 {
@@ -114,6 +121,7 @@ switch_top@f0b00000 {
                        port@0 {
                                label = "gphy";
                                reg = <0>;
+                               brcm,use-bcm-hdr;
                        };
                        ...
                };
index 877da34145b0e4d3d62f75065484e61bdd9c4749..83f29e0e11bacbda354677876d2b4d9b170999dd 100644 (file)
@@ -1,7 +1,10 @@
 * Broadcom BCM7xxx Ethernet Systemport Controller (SYSTEMPORT)
 
 Required properties:
-- compatible: should be one of "brcm,systemport-v1.00" or "brcm,systemport"
+- compatible: should be one of:
+             "brcm,systemport-v1.00"
+             "brcm,systemportlite-v1.00" or
+             "brcm,systemport"
 - reg: address and length of the register set for the device.
 - interrupts: interrupts for the device, first cell must be for the rx
   interrupts, and the second cell should be for the transmit queues. An
index ebda7c93453ad0abdcecbdc3da3932732d1d9085..7cc15c96ea95ce68c5c9bd81b9c9132b72cd5356 100644 (file)
@@ -23,7 +23,6 @@ Required properties:
 
 Optional properties:
 - ti,hwmods            : Must be "cpgmac0"
-- no_bd_ram            : Must be 0 or 1
 - dual_emac            : Specifies Switch to act as Dual EMAC
 - syscon               : Phandle to the system control device node, which is
                          the control module device of the am33x
@@ -70,7 +69,6 @@ Examples:
                cpdma_channels = <8>;
                ale_entries = <1024>;
                bd_ram_size = <0x2000>;
-               no_bd_ram = <0>;
                rx_descs = <64>;
                mac_control = <0x20>;
                slaves = <2>;
@@ -99,7 +97,6 @@ Examples:
                cpdma_channels = <8>;
                ale_entries = <1024>;
                bd_ram_size = <0x2000>;
-               no_bd_ram = <0>;
                rx_descs = <64>;
                mac_control = <0x20>;
                slaves = <2>;
index a4a570fb2494a13983c216f4a078d2f12a835a01..cfe8f64eca4fbea222f53cfd78b62d7df4e10020 100644 (file)
@@ -34,13 +34,9 @@ Required properties:
 
 Each port children node must have the following mandatory properties:
 - reg                  : Describes the port address in the switch
-- label                        : Describes the label associated with this port, which
-                          will become the netdev name. Special labels are
-                         "cpu" to indicate a CPU port and "dsa" to
-                         indicate an uplink/downlink port between switches in
-                         the cluster.
 
-A port labelled "dsa" has the following mandatory property:
+An uplink/downlink port between switches in the cluster has the following
+mandatory property:
 
 - link                 : Should be a list of phandles to other switch's DSA
                          port. This port is used as the outgoing port
@@ -48,12 +44,17 @@ A port labelled "dsa" has the following mandatory property:
                          information must be given, not just the one hop
                          routes to neighbouring switches.
 
-A port labelled "cpu" has the following mandatory property:
+A CPU port has the following mandatory property:
 
 - ethernet             : Should be a phandle to a valid Ethernet device node.
                           This host device is what the switch port is
                          connected to.
 
+A user port has the following optional property:
+
+- label                        : Describes the label associated with this port, which
+                          will become the netdev name.
+
 Port child nodes may also contain the following optional standardised
 properties, described in binding documents:
 
@@ -107,7 +108,6 @@ linked into one DSA cluster.
 
                        switch0port5: port@5 {
                                reg = <5>;
-                               label = "dsa";
                                phy-mode = "rgmii-txid";
                                link = <&switch1port6
                                        &switch2port9>;
@@ -119,7 +119,6 @@ linked into one DSA cluster.
 
                        port@6 {
                                reg = <6>;
-                               label = "cpu";
                                ethernet = <&fec1>;
                                fixed-link {
                                        speed = <100>;
@@ -165,7 +164,6 @@ linked into one DSA cluster.
 
                        switch1port5: port@5 {
                                reg = <5>;
-                               label = "dsa";
                                link = <&switch2port9>;
                                phy-mode = "rgmii-txid";
                                fixed-link {
@@ -176,7 +174,6 @@ linked into one DSA cluster.
 
                        switch1port6: port@6 {
                                reg = <6>;
-                               label = "dsa";
                                phy-mode = "rgmii-txid";
                                link = <&switch0port5>;
                                fixed-link {
@@ -255,7 +252,6 @@ linked into one DSA cluster.
 
                        switch2port9: port@9 {
                                reg = <9>;
-                               label = "dsa";
                                phy-mode = "rgmii-txid";
                                link = <&switch1port5
                                        &switch0port5>;
index b3dd6b40e0de29a71cdd96627cca55a09215ea9d..7ef9dbb08957a593528a4d873d1f3af29892f5c2 100644 (file)
@@ -14,9 +14,9 @@ The properties described here are those specific to Marvell devices.
 Additional required and optional properties can be found in dsa.txt.
 
 Required properties:
-- compatible          : Should be one of "marvell,mv88e6085" or
-                        "marvell,mv88e6190"
-- reg                  : Address on the MII bus for the switch.
+- compatible           : Should be one of "marvell,mv88e6085" or
+                         "marvell,mv88e6190"
+- reg                  : Address on the MII bus for the switch.
 
 Optional properties:
 
@@ -26,30 +26,67 @@ Optional properties:
 - interrupt-controller : Indicates the switch is itself an interrupt
                          controller. This is used for the PHY interrupts.
 #interrupt-cells = <2> : Controller uses two cells, number and flag
-- mdio                 : container of PHY and devices on the switches MDIO
-                         bus
+- mdio                 : Container of PHY and devices on the switches MDIO
+                         bus.
+- mdio?                : Container of PHYs and devices on the external MDIO
+                         bus. The node must contain a compatible string of
+                         "marvell,mv88e6xxx-mdio-external"
+
 Example:
 
-       mdio {
-               #address-cells = <1>;
-               #size-cells = <0>;
-               interrupt-parent = <&gpio0>;
-               interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
-               interrupt-controller;
-               #interrupt-cells = <2>;
-
-               switch0: switch@0 {
-                       compatible = "marvell,mv88e6085";
-                       reg = <0>;
-                      reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
-               };
-               mdio {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       switch1phy0: switch1phy0@0 {
-                               reg = <0>;
-                               interrupt-parent = <&switch0>;
-                               interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
-                       };
-               };
-       };
+       mdio {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               interrupt-parent = <&gpio0>;
+               interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+
+               switch0: switch@0 {
+                       compatible = "marvell,mv88e6085";
+                       reg = <0>;
+                       reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
+               };
+               mdio {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       switch1phy0: switch1phy0@0 {
+                               reg = <0>;
+                               interrupt-parent = <&switch0>;
+                               interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+                       };
+               };
+       };
+
+       mdio {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               interrupt-parent = <&gpio0>;
+               interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+
+               switch0: switch@0 {
+                       compatible = "marvell,mv88e6390";
+                       reg = <0>;
+                       reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
+               };
+               mdio {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       switch1phy0: switch1phy0@0 {
+                               reg = <0>;
+                               interrupt-parent = <&switch0>;
+                               interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+                       };
+               };
+
+               mdio1 {
+                       compatible = "marvell,mv88e6xxx-mdio-external";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       switch1phy9: switch1phy9@9 {
+                               reg = <9>;
+                       };
+               };
+       };
index 05150957ecfda1bdee82f716fc7a00c70f9aa713..3a6916909d90193d57af64ff1fd63ef9c875f3fd 100644 (file)
@@ -29,6 +29,9 @@ The following properties are common to the Ethernet controllers:
   * "smii"
   * "xgmii"
   * "trgmii"
+  * "2000base-x",
+  * "2500base-x",
+  * "rxaui"
 - phy-connection-type: the same as "phy-mode" property but described in ePAPR;
 - phy-handle: phandle, specifies a reference to a node representing a PHY
   device; this property is described in ePAPR and so preferred;
index 89e62ddc69caddeff18cef8a1a4447226c3755f5..0703ad3f3c1eba631905102c6f9b77c49cfb764e 100644 (file)
@@ -25,6 +25,22 @@ Required properties on Meson8b and newer:
                - "clkin0" - first parent clock of the internal mux
                - "clkin1" - second parent clock of the internal mux
 
+Optional properties on Meson8b and newer:
+- amlogic,tx-delay-ns: The internal RGMII TX clock delay (provided
+                       by this driver) in nanoseconds. Allowed values
+                       are: 0ns, 2ns, 4ns, 6ns.
+                       When phy-mode is set to "rgmii" then the TX
+                       delay should be explicitly configured. When
+                       not configured a fallback of 2ns is used.
+                       When the phy-mode is set to either "rgmii-id"
+                       or "rgmii-txid" the TX clock delay is already
+                       provided by the PHY. In that case this
+                       property should be set to 0ns (which disables
+                       the TX clock delay in the MAC to prevent the
+                       clock from going off because both PHY and MAC
+                       are adding a delay).
+                       Any configuration is ignored when the phy-mode
+                       is set to "rmii".
 
 Example for Meson6:
 
index bdefefc665949225d77e4c5c22cde5dd6b678069..0eedabe22cc3db8c79410b197ca0e8f0afc45710 100644 (file)
@@ -27,6 +27,14 @@ Optional properties:
                          'vddmac'.
                          Default value is 0%.
                          Ref: Table:1 - Edge rate change (below).
+- vsc8531,led-0-mode   : LED mode. Specify how the LED[0] should behave.
+                         Allowed values are defined in
+                         "include/dt-bindings/net/mscc-phy-vsc8531.h".
+                         Default value is VSC8531_LINK_1000_ACTIVITY (1).
+- vsc8531,led-1-mode   : LED mode. Specify how the LED[1] should behave.
+                         Allowed values are defined in
+                         "include/dt-bindings/net/mscc-phy-vsc8531.h".
+                         Default value is VSC8531_LINK_100_ACTIVITY (2).
 
 Table: 1 - Edge rate change
 ----------------------------------------------------------------|
@@ -60,4 +68,6 @@ Example:
                 compatible = "ethernet-phy-id0007.0570";
                 vsc8531,vddmac         = <3300>;
                 vsc8531,edge-slowdown  = <7>;
+                vsc8531,led-0-mode     = <LINK_1000_ACTIVITY>;
+                vsc8531,led-1-mode     = <LINK_100_ACTIVITY>;
         };
index fb5056b22685c249c9bf812a1ada038070d8c03d..b55857696fc315dc5c1cfad52bb4f40d152e9b8d 100644 (file)
@@ -39,6 +39,10 @@ Optional Properties:
 - enet-phy-lane-swap: If set, indicates the PHY will swap the TX/RX lanes to
   compensate for the board being designed with the lanes swapped.
 
+- enet-phy-lane-no-swap: If set, indicates that PHY will disable swap of the
+  TX/RX lanes. This property allows the PHY to work correctly after e.g. a
+  wrong bootstrap configuration caused by issues in the PCB layout design.
+
 - eee-broken-100tx:
 - eee-broken-1000t:
 - eee-broken-10gt:
index d93f71ce834649f8b79e69263fab26afb6676866..21d27aa4c68c53265436027d0c3c1c50cdaa402c 100644 (file)
@@ -1,5 +1,8 @@
 * Synopsys DWC Ethernet QoS IP version 4.10 driver (GMAC)
 
+This binding is deprecated, but it continues to be supported; new
+features should preferably be added to the stmmac binding document.
+
 This binding supports the Synopsys Designware Ethernet QoS (Quality Of Service)
 IP block. The IP supports multiple options for bus type, clocking and reset
 structure, and feature list. Consequently, a number of properties and list
index 128da752fec95bae1e48a987009a4565a1791562..d3bfc2b30fb5ecc07493b4c5510d5cdc32b3ff3a 100644 (file)
@@ -49,6 +49,8 @@ Optional properties:
 - snps,force_sf_dma_mode       Force DMA to use the Store and Forward
                                mode for both tx and rx. This flag is
                                ignored if force_thresh_dma_mode is set.
+- snps,en-tx-lpi-clockgating   Enable gating of the MAC TX clock during
+                               TX low-power mode
 - snps,multicast-filter-bins:  Number of multicast filter hash bins
                                supported by this device instance
 - snps,perfect-filter-entries: Number of perfect filter entries supported
@@ -65,7 +67,6 @@ Optional properties:
        - snps,wr_osr_lmt: max write outstanding req. limit
        - snps,rd_osr_lmt: max read outstanding req. limit
        - snps,kbbe: do not cross 1KiB boundary.
-       - snps,axi_all: align address
        - snps,blen: this is a vector of supported burst length.
        - snps,fb: fixed-burst
        - snps,mb: mixed-burst
diff --git a/Documentation/devicetree/bindings/net/wireless/ieee80211.txt b/Documentation/devicetree/bindings/net/wireless/ieee80211.txt
new file mode 100644 (file)
index 0000000..f6442b1
--- /dev/null
@@ -0,0 +1,24 @@
+Common IEEE 802.11 properties
+
+This provides documentation of common properties that are valid for all wireless
+devices.
+
+Optional properties:
+ - ieee80211-freq-limit : list of supported frequency ranges in kHz. This can
+       be used for devices that in a given configuration support fewer
+       channels than they normally would. A chipset may support a wide
+       wireless band but be limited to only part of it due to the antennas or
+       power amplifier used. An example is a tri-band wireless router with
+       two identical chipsets used for two different 5 GHz subbands. Using
+       them outside those subbands may not work or may noticeably degrade
+       performance.
+
+Example:
+
+pcie@0,0 {
+       reg = <0x0000 0 0 0 0>;
+       wifi@0,0 {
+               reg = <0x0000 0 0 0 0>;
+               ieee80211-freq-limit = <2402000 2482000>,
+                                      <5170000 5250000>;
+       };
+};
index b1e149ea6feeb9debed0f51208033eeee35219f4..eca534ab617259a63bdf0d63f6a14319dedc7c0d 100644 (file)
@@ -44,6 +44,9 @@ Device registration
 .. kernel-doc:: include/net/cfg80211.h
    :functions: wiphy_new
 
+.. kernel-doc:: include/net/cfg80211.h
+   :functions: wiphy_read_of_freq_limits
+
 .. kernel-doc:: include/net/cfg80211.h
    :functions: wiphy_register
 
index ffef91c4e0d690312fc8eb176121c76d1522fa43..060da408923b26bea0017d86ada9369ae8cd91f4 100644 (file)
@@ -64,8 +64,7 @@ USAGE
 When inserting the driver modules the root cell must be specified along with a
 list of volume location server IP addresses:
 
-       modprobe af_rxrpc
-       modprobe rxkad
+       modprobe rxrpc
        modprobe kafs rootcell=cambridge.redhat.com:172.16.18.73:172.16.18.91
 
 The first module is the AF_RXRPC network protocol driver.  This provides the
@@ -214,34 +213,3 @@ If a file is opened with a particular key and then the file descriptor is
 passed to a process that doesn't have that key (perhaps over an AF_UNIX
 socket), then the operations on the file will be made with key that was used to
 open the file.
-
-
-========
-EXAMPLES
-========
-
-Here's what I use to test this.  Some of the names and IP addresses are local
-to my internal DNS.  My "root.afs" partition has a mount point within it for
-some public volumes volumes.
-
-insmod /tmp/rxrpc.o
-insmod /tmp/rxkad.o
-insmod /tmp/kafs.o rootcell=cambridge.redhat.com:172.16.18.91
-
-mount -t afs \%root.afs. /afs
-mount -t afs \%cambridge.redhat.com:root.cell. /afs/cambridge.redhat.com/
-
-echo add grand.central.org 18.9.48.14:128.2.203.61:130.237.48.87 > /proc/fs/afs/cells
-mount -t afs "#grand.central.org:root.cell." /afs/grand.central.org/
-mount -t afs "#grand.central.org:root.archive." /afs/grand.central.org/archive
-mount -t afs "#grand.central.org:root.contrib." /afs/grand.central.org/contrib
-mount -t afs "#grand.central.org:root.doc." /afs/grand.central.org/doc
-mount -t afs "#grand.central.org:root.project." /afs/grand.central.org/project
-mount -t afs "#grand.central.org:root.service." /afs/grand.central.org/service
-mount -t afs "#grand.central.org:root.software." /afs/grand.central.org/software
-mount -t afs "#grand.central.org:root.user." /afs/grand.central.org/user
-
-umount /afs
-rmmod kafs
-rmmod rxkad
-rmmod rxrpc
index 63912ef346069b228b984c2f2d1a70f0c9c6ffc0..b8b40753133e75a548400b2035efd72ef105b2f9 100644 (file)
@@ -295,7 +295,6 @@ DSA currently leverages the following subsystems:
 - MDIO/PHY library: drivers/net/phy/phy.c, mdio_bus.c
 - Switchdev: net/switchdev/*
 - Device Tree for various of_* functions
-- HWMON: drivers/hwmon/*
 
 MDIO/PHY library
 ----------------
@@ -349,12 +348,6 @@ Documentation/devicetree/bindings/net/dsa/dsa.txt. PHY/MDIO library helper
 functions such as of_get_phy_mode(), of_phy_connect() are also used to query
 per-port PHY specific details: interface connection, MDIO bus location etc..
 
-HWMON
------
-
-Some switch drivers feature internal temperature sensors which are exposed as
-regular HWMON devices in /sys/class/hwmon/.
-
 Driver development
 ==================
 
@@ -495,23 +488,6 @@ Power management
   BR_STATE_DISABLED and propagating changes to the hardware if this port is
   disabled while being a bridge member
 
-Hardware monitoring
--------------------
-
-These callbacks are only available if CONFIG_NET_DSA_HWMON is enabled:
-
-- get_temp: this function queries the given switch for its temperature
-
-- get_temp_limit: this function returns the switch current maximum temperature
-  limit
-
-- set_temp_limit: this function configures the maximum temperature limit allowed
-
-- get_temp_alarm: this function returns the critical temperature threshold
-  returning an alarm notification
-
-See Documentation/hwmon/sysfs-interface for details.
-
 Bridge layer
 ------------
 
index 7dd65c9cf7072661a22f0961753850c1dd7c74b4..fc73eeb7b3b8b119083a03a42e0046a564ea2f0e 100644 (file)
@@ -246,21 +246,12 @@ tcp_dsack - BOOLEAN
        Allows TCP to send "duplicate" SACKs.
 
 tcp_early_retrans - INTEGER
-       Enable Early Retransmit (ER), per RFC 5827. ER lowers the threshold
-       for triggering fast retransmit when the amount of outstanding data is
-       small and when no previously unsent data can be transmitted (such
-       that limited transmit could be used). Also controls the use of
-       Tail loss probe (TLP) that converts RTOs occurring due to tail
-       losses into fast recovery (draft-dukkipati-tcpm-tcp-loss-probe-01).
+       Tail loss probe (TLP) converts RTOs occurring due to tail
+       losses into fast recovery (draft-ietf-tcpm-rack). Note that
+       TLP requires RACK to function properly (see tcp_recovery below).
        Possible values:
-               0 disables ER
-               1 enables ER
-               2 enables ER but delays fast recovery and fast retransmit
-                 by a fourth of RTT. This mitigates connection falsely
-                 recovers when network has a small degree of reordering
-                 (less than 3 packets).
-               3 enables delayed ER and TLP.
-               4 enables TLP only.
+               0 disables TLP
+               3 or 4 enables TLP
        Default: 3
 
 tcp_ecn - INTEGER
@@ -712,18 +703,6 @@ tcp_thin_linear_timeouts - BOOLEAN
        Documentation/networking/tcp-thin.txt
        Default: 0
 
-tcp_thin_dupack - BOOLEAN
-       Enable dynamic triggering of retransmissions after one dupACK
-       for thin streams. If set, a check is performed upon reception
-       of a dupACK to determine if the stream is thin (less than 4
-       packets in flight). As long as the stream is found to be thin,
-       data is retransmitted on the first received dupACK. This
-       improves retransmission latency for non-aggressive thin
-       streams, often found to be time-dependent.
-       For more information on thin streams, see
-       Documentation/networking/tcp-thin.txt
-       Default: 0
-
 tcp_limit_output_bytes - INTEGER
        Controls TCP Small Queue limit per tcp socket.
        TCP bulk sender tends to increase packets in flight until it
@@ -742,6 +721,13 @@ tcp_challenge_ack_limit - INTEGER
 
 UDP variables:
 
+udp_l3mdev_accept - BOOLEAN
+       Enabling this option allows a "global" bound socket to work
+       across L3 master domains (e.g., VRFs) with packets capable of
+       being received regardless of the L3 domain in which they
+       originated. Only valid when the kernel was compiled with
+       CONFIG_NET_L3_MASTER_DEV.
+
 udp_mem - vector of 3 INTEGERs: min, pressure, max
        Number of pages allowed for queueing by all UDP sockets.
 
@@ -843,6 +829,15 @@ ip_local_reserved_ports - list of comma separated ranges
 
        Default: Empty
 
+ip_unprivileged_port_start - INTEGER
+       This is a per-namespace sysctl.  It defines the first
+       unprivileged port in the network namespace.  Privileged ports
+       require root or CAP_NET_BIND_SERVICE in order to bind to them.
+       To disable all privileged ports, set this to 0.  It may not
+       overlap with the ip_local_reserved_ports range.
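+       For example, setting this to 80 allows unprivileged processes to
+       bind to port 80 and above, while ports below 80 stay privileged.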
+
+       Default: 1024
+
 ip_nonlocal_bind - BOOLEAN
        If set, allows processes to bind() to non-local IP addresses,
        which can be quite useful - but may break some applications.
diff --git a/Documentation/networking/netfilter-sysctl.txt b/Documentation/networking/netfilter-sysctl.txt
new file mode 100644 (file)
index 0000000..55791e5
--- /dev/null
@@ -0,0 +1,10 @@
+/proc/sys/net/netfilter/* Variables:
+
+nf_log_all_netns - BOOLEAN
+       0 - disabled (default)
+       not 0 - enabled
+
+       By default, only the init_net namespace can log packets into the
+       kernel log with the LOG target; this aims to prevent containers from
+       flooding the host kernel log. If enabled, this target also works in
+       other network namespaces. This variable is only accessible from
+       init_net.
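+
+       It can be enabled from init_net with
+       "sysctl -w net.netfilter.nf_log_all_netns=1".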
index daa015af16a092a8d4b7fd1df73f2bbe282251d9..f3b9e507ab05b26eb6517ac2f43992863a606ba1 100644 (file)
@@ -565,7 +565,7 @@ TPACKET_V1 --> TPACKET_V2:
                   (void *)hdr + TPACKET_ALIGN(sizeof(struct tpacket_hdr))
 
 TPACKET_V2 --> TPACKET_V3:
-       - Flexible buffer implementation:
+       - Flexible buffer implementation for RX_RING:
                1. Blocks can be configured with non-static frame-size
                2. Read/poll is at a block-level (as opposed to packet-level)
                3. Added poll timeout to avoid indefinite user-space wait
@@ -574,7 +574,12 @@ TPACKET_V2 --> TPACKET_V3:
                        4.1 block::timeout
                        4.2 tpkt_hdr::sk_rxhash
        - RX Hash data available in user space
-       - Currently only RX_RING available
+       - TX_RING semantics are conceptually similar to TPACKET_V2;
+         use tpacket3_hdr instead of tpacket2_hdr, and TPACKET3_HDRLEN
+         instead of TPACKET2_HDRLEN. In the current implementation,
+         the tp_next_offset field in the tpacket3_hdr MUST be set to
+         zero, indicating that the ring does not hold variable sized frames.
+         Packets with non-zero values of tp_next_offset will be dropped.
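+
+         A minimal sketch of queueing one frame for transmission (here frame
+         points at the next free ring slot; pkt and pkt_len are illustrative
+         names, not part of the API):
+
+               struct tpacket3_hdr *hdr = frame;
+
+               hdr->tp_next_offset = 0;   /* mandatory: fixed-size frames */
+               hdr->tp_len = pkt_len;
+               memcpy((uint8_t *)hdr + TPACKET3_HDRLEN -
+                      sizeof(struct sockaddr_ll), pkt, pkt_len);
+               hdr->tp_status = TP_STATUS_SEND_REQUEST;
+               /* followed by send(fd, NULL, 0, 0) to kick transmission */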
 
 -------------------------------------------------------------------------------
 + AF_PACKET fanout mode
index 356f791af5747fe4a2e1f25dea588d8b371d3d5b..7818b5fe448b60dae421e391ec4679b1c1f18c79 100644 (file)
@@ -156,12 +156,12 @@ struct ieee80211_regdomain mydriver_jp_regdom = {
        //.alpha2 =  "99", /* If I have no alpha2 to map it to */
        .reg_rules = {
                /* IEEE 802.11b/g, channels 1..14 */
-               REG_RULE(2412-20, 2484+20, 40, 6, 20, 0),
+               REG_RULE(2412-10, 2484+10, 40, 6, 20, 0),
                /* IEEE 802.11a, channels 34..48 */
-               REG_RULE(5170-20, 5240+20, 40, 6, 20,
+               REG_RULE(5170-10, 5240+10, 40, 6, 20,
                        NL80211_RRF_NO_IR),
                /* IEEE 802.11a, channels 52..64 */
-               REG_RULE(5260-20, 5320+20, 40, 6, 20,
+               REG_RULE(5260-10, 5320+10, 40, 6, 20,
                        NL80211_RRF_NO_IR|
                        NL80211_RRF_DFS),
        }
@@ -205,7 +205,7 @@ the data in regdb.c as an alternative to using CRDA.
 The file net/wireless/db.txt should be kept up-to-date with the db.txt
 file available in the git repository here:
 
-    git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-regdb.git
+    git://git.kernel.org/pub/scm/linux/kernel/git/sforshee/wireless-regdb.git
 
 Again, most users in most situations should be using the CRDA package
 provided with their distribution, and in most other situations users
index 755dab856392ccf55431b2a8e9a8ff9d3e433b25..3918dae964d4960ccb8b32a566bedd16a999ce68 100644 (file)
@@ -98,10 +98,11 @@ VRF device:
 
 or to specify the output device using cmsg and IP_PKTINFO.
 
-TCP services running in the default VRF context (ie., not bound to any VRF
-device) can work across all VRF domains by enabling the tcp_l3mdev_accept
-sysctl option:
+TCP & UDP services running in the default VRF context (ie., not bound
+to any VRF device) can work across all VRF domains by enabling the
+tcp_l3mdev_accept and udp_l3mdev_accept sysctl options:
     sysctl -w net.ipv4.tcp_l3mdev_accept=1
+    sysctl -w net.ipv4.udp_l3mdev_accept=1
 
 netfilter rules on the VRF device can be used to limit access to services
 running in the default VRF context as well.
diff --git a/Documentation/siphash.txt b/Documentation/siphash.txt
new file mode 100644 (file)
index 0000000..908d348
--- /dev/null
@@ -0,0 +1,175 @@
+         SipHash - a short input PRF
+-----------------------------------------------
+Written by Jason A. Donenfeld <jason@zx2c4.com>
+
+SipHash is a cryptographically secure PRF -- a keyed hash function -- that
+performs very well for short inputs, hence the name. It was designed by
+cryptographers Daniel J. Bernstein and Jean-Philippe Aumasson. It is intended
+as a replacement for some uses of `jhash`, `md5_transform`, `sha_transform`,
+and so forth.
+
+SipHash takes a secret key filled with randomly generated numbers and either
+an input buffer or several input integers. It spits out an integer that is
+indistinguishable from random. You may then use that integer as part of secure
+sequence numbers, secure cookies, or mask it off for use in a hash table.
+
+1. Generating a key
+
+Keys should always be generated from a cryptographically secure source of
+random numbers, either using get_random_bytes or get_random_once:
+
+siphash_key_t key;
+get_random_bytes(&key, sizeof(key));
+
+If you're not deriving your key from here, you're doing it wrong.
+
+2. Using the functions
+
+There are two variants of the function, one that takes a list of integers, and
+one that takes a buffer:
+
+u64 siphash(const void *data, size_t len, const siphash_key_t *key);
+
+And:
+
+u64 siphash_1u64(u64, const siphash_key_t *key);
+u64 siphash_2u64(u64, u64, const siphash_key_t *key);
+u64 siphash_3u64(u64, u64, u64, const siphash_key_t *key);
+u64 siphash_4u64(u64, u64, u64, u64, const siphash_key_t *key);
+u64 siphash_1u32(u32, const siphash_key_t *key);
+u64 siphash_2u32(u32, u32, const siphash_key_t *key);
+u64 siphash_3u32(u32, u32, u32, const siphash_key_t *key);
+u64 siphash_4u32(u32, u32, u32, u32, const siphash_key_t *key);
+
+If you pass the generic siphash function something of a constant length, it
+will constant fold at compile-time and automatically choose one of the
+optimized functions.
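+
+A minimal usage sketch (the names demo_key and demo_hash are illustrative,
+not part of the API):
+
+siphash_key_t demo_key; /* assume filled via get_random_bytes() */
+
+u64 demo_hash(const void *buf, size_t len, u64 a, u64 b)
+{
+	/* Fixed number of integers: use an integer variant directly. */
+	u64 h1 = siphash_2u64(a, b, &demo_key);
+	/* Arbitrary buffer: the generic variant; with a constant len this
+	   compiles down to one of the optimized functions above. */
+	u64 h2 = siphash(buf, len, &demo_key);
+	return h1 ^ h2;
+}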
+
+3. Hashtable key function usage:
+
+struct some_hashtable {
+       DECLARE_HASHTABLE(hashtable, 8);
+       siphash_key_t key;
+};
+
+void init_hashtable(struct some_hashtable *table)
+{
+       get_random_bytes(&table->key, sizeof(table->key));
+}
+
+static inline struct hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input)
+{
+       return &table->hashtable[siphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)];
+}
+
+You may then iterate like usual over the returned hash bucket.
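+
+For example, assuming the stored objects embed a struct hlist_node named
+"node" (the object type and input_matches() helper are illustrative):
+
+struct some_object *obj;
+
+hlist_for_each_entry(obj, some_hashtable_bucket(table, input), node)
+	if (input_matches(obj, input))
+		return obj;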
+
+4. Security
+
+SipHash has a very high security margin, with its 128-bit key. So long as the
+key is kept secret, it is impossible for an attacker to guess the outputs of
+the function, even when able to observe many outputs, since the 2^128 key
+space is far too large to search.
+
+Linux implements the "2-4" variant of SipHash.
+
+5. Struct-passing Pitfalls
+
+Often the XuY functions will not be large enough, and instead you'll want to
+pass a pre-filled struct to siphash. When doing this, it's important to
+always ensure the struct has no padding holes. The easiest way to do this is
+to simply arrange the members of the struct in descending order of size, and
+to use offsetofend() instead of sizeof() for getting the size. For
+performance reasons, if possible, it's probably a good thing to align the
+struct to the right boundary. Here's an example:
+
+const struct {
+       struct in6_addr saddr;
+       u32 counter;
+       u16 dport;
+} __aligned(SIPHASH_ALIGNMENT) combined = {
+       .saddr = *(struct in6_addr *)saddr,
+       .counter = counter,
+       .dport = dport
+};
+u64 h = siphash(&combined, offsetofend(typeof(combined), dport), &secret);
+
+6. Resources
+
+Read the SipHash paper if you're interested in learning more:
+https://131002.net/siphash/siphash.pdf
+
+
+~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
+
+HalfSipHash - SipHash's insecure younger cousin
+-----------------------------------------------
+Written by Jason A. Donenfeld <jason@zx2c4.com>
+
+On the off-chance that SipHash is not fast enough for your needs, you might be
+able to justify using HalfSipHash, a terrifying but potentially useful
+possibility. HalfSipHash cuts SipHash's rounds down from "2-4" to "1-3" and,
+even scarier, uses an easily brute-forceable 64-bit key (with a 32-bit output)
+instead of SipHash's 128-bit key. However, this may appeal to some
+high-performance `jhash` users.
+
+Danger!
+
+Do not ever use HalfSipHash except as a hashtable key function, and only
+then when you can be absolutely certain that the outputs will never be
+transmitted out of the kernel. This is only remotely useful over `jhash` as a
+means of mitigating hashtable flooding denial of service attacks.
+
+1. Generating a key
+
+Keys should always be generated from a cryptographically secure source of
+random numbers, either using get_random_bytes or get_random_once:
+
+hsiphash_key_t key;
+get_random_bytes(&key, sizeof(key));
+
+If you're not deriving your key from here, you're doing it wrong.
+
+2. Using the functions
+
+There are two variants of the function, one that takes a list of integers, and
+one that takes a buffer:
+
+u32 hsiphash(const void *data, size_t len, const hsiphash_key_t *key);
+
+And:
+
+u32 hsiphash_1u32(u32, const hsiphash_key_t *key);
+u32 hsiphash_2u32(u32, u32, const hsiphash_key_t *key);
+u32 hsiphash_3u32(u32, u32, u32, const hsiphash_key_t *key);
+u32 hsiphash_4u32(u32, u32, u32, u32, const hsiphash_key_t *key);
+
+If you pass the generic hsiphash function something of a constant length, it
+will constant fold at compile-time and automatically choose one of the
+optimized functions.
+
+3. Hashtable key function usage:
+
+struct some_hashtable {
+       DECLARE_HASHTABLE(hashtable, 8);
+       hsiphash_key_t key;
+};
+
+void init_hashtable(struct some_hashtable *table)
+{
+       get_random_bytes(&table->key, sizeof(table->key));
+}
+
+static inline struct hlist_head *some_hashtable_bucket(struct some_hashtable *table, struct interesting_input *input)
+{
+       return &table->hashtable[hsiphash(input, sizeof(*input), &table->key) & (HASH_SIZE(table->hashtable) - 1)];
+}
+
+You may then iterate like usual over the returned hash bucket.
+
+4. Performance
+
+HalfSipHash is roughly 3 times slower than JenkinsHash. For many replacements,
+this will not be a problem, as the hashtable lookup isn't the bottleneck. And
+in general, this is probably a good sacrifice to make for the security and DoS
+resistance of HalfSipHash.
index f0480f7ea7404c777c1a8943047278d6765a4fe6..2ebabc93014a2442824d2da7c79b28d53eaa4b41 100644 (file)
@@ -54,6 +54,18 @@ Values :
        1 - enable JIT hardening for unprivileged users only
        2 - enable JIT hardening for all users
 
+bpf_jit_kallsyms
+----------------
+
+When the Berkeley Packet Filter Just in Time compiler is enabled, the
+addresses of compiled images are unknown to the kernel, meaning they show up
+neither in traces nor in /proc/kallsyms. This option enables export of these
+addresses, which can be used for debugging/tracing. If bpf_jit_harden is
+enabled, this feature is disabled.
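+Exported symbols appear as bpf_prog_<tag> entries in /proc/kallsyms.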
+Values :
+       0 - disable JIT kallsyms export (default value)
+       1 - enable JIT kallsyms export for privileged users only
+
 dev_weight
 --------------
 
@@ -61,6 +73,27 @@ The maximum number of packets that kernel can handle on a NAPI interrupt,
 it's a Per-CPU variable.
 Default: 64
 
+dev_weight_rx_bias
+--------------
+
+RPS (e.g. RFS, aRFS) processing competes with the driver's registered NAPI
+poll function for the per-softirq-cycle netdev_budget. This parameter
+influences the proportion of the configured netdev_budget that is spent on
+RPS-based packet processing during RX softirq cycles. It also makes the
+current dev_weight adaptable to asymmetric CPU needs on the RX/TX side of
+the network stack (see dev_weight_tx_bias). It is effective on a per-CPU
+basis. The limit is derived multiplicatively from dev_weight
+(dev_weight * dev_weight_rx_bias).
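+For example, with dev_weight 64 and dev_weight_rx_bias 2, up to
+64 * 2 = 128 packets may be processed for RPS per RX softirq cycle.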
+Default: 1
+
+dev_weight_tx_bias
+--------------
+
+Scales the maximum number of packets that can be processed during a TX
+softirq cycle. Effective on a per-CPU basis. Allows scaling of the current
+dev_weight for asymmetric net stack processing needs. Be careful to avoid
+making TX softirq processing a CPU hog. The limit is derived from dev_weight
+(dev_weight * dev_weight_tx_bias).
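+For example, dev_weight 64 with dev_weight_tx_bias 4 allows up to
+64 * 4 = 256 packets to be processed in one TX softirq cycle.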
+Default: 1
+
 default_qdisc
 --------------
 
index 527d13759eccf6670c8402c80e918c11f80530f7..d8f71f21fb880326bbd73adafa53f62d789738d6 100644 (file)
@@ -2599,6 +2599,12 @@ L:       netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/broadcom/bnx2x/
 
+BROADCOM BNXT_EN 50 GIGABIT ETHERNET DRIVER
+M:     Michael Chan <michael.chan@broadcom.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     drivers/net/ethernet/broadcom/bnxt/
+
 BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
 M:     Ray Jui <rjui@broadcom.com>
@@ -5632,6 +5638,14 @@ T:       git git://linuxtv.org/media_tree.git
 S:     Odd Fixes
 F:     drivers/media/usb/gspca/
 
+GTP (GPRS Tunneling Protocol)
+M:     Pablo Neira Ayuso <pablo@netfilter.org>
+M:     Harald Welte <laforge@gnumonks.org>
+L:     osmocom-net-gprs@lists.osmocom.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/gtp.git
+S:     Maintained
+F:     drivers/net/gtp.c
+
 GUID PARTITION TABLE (GPT)
 M:     Davidlohr Bueso <dave@stgolabs.net>
 L:     linux-efi@vger.kernel.org
@@ -6244,6 +6258,13 @@ F:       include/net/cfg802154.h
 F:     include/net/ieee802154_netdev.h
 F:     Documentation/networking/ieee802154.txt
 
+IFE PROTOCOL
+M:     Yotam Gigi <yotamg@mellanox.com>
+M:     Jamal Hadi Salim <jhs@mojatatu.com>
+F:     net/ife
+F:     include/net/ife.h
+F:     include/uapi/linux/ife.h
+
 IGORPLUG-USB IR RECEIVER
 M:     Sean Young <sean@mess.org>
 L:     linux-media@vger.kernel.org
@@ -8565,9 +8586,8 @@ F:        Documentation/networking/s2io.txt
 F:     Documentation/networking/vxge.txt
 F:     drivers/net/ethernet/neterion/
 
-NETFILTER ({IP,IP6,ARP,EB,NF}TABLES)
+NETFILTER
 M:     Pablo Neira Ayuso <pablo@netfilter.org>
-M:     Patrick McHardy <kaber@trash.net>
 M:     Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 L:     netfilter-devel@vger.kernel.org
 L:     coreteam@netfilter.org
@@ -9368,6 +9388,14 @@ F:       drivers/video/fbdev/sti*
 F:     drivers/video/console/sti*
 F:     drivers/video/logo/logo_parisc*
 
+PARMAN
+M:     Jiri Pirko <jiri@mellanox.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     lib/parman.c
+F:     lib/test_parman.c
+F:     include/linux/parman.h
+
 PC87360 HARDWARE MONITORING DRIVER
 M:     Jim Cromie <jim.cromie@gmail.com>
 L:     linux-hwmon@vger.kernel.org
@@ -9966,6 +9994,13 @@ L:       linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     drivers/block/ps3vram.c
 
+PSAMPLE PACKET SAMPLING SUPPORT
+M:     Yotam Gigi <yotamg@mellanox.com>
+S:     Maintained
+F:     net/psample
+F:     include/net/psample.h
+F:     include/uapi/linux/psample.h
+
 PSTORE FILESYSTEM
 M:     Anton Vorontsov <anton@enomsg.org>
 M:     Colin Cross <ccross@android.com>
@@ -10592,7 +10627,7 @@ F:      drivers/net/wireless/realtek/rtlwifi/
 F:     drivers/net/wireless/realtek/rtlwifi/rtl8192ce/
 
 RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
-M:     Jes Sorensen <Jes.Sorensen@redhat.com>
+M:     Jes Sorensen <Jes.Sorensen@gmail.com>
 L:     linux-wireless@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-devel
 S:     Maintained
@@ -10858,6 +10893,13 @@ S:     Maintained
 F:     drivers/staging/media/st-cec/
 F:     Documentation/devicetree/bindings/media/stih-cec.txt
 
+SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
+M:     Ursula Braun <ubraun@linux.vnet.ibm.com>
+L:     linux-s390@vger.kernel.org
+W:     http://www.ibm.com/developerworks/linux/linux390/
+S:     Supported
+F:     net/smc/
+
 SYNOPSYS DESIGNWARE DMAC DRIVER
 M:     Viresh Kumar <vireshk@kernel.org>
 M:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
@@ -10866,13 +10908,6 @@ F:     include/linux/dma/dw.h
 F:     include/linux/platform_data/dma-dw.h
 F:     drivers/dma/dw/
 
-SYNOPSYS DESIGNWARE ETHERNET QOS 4.10a driver
-M: Lars Persson <lars.persson@axis.com>
-L: netdev@vger.kernel.org
-S: Supported
-F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
-F: drivers/net/ethernet/synopsys/dwc_eth_qos.c
-
 SYNOPSYS DESIGNWARE I2C DRIVER
 M:     Jarkko Nikula <jarkko.nikula@linux.intel.com>
 R:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
@@ -11315,6 +11350,13 @@ F:     arch/arm/mach-s3c24xx/mach-bast.c
 F:     arch/arm/mach-s3c24xx/bast-ide.c
 F:     arch/arm/mach-s3c24xx/bast-irq.c
 
+SIPHASH PRF ROUTINES
+M:     Jason A. Donenfeld <Jason@zx2c4.com>
+S:     Maintained
+F:     lib/siphash.c
+F:     lib/test_siphash.c
+F:     include/linux/siphash.h
+
 TI DAVINCI MACHINE SUPPORT
 M:     Sekhar Nori <nsekhar@ti.com>
 M:     Kevin Hilman <khilman@kernel.org>
@@ -11886,6 +11928,7 @@ F:      include/linux/swiotlb.h
 
 SWITCHDEV
 M:     Jiri Pirko <jiri@resnulli.us>
+M:     Ivan Vecera <ivecera@redhat.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     net/switchdev/
index 99839c23d453fa8ded2061968aead8a6ee4b2317..bd04eace455c643eb7c7480c865b10baba6545b1 100644 (file)
@@ -781,4 +781,7 @@ config VMAP_STACK
          the stack to map directly to the KASAN shadow map using a formula
          that is incorrect if the stack is in vmalloc space.
 
+config ARCH_WANT_RELAX_ORDER
+       bool
+
 source "kernel/gcov/Kconfig"
index 18d72a245e889ac28561b34417680702fb0e10e1..579cca498fd30f4a6c924f6ccc3aaef9a48ed817 100644 (file)
                        cpdma_channels = <8>;
                        ale_entries = <1024>;
                        bd_ram_size = <0x2000>;
-                       no_bd_ram = <0>;
                        mac_control = <0x20>;
                        slaves = <2>;
                        active_slave = <0>;
index 2df9e6050c2f382fea2d09f6a95e634210d3a88f..97fcaf415de1858267cf57c1f60c07638b4c7129 100644 (file)
                        cpdma_channels = <8>;
                        ale_entries = <1024>;
                        bd_ram_size = <0x2000>;
-                       no_bd_ram = <0>;
                        mac_control = <0x20>;
                        slaves = <2>;
                        active_slave = <0>;
index 81b8cecb58206d8c98d9d986a1995e67e17fe7ed..5986ea3a90b08b545cea505710a2d35f38eb0656 100644 (file)
                        cpdma_channels = <8>;
                        ale_entries = <1024>;
                        bd_ram_size = <0x2000>;
-                       no_bd_ram = <0>;
                        mac_control = <0x20>;
                        slaves = <2>;
                        active_slave = <0>;
index 5ba161679e01f97c868266aee23b34c62d079c3a..3e1f75026eac467992644e51f43fcfd824b58499 100644 (file)
                        cpdma_channels = <8>;
                        ale_entries = <1024>;
                        bd_ram_size = <0x2000>;
-                       no_bd_ram = <0>;
                        mac_control = <0x20>;
                        slaves = <2>;
                        active_slave = <0>;
index f57ec511e7ae41d8d3d35351b11a15d708f0647e..e5c7d9d8592a4e3faad995aa2cd02ff600eaee9f 100644 (file)
@@ -253,7 +253,8 @@ CONFIG_R8169=y
 CONFIG_SH_ETH=y
 CONFIG_SMSC911X=y
 CONFIG_STMMAC_ETH=y
-CONFIG_SYNOPSYS_DWC_ETH_QOS=y
+CONFIG_STMMAC_PLATFORM=y
+CONFIG_DWMAC_DWC_QOS_ETH=y
 CONFIG_TI_CPSW=y
 CONFIG_XILINX_EMACLITE=y
 CONFIG_AT803X_PHY=y
index 04910764c38591c6b4d5b7bc14b0ed79ef15c3e6..83a7ec4c16d08e275f28a00de4c8840205676d56 100644 (file)
@@ -105,7 +105,7 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
 /*****************************************************************************
  * Ethernet switch
  ****************************************************************************/
-void __init orion5x_eth_switch_init(struct dsa_platform_data *d)
+void __init orion5x_eth_switch_init(struct dsa_chip_data *d)
 {
        orion_ge00_switch_init(d);
 }
index 8a4115bd441daa2e3c16ff8ec52caafe53c43c6e..efeffc6b4ebbddd63a8a7853a26b806073c061c2 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <linux/reboot.h>
 
-struct dsa_platform_data;
+struct dsa_chip_data;
 struct mv643xx_eth_platform_data;
 struct mv_sata_platform_data;
 
@@ -41,7 +41,7 @@ void orion5x_setup_wins(void);
 void orion5x_ehci0_init(void);
 void orion5x_ehci1_init(void);
 void orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data);
-void orion5x_eth_switch_init(struct dsa_platform_data *d);
+void orion5x_eth_switch_init(struct dsa_chip_data *d);
 void orion5x_i2c_init(void);
 void orion5x_sata_init(struct mv_sata_platform_data *sata_data);
 void orion5x_spi_init(void);
index dccadf68ea2b7bf5277490d6cb198f2b141a96e5..a3c1336d30c930d5da372a570f2e3e49eaffb77d 100644 (file)
@@ -101,11 +101,6 @@ static struct dsa_chip_data rd88f5181l_fxo_switch_chip_data = {
        .port_names[7]  = "lan3",
 };
 
-static struct dsa_platform_data __initdata rd88f5181l_fxo_switch_plat_data = {
-       .nr_chips       = 1,
-       .chip           = &rd88f5181l_fxo_switch_chip_data,
-};
-
 static void __init rd88f5181l_fxo_init(void)
 {
        /*
@@ -120,7 +115,7 @@ static void __init rd88f5181l_fxo_init(void)
         */
        orion5x_ehci0_init();
        orion5x_eth_init(&rd88f5181l_fxo_eth_data);
-       orion5x_eth_switch_init(&rd88f5181l_fxo_switch_plat_data);
+       orion5x_eth_switch_init(&rd88f5181l_fxo_switch_chip_data);
        orion5x_uart0_init();
 
        mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
index affe5ec825de2cbd819398e52bcb2dc22b6cc8b6..252efe29bd1a7e9867a594c3b6e298e56e2f90dd 100644 (file)
@@ -102,11 +102,6 @@ static struct dsa_chip_data rd88f5181l_ge_switch_chip_data = {
        .port_names[7]  = "lan3",
 };
 
-static struct dsa_platform_data __initdata rd88f5181l_ge_switch_plat_data = {
-       .nr_chips       = 1,
-       .chip           = &rd88f5181l_ge_switch_chip_data,
-};
-
 static struct i2c_board_info __initdata rd88f5181l_ge_i2c_rtc = {
        I2C_BOARD_INFO("ds1338", 0x68),
 };
@@ -125,7 +120,7 @@ static void __init rd88f5181l_ge_init(void)
         */
        orion5x_ehci0_init();
        orion5x_eth_init(&rd88f5181l_ge_eth_data);
-       orion5x_eth_switch_init(&rd88f5181l_ge_switch_plat_data);
+       orion5x_eth_switch_init(&rd88f5181l_ge_switch_chip_data);
        orion5x_i2c_init();
        orion5x_uart0_init();
 
index 67ee8571b03c86e16b014745d1c96de887b3a0e8..f4f1dbe1d91d3544f12401d0ee3da4613f91a951 100644 (file)
@@ -40,11 +40,6 @@ static struct dsa_chip_data rd88f6183ap_ge_switch_chip_data = {
        .port_names[5]  = "cpu",
 };
 
-static struct dsa_platform_data __initdata rd88f6183ap_ge_switch_plat_data = {
-       .nr_chips       = 1,
-       .chip           = &rd88f6183ap_ge_switch_chip_data,
-};
-
 static struct mtd_partition rd88f6183ap_ge_partitions[] = {
        {
                .name   = "kernel",
@@ -89,7 +84,7 @@ static void __init rd88f6183ap_ge_init(void)
         */
        orion5x_ehci0_init();
        orion5x_eth_init(&rd88f6183ap_ge_eth_data);
-       orion5x_eth_switch_init(&rd88f6183ap_ge_switch_plat_data);
+       orion5x_eth_switch_init(&rd88f6183ap_ge_switch_chip_data);
        spi_register_board_info(rd88f6183ap_ge_spi_slave_info,
                                ARRAY_SIZE(rd88f6183ap_ge_spi_slave_info));
        orion5x_spi_init();
index 4dbcdbe1de7ccb2f6e3e4bdc14d478a447c10453..d162d4c7f85db6f85ba47756b62906a858dfb8b6 100644 (file)
@@ -106,11 +106,6 @@ static struct dsa_chip_data wnr854t_switch_chip_data = {
        .port_names[7] = "lan2",
 };
 
-static struct dsa_platform_data __initdata wnr854t_switch_plat_data = {
-       .nr_chips       = 1,
-       .chip           = &wnr854t_switch_chip_data,
-};
-
 static void __init wnr854t_init(void)
 {
        /*
@@ -124,7 +119,7 @@ static void __init wnr854t_init(void)
         * Configure peripherals.
         */
        orion5x_eth_init(&wnr854t_eth_data);
-       orion5x_eth_switch_init(&wnr854t_switch_plat_data);
+       orion5x_eth_switch_init(&wnr854t_switch_chip_data);
        orion5x_uart0_init();
 
        mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
index a6a8c4648d74f0382ca283440c359e12e7452d38..9250bb2e429cfc1a2677e5137d0cb5b33f71e24a 100644
@@ -191,11 +191,6 @@ static struct dsa_chip_data wrt350n_v2_switch_chip_data = {
        .port_names[7]  = "lan4",
 };
 
-static struct dsa_platform_data __initdata wrt350n_v2_switch_plat_data = {
-       .nr_chips       = 1,
-       .chip           = &wrt350n_v2_switch_chip_data,
-};
-
 static void __init wrt350n_v2_init(void)
 {
        /*
@@ -210,7 +205,7 @@ static void __init wrt350n_v2_init(void)
         */
        orion5x_ehci0_init();
        orion5x_eth_init(&wrt350n_v2_eth_data);
-       orion5x_eth_switch_init(&wrt350n_v2_switch_plat_data);
+       orion5x_eth_switch_init(&wrt350n_v2_switch_chip_data);
        orion5x_uart0_init();
 
        mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
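All five orion5x board files above get the same mechanical conversion: the one-element struct dsa_platform_data wrapper is deleted and the struct dsa_chip_data is passed to orion5x_eth_switch_init() directly. A minimal sketch of a converted board file, with hypothetical names (example_*), following the shape of the hunks above:

static struct dsa_chip_data example_switch_chip_data = {
        .port_names[0]  = "lan1",
        .port_names[1]  = "lan2",
        .port_names[5]  = "cpu",        /* the CPU port is found by name */
};

static void __init example_board_init(void)
{
        orion5x_eth_init(&example_eth_data);    /* example_eth_data assumed */
        /* was: orion5x_eth_switch_init(&example_switch_plat_data) */
        orion5x_eth_switch_init(&example_switch_chip_data);
        orion5x_uart0_init();
}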
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 272f49b2c68fc5606533b4110ab157bc6318c19e..9255b6d67ba5e3a3b3586639cadc86342e8b4b04 100644
@@ -22,6 +22,7 @@
 #include <linux/platform_data/dma-mv_xor.h>
 #include <linux/platform_data/usb-ehci-orion.h>
 #include <plat/common.h>
+#include <linux/phy.h>
 
 /* Create a clkdev entry for a given device/clk */
 void __init orion_clkdev_add(const char *con_id, const char *dev_id,
@@ -470,15 +471,27 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
 /*****************************************************************************
  * Ethernet switch
  ****************************************************************************/
-void __init orion_ge00_switch_init(struct dsa_platform_data *d)
+static __initconst const char *orion_ge00_mvmdio_bus_name = "orion-mii";
+static __initdata struct mdio_board_info
+                 orion_ge00_switch_board_info;
+
+void __init orion_ge00_switch_init(struct dsa_chip_data *d)
 {
-       int i;
+       struct mdio_board_info *bd;
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
+               if (!strcmp(d->port_names[i], "cpu"))
+                       break;
 
-       d->netdev = &orion_ge00.dev;
-       for (i = 0; i < d->nr_chips; i++)
-               d->chip[i].host_dev = &orion_ge_mvmdio.dev;
+       bd = &orion_ge00_switch_board_info;
+       bd->bus_id = orion_ge00_mvmdio_bus_name;
+       bd->mdio_addr = d->sw_addr;
+       d->netdev[i] = &orion_ge00.dev;
+       strcpy(bd->modalias, "mv88e6085");
+       bd->platform_data = d;
 
-       platform_device_register_data(NULL, "dsa", 0, d, sizeof(d));
+       mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
 }
 
 /*****************************************************************************
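The rework above stops registering a "dsa" platform device and instead describes the switch as an ordinary MDIO device: the mdio_board_info entry names the "orion-mii" bus and the switch address (d->sw_addr), carries the "mv88e6085" modalias, and attaches the dsa_chip_data as platform data. A rough sketch of the consuming side, assuming (as the board-info mechanism intends) that the MDIO core copies bd->platform_data into the created device:

/* Hypothetical probe in an MDIO switch driver; not code from this diff. */
static int example_switch_probe(struct mdio_device *mdiodev)
{
        struct dsa_chip_data *cd = mdiodev->dev.platform_data;

        if (!cd)
                return -EINVAL;
        /* cd->netdev[] holds the host device behind the "cpu" port */
        return 0;
}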
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index 9347f3c58a6dfc7f44f340cbd47b8e45f698d04a..3647d3b33c2061e6d1eacef1bc961e3582ad128e 100644
@@ -12,7 +12,7 @@
 #include <linux/mv643xx_eth.h>
 #include <linux/platform_data/usb-ehci-orion.h>
 
-struct dsa_platform_data;
+struct dsa_chip_data;
 struct mv_sata_platform_data;
 
 void __init orion_uart0_init(void __iomem *membase,
@@ -57,7 +57,7 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
                            unsigned long mapbase,
                            unsigned long irq);
 
-void __init orion_ge00_switch_init(struct dsa_platform_data *d);
+void __init orion_ge00_switch_init(struct dsa_chip_data *d);
 
 void __init orion_i2c_init(unsigned long mapbase,
                           unsigned long irq,
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index b2fc97a2c56c80fe633777bbb18322c980949953..05d12104d270acae74e2865757496c59339839be 100644
@@ -813,11 +813,6 @@ static inline void bpf_flush_icache(void *start, void *end)
        flush_icache_range((unsigned long)start, (unsigned long)end);
 }
 
-void bpf_jit_compile(struct bpf_prog *prog)
-{
-       /* Nothing to do here. We support Internal BPF. */
-}
-
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
        struct bpf_prog *tmp, *orig_prog = prog;
@@ -915,18 +910,3 @@ out:
                                           tmp : orig_prog);
        return prog;
 }
-
-void bpf_jit_free(struct bpf_prog *prog)
-{
-       unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
-       struct bpf_binary_header *header = (void *)addr;
-
-       if (!prog->jited)
-               goto free_filter;
-
-       set_memory_rw(addr, header->pages);
-       bpf_jit_binary_free(header);
-
-free_filter:
-       bpf_prog_unlock_free(prog);
-}
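The stub and the open-coded free routine can go because kernel/bpf/core.c now supplies weak defaults for both: an empty bpf_jit_compile() (classic BPF is converted to eBPF before an eBPF JIT ever sees it) and a generic bpf_jit_free() for arches that set their JIT images read-only. Paraphrased sketch of that generic free path, not verbatim kernel code:

void __weak bpf_jit_free(struct bpf_prog *fp)
{
        if (fp->jited) {
                unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
                struct bpf_binary_header *hdr = (void *)addr;

                bpf_jit_binary_unlock_ro(hdr);  /* set_memory_rw() on the image */
                bpf_jit_binary_free(hdr);
        }
        bpf_prog_unlock_free(fp);
}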
diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
index fc4be028c4189ee9ec93265b844633411a945eb4..e45ce4243aaa3bdb7f4f22a0ecafd9de57b60261 100644
@@ -124,7 +124,6 @@ static inline void recv_packet(struct net_device *dev)
 
        skb->protocol = eth_type_trans(skb, dev);
        netif_rx(skb);
-       dev->last_rx = jiffies;
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += pktlen;
 
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 37a932d9148c214e2cb3fe9668af7330f3f8bf52..8297ce714c5e625d1eecd83b38d26f1d4ed656aa 100644
@@ -1060,7 +1060,3 @@ static int __init octeon_publish_devices(void)
        return of_platform_bus_probe(NULL, octeon_ids, NULL);
 }
 arch_initcall(octeon_publish_devices);
-
-MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Platform driver for Octeon SOC");
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 73a5cf18fd84f3553f564456f0e482977afdfbe2..c34166ef76fc4923e97e658e7fe249f82e3522fc 100644
@@ -961,8 +961,6 @@ common_load:
        return 0;
 }
 
-void bpf_jit_compile(struct bpf_prog *fp) { }
-
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 {
        u32 proglen;
@@ -1066,6 +1064,7 @@ out:
        return fp;
 }
 
+/* Overriding bpf_jit_free() as we don't set images read-only. */
 void bpf_jit_free(struct bpf_prog *fp)
 {
        unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
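powerpc, by contrast, keeps a private bpf_jit_free(): as the new comment says, this JIT does not set its images read-only, so the generic path's unlock step does not apply. The retained override is essentially the generic version minus the read-only handling (a sketch consistent with the lines shown above):

void bpf_jit_free(struct bpf_prog *fp)
{
        unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
        struct bpf_binary_header *header = (void *)addr;

        if (fp->jited)
                bpf_jit_binary_free(header);    /* no set_memory_rw() needed */

        bpf_prog_unlock_free(fp);
}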
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 167b31b186c1313c2cea094ee3b972233d61a608..f1d0e62ec1dd58044646d491360f534013f4f49d 100644
@@ -1262,14 +1262,6 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
        return 0;
 }
 
-/*
- * Classic BPF function stub. BPF programs will be converted into
- * eBPF and then bpf_int_jit_compile() will be called.
- */
-void bpf_jit_compile(struct bpf_prog *fp)
-{
-}
-
 /*
  * Compile eBPF program "fp"
  */
@@ -1347,21 +1339,3 @@ out:
                                           tmp : orig_fp);
        return fp;
 }
-
-/*
- * Free eBPF program
- */
-void bpf_jit_free(struct bpf_prog *fp)
-{
-       unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
-       struct bpf_binary_header *header = (void *)addr;
-
-       if (!fp->jited)
-               goto free_filter;
-
-       set_memory_rw(addr, header->pages);
-       bpf_jit_binary_free(header);
-
-free_filter:
-       bpf_prog_unlock_free(fp);
-}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index cf4034c66362f3016c532bccc0efbebe2f747a25..68ac5c7cd982619581dfa65b75ac89aa8e359554 100644
@@ -44,6 +44,7 @@ config SPARC
        select CPU_NO_EFFICIENT_FFS
        select HAVE_ARCH_HARDENED_USERCOPY
        select PROVE_LOCKING_SMALL if PROVE_LOCKING
+       select ARCH_WANT_RELAX_ORDER
 
 config SPARC32
        def_bool !64BIT
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index bb660e53cbd6ba51eace412622fdd7342939ddc1..18a62e2088262eed8b171118100ae5e3538c59b2 100644
@@ -1067,13 +1067,13 @@ common_load:
 
                ilen = prog - temp;
                if (ilen > BPF_MAX_INSN_SIZE) {
-                       pr_err("bpf_jit_compile fatal insn size error\n");
+                       pr_err("bpf_jit: fatal insn size error\n");
                        return -EFAULT;
                }
 
                if (image) {
                        if (unlikely(proglen + ilen > oldproglen)) {
-                               pr_err("bpf_jit_compile fatal error\n");
+                               pr_err("bpf_jit: fatal error\n");
                                return -EFAULT;
                        }
                        memcpy(image + proglen, temp, ilen);
@@ -1085,10 +1085,6 @@ common_load:
        return proglen;
 }
 
-void bpf_jit_compile(struct bpf_prog *prog)
-{
-}
-
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
        struct bpf_binary_header *header = NULL;
@@ -1184,18 +1180,3 @@ out:
                                           tmp : orig_prog);
        return prog;
 }
-
-void bpf_jit_free(struct bpf_prog *fp)
-{
-       unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
-       struct bpf_binary_header *header = (void *)addr;
-
-       if (!fp->jited)
-               goto free_filter;
-
-       set_memory_rw(addr, header->pages);
-       bpf_jit_binary_free(header);
-
-free_filter:
-       bpf_prog_unlock_free(fp);
-}
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index c53a9dd1353f4e473007d81100d5b60fb70524be..623359e407aa20543d5f5e73d7999b48a2322262 100644
@@ -1779,7 +1779,7 @@ static int eni_do_init(struct atm_dev *dev)
        printk(")\n");
        printk(KERN_NOTICE DEV_LABEL "(itf %d): %s,%s\n",dev->number,
            eni_in(MID_RES_ID_MCON) & 0x200 ? "ASIC" : "FPGA",
-           media_name[eni_in(MID_RES_ID_MCON) & DAUGTHER_ID]);
+           media_name[eni_in(MID_RES_ID_MCON) & DAUGHTER_ID]);
 
        error = suni_init(dev);
        if (error)
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 471ddfd93ea87b2d4da09b398bb6982b735036b8..5ec109533bb9129519e066fd2409af8c333eb8bc 100644
@@ -2132,12 +2132,8 @@ idt77252_init_est(struct vc_map *vc, int pcr)
 
        est->interval = 2;              /* XXX: make this configurable */
        est->ewma_log = 2;              /* XXX: make this configurable */
-       init_timer(&est->timer);
-       est->timer.data = (unsigned long)vc;
-       est->timer.function = idt77252_est_timer;
-
-       est->timer.expires = jiffies + ((HZ / 4) << est->interval);
-       add_timer(&est->timer);
+       setup_timer(&est->timer, idt77252_est_timer, (unsigned long)vc);
+       mod_timer(&est->timer, jiffies + ((HZ / 4) << est->interval));
 
        return est;
 }
@@ -3638,9 +3634,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
        spin_lock_init(&card->cmd_lock);
        spin_lock_init(&card->tst_lock);
 
-       init_timer(&card->tst_timer);
-       card->tst_timer.data = (unsigned long)card;
-       card->tst_timer.function = tst_timer;
+       setup_timer(&card->tst_timer, tst_timer, (unsigned long)card);
 
        /* Do the I/O remapping... */
        card->membase = ioremap(membase, 1024);
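Both idt77252 hunks are straight conversions to the timer helpers: setup_timer() folds the init_timer()/.data/.function assignments into one call, and mod_timer() sets .expires and arms the timer in one step. The pattern in isolation (my_ctx and my_timer_fn are placeholders, not names from this driver):

static void my_timer_fn(unsigned long data)
{
        struct my_ctx *ctx = (struct my_ctx *)data;     /* unpack the cookie */
        /* ... handle expiry ... */
}

static void example_arm_timer(struct my_ctx *ctx)
{
        setup_timer(&ctx->timer, my_timer_fn, (unsigned long)ctx);
        mod_timer(&ctx->timer, jiffies + HZ / 4);       /* expires + add in one call */
}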
diff --git a/drivers/atm/midway.h b/drivers/atm/midway.h
index 432525ad5e46929b9d93c2757fef80309b54464f..d8bec0f2a71cba2a2851f1bb5c38bd80bad0f07c 100644
@@ -56,7 +56,7 @@
 #define MID_CON_SUNI   0x00000040      /* 0: UTOPIA; 1: SUNI */
 #define MID_CON_V6     0x00000020      /* 0: non-pipelined UTOPIA (required
                                           iff !CON_SUNI); 1: UTOPIA */
-#define DAUGTHER_ID    0x0000001f      /* daugther board id */
+#define DAUGHTER_ID    0x0000001f      /* daughter board id */
 
 /*
  * Interrupt Status Acknowledge, Interrupt Status & Interrupt Enable
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 2c1798e38abd166bb62596e81c9921a13c452d94..12da68ec48baa751d78899e89ac7da2e60f197f7 100644
@@ -136,17 +136,17 @@ static bool bcma_is_core_needed_early(u16 core_id)
        return false;
 }
 
-static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
+static struct device_node *bcma_of_find_child_device(struct device *parent,
                                                     struct bcma_device *core)
 {
        struct device_node *node;
        u64 size;
        const __be32 *reg;
 
-       if (!parent || !parent->dev.of_node)
+       if (!parent->of_node)
                return NULL;
 
-       for_each_child_of_node(parent->dev.of_node, node) {
+       for_each_child_of_node(parent->of_node, node) {
                reg = of_get_address(node, 0, &size, NULL);
                if (!reg)
                        continue;
@@ -156,7 +156,7 @@ static struct device_node *bcma_of_find_child_device(struct platform_device *par
        return NULL;
 }
 
-static int bcma_of_irq_parse(struct platform_device *parent,
+static int bcma_of_irq_parse(struct device *parent,
                             struct bcma_device *core,
                             struct of_phandle_args *out_irq, int num)
 {
@@ -169,7 +169,7 @@ static int bcma_of_irq_parse(struct platform_device *parent,
                        return rc;
        }
 
-       out_irq->np = parent->dev.of_node;
+       out_irq->np = parent->of_node;
        out_irq->args_count = 1;
        out_irq->args[0] = num;
 
@@ -177,13 +177,13 @@ static int bcma_of_irq_parse(struct platform_device *parent,
        return of_irq_parse_raw(laddr, out_irq);
 }
 
-static unsigned int bcma_of_get_irq(struct platform_device *parent,
+static unsigned int bcma_of_get_irq(struct device *parent,
                                    struct bcma_device *core, int num)
 {
        struct of_phandle_args out_irq;
        int ret;
 
-       if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent || !parent->dev.of_node)
+       if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent->of_node)
                return 0;
 
        ret = bcma_of_irq_parse(parent, core, &out_irq, num);
@@ -196,7 +196,7 @@ static unsigned int bcma_of_get_irq(struct platform_device *parent,
        return irq_create_of_mapping(&out_irq);
 }
 
-static void bcma_of_fill_device(struct platform_device *parent,
+static void bcma_of_fill_device(struct device *parent,
                                struct bcma_device *core)
 {
        struct device_node *node;
@@ -227,7 +227,7 @@ unsigned int bcma_core_irq(struct bcma_device *core, int num)
                        return mips_irq <= 4 ? mips_irq + 2 : 0;
                }
                if (bus->host_pdev)
-                       return bcma_of_get_irq(bus->host_pdev, core, num);
+                       return bcma_of_get_irq(&bus->host_pdev->dev, core, num);
                return 0;
        case BCMA_HOSTTYPE_SDIO:
                return 0;
@@ -253,7 +253,8 @@ void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
                if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) {
                        core->dma_dev = &bus->host_pdev->dev;
                        core->dev.parent = &bus->host_pdev->dev;
-                       bcma_of_fill_device(bus->host_pdev, core);
+                       if (core->dev.parent)
+                               bcma_of_fill_device(core->dev.parent, core);
                } else {
                        core->dev.dma_mask = &core->dev.coherent_dma_mask;
                        core->dma_dev = &core->dev;
@@ -633,8 +634,11 @@ static int bcma_device_probe(struct device *dev)
                                               drv);
        int err = 0;
 
+       get_device(dev);
        if (adrv->probe)
                err = adrv->probe(core);
+       if (err)
+               put_device(dev);
 
        return err;
 }
@@ -647,6 +651,7 @@ static int bcma_device_remove(struct device *dev)
 
        if (adrv->remove)
                adrv->remove(core);
+       put_device(dev);
 
        return 0;
 }
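Two independent changes are folded into the bcma hunks: the OF helpers now take a bare struct device (they never needed the platform_device wrapper), and the bus's probe/remove methods now pin the device while a driver is bound. The refcount pattern in isolation, with do_driver_probe()/do_driver_remove() standing in for adrv->probe()/adrv->remove():

static int example_probe(struct device *dev)
{
        int err;

        get_device(dev);                /* hold a reference while bound */
        err = do_driver_probe(dev);
        if (err)
                put_device(dev);        /* probe failed: drop it again */
        return err;
}

static int example_remove(struct device *dev)
{
        do_driver_remove(dev);
        put_device(dev);                /* balances the ref taken in probe */
        return 0;
}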
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 3e70a9c5d79d5a50ba3be228cb4174d1f66c98d7..4eb5a80e5d81395b5c6d0ab358c7c40b46a7fc58 100644
@@ -2467,14 +2467,12 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos)
        struct net_device *dev;
 
        prio = rt_tos2priority(tos);
-       dev = ndev->priv_flags & IFF_802_1Q_VLAN ?
-               vlan_dev_real_dev(ndev) : ndev;
-
+       dev = is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
        if (dev->num_tc)
                return netdev_get_prio_tc_map(dev, prio);
 
 #if IS_ENABLED(CONFIG_VLAN_8021Q)
-       if (ndev->priv_flags & IFF_802_1Q_VLAN)
+       if (is_vlan_dev(ndev))
                return (vlan_dev_get_egress_qos_mask(ndev, prio) &
                        VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 #endif
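The cma.c change replaces open-coded priv_flags tests with the is_vlan_dev() helper from <linux/if_vlan.h>, which boils down to roughly:

static inline bool is_vlan_dev(struct net_device *dev)
{
        return !!(dev->priv_flags & IFF_802_1Q_VLAN);
}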
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index b3ef47c3ab732a9588cd79f8d5ac2f24c68366ac..31803b3671040b6595e73c77d7905fbc778994e1 100644
@@ -689,7 +689,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
        struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
        struct mlx5_ib_cq *cq = to_mcq(ibcq);
-       void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
+       void __iomem *uar_page = mdev->priv.uar->map;
        unsigned long irq_flags;
        int ret = 0;
 
@@ -704,9 +704,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
        mlx5_cq_arm(&cq->mcq,
                    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
-                   uar_page,
-                   MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
-                   to_mcq(ibcq)->mcq.cons_index);
+                   uar_page, to_mcq(ibcq)->mcq.cons_index);
 
        return ret;
 }
@@ -790,7 +788,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
        MLX5_SET(cqc, cqc, log_page_size,
                 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 
-       *index = to_mucontext(context)->uuari.uars[0].index;
+       *index = to_mucontext(context)->bfregi.sys_pages[0];
 
        if (ucmd.cqe_comp_en == 1) {
                if (unlikely((*cqe_size != 64) ||
@@ -886,7 +884,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
        MLX5_SET(cqc, cqc, log_page_size,
                 cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 
-       *index = dev->mdev->priv.uuari.uars[0].index;
+       *index = dev->mdev->priv.uar->index;
 
        return 0;
 
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d566f673883348f58a2d9b0672313aa5560af80c..9d8535385bb8bded0e6dc5161396034cdf524b9b 100644
@@ -53,6 +53,7 @@
 #include <linux/in.h>
 #include <linux/etherdevice.h>
 #include <linux/mlx5/fs.h>
+#include <linux/mlx5/vport.h>
 #include "mlx5_ib.h"
 
 #define DRIVER_NAME "mlx5_ib"
@@ -672,17 +673,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                        1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
        }
 
-       if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
-                       uhw->outlen)) {
-               resp.mlx5_ib_support_multi_pkt_send_wqes =
-                       MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
-               resp.response_length +=
-                       sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
-       }
-
-       if (field_avail(typeof(resp), reserved, uhw->outlen))
-               resp.response_length += sizeof(resp.reserved);
-
        if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
                resp.cqe_comp_caps.max_num =
                        MLX5_CAP_GEN(dev->mdev, cqe_compression) ?
@@ -706,6 +696,17 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                resp.response_length += sizeof(resp.packet_pacing_caps);
        }
 
+       if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
+                       uhw->outlen)) {
+               resp.mlx5_ib_support_multi_pkt_send_wqes =
+                       MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
+               resp.response_length +=
+                       sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
+       }
+
+       if (field_avail(typeof(resp), reserved, uhw->outlen))
+               resp.response_length += sizeof(resp.reserved);
+
        if (uhw->outlen) {
                err = ib_copy_to_udata(uhw, &resp, resp.response_length);
 
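The two moved blocks illustrate the extensible-response pattern used throughout mlx5_ib_query_device(): each optional field is copied out only if the user buffer is big enough to hold it, and resp.response_length grows field by field, so the reorder presumably keeps the append order in sync with the field order of the uapi response struct. field_avail() is the driver's local macro for that size test; it is roughly:

#define field_avail(typ, fld, sz) \
        (offsetof(typ, fld) + sizeof(((typ *)0)->fld) <= (sz))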
@@ -992,6 +993,86 @@ out:
        return err;
 }
 
+static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
+{
+       mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
+                   caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
+}
+
+static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
+                            struct mlx5_ib_alloc_ucontext_req_v2 *req,
+                            u32 *num_sys_pages)
+{
+       int uars_per_sys_page;
+       int bfregs_per_sys_page;
+       int ref_bfregs = req->total_num_bfregs;
+
+       if (req->total_num_bfregs == 0)
+               return -EINVAL;
+
+       BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
+       BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
+
+       if (req->total_num_bfregs > MLX5_MAX_BFREGS)
+               return -ENOMEM;
+
+       uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
+       bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
+       req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
+       *num_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
+
+       if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
+               return -EINVAL;
+
+       mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, alloated %d, using %d sys pages\n",
+                   MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
+                   lib_uar_4k ? "yes" : "no", ref_bfregs,
+                   req->total_num_bfregs, *num_sys_pages);
+
+       return 0;
+}
+
+static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+{
+       struct mlx5_bfreg_info *bfregi;
+       int err;
+       int i;
+
+       bfregi = &context->bfregi;
+       for (i = 0; i < bfregi->num_sys_pages; i++) {
+               err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
+               if (err)
+                       goto error;
+
+               mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
+       }
+       return 0;
+
+error:
+       for (--i; i >= 0; i--)
+               if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
+                       mlx5_ib_warn(dev, "failed to free uar %d\n", i);
+
+       return err;
+}
+
+static int deallocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+{
+       struct mlx5_bfreg_info *bfregi;
+       int err;
+       int i;
+
+       bfregi = &context->bfregi;
+       for (i = 0; i < bfregi->num_sys_pages; i++) {
+               err = mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
+               if (err) {
+                       mlx5_ib_warn(dev, "failed to free uar %d\n", i);
+                       return err;
+               }
+       }
+       return 0;
+}
+
 static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
                                                  struct ib_udata *udata)
 {
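calc_total_bfregs() above rounds the user's request up to whole system pages of blue-flame registers. A worked example under assumed values (64 KB kernel pages, 4 KB hardware UARs, and MLX5_NON_FP_BFREGS_PER_UAR == 2); none of these numbers come from the diff itself:

/* uars_per_sys_page   = 64K / 4K       = 16   (get_uars_per_sys_page())
 * bfregs_per_sys_page = 16 * 2         = 32
 * total_num_bfregs    = ALIGN(100, 32) = 128  (user asked for 100)
 * num_sys_pages       = 128 / 32       = 4
 */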
@@ -999,17 +1080,13 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        struct mlx5_ib_alloc_ucontext_req_v2 req = {};
        struct mlx5_ib_alloc_ucontext_resp resp = {};
        struct mlx5_ib_ucontext *context;
-       struct mlx5_uuar_info *uuari;
-       struct mlx5_uar *uars;
-       int gross_uuars;
-       int num_uars;
+       struct mlx5_bfreg_info *bfregi;
        int ver;
-       int uuarn;
        int err;
-       int i;
        size_t reqlen;
        size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
                                     max_cqe_version);
+       bool lib_uar_4k;
 
        if (!dev->ib_active)
                return ERR_PTR(-EAGAIN);
@@ -1032,27 +1109,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        if (req.flags)
                return ERR_PTR(-EINVAL);
 
-       if (req.total_num_uuars > MLX5_MAX_UUARS)
-               return ERR_PTR(-ENOMEM);
-
-       if (req.total_num_uuars == 0)
-               return ERR_PTR(-EINVAL);
-
        if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
                return ERR_PTR(-EOPNOTSUPP);
 
-       if (reqlen > sizeof(req) &&
-           !ib_is_udata_cleared(udata, sizeof(req),
-                                reqlen - sizeof(req)))
-               return ERR_PTR(-EOPNOTSUPP);
-
-       req.total_num_uuars = ALIGN(req.total_num_uuars,
-                                   MLX5_NON_FP_BF_REGS_PER_PAGE);
-       if (req.num_low_latency_uuars > req.total_num_uuars - 1)
+       req.total_num_bfregs = ALIGN(req.total_num_bfregs,
+                                   MLX5_NON_FP_BFREGS_PER_UAR);
+       if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
                return ERR_PTR(-EINVAL);
 
-       num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
-       gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
        resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
        if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
                resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
@@ -1065,6 +1129,10 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        resp.cqe_version = min_t(__u8,
                                 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
                                 req.max_cqe_version);
+       resp.log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+                               MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
+       resp.num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+                                       MLX5_CAP_GEN(dev->mdev, num_of_uars_per_page) : 1;
        resp.response_length = min(offsetof(typeof(resp), response_length) +
                                   sizeof(resp.response_length), udata->outlen);
 
@@ -1072,58 +1140,58 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        if (!context)
                return ERR_PTR(-ENOMEM);
 
-       uuari = &context->uuari;
-       mutex_init(&uuari->lock);
-       uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
-       if (!uars) {
-               err = -ENOMEM;
+       lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
+       bfregi = &context->bfregi;
+
+       /* updates req->total_num_bfregs */
+       err = calc_total_bfregs(dev, lib_uar_4k, &req, &bfregi->num_sys_pages);
+       if (err)
                goto out_ctx;
-       }
 
-       uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
-                               sizeof(*uuari->bitmap),
+       mutex_init(&bfregi->lock);
+       bfregi->lib_uar_4k = lib_uar_4k;
+       bfregi->count = kcalloc(req.total_num_bfregs, sizeof(*bfregi->count),
                                GFP_KERNEL);
-       if (!uuari->bitmap) {
+       if (!bfregi->count) {
                err = -ENOMEM;
-               goto out_uar_ctx;
-       }
-       /*
-        * clear all fast path uuars
-        */
-       for (i = 0; i < gross_uuars; i++) {
-               uuarn = i & 3;
-               if (uuarn == 2 || uuarn == 3)
-                       set_bit(i, uuari->bitmap);
+               goto out_ctx;
        }
 
-       uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
-       if (!uuari->count) {
+       bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
+                                   sizeof(*bfregi->sys_pages),
+                                   GFP_KERNEL);
+       if (!bfregi->sys_pages) {
                err = -ENOMEM;
-               goto out_bitmap;
+               goto out_count;
        }
 
-       for (i = 0; i < num_uars; i++) {
-               err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
-               if (err)
-                       goto out_count;
-       }
+       err = allocate_uars(dev, context);
+       if (err)
+               goto out_sys_pages;
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
 #endif
 
+       context->upd_xlt_page = __get_free_page(GFP_KERNEL);
+       if (!context->upd_xlt_page) {
+               err = -ENOMEM;
+               goto out_uars;
+       }
+       mutex_init(&context->upd_xlt_page_mutex);
+
        if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
                err = mlx5_core_alloc_transport_domain(dev->mdev,
                                                       &context->tdn);
                if (err)
-                       goto out_uars;
+                       goto out_page;
        }
 
        INIT_LIST_HEAD(&context->vma_private_list);
        INIT_LIST_HEAD(&context->db_page_list);
        mutex_init(&context->db_page_mutex);
 
-       resp.tot_uuars = req.total_num_uuars;
+       resp.tot_bfregs = req.total_num_bfregs;
        resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
 
        if (field_avail(typeof(resp), cqe_version, udata->outlen))
@@ -1135,32 +1203,46 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
                resp.response_length += sizeof(resp.cmds_supp_uhw);
        }
 
+       if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
+               if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
+                       mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
+                       resp.eth_min_inline++;
+               }
+               resp.response_length += sizeof(resp.eth_min_inline);
+       }
+
        /*
         * We don't want to expose information from the PCI bar that is located
         * after 4096 bytes, so if the arch only supports larger pages, let's
         * pretend we don't support reading the HCA's core clock. This is also
         * forced by mmap function.
         */
-       if (PAGE_SIZE <= 4096 &&
-           field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
-               resp.comp_mask |=
-                       MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
-               resp.hca_core_clock_offset =
-                       offsetof(struct mlx5_init_seg, internal_timer_h) %
-                       PAGE_SIZE;
+       if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
+               if (PAGE_SIZE <= 4096) {
+                       resp.comp_mask |=
+                               MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
+                       resp.hca_core_clock_offset =
+                               offsetof(struct mlx5_init_seg, internal_timer_h) % PAGE_SIZE;
+               }
                resp.response_length += sizeof(resp.hca_core_clock_offset) +
                                        sizeof(resp.reserved2);
        }
 
+       if (field_avail(typeof(resp), log_uar_size, udata->outlen))
+               resp.response_length += sizeof(resp.log_uar_size);
+
+       if (field_avail(typeof(resp), num_uars_per_page, udata->outlen))
+               resp.response_length += sizeof(resp.num_uars_per_page);
+
        err = ib_copy_to_udata(udata, &resp, resp.response_length);
        if (err)
                goto out_td;
 
-       uuari->ver = ver;
-       uuari->num_low_latency_uuars = req.num_low_latency_uuars;
-       uuari->uars = uars;
-       uuari->num_uars = num_uars;
+       bfregi->ver = ver;
+       bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
        context->cqe_version = resp.cqe_version;
+       context->lib_caps = req.lib_caps;
+       print_lib_caps(dev, context->lib_caps);
 
        return &context->ibucontext;
 
@@ -1168,20 +1250,21 @@ out_td:
        if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
                mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
 
+out_page:
+       free_page(context->upd_xlt_page);
+
 out_uars:
-       for (i--; i >= 0; i--)
-               mlx5_cmd_free_uar(dev->mdev, uars[i].index);
-out_count:
-       kfree(uuari->count);
+       deallocate_uars(dev, context);
 
-out_bitmap:
-       kfree(uuari->bitmap);
+out_sys_pages:
+       kfree(bfregi->sys_pages);
 
-out_uar_ctx:
-       kfree(uars);
+out_count:
+       kfree(bfregi->count);
 
 out_ctx:
        kfree(context);
+
        return ERR_PTR(err);
 }
 
@@ -1189,28 +1272,31 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
        struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
        struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
-       struct mlx5_uuar_info *uuari = &context->uuari;
-       int i;
+       struct mlx5_bfreg_info *bfregi;
 
+       bfregi = &context->bfregi;
        if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
                mlx5_core_dealloc_transport_domain(dev->mdev, context->tdn);
 
-       for (i = 0; i < uuari->num_uars; i++) {
-               if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
-                       mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
-       }
-
-       kfree(uuari->count);
-       kfree(uuari->bitmap);
-       kfree(uuari->uars);
+       free_page(context->upd_xlt_page);
+       deallocate_uars(dev, context);
+       kfree(bfregi->sys_pages);
+       kfree(bfregi->count);
        kfree(context);
 
        return 0;
 }
 
-static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
+static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
+                                struct mlx5_bfreg_info *bfregi,
+                                int idx)
 {
-       return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
+       int fw_uars_per_page;
+
+       fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
+
+       return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) +
+                       bfregi->sys_pages[idx] / fw_uars_per_page;
 }
 
 static int get_command(unsigned long offset)
@@ -1365,11 +1451,23 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
                    struct vm_area_struct *vma,
                    struct mlx5_ib_ucontext *context)
 {
-       struct mlx5_uuar_info *uuari = &context->uuari;
+       struct mlx5_bfreg_info *bfregi = &context->bfregi;
        int err;
        unsigned long idx;
        phys_addr_t pfn, pa;
        pgprot_t prot;
+       int uars_per_page;
+
+       if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+               return -EINVAL;
+
+       uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
+       idx = get_index(vma->vm_pgoff);
+       if (idx % uars_per_page ||
+           idx * uars_per_page >= bfregi->num_sys_pages) {
+               mlx5_ib_warn(dev, "invalid uar index %lu\n", idx);
+               return -EINVAL;
+       }
 
        switch (cmd) {
        case MLX5_IB_MMAP_WC_PAGE:
@@ -1392,14 +1490,7 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
                return -EINVAL;
        }
 
-       if (vma->vm_end - vma->vm_start != PAGE_SIZE)
-               return -EINVAL;
-
-       idx = get_index(vma->vm_pgoff);
-       if (idx >= uuari->num_uars)
-               return -EINVAL;
-
-       pfn = uar_index2pfn(dev, uuari->uars[idx].index);
+       pfn = uar_index2pfn(dev, bfregi, idx);
        mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
 
        vma->vm_page_prot = prot;
@@ -1622,9 +1713,9 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
 
                if (ib_spec->eth.mask.vlan_tag) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
-                                vlan_tag, 1);
+                                cvlan_tag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-                                vlan_tag, 1);
+                                cvlan_tag, 1);
 
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
@@ -3060,8 +3151,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        if (mlx5_use_mad_ifc(dev))
                get_ext_port_caps(dev);
 
-       MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
-
        if (!mlx5_lag_is_active(mdev))
                name = "mlx5_%d";
        else
@@ -3237,9 +3326,21 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        if (err)
                goto err_odp;
 
+       dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
+       if (!dev->mdev->priv.uar)
+               goto err_q_cnt;
+
+       err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
+       if (err)
+               goto err_uar_page;
+
+       err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
+       if (err)
+               goto err_bfreg;
+
        err = ib_register_device(&dev->ib_dev, NULL);
        if (err)
-               goto err_q_cnt;
+               goto err_fp_bfreg;
 
        err = create_umr_res(dev);
        if (err)
@@ -3262,6 +3363,15 @@ err_umrc:
 err_dev:
        ib_unregister_device(&dev->ib_dev);
 
+err_fp_bfreg:
+       mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+
+err_bfreg:
+       mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+
+err_uar_page:
+       mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+
 err_q_cnt:
        mlx5_ib_dealloc_q_counters(dev);
 
@@ -3293,6 +3403,9 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 
        mlx5_remove_netdev_notifier(dev);
        ib_unregister_device(&dev->ib_dev);
+       mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+       mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+       mlx5_put_uars_page(dev->mdev, mdev->priv.uar);
        mlx5_ib_dealloc_q_counters(dev);
        destroy_umrc_res(dev);
        mlx5_ib_odp_remove_one(dev);
@@ -3307,6 +3420,9 @@ static struct mlx5_interface mlx5_ib_interface = {
        .add            = mlx5_ib_add,
        .remove         = mlx5_ib_remove,
        .event          = mlx5_ib_event,
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       .pfault         = mlx5_ib_pfault,
+#endif
        .protocol       = MLX5_INTERFACE_PROTOCOL_IB,
 };
 
@@ -3317,25 +3433,14 @@ static int __init mlx5_ib_init(void)
        if (deprecated_prof_sel != 2)
                pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");
 
-       err = mlx5_ib_odp_init();
-       if (err)
-               return err;
-
        err = mlx5_register_interface(&mlx5_ib_interface);
-       if (err)
-               goto clean_odp;
-
-       return err;
 
-clean_odp:
-       mlx5_ib_odp_cleanup();
        return err;
 }
 
 static void __exit mlx5_ib_cleanup(void)
 {
        mlx5_unregister_interface(&mlx5_ib_interface);
-       mlx5_ib_odp_cleanup();
 }
 
 module_init(mlx5_ib_init);
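One thing worth tracing through the mlx5_ib_add()/mlx5_ib_remove() hunks above is the pairing of the new UAR/bfreg resources: the error labels unwind in exactly the reverse order of acquisition, and remove repeats the same teardown. Distilled into a self-contained sketch (the error code on a failed mlx5_get_uars_page() is chosen for illustration):

static int example_alloc_bfregs(struct mlx5_ib_dev *dev)
{
        int err;

        dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
        if (!dev->mdev->priv.uar)
                return -ENOMEM;

        err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
        if (err)
                goto err_uar_page;

        err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
        if (err)
                goto err_bfreg;

        return 0;

err_bfreg:
        mlx5_free_bfreg(dev->mdev, &dev->bfreg);
err_uar_page:
        mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
        return err;
}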
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 6851357c16f4ea30f5a51ae5265549f088bd1b48..778d8a18925f909d7f65e8e47f329f0f4ad8f44f 100644
@@ -159,7 +159,7 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
        unsigned long umem_page_shift = ilog2(umem->page_size);
        int shift = page_shift - umem_page_shift;
        int mask = (1 << shift) - 1;
-       int i, k;
+       int i, k, idx;
        u64 cur = 0;
        u64 base;
        int len;
@@ -185,18 +185,36 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                len = sg_dma_len(sg) >> umem_page_shift;
                base = sg_dma_address(sg);
-               for (k = 0; k < len; k++) {
+
+               /* Skip elements below offset */
+               if (i + len < offset << shift) {
+                       i += len;
+                       continue;
+               }
+
+               /* Skip pages below offset */
+               if (i < offset << shift) {
+                       k = (offset << shift) - i;
+                       i = offset << shift;
+               } else {
+                       k = 0;
+               }
+
+               for (; k < len; k++) {
                        if (!(i & mask)) {
                                cur = base + (k << umem_page_shift);
                                cur |= access_flags;
+                               idx = (i >> shift) - offset;
 
-                               pas[i >> shift] = cpu_to_be64(cur);
+                               pas[idx] = cpu_to_be64(cur);
                                mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
-                                           i >> shift, be64_to_cpu(pas[i >> shift]));
-                       }  else
-                               mlx5_ib_dbg(dev, "=====> 0x%llx\n",
-                                           base + (k << umem_page_shift));
+                                           i >> shift, be64_to_cpu(pas[idx]));
+                       }
                        i++;
+
+                       /* Stop after num_pages reached */
+                       if (i >> shift >= offset + num_pages)
+                               return;
                }
        }
 }
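__mlx5_ib_populate_pas() now fills only a window of the translation table: whole scatterlist elements below the offset are skipped first, then individual pages, output slots are rebased by subtracting the offset, and the walk stops once num_pages device pages have been emitted. A self-contained model of the indexing, not the driver code itself (i counts umem pages; shift compresses them into larger device pages):

static void model_populate(u64 *out, const u64 *umem_page_addr,
                           int total_umem_pages, int shift,
                           int offset, int num_pages)
{
        int mask = (1 << shift) - 1;
        int i;

        for (i = 0; i < total_umem_pages; i++) {
                if ((i >> shift) < offset)
                        continue;               /* below the window */
                if ((i >> shift) >= offset + num_pages)
                        return;                 /* past the window */
                if (!(i & mask))                /* first sub-page of a device page */
                        out[(i >> shift) - offset] = umem_page_addr[i];
        }
}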
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 6c6057eb60ea8a37f608dfffb6b707c6dc0d5fa6..e1a4b93dce6b5957ac0a0ab4941e178e290c0e89 100644
@@ -90,7 +90,6 @@ enum mlx5_ib_latency_class {
        MLX5_IB_LATENCY_CLASS_LOW,
        MLX5_IB_LATENCY_CLASS_MEDIUM,
        MLX5_IB_LATENCY_CLASS_HIGH,
-       MLX5_IB_LATENCY_CLASS_FAST_PATH
 };
 
 enum mlx5_ib_mad_ifc_flags {
@@ -100,7 +99,7 @@ enum mlx5_ib_mad_ifc_flags {
 };
 
 enum {
-       MLX5_CROSS_CHANNEL_UUAR         = 0,
+       MLX5_CROSS_CHANNEL_BFREG         = 0,
 };
 
 enum {
@@ -120,11 +119,16 @@ struct mlx5_ib_ucontext {
        /* protect doorbell record alloc/free
         */
        struct mutex            db_page_mutex;
-       struct mlx5_uuar_info   uuari;
+       struct mlx5_bfreg_info  bfregi;
        u8                      cqe_version;
        /* Transport Domain number */
        u32                     tdn;
        struct list_head        vma_private_list;
+
+       unsigned long           upd_xlt_page;
+       /* protect ODP/KSM */
+       struct mutex            upd_xlt_page_mutex;
+       u64                     lib_caps;
 };
 
 static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -174,13 +178,12 @@ struct mlx5_ib_flow_db {
  * enum ib_send_flags and enum ib_qp_type for low-level driver
  */
 
-#define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START
-#define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
-#define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)
-
-#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 3)
-#define MLX5_IB_SEND_UMR_UPDATE_PD             (IB_SEND_RESERVED_START << 4)
-#define MLX5_IB_SEND_UMR_UPDATE_ACCESS         IB_SEND_RESERVED_END
+#define MLX5_IB_SEND_UMR_ENABLE_MR            (IB_SEND_RESERVED_START << 0)
+#define MLX5_IB_SEND_UMR_DISABLE_MR           (IB_SEND_RESERVED_START << 1)
+#define MLX5_IB_SEND_UMR_FAIL_IF_FREE         (IB_SEND_RESERVED_START << 2)
+#define MLX5_IB_SEND_UMR_UPDATE_XLT           (IB_SEND_RESERVED_START << 3)
+#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 4)
+#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS       IB_SEND_RESERVED_END
 
 #define MLX5_IB_QPT_REG_UMR    IB_QPT_RESERVED1
 /*
@@ -190,6 +193,16 @@ struct mlx5_ib_flow_db {
 #define MLX5_IB_QPT_HW_GSI     IB_QPT_RESERVED2
 #define MLX5_IB_WR_UMR         IB_WR_RESERVED1
 
+#define MLX5_IB_UMR_OCTOWORD          16
+#define MLX5_IB_UMR_XLT_ALIGNMENT      64
+
+#define MLX5_IB_UPD_XLT_ZAP          BIT(0)
+#define MLX5_IB_UPD_XLT_ENABLE       BIT(1)
+#define MLX5_IB_UPD_XLT_ATOMIC       BIT(2)
+#define MLX5_IB_UPD_XLT_ADDR         BIT(3)
+#define MLX5_IB_UPD_XLT_PD           BIT(4)
+#define MLX5_IB_UPD_XLT_ACCESS       BIT(5)
+
 /* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
  *
  * These flags are intended for internal use by the mlx5_ib driver, and they
@@ -264,29 +277,6 @@ struct mlx5_ib_rwq_ind_table {
        u32                     rqtn;
 };
 
-/*
- * Connect-IB can trigger up to four concurrent pagefaults
- * per-QP.
- */
-enum mlx5_ib_pagefault_context {
-       MLX5_IB_PAGEFAULT_RESPONDER_READ,
-       MLX5_IB_PAGEFAULT_REQUESTOR_READ,
-       MLX5_IB_PAGEFAULT_RESPONDER_WRITE,
-       MLX5_IB_PAGEFAULT_REQUESTOR_WRITE,
-       MLX5_IB_PAGEFAULT_CONTEXTS
-};
-
-static inline enum mlx5_ib_pagefault_context
-       mlx5_ib_get_pagefault_context(struct mlx5_pagefault *pagefault)
-{
-       return pagefault->flags & (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE);
-}
-
-struct mlx5_ib_pfault {
-       struct work_struct      work;
-       struct mlx5_pagefault   mpfault;
-};
-
 struct mlx5_ib_ubuffer {
        struct ib_umem         *umem;
        int                     buf_size;
@@ -334,6 +324,12 @@ struct mlx5_ib_raw_packet_qp {
        struct mlx5_ib_rq rq;
 };
 
+struct mlx5_bf {
+       int                     buf_size;
+       unsigned long           offset;
+       struct mlx5_sq_bfreg   *bfreg;
+};
+
 struct mlx5_ib_qp {
        struct ib_qp            ibqp;
        union {
@@ -359,33 +355,19 @@ struct mlx5_ib_qp {
        int                     wq_sig;
        int                     scat_cqe;
        int                     max_inline_data;
-       struct mlx5_bf         *bf;
+       struct mlx5_bf          bf;
        int                     has_rq;
 
        /* only for user space QPs. For kernel
         * we have it from the bf object
         */
-       int                     uuarn;
+       int                     bfregn;
 
        int                     create_type;
 
        /* Store signature errors */
        bool                    signature_en;
 
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-       /*
-        * A flag that is true for QP's that are in a state that doesn't
-        * allow page faults, and shouldn't schedule any more faults.
-        */
-       int                     disable_page_faults;
-       /*
-        * The disable_page_faults_lock protects a QP's disable_page_faults
-        * field, allowing for a thread to atomically check whether the QP
-        * allows page faults, and if so schedule a page fault.
-        */
-       spinlock_t              disable_page_faults_lock;
-       struct mlx5_ib_pfault   pagefaults[MLX5_IB_PAGEFAULT_CONTEXTS];
-#endif
        struct list_head        qps_list;
        struct list_head        cq_recv_list;
        struct list_head        cq_send_list;
@@ -414,13 +396,11 @@ enum mlx5_ib_qp_flags {
 
 struct mlx5_umr_wr {
        struct ib_send_wr               wr;
-       union {
-               u64                     virt_addr;
-               u64                     offset;
-       } target;
+       u64                             virt_addr;
+       u64                             offset;
        struct ib_pd                   *pd;
        unsigned int                    page_shift;
-       unsigned int                    npages;
+       unsigned int                    xlt_size;
        u64                             length;
        int                             access_flags;
        u32                             mkey;
@@ -617,7 +597,6 @@ struct mlx5_ib_dev {
        struct ib_device                ib_dev;
        struct mlx5_core_dev            *mdev;
        struct mlx5_roce                roce;
-       MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
        int                             num_ports;
        /* serialize update of capability mask
         */
@@ -634,6 +613,7 @@ struct mlx5_ib_dev {
        int                             fill_delay;
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        struct ib_odp_caps      odp_caps;
+       u64                     odp_max_size;
        /*
         * Sleepable RCU that prevents destruction of MRs while they are still
         * being used by a page fault handler.
@@ -646,6 +626,8 @@ struct mlx5_ib_dev {
        struct list_head        qp_list;
        /* Array with num_ports elements */
        struct mlx5_ib_port     *port;
+       struct mlx5_sq_bfreg     bfreg;
+       struct mlx5_sq_bfreg     fp_bfreg;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -787,8 +769,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
                               struct ib_udata *udata);
 int mlx5_ib_dealloc_mw(struct ib_mw *mw);
-int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
-                      int npages, int zap);
+int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
+                      int page_shift, int flags);
 int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                          u64 length, u64 virt_addr, int access_flags,
                          struct ib_pd *pd, struct ib_udata *udata);
@@ -857,18 +839,13 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
 int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-extern struct workqueue_struct *mlx5_ib_page_fault_wq;
-
 void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
-void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
-                              struct mlx5_ib_pfault *pfault);
-void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
+void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
+                   struct mlx5_pagefault *pfault);
 int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
 void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev);
 int __init mlx5_ib_odp_init(void);
 void mlx5_ib_odp_cleanup(void);
-void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
-void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
 void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
                              unsigned long end);
 #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
@@ -877,13 +854,10 @@ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
        return;
 }
 
-static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)                {}
 static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
 static inline void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)   {}
 static inline int mlx5_ib_odp_init(void) { return 0; }
 static inline void mlx5_ib_odp_cleanup(void)                           {}
-static inline void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp) {}
-static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)  {}
 
 #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
 
@@ -1001,4 +975,17 @@ static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
 
        return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
 }
+
+static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
+{
+       return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+                               MLX5_UARS_IN_PAGE : 1;
+}
+
+static inline int get_num_uars(struct mlx5_ib_dev *dev,
+                              struct mlx5_bfreg_info *bfregi)
+{
+       return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) * bfregi->num_sys_pages;
+}
+
 #endif /* MLX5_IB_H */
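The header rework replaces the per-operation UMR send flags with a single mlx5_ib_update_xlt() entry point driven by MLX5_IB_UPD_XLT_* bits. Plausible combinations, offered as typical usage rather than an exhaustive list (the reg_umr() hunk further below uses the first form):

/* initial registration: populate the XLT and enable the MR */
err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
                         MLX5_IB_UPD_XLT_ENABLE);

/* ODP invalidation: zap a range, atomically w.r.t. page faults */
err = mlx5_ib_update_xlt(mr, idx, npages, page_shift,
                         MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC);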
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 8f608debe1415a43887527a87c7d03ab93222972..8cf2a67f9fb0bad7bdbd201e7f359a859c782b80 100644
@@ -46,14 +46,9 @@ enum {
 };
 
 #define MLX5_UMR_ALIGN 2048
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-static __be64 mlx5_ib_update_mtt_emergency_buffer[
-               MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
-       __aligned(MLX5_UMR_ALIGN);
-static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
-#endif
 
 static int clean_mr(struct mlx5_ib_mr *mr);
+static int use_umr(struct mlx5_ib_dev *dev, int order);
 
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
@@ -134,6 +129,7 @@ static void reg_mr_callback(int status, void *context)
                return;
        }
 
+       mr->mmkey.type = MLX5_MKEY_MR;
        spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
        key = dev->mdev->priv.mkey_key++;
        spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
@@ -629,7 +625,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
                ent->dev = dev;
 
                if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
-                   (mlx5_core_is_pf(dev->mdev)))
+                   mlx5_core_is_pf(dev->mdev) &&
+                   use_umr(dev, ent->order))
                        limit = dev->mdev->profile->mr_cache[i].limit;
                else
                        limit = 0;
@@ -732,6 +729,7 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
                goto err_in;
 
        kfree(in);
+       mr->mmkey.type = MLX5_MKEY_MR;
        mr->ibmr.lkey = mr->mmkey.key;
        mr->ibmr.rkey = mr->mmkey.key;
        mr->umem = NULL;
@@ -757,94 +755,13 @@ static int get_octo_len(u64 addr, u64 len, int page_size)
        return (npages + 1) / 2;
 }
 
-static int use_umr(int order)
+static int use_umr(struct mlx5_ib_dev *dev, int order)
 {
+       if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
+               return order < MAX_MR_CACHE_ENTRIES + 2;
        return order <= MLX5_MAX_UMR_SHIFT;
 }
 
-static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
-                         int npages, int page_shift, int *size,
-                         __be64 **mr_pas, dma_addr_t *dma)
-{
-       __be64 *pas;
-       struct device *ddev = dev->ib_dev.dma_device;
-
-       /*
-        * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
-        * To avoid copying garbage after the pas array, we allocate
-        * a little more.
-        */
-       *size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
-       *mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
-       if (!(*mr_pas))
-               return -ENOMEM;
-
-       pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN);
-       mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
-       /* Clear padding after the actual pages. */
-       memset(pas + npages, 0, *size - npages * sizeof(u64));
-
-       *dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE);
-       if (dma_mapping_error(ddev, *dma)) {
-               kfree(*mr_pas);
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr,
-                               struct ib_sge *sg, u64 dma, int n, u32 key,
-                               int page_shift)
-{
-       struct mlx5_ib_dev *dev = to_mdev(pd->device);
-       struct mlx5_umr_wr *umrwr = umr_wr(wr);
-
-       sg->addr = dma;
-       sg->length = ALIGN(sizeof(u64) * n, 64);
-       sg->lkey = dev->umrc.pd->local_dma_lkey;
-
-       wr->next = NULL;
-       wr->sg_list = sg;
-       if (n)
-               wr->num_sge = 1;
-       else
-               wr->num_sge = 0;
-
-       wr->opcode = MLX5_IB_WR_UMR;
-
-       umrwr->npages = n;
-       umrwr->page_shift = page_shift;
-       umrwr->mkey = key;
-}
-
-static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
-                            struct ib_sge *sg, u64 dma, int n, u32 key,
-                            int page_shift, u64 virt_addr, u64 len,
-                            int access_flags)
-{
-       struct mlx5_umr_wr *umrwr = umr_wr(wr);
-
-       prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift);
-
-       wr->send_flags = 0;
-
-       umrwr->target.virt_addr = virt_addr;
-       umrwr->length = len;
-       umrwr->access_flags = access_flags;
-       umrwr->pd = pd;
-}
-
-static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
-                              struct ib_send_wr *wr, u32 key)
-{
-       struct mlx5_umr_wr *umrwr = umr_wr(wr);
-
-       wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
-       wr->opcode = MLX5_IB_WR_UMR;
-       umrwr->mkey = key;
-}
-
 static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
                       int access_flags, struct ib_umem **umem,
                       int *npages, int *page_shift, int *ncont,
@@ -891,21 +808,39 @@ static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
        init_completion(&context->done);
 }
 
+static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
+                                 struct mlx5_umr_wr *umrwr)
+{
+       struct umr_common *umrc = &dev->umrc;
+       struct ib_send_wr *bad;
+       int err;
+       struct mlx5_ib_umr_context umr_context;
+
+       mlx5_ib_init_umr_context(&umr_context);
+       umrwr->wr.wr_cqe = &umr_context.cqe;
+
+       down(&umrc->sem);
+       err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
+       if (err) {
+               mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
+       } else {
+               wait_for_completion(&umr_context.done);
+               if (umr_context.status != IB_WC_SUCCESS) {
+                       mlx5_ib_warn(dev, "reg umr failed (%u)\n",
+                                    umr_context.status);
+                       err = -EFAULT;
+               }
+       }
+       up(&umrc->sem);
+       return err;
+}
+
 static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
                                  u64 virt_addr, u64 len, int npages,
                                  int page_shift, int order, int access_flags)
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
-       struct device *ddev = dev->ib_dev.dma_device;
-       struct umr_common *umrc = &dev->umrc;
-       struct mlx5_ib_umr_context umr_context;
-       struct mlx5_umr_wr umrwr = {};
-       struct ib_send_wr *bad;
        struct mlx5_ib_mr *mr;
-       struct ib_sge sg;
-       int size;
-       __be64 *mr_pas;
-       dma_addr_t dma;
        int err = 0;
        int i;
 
@@ -924,173 +859,174 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
        if (!mr)
                return ERR_PTR(-EAGAIN);
 
-       err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas,
-                            &dma);
-       if (err)
-               goto free_mr;
-
-       mlx5_ib_init_umr_context(&umr_context);
-
-       umrwr.wr.wr_cqe = &umr_context.cqe;
-       prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
-                        page_shift, virt_addr, len, access_flags);
-
-       down(&umrc->sem);
-       err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
-       if (err) {
-               mlx5_ib_warn(dev, "post send failed, err %d\n", err);
-               goto unmap_dma;
-       } else {
-               wait_for_completion(&umr_context.done);
-               if (umr_context.status != IB_WC_SUCCESS) {
-                       mlx5_ib_warn(dev, "reg umr failed\n");
-                       err = -EFAULT;
-               }
-       }
-
+       mr->ibmr.pd = pd;
+       mr->umem = umem;
+       mr->access_flags = access_flags;
+       mr->desc_size = sizeof(struct mlx5_mtt);
        mr->mmkey.iova = virt_addr;
        mr->mmkey.size = len;
        mr->mmkey.pd = to_mpd(pd)->pdn;
 
-       mr->live = 1;
+       err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
+                                MLX5_IB_UPD_XLT_ENABLE);
 
-unmap_dma:
-       up(&umrc->sem);
-       dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
-
-       kfree(mr_pas);
-
-free_mr:
        if (err) {
                free_cached_mr(dev, mr);
                return ERR_PTR(err);
        }
 
+       mr->live = 1;
+
        return mr;
 }
 
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
-                      int zap)
+static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
+                              void *xlt, int page_shift, size_t size,
+                              int flags)
 {
        struct mlx5_ib_dev *dev = mr->dev;
-       struct device *ddev = dev->ib_dev.dma_device;
-       struct umr_common *umrc = &dev->umrc;
-       struct mlx5_ib_umr_context umr_context;
        struct ib_umem *umem = mr->umem;
+
+       npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);
+
+       if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
+               __mlx5_ib_populate_pas(dev, umem, page_shift,
+                                      idx, npages, xlt,
+                                      MLX5_IB_MTT_PRESENT);
+               /* Clear padding after the pages
+                * brought from the umem.
+                */
+               memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
+                      size - npages * sizeof(struct mlx5_mtt));
+       }
+
+       return npages;
+}
+
+#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
+                           MLX5_UMR_MTT_ALIGNMENT)
+#define MLX5_SPARE_UMR_CHUNK 0x10000
+
+int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
+                      int page_shift, int flags)
+{
+       struct mlx5_ib_dev *dev = mr->dev;
+       struct device *ddev = dev->ib_dev.dma_device;
+       struct mlx5_ib_ucontext *uctx = NULL;
        int size;
-       __be64 *pas;
+       void *xlt;
        dma_addr_t dma;
-       struct ib_send_wr *bad;
        struct mlx5_umr_wr wr;
        struct ib_sge sg;
        int err = 0;
-       const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
-       const int page_index_mask = page_index_alignment - 1;
+       int desc_size = sizeof(struct mlx5_mtt);
+       const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
+       const int page_mask = page_align - 1;
        size_t pages_mapped = 0;
        size_t pages_to_map = 0;
        size_t pages_iter = 0;
-       int use_emergency_buf = 0;
+       gfp_t gfp;
 
        /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
-        * so we need to align the offset and length accordingly */
-       if (start_page_index & page_index_mask) {
-               npages += start_page_index & page_index_mask;
-               start_page_index &= ~page_index_mask;
+        * so we need to align the offset and length accordingly
+        */
+       if (idx & page_mask) {
+               npages += idx & page_mask;
+               idx &= ~page_mask;
        }
 
-       pages_to_map = ALIGN(npages, page_index_alignment);
+       gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
+       gfp |= __GFP_ZERO | __GFP_NOWARN;
 
-       if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
-               return -EINVAL;
+       pages_to_map = ALIGN(npages, page_align);
+       size = desc_size * pages_to_map;
+       size = min_t(int, size, MLX5_MAX_UMR_CHUNK);
+
+       xlt = (void *)__get_free_pages(gfp, get_order(size));
+       if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
+               mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d, falling back to spare UMR allocation of %d bytes\n",
+                           size, get_order(size), MLX5_SPARE_UMR_CHUNK);
 
-       size = sizeof(u64) * pages_to_map;
-       size = min_t(int, PAGE_SIZE, size);
-       /* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
-        * code, when we are called from an invalidation. The pas buffer must
-        * be 2k-aligned for Connect-IB. */
-       pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
-       if (!pas) {
-               mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
-               pas = mlx5_ib_update_mtt_emergency_buffer;
-               size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
-               use_emergency_buf = 1;
-               mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
-               memset(pas, 0, size);
+               size = MLX5_SPARE_UMR_CHUNK;
+               xlt = (void *)__get_free_pages(gfp, get_order(size));
        }
-       pages_iter = size / sizeof(u64);
-       dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
+
+       if (!xlt) {
+               uctx = to_mucontext(mr->ibmr.uobject->context);
+               mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
+               size = PAGE_SIZE;
+               xlt = (void *)uctx->upd_xlt_page;
+               mutex_lock(&uctx->upd_xlt_page_mutex);
+               memset(xlt, 0, size);
+       }
+       pages_iter = size / desc_size;
+       dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
        if (dma_mapping_error(ddev, dma)) {
-               mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
+               mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
                err = -ENOMEM;
-               goto free_pas;
+               goto free_xlt;
        }
 
+       sg.addr = dma;
+       sg.lkey = dev->umrc.pd->local_dma_lkey;
+
+       memset(&wr, 0, sizeof(wr));
+       wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
+       if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
+               wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+       wr.wr.sg_list = &sg;
+       wr.wr.num_sge = 1;
+       wr.wr.opcode = MLX5_IB_WR_UMR;
+
+       wr.pd = mr->ibmr.pd;
+       wr.mkey = mr->mmkey.key;
+       wr.length = mr->mmkey.size;
+       wr.virt_addr = mr->mmkey.iova;
+       wr.access_flags = mr->access_flags;
+       wr.page_shift = page_shift;
+
        for (pages_mapped = 0;
             pages_mapped < pages_to_map && !err;
-            pages_mapped += pages_iter, start_page_index += pages_iter) {
+            pages_mapped += pages_iter, idx += pages_iter) {
                dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
-
-               npages = min_t(size_t,
-                              pages_iter,
-                              ib_umem_num_pages(umem) - start_page_index);
-
-               if (!zap) {
-                       __mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
-                                              start_page_index, npages, pas,
-                                              MLX5_IB_MTT_PRESENT);
-                       /* Clear padding after the pages brought from the
-                        * umem. */
-                       memset(pas + npages, 0, size - npages * sizeof(u64));
-               }
+               npages = populate_xlt(mr, idx, pages_iter, xlt,
+                                     page_shift, size, flags);
 
                dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
 
-               mlx5_ib_init_umr_context(&umr_context);
-
-               memset(&wr, 0, sizeof(wr));
-               wr.wr.wr_cqe = &umr_context.cqe;
-
-               sg.addr = dma;
-               sg.length = ALIGN(npages * sizeof(u64),
-                               MLX5_UMR_MTT_ALIGNMENT);
-               sg.lkey = dev->umrc.pd->local_dma_lkey;
+               sg.length = ALIGN(npages * desc_size,
+                                 MLX5_UMR_MTT_ALIGNMENT);
+
+               if (pages_mapped + pages_iter >= pages_to_map) {
+                       if (flags & MLX5_IB_UPD_XLT_ENABLE)
+                               wr.wr.send_flags |=
+                                       MLX5_IB_SEND_UMR_ENABLE_MR |
+                                       MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
+                                       MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
+                       if (flags & MLX5_IB_UPD_XLT_PD ||
+                           flags & MLX5_IB_UPD_XLT_ACCESS)
+                               wr.wr.send_flags |=
+                                       MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
+                       if (flags & MLX5_IB_UPD_XLT_ADDR)
+                               wr.wr.send_flags |=
+                                       MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
+               }
 
-               wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
-                               MLX5_IB_SEND_UMR_UPDATE_MTT;
-               wr.wr.sg_list = &sg;
-               wr.wr.num_sge = 1;
-               wr.wr.opcode = MLX5_IB_WR_UMR;
-               wr.npages = sg.length / sizeof(u64);
-               wr.page_shift = PAGE_SHIFT;
-               wr.mkey = mr->mmkey.key;
-               wr.target.offset = start_page_index;
+               wr.offset = idx * desc_size;
+               wr.xlt_size = sg.length;
 
-               down(&umrc->sem);
-               err = ib_post_send(umrc->qp, &wr.wr, &bad);
-               if (err) {
-                       mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
-               } else {
-                       wait_for_completion(&umr_context.done);
-                       if (umr_context.status != IB_WC_SUCCESS) {
-                               mlx5_ib_err(dev, "UMR completion failed, code %d\n",
-                                           umr_context.status);
-                               err = -EFAULT;
-                       }
-               }
-               up(&umrc->sem);
+               err = mlx5_ib_post_send_wait(dev, &wr);
        }
        dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
 
-free_pas:
-       if (!use_emergency_buf)
-               free_page((unsigned long)pas);
+free_xlt:
+       if (uctx)
+               mutex_unlock(&uctx->upd_xlt_page_mutex);
        else
-               mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
+               free_pages((unsigned long)xlt, get_order(size));
 
        return err;
 }
-#endif
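A worked sketch of the chunking above (not part of the patch; it
assumes the usual values MLX5_UMR_MTT_ALIGNMENT = 64 and
sizeof(struct mlx5_mtt) = 8, so page_align = 8 and page_mask = 7):

        /* Alignment step of mlx5_ib_update_xlt() with assumed constants. */
        static void xlt_alignment_example(void)
        {
                u64 idx = 13;           /* first descriptor to update */
                int npages = 3;         /* descriptors requested */
                const int page_mask = 7;

                if (idx & page_mask) {
                        npages += idx & page_mask;      /* 3 + 5 = 8 */
                        idx &= ~page_mask;              /* 13 -> 8 */
                }
                /* The update now spans one full 64-byte UMR copy unit. */
        }

The allocation strategy also keeps the old invariant: with
MLX5_IB_UPD_XLT_ATOMIC (the invalidation path) the chunk is allocated
with GFP_ATOMIC, so the function never recurses into page reclaim, and
the per-context emergency page is the last resort when even the spare
chunk cannot be allocated.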
 
 /*
  * If ibmr is NULL it will be allocated by reg_create.
@@ -1122,8 +1058,9 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
                goto err_1;
        }
        pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
-       mlx5_ib_populate_pas(dev, umem, page_shift, pas,
-                            pg_cap ? MLX5_IB_MTT_PRESENT : 0);
+       if (!(access_flags & IB_ACCESS_ON_DEMAND))
+               mlx5_ib_populate_pas(dev, umem, page_shift, pas,
+                                    pg_cap ? MLX5_IB_MTT_PRESENT : 0);
 
        /* The pg_access bit allows setting the access flags
         * in the page list submitted with the command. */
@@ -1153,6 +1090,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
                mlx5_ib_warn(dev, "create mkey failed\n");
                goto err_2;
        }
+       mr->mmkey.type = MLX5_MKEY_MR;
        mr->umem = umem;
        mr->dev = dev;
        mr->live = 1;
@@ -1204,14 +1142,15 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
         if (err < 0)
                return ERR_PTR(err);
 
-       if (use_umr(order)) {
+       if (use_umr(dev, order)) {
                mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
                             order, access_flags);
                if (PTR_ERR(mr) == -EAGAIN) {
                        mlx5_ib_dbg(dev, "cache empty for order %d", order);
                        mr = NULL;
                }
-       } else if (access_flags & IB_ACCESS_ON_DEMAND) {
+       } else if (access_flags & IB_ACCESS_ON_DEMAND &&
+                  !MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
                err = -EINVAL;
                pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
                goto error;
@@ -1248,106 +1187,39 @@ error:
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
        struct mlx5_core_dev *mdev = dev->mdev;
-       struct umr_common *umrc = &dev->umrc;
-       struct mlx5_ib_umr_context umr_context;
        struct mlx5_umr_wr umrwr = {};
-       struct ib_send_wr *bad;
-       int err;
 
        if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
                return 0;
 
-       mlx5_ib_init_umr_context(&umr_context);
-
-       umrwr.wr.wr_cqe = &umr_context.cqe;
-       prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);
+       umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
+                             MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+       umrwr.wr.opcode = MLX5_IB_WR_UMR;
+       umrwr.mkey = mr->mmkey.key;
 
-       down(&umrc->sem);
-       err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
-       if (err) {
-               up(&umrc->sem);
-               mlx5_ib_dbg(dev, "err %d\n", err);
-               goto error;
-       } else {
-               wait_for_completion(&umr_context.done);
-               up(&umrc->sem);
-       }
-       if (umr_context.status != IB_WC_SUCCESS) {
-               mlx5_ib_warn(dev, "unreg umr failed\n");
-               err = -EFAULT;
-               goto error;
-       }
-       return 0;
-
-error:
-       return err;
+       return mlx5_ib_post_send_wait(dev, &umrwr);
 }
 
-static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
-                    u64 length, int npages, int page_shift, int order,
+static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
                     int access_flags, int flags)
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
-       struct device *ddev = dev->ib_dev.dma_device;
-       struct mlx5_ib_umr_context umr_context;
-       struct ib_send_wr *bad;
        struct mlx5_umr_wr umrwr = {};
-       struct ib_sge sg;
-       struct umr_common *umrc = &dev->umrc;
-       dma_addr_t dma = 0;
-       __be64 *mr_pas = NULL;
-       int size;
        int err;
 
-       mlx5_ib_init_umr_context(&umr_context);
-
-       umrwr.wr.wr_cqe = &umr_context.cqe;
        umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
 
-       if (flags & IB_MR_REREG_TRANS) {
-               err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size,
-                                    &mr_pas, &dma);
-               if (err)
-                       return err;
+       umrwr.wr.opcode = MLX5_IB_WR_UMR;
+       umrwr.mkey = mr->mmkey.key;
 
-               umrwr.target.virt_addr = virt_addr;
-               umrwr.length = length;
-               umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
-       }
-
-       prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
-                           page_shift);
-
-       if (flags & IB_MR_REREG_PD) {
+       if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
                umrwr.pd = pd;
-               umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD;
-       }
-
-       if (flags & IB_MR_REREG_ACCESS) {
                umrwr.access_flags = access_flags;
-               umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
+               umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
        }
 
-       /* post send request to UMR QP */
-       down(&umrc->sem);
-       err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
-
-       if (err) {
-               mlx5_ib_warn(dev, "post send failed, err %d\n", err);
-       } else {
-               wait_for_completion(&umr_context.done);
-               if (umr_context.status != IB_WC_SUCCESS) {
-                       mlx5_ib_warn(dev, "reg umr failed (%u)\n",
-                                    umr_context.status);
-                       err = -EFAULT;
-               }
-       }
+       err = mlx5_ib_post_send_wait(dev, &umrwr);
 
-       up(&umrc->sem);
-       if (flags & IB_MR_REREG_TRANS) {
-               dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
-               kfree(mr_pas);
-       }
        return err;
 }
 
@@ -1364,6 +1236,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
        u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
        u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
        int page_shift = 0;
+       int upd_flags = 0;
        int npages = 0;
        int ncont = 0;
        int order = 0;
@@ -1372,6 +1245,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
        mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
                    start, virt_addr, length, access_flags);
 
+       atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
+
        if (flags != IB_MR_REREG_PD) {
                /*
                 * Replace umem. This needs to be done whether or not UMR is
@@ -1382,7 +1257,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
                                  &npages, &page_shift, &ncont, &order);
                if (err < 0) {
-                       mr->umem = NULL;
+                       clean_mr(mr);
                        return err;
                }
        }
@@ -1414,32 +1289,37 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                /*
                 * Send a UMR WQE
                 */
-               err = rereg_umr(pd, mr, addr, len, npages, page_shift,
-                               order, access_flags, flags);
+               mr->ibmr.pd = pd;
+               mr->access_flags = access_flags;
+               mr->mmkey.iova = addr;
+               mr->mmkey.size = len;
+               mr->mmkey.pd = to_mpd(pd)->pdn;
+
+               if (flags & IB_MR_REREG_TRANS) {
+                       upd_flags = MLX5_IB_UPD_XLT_ADDR;
+                       if (flags & IB_MR_REREG_PD)
+                               upd_flags |= MLX5_IB_UPD_XLT_PD;
+                       if (flags & IB_MR_REREG_ACCESS)
+                               upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
+                       err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
+                                                upd_flags);
+               } else {
+                       err = rereg_umr(pd, mr, access_flags, flags);
+               }
+
                if (err) {
                        mlx5_ib_warn(dev, "Failed to rereg UMR\n");
+                       ib_umem_release(mr->umem);
+                       clean_mr(mr);
                        return err;
                }
        }
 
-       if (flags & IB_MR_REREG_PD) {
-               ib_mr->pd = pd;
-               mr->mmkey.pd = to_mpd(pd)->pdn;
-       }
+       set_mr_fileds(dev, mr, npages, len, access_flags);
 
-       if (flags & IB_MR_REREG_ACCESS)
-               mr->access_flags = access_flags;
-
-       if (flags & IB_MR_REREG_TRANS) {
-               atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
-               set_mr_fileds(dev, mr, npages, len, access_flags);
-               mr->mmkey.iova = addr;
-               mr->mmkey.size = len;
-       }
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        update_odp_mr(mr);
 #endif
-
        return 0;
 }
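In outline, the flag translation above is (a summary sketch, not part
of the patch):

        /*
         * IB_MR_REREG_TRANS            ->  MLX5_IB_UPD_XLT_ADDR
         *   + IB_MR_REREG_PD           ->  | MLX5_IB_UPD_XLT_PD
         *   + IB_MR_REREG_ACCESS       ->  | MLX5_IB_UPD_XLT_ACCESS
         * no IB_MR_REREG_TRANS         ->  rereg_umr() posts a single
         *                                  UMR WQE updating PD/access
         *                                  with no translation payload
         */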
 
@@ -1603,11 +1483,11 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
                mr->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
                MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
                err = mlx5_alloc_priv_descs(pd->device, mr,
-                                           ndescs, sizeof(u64));
+                                           ndescs, sizeof(struct mlx5_mtt));
                if (err)
                        goto err_free_in;
 
-               mr->desc_size = sizeof(u64);
+               mr->desc_size = sizeof(struct mlx5_mtt);
                mr->max_descs = ndescs;
        } else if (mr_type == IB_MR_TYPE_SG_GAPS) {
                mr->access_mode = MLX5_MKC_ACCESS_MODE_KLMS;
@@ -1656,6 +1536,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
        if (err)
                goto err_destroy_psv;
 
+       mr->mmkey.type = MLX5_MKEY_MR;
        mr->ibmr.lkey = mr->mmkey.key;
        mr->ibmr.rkey = mr->mmkey.key;
        mr->umem = NULL;
@@ -1736,6 +1617,7 @@ struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
        if (err)
                goto free;
 
+       mw->mmkey.type = MLX5_MKEY_MW;
        mw->ibmw.rkey = mw->mmkey.key;
 
        resp.response_length = min(offsetof(typeof(resp), response_length) +
index cacb631a7b0a9890ad9e9a7a0fffd522cdcf1734..e5bc267aca73383667a58e9914f863d28efdea70 100644 (file)
  * a pagefault. */
 #define MMU_NOTIFIER_TIMEOUT 1000
 
-struct workqueue_struct *mlx5_ib_page_fault_wq;
-
 void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
                              unsigned long end)
 {
        struct mlx5_ib_mr *mr;
-       const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT / sizeof(u64)) - 1;
+       const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
+                                   sizeof(struct mlx5_mtt)) - 1;
        u64 idx = 0, blk_start_idx = 0;
        int in_block = 0;
        u64 addr;
@@ -90,16 +89,21 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
                        u64 umr_offset = idx & umr_block_mask;
 
                        if (in_block && umr_offset == 0) {
-                               mlx5_ib_update_mtt(mr, blk_start_idx,
-                                                  idx - blk_start_idx, 1);
+                               mlx5_ib_update_xlt(mr, blk_start_idx,
+                                                  idx - blk_start_idx,
+                                                  PAGE_SHIFT,
+                                                  MLX5_IB_UPD_XLT_ZAP |
+                                                  MLX5_IB_UPD_XLT_ATOMIC);
                                in_block = 0;
                        }
                }
        }
        if (in_block)
-               mlx5_ib_update_mtt(mr, blk_start_idx, idx - blk_start_idx + 1,
-                                  1);
-
+               mlx5_ib_update_xlt(mr, blk_start_idx,
+                                  idx - blk_start_idx + 1,
+                                  PAGE_SHIFT,
+                                  MLX5_IB_UPD_XLT_ZAP |
+                                  MLX5_IB_UPD_XLT_ATOMIC);
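As a worked example of the blocking above (not part of the patch; same
assumed sizes as before, MLX5_UMR_MTT_ALIGNMENT = 64 and 8-byte
struct mlx5_mtt entries):

        /*
         * umr_block_mask = (64 / 8) - 1 = 7.
         * Pages 13..17 invalidated:
         *   - idx reaches 16 (umr_offset == 0): flush zap of [13, 15]
         *   - loop ends while in_block:         flush zap of [16, 17]
         * mlx5_ib_update_xlt() then widens each flush to whole
         * 64-byte UMR copy units, as shown in the mr.c hunk above.
         */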
        /*
         * We are now sure that the device will not access the
         * memory. We can safely unmap it, and mark it as dirty if
@@ -120,6 +124,11 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 
        caps->general_caps = IB_ODP_SUPPORT;
 
+       if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
+               dev->odp_max_size = U64_MAX;
+       else
+               dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
+
        if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
                caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
 
@@ -135,6 +144,9 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
        if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
                caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
 
+       if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
+               caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
+
        return;
 }
 
@@ -143,46 +155,51 @@ static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
 {
        u32 base_key = mlx5_base_mkey(key);
        struct mlx5_core_mkey *mmkey = __mlx5_mr_lookup(dev->mdev, base_key);
-       struct mlx5_ib_mr *mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+       struct mlx5_ib_mr *mr;
 
-       if (!mmkey || mmkey->key != key || !mr->live)
+       if (!mmkey || mmkey->key != key || mmkey->type != MLX5_MKEY_MR)
+               return NULL;
+
+       mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+
+       if (!mr->live)
                return NULL;
 
        return container_of(mmkey, struct mlx5_ib_mr, mmkey);
 }
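The reworked lookup relies on the mmkey.type tag that this patch sets
at creation time (MLX5_MKEY_MR for MRs, MLX5_MKEY_MW for memory
windows, as seen in the mr.c hunks above). The old code ran
container_of() before knowing the key was really an MR, so a matching
key that belonged to a memory window produced a bogus mlx5_ib_mr
pointer whose ->live field was then read. In outline:

        /*
         * create MR:  mr->mmkey.type = MLX5_MKEY_MR;
         * create MW:  mw->mmkey.type = MLX5_MKEY_MW;
         * lookup:     only keys tagged MLX5_MKEY_MR may be
         *             container_of()'d to struct mlx5_ib_mr.
         */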
 
-static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
-                                     struct mlx5_ib_pfault *pfault,
+static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
+                                     struct mlx5_pagefault *pfault,
                                      int error)
 {
-       struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
-       u32 qpn = qp->trans_qp.base.mqp.qpn;
+       int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
+                    pfault->wqe.wq_num : pfault->token;
        int ret = mlx5_core_page_fault_resume(dev->mdev,
-                                             qpn,
-                                             pfault->mpfault.flags,
+                                             pfault->token,
+                                             wq_num,
+                                             pfault->type,
                                              error);
        if (ret)
-               pr_err("Failed to resolve the page fault on QP 0x%x\n", qpn);
+               mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x\n",
+                           wq_num);
 }
 
 /*
- * Handle a single data segment in a page-fault WQE.
+ * Handle a single data segment in a page-fault WQE or RDMA region.
  *
- * Returns number of pages retrieved on success. The caller will continue to
+ * Returns number of pages retrieved on success. The caller may continue to
  * the next data segment.
  * Can return the following error codes:
  * -EAGAIN to designate a temporary error. The caller will abort handling the
  *  page fault and resolve it.
  * -EFAULT when there's an error mapping the requested pages. The caller will
- *  abort the page fault handling and possibly move the QP to an error state.
- * On other errors the QP should also be closed with an error.
+ *  abort the page fault handling.
  */
-static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
-                                        struct mlx5_ib_pfault *pfault,
+static int pagefault_single_data_segment(struct mlx5_ib_dev *mib_dev,
                                         u32 key, u64 io_virt, size_t bcnt,
+                                        u32 *bytes_committed,
                                         u32 *bytes_mapped)
 {
-       struct mlx5_ib_dev *mib_dev = to_mdev(qp->ibqp.pd->device);
        int srcu_key;
        unsigned int current_seq;
        u64 start_idx;
@@ -208,12 +225,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
                         key);
                if (bytes_mapped)
                        *bytes_mapped +=
-                               (bcnt - pfault->mpfault.bytes_committed);
-               goto srcu_unlock;
-       }
-       if (mr->ibmr.pd != qp->ibqp.pd) {
-               pr_err("Page-fault with different PDs for QP and MR.\n");
-               ret = -EFAULT;
+                               (bcnt - *bytes_committed);
                goto srcu_unlock;
        }
 
@@ -229,8 +241,8 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
         * in all iterations (in iteration 2 and above,
         * bytes_committed == 0).
         */
-       io_virt += pfault->mpfault.bytes_committed;
-       bcnt -= pfault->mpfault.bytes_committed;
+       io_virt += *bytes_committed;
+       bcnt -= *bytes_committed;
 
        start_idx = (io_virt - (mr->mmkey.iova & PAGE_MASK)) >> PAGE_SHIFT;
 
@@ -251,7 +263,9 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
                         * this MR, since ib_umem_odp_map_dma_pages already
                         * checks this.
                         */
-                       ret = mlx5_ib_update_mtt(mr, start_idx, npages, 0);
+                       ret = mlx5_ib_update_xlt(mr, start_idx, npages,
+                                                PAGE_SHIFT,
+                                                MLX5_IB_UPD_XLT_ATOMIC);
                } else {
                        ret = -EAGAIN;
                }
@@ -287,7 +301,7 @@ srcu_unlock:
                }
        }
        srcu_read_unlock(&mib_dev->mr_srcu, srcu_key);
-       pfault->mpfault.bytes_committed = 0;
+       *bytes_committed = 0;
        return ret ? ret : npages;
 }
 
@@ -309,8 +323,9 @@ srcu_unlock:
  * Returns the number of pages loaded if positive, zero for an empty WQE, or a
  * negative error code.
  */
-static int pagefault_data_segments(struct mlx5_ib_qp *qp,
-                                  struct mlx5_ib_pfault *pfault, void *wqe,
+static int pagefault_data_segments(struct mlx5_ib_dev *dev,
+                                  struct mlx5_pagefault *pfault,
+                                  struct mlx5_ib_qp *qp, void *wqe,
                                   void *wqe_end, u32 *bytes_mapped,
                                   u32 *total_wqe_bytes, int receive_queue)
 {
@@ -354,22 +369,23 @@ static int pagefault_data_segments(struct mlx5_ib_qp *qp,
 
                if (!inline_segment && total_wqe_bytes) {
                        *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
-                                       pfault->mpfault.bytes_committed);
+                                       pfault->bytes_committed);
                }
 
                /* A zero length data segment designates a length of 2GB. */
                if (bcnt == 0)
                        bcnt = 1U << 31;
 
-               if (inline_segment || bcnt <= pfault->mpfault.bytes_committed) {
-                       pfault->mpfault.bytes_committed -=
+               if (inline_segment || bcnt <= pfault->bytes_committed) {
+                       pfault->bytes_committed -=
                                min_t(size_t, bcnt,
-                                     pfault->mpfault.bytes_committed);
+                                     pfault->bytes_committed);
                        continue;
                }
 
-               ret = pagefault_single_data_segment(qp, pfault, key, io_virt,
-                                                   bcnt, bytes_mapped);
+               ret = pagefault_single_data_segment(dev, key, io_virt, bcnt,
+                                                   &pfault->bytes_committed,
+                                                   bytes_mapped);
                if (ret < 0)
                        break;
                npages += ret;
@@ -378,17 +394,29 @@ static int pagefault_data_segments(struct mlx5_ib_qp *qp,
        return ret < 0 ? ret : npages;
 }
 
+static const u32 mlx5_ib_odp_opcode_cap[] = {
+       [MLX5_OPCODE_SEND]             = IB_ODP_SUPPORT_SEND,
+       [MLX5_OPCODE_SEND_IMM]         = IB_ODP_SUPPORT_SEND,
+       [MLX5_OPCODE_SEND_INVAL]       = IB_ODP_SUPPORT_SEND,
+       [MLX5_OPCODE_RDMA_WRITE]       = IB_ODP_SUPPORT_WRITE,
+       [MLX5_OPCODE_RDMA_WRITE_IMM]   = IB_ODP_SUPPORT_WRITE,
+       [MLX5_OPCODE_RDMA_READ]        = IB_ODP_SUPPORT_READ,
+       [MLX5_OPCODE_ATOMIC_CS]        = IB_ODP_SUPPORT_ATOMIC,
+       [MLX5_OPCODE_ATOMIC_FA]        = IB_ODP_SUPPORT_ATOMIC,
+};
+
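This table replaces the nested per-transport switch below: the handler
selects transport_caps by QP type and then needs only one
bounds-checked test. As a worked example (a hypothetical scenario, not
from the patch): on an RC QP whose rc_odp_caps advertise SEND, WRITE
and READ but not atomics,

        /*
         * opcode = MLX5_OPCODE_ATOMIC_CS
         * mlx5_ib_odp_opcode_cap[opcode] = IB_ODP_SUPPORT_ATOMIC
         * (transport_caps & IB_ODP_SUPPORT_ATOMIC) == 0  ->  -EFAULT
         */

so unsupported opcodes, including any beyond the end of the array, are
rejected in one place.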
 /*
  * Parse initiator WQE. Advances the wqe pointer to point at the
  * scatter-gather list, and set wqe_end to the end of the WQE.
  */
 static int mlx5_ib_mr_initiator_pfault_handler(
-       struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
-       void **wqe, void **wqe_end, int wqe_length)
+       struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
+       struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
 {
-       struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
        struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
-       u16 wqe_index = pfault->mpfault.wqe.wqe_index;
+       u16 wqe_index = pfault->wqe.wqe_index;
+       u32 transport_caps;
+       struct mlx5_base_av *av;
        unsigned ds, opcode;
 #if defined(DEBUG)
        u32 ctrl_wqe_index, ctrl_qpn;
@@ -434,53 +462,49 @@ static int mlx5_ib_mr_initiator_pfault_handler(
 
        opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
                 MLX5_WQE_CTRL_OPCODE_MASK;
+
        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
-               switch (opcode) {
-               case MLX5_OPCODE_SEND:
-               case MLX5_OPCODE_SEND_IMM:
-               case MLX5_OPCODE_SEND_INVAL:
-                       if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
-                             IB_ODP_SUPPORT_SEND))
-                               goto invalid_transport_or_opcode;
-                       break;
-               case MLX5_OPCODE_RDMA_WRITE:
-               case MLX5_OPCODE_RDMA_WRITE_IMM:
-                       if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
-                             IB_ODP_SUPPORT_WRITE))
-                               goto invalid_transport_or_opcode;
-                       *wqe += sizeof(struct mlx5_wqe_raddr_seg);
-                       break;
-               case MLX5_OPCODE_RDMA_READ:
-                       if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
-                             IB_ODP_SUPPORT_READ))
-                               goto invalid_transport_or_opcode;
-                       *wqe += sizeof(struct mlx5_wqe_raddr_seg);
-                       break;
-               default:
-                       goto invalid_transport_or_opcode;
-               }
+               transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
                break;
        case IB_QPT_UD:
-               switch (opcode) {
-               case MLX5_OPCODE_SEND:
-               case MLX5_OPCODE_SEND_IMM:
-                       if (!(dev->odp_caps.per_transport_caps.ud_odp_caps &
-                             IB_ODP_SUPPORT_SEND))
-                               goto invalid_transport_or_opcode;
-                       *wqe += sizeof(struct mlx5_wqe_datagram_seg);
-                       break;
-               default:
-                       goto invalid_transport_or_opcode;
-               }
+               transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
                break;
        default:
-invalid_transport_or_opcode:
-               mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode or transport. transport: 0x%x opcode: 0x%x.\n",
-                           qp->ibqp.qp_type, opcode);
+               mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
+                           qp->ibqp.qp_type);
                return -EFAULT;
        }
 
+       if (unlikely(opcode >= sizeof(mlx5_ib_odp_opcode_cap) /
+           sizeof(mlx5_ib_odp_opcode_cap[0]) ||
+           !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
+               mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
+                           opcode);
+               return -EFAULT;
+       }
+
+       if (qp->ibqp.qp_type != IB_QPT_RC) {
+               av = *wqe;
+               if (av->dqp_dct & be32_to_cpu(MLX5_WQE_AV_EXT))
+                       *wqe += sizeof(struct mlx5_av);
+               else
+                       *wqe += sizeof(struct mlx5_base_av);
+       }
+
+       switch (opcode) {
+       case MLX5_OPCODE_RDMA_WRITE:
+       case MLX5_OPCODE_RDMA_WRITE_IMM:
+       case MLX5_OPCODE_RDMA_READ:
+               *wqe += sizeof(struct mlx5_wqe_raddr_seg);
+               break;
+       case MLX5_OPCODE_ATOMIC_CS:
+       case MLX5_OPCODE_ATOMIC_FA:
+               *wqe += sizeof(struct mlx5_wqe_raddr_seg);
+               *wqe += sizeof(struct mlx5_wqe_atomic_seg);
+               break;
+       }
+
        return 0;
 }
 
@@ -489,10 +513,9 @@ invalid_transport_or_opcode:
  * scatter-gather list, and set wqe_end to the end of the WQE.
  */
 static int mlx5_ib_mr_responder_pfault_handler(
-       struct mlx5_ib_qp *qp, struct mlx5_ib_pfault *pfault,
-       void **wqe, void **wqe_end, int wqe_length)
+       struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
+       struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
 {
-       struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
        struct mlx5_ib_wq *wq = &qp->rq;
        int wqe_size = 1 << wq->wqe_shift;
 
@@ -529,70 +552,83 @@ invalid_transport_or_opcode:
        return 0;
 }
 
-static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp,
-                                         struct mlx5_ib_pfault *pfault)
+static struct mlx5_ib_qp *mlx5_ib_odp_find_qp(struct mlx5_ib_dev *dev,
+                                             u32 wq_num)
+{
+       struct mlx5_core_qp *mqp = __mlx5_qp_lookup(dev->mdev, wq_num);
+
+       if (!mqp) {
+               mlx5_ib_err(dev, "QPN 0x%6x not found\n", wq_num);
+               return NULL;
+       }
+
+       return to_mibqp(mqp);
+}
+
+static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
+                                         struct mlx5_pagefault *pfault)
 {
-       struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
        int ret;
        void *wqe, *wqe_end;
        u32 bytes_mapped, total_wqe_bytes;
        char *buffer = NULL;
-       int resume_with_error = 0;
-       u16 wqe_index = pfault->mpfault.wqe.wqe_index;
-       int requestor = pfault->mpfault.flags & MLX5_PFAULT_REQUESTOR;
-       u32 qpn = qp->trans_qp.base.mqp.qpn;
+       int resume_with_error = 1;
+       u16 wqe_index = pfault->wqe.wqe_index;
+       int requestor = pfault->type & MLX5_PFAULT_REQUESTOR;
+       struct mlx5_ib_qp *qp;
 
        buffer = (char *)__get_free_page(GFP_KERNEL);
        if (!buffer) {
                mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
-               resume_with_error = 1;
                goto resolve_page_fault;
        }
 
+       qp = mlx5_ib_odp_find_qp(dev, pfault->wqe.wq_num);
+       if (!qp)
+               goto resolve_page_fault;
+
        ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
                                    PAGE_SIZE, &qp->trans_qp.base);
        if (ret < 0) {
-               mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%x, wqe_index=%x, qpn=%x\n",
-                           -ret, wqe_index, qpn);
-               resume_with_error = 1;
+               mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%d, wqe_index=%x, qpn=%x\n",
+                           ret, wqe_index, pfault->token);
                goto resolve_page_fault;
        }
 
        wqe = buffer;
        if (requestor)
-               ret = mlx5_ib_mr_initiator_pfault_handler(qp, pfault, &wqe,
+               ret = mlx5_ib_mr_initiator_pfault_handler(dev, pfault, qp, &wqe,
                                                          &wqe_end, ret);
        else
-               ret = mlx5_ib_mr_responder_pfault_handler(qp, pfault, &wqe,
+               ret = mlx5_ib_mr_responder_pfault_handler(dev, pfault, qp, &wqe,
                                                          &wqe_end, ret);
-       if (ret < 0) {
-               resume_with_error = 1;
+       if (ret < 0)
                goto resolve_page_fault;
-       }
 
        if (wqe >= wqe_end) {
                mlx5_ib_err(dev, "ODP fault on invalid WQE.\n");
-               resume_with_error = 1;
                goto resolve_page_fault;
        }
 
-       ret = pagefault_data_segments(qp, pfault, wqe, wqe_end, &bytes_mapped,
-                                     &total_wqe_bytes, !requestor);
+       ret = pagefault_data_segments(dev, pfault, qp, wqe, wqe_end,
+                                     &bytes_mapped, &total_wqe_bytes,
+                                     !requestor);
        if (ret == -EAGAIN) {
+               resume_with_error = 0;
                goto resolve_page_fault;
        } else if (ret < 0 || total_wqe_bytes > bytes_mapped) {
-               mlx5_ib_err(dev, "Error getting user pages for page fault. Error: 0x%x\n",
-                           -ret);
-               resume_with_error = 1;
+               if (ret != -ENOENT)
+                       mlx5_ib_err(dev, "Error getting user pages for page fault. Error: %d\n",
+                                   ret);
                goto resolve_page_fault;
        }
 
+       resume_with_error = 0;
 resolve_page_fault:
-       mlx5_ib_page_fault_resume(qp, pfault, resume_with_error);
-       mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, flags: 0x%x\n",
-                   qpn, resume_with_error,
-                   pfault->mpfault.flags);
-
+       mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
+       mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
+                   pfault->token, resume_with_error,
+                   pfault->type);
        free_page((unsigned long)buffer);
 }
 
@@ -602,15 +638,14 @@ static int pages_in_range(u64 address, u32 length)
                (address & PAGE_MASK)) >> PAGE_SHIFT;
 }
 
-static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
-                                          struct mlx5_ib_pfault *pfault)
+static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
+                                          struct mlx5_pagefault *pfault)
 {
-       struct mlx5_pagefault *mpfault = &pfault->mpfault;
        u64 address;
        u32 length;
-       u32 prefetch_len = mpfault->bytes_committed;
+       u32 prefetch_len = pfault->bytes_committed;
        int prefetch_activated = 0;
-       u32 rkey = mpfault->rdma.r_key;
+       u32 rkey = pfault->rdma.r_key;
        int ret;
 
        /* The RDMA responder handler handles the page fault in two parts.
@@ -619,38 +654,40 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
         * prefetches more pages. The second operation cannot use the pfault
         * context and therefore uses the dummy_pfault context allocated on
         * the stack */
-       struct mlx5_ib_pfault dummy_pfault = {};
-
-       dummy_pfault.mpfault.bytes_committed = 0;
+       pfault->rdma.rdma_va += pfault->bytes_committed;
+       pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
+                                        pfault->rdma.rdma_op_len);
+       pfault->bytes_committed = 0;
 
-       mpfault->rdma.rdma_va += mpfault->bytes_committed;
-       mpfault->rdma.rdma_op_len -= min(mpfault->bytes_committed,
-                                        mpfault->rdma.rdma_op_len);
-       mpfault->bytes_committed = 0;
-
-       address = mpfault->rdma.rdma_va;
-       length  = mpfault->rdma.rdma_op_len;
+       address = pfault->rdma.rdma_va;
+       length  = pfault->rdma.rdma_op_len;
 
        /* For some operations, the hardware cannot tell the exact message
         * length, and in those cases it reports zero. Use prefetch
         * logic. */
        if (length == 0) {
                prefetch_activated = 1;
-               length = mpfault->rdma.packet_size;
+               length = pfault->rdma.packet_size;
                prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
        }
 
-       ret = pagefault_single_data_segment(qp, pfault, rkey, address, length,
-                                           NULL);
+       ret = pagefault_single_data_segment(dev, rkey, address, length,
+                                           &pfault->bytes_committed, NULL);
        if (ret == -EAGAIN) {
                /* We're racing with an invalidation, don't prefetch */
                prefetch_activated = 0;
        } else if (ret < 0 || pages_in_range(address, length) > ret) {
-               mlx5_ib_page_fault_resume(qp, pfault, 1);
+               mlx5_ib_page_fault_resume(dev, pfault, 1);
+               if (ret != -ENOENT)
+                       mlx5_ib_warn(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
+                                    ret, pfault->token, pfault->type);
                return;
        }
 
-       mlx5_ib_page_fault_resume(qp, pfault, 0);
+       mlx5_ib_page_fault_resume(dev, pfault, 0);
+       mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
+                   pfault->token, pfault->type,
+                   prefetch_activated);
 
        /* At this point, there might be a new pagefault already arriving in
         * the eq, switch to the dummy pagefault for the rest of the
@@ -658,112 +695,39 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_qp *qp,
         * work-queue is being fenced. */
 
        if (prefetch_activated) {
-               ret = pagefault_single_data_segment(qp, &dummy_pfault, rkey,
-                                                   address,
+               u32 bytes_committed = 0;
+
+               ret = pagefault_single_data_segment(dev, rkey, address,
                                                    prefetch_len,
-                                                   NULL);
+                                                   &bytes_committed, NULL);
                if (ret < 0) {
-                       pr_warn("Prefetch failed (ret = %d, prefetch_activated = %d) for QPN %d, address: 0x%.16llx, length = 0x%.16x\n",
-                               ret, prefetch_activated,
-                               qp->ibqp.qp_num, address, prefetch_len);
+                       mlx5_ib_warn(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
+                                    ret, pfault->token, address,
+                                    prefetch_len);
                }
        }
 }
 
-void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
-                              struct mlx5_ib_pfault *pfault)
+void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
+                   struct mlx5_pagefault *pfault)
 {
-       u8 event_subtype = pfault->mpfault.event_subtype;
+       struct mlx5_ib_dev *dev = context;
+       u8 event_subtype = pfault->event_subtype;
 
        switch (event_subtype) {
        case MLX5_PFAULT_SUBTYPE_WQE:
-               mlx5_ib_mr_wqe_pfault_handler(qp, pfault);
+               mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
                break;
        case MLX5_PFAULT_SUBTYPE_RDMA:
-               mlx5_ib_mr_rdma_pfault_handler(qp, pfault);
+               mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
                break;
        default:
-               pr_warn("Invalid page fault event subtype: 0x%x\n",
-                       event_subtype);
-               mlx5_ib_page_fault_resume(qp, pfault, 1);
-               break;
+               mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
+                           event_subtype);
+               mlx5_ib_page_fault_resume(dev, pfault, 1);
        }
 }
 
-static void mlx5_ib_qp_pfault_action(struct work_struct *work)
-{
-       struct mlx5_ib_pfault *pfault = container_of(work,
-                                                    struct mlx5_ib_pfault,
-                                                    work);
-       enum mlx5_ib_pagefault_context context =
-               mlx5_ib_get_pagefault_context(&pfault->mpfault);
-       struct mlx5_ib_qp *qp = container_of(pfault, struct mlx5_ib_qp,
-                                            pagefaults[context]);
-       mlx5_ib_mr_pfault_handler(qp, pfault);
-}
-
-void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
-       qp->disable_page_faults = 1;
-       spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
-
-       /*
-        * Note that at this point, we are guarenteed that no more
-        * work queue elements will be posted to the work queue with
-        * the QP we are closing.
-        */
-       flush_workqueue(mlx5_ib_page_fault_wq);
-}
-
-void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&qp->disable_page_faults_lock, flags);
-       qp->disable_page_faults = 0;
-       spin_unlock_irqrestore(&qp->disable_page_faults_lock, flags);
-}
-
-static void mlx5_ib_pfault_handler(struct mlx5_core_qp *qp,
-                                  struct mlx5_pagefault *pfault)
-{
-       /*
-        * Note that we will only get one fault event per QP per context
-        * (responder/initiator, read/write), until we resolve the page fault
-        * with the mlx5_ib_page_fault_resume command. Since this function is
-        * called from within the work element, there is no risk of missing
-        * events.
-        */
-       struct mlx5_ib_qp *mibqp = to_mibqp(qp);
-       enum mlx5_ib_pagefault_context context =
-               mlx5_ib_get_pagefault_context(pfault);
-       struct mlx5_ib_pfault *qp_pfault = &mibqp->pagefaults[context];
-
-       qp_pfault->mpfault = *pfault;
-
-       /* No need to stop interrupts here since we are in an interrupt */
-       spin_lock(&mibqp->disable_page_faults_lock);
-       if (!mibqp->disable_page_faults)
-               queue_work(mlx5_ib_page_fault_wq, &qp_pfault->work);
-       spin_unlock(&mibqp->disable_page_faults_lock);
-}
-
-void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)
-{
-       int i;
-
-       qp->disable_page_faults = 1;
-       spin_lock_init(&qp->disable_page_faults_lock);
-
-       qp->trans_qp.base.mqp.pfault_handler = mlx5_ib_pfault_handler;
-
-       for (i = 0; i < MLX5_IB_PAGEFAULT_CONTEXTS; ++i)
-               INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action);
-}
-
 int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev)
 {
        int ret;
@@ -780,17 +744,3 @@ void mlx5_ib_odp_remove_one(struct mlx5_ib_dev *ibdev)
        cleanup_srcu_struct(&ibdev->mr_srcu);
 }
 
-int __init mlx5_ib_odp_init(void)
-{
-       mlx5_ib_page_fault_wq = alloc_ordered_workqueue("mlx5_ib_page_faults",
-                                                       WQ_MEM_RECLAIM);
-       if (!mlx5_ib_page_fault_wq)
-               return -ENOMEM;
-
-       return 0;
-}
-
-void mlx5_ib_odp_cleanup(void)
-{
-       destroy_workqueue(mlx5_ib_page_fault_wq);
-}
index a1b3125f0a6eb3a312539a32414202cefc877df8..e31bf11ae64fccdda3bb85863ea4da0da902b9bb 100644 (file)
@@ -475,60 +475,53 @@ static int qp_has_rq(struct ib_qp_init_attr *attr)
        return 1;
 }
 
-static int first_med_uuar(void)
+static int first_med_bfreg(void)
 {
        return 1;
 }
 
-static int next_uuar(int n)
-{
-       n++;
-
-       while (((n % 4) & 2))
-               n++;
+enum {
+       /* This is the first blue flame register in the array of bfregs assigned
+        * to a process. Since we do not use it for blue flame but rather
+        * regular 64-bit doorbells, we do not need a lock for maintaining
+        * "odd/even" order
+        */
+       NUM_NON_BLUE_FLAME_BFREGS = 1,
+};
 
-       return n;
+static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
+{
+       return get_num_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
 }
 
-static int num_med_uuar(struct mlx5_uuar_info *uuari)
+static int num_med_bfreg(struct mlx5_ib_dev *dev,
+                        struct mlx5_bfreg_info *bfregi)
 {
        int n;
 
-       n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
-               uuari->num_low_latency_uuars - 1;
+       n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
+           NUM_NON_BLUE_FLAME_BFREGS;
 
        return n >= 0 ? n : 0;
 }
 
-static int max_uuari(struct mlx5_uuar_info *uuari)
-{
-       return uuari->num_uars * 4;
-}
-
-static int first_hi_uuar(struct mlx5_uuar_info *uuari)
+static int first_hi_bfreg(struct mlx5_ib_dev *dev,
+                         struct mlx5_bfreg_info *bfregi)
 {
        int med;
-       int i;
-       int t;
-
-       med = num_med_uuar(uuari);
-       for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
-               t++;
-               if (t == med)
-                       return next_uuar(i);
-       }
 
-       return 0;
+       med = num_med_bfreg(dev, bfregi);
+       return ++med;
 }
 
-static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
+static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
+                                 struct mlx5_bfreg_info *bfregi)
 {
        int i;
 
-       for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
-               if (!test_bit(i, uuari->bitmap)) {
-                       set_bit(i, uuari->bitmap);
-                       uuari->count[i]++;
+       for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
+               if (!bfregi->count[i]) {
+                       bfregi->count[i]++;
                        return i;
                }
        }
@@ -536,87 +529,61 @@ static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
        return -ENOMEM;
 }
 
-static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
+static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
+                                struct mlx5_bfreg_info *bfregi)
 {
-       int minidx = first_med_uuar();
+       int minidx = first_med_bfreg();
        int i;
 
-       for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
-               if (uuari->count[i] < uuari->count[minidx])
+       for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) {
+               if (bfregi->count[i] < bfregi->count[minidx])
                        minidx = i;
+               if (!bfregi->count[minidx])
+                       break;
        }
 
-       uuari->count[minidx]++;
+       bfregi->count[minidx]++;
        return minidx;
 }
 
-static int alloc_uuar(struct mlx5_uuar_info *uuari,
-                     enum mlx5_ib_latency_class lat)
+static int alloc_bfreg(struct mlx5_ib_dev *dev,
+                      struct mlx5_bfreg_info *bfregi,
+                      enum mlx5_ib_latency_class lat)
 {
-       int uuarn = -EINVAL;
+       int bfregn = -EINVAL;
 
-       mutex_lock(&uuari->lock);
+       mutex_lock(&bfregi->lock);
        switch (lat) {
        case MLX5_IB_LATENCY_CLASS_LOW:
-               uuarn = 0;
-               uuari->count[uuarn]++;
+               BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
+               bfregn = 0;
+               bfregi->count[bfregn]++;
                break;
 
        case MLX5_IB_LATENCY_CLASS_MEDIUM:
-               if (uuari->ver < 2)
-                       uuarn = -ENOMEM;
+               if (bfregi->ver < 2)
+                       bfregn = -ENOMEM;
                else
-                       uuarn = alloc_med_class_uuar(uuari);
+                       bfregn = alloc_med_class_bfreg(dev, bfregi);
                break;
 
        case MLX5_IB_LATENCY_CLASS_HIGH:
-               if (uuari->ver < 2)
-                       uuarn = -ENOMEM;
+               if (bfregi->ver < 2)
+                       bfregn = -ENOMEM;
                else
-                       uuarn = alloc_high_class_uuar(uuari);
-               break;
-
-       case MLX5_IB_LATENCY_CLASS_FAST_PATH:
-               uuarn = 2;
+                       bfregn = alloc_high_class_bfreg(dev, bfregi);
                break;
        }
-       mutex_unlock(&uuari->lock);
-
-       return uuarn;
-}
+       mutex_unlock(&bfregi->lock);
 
-static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
-{
-       clear_bit(uuarn, uuari->bitmap);
-       --uuari->count[uuarn];
-}
-
-static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
-{
-       clear_bit(uuarn, uuari->bitmap);
-       --uuari->count[uuarn];
+       return bfregn;
 }
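
A minimal user-space model of this allocator can help readers track the class layout through the rename: bfreg 0 is the shared low-latency register (the BUILD_BUG_ON above pins NUM_NON_BLUE_FLAME_BFREGS to 1), medium-class registers are reference-counted and shared, and high-class registers are exclusive. Table sizes and boundaries below are invented for illustration; the real ones come from mlx5_bfreg_info.

    /* Illustrative model only; real sizes come from mlx5_bfreg_info. */
    #include <stdio.h>

    #define NUM_BFREGS 8    /* hypothetical total               */
    #define FIRST_MED  1    /* bfreg 0 is the shared low class  */
    #define FIRST_HI   4    /* medium class occupies 1..3       */

    static int count[NUM_BFREGS];

    static int alloc_high(void)             /* exclusive: first unused */
    {
            int i;

            for (i = FIRST_HI; i < NUM_BFREGS; i++) {
                    if (!count[i]) {
                            count[i]++;
                            return i;
                    }
            }
            return -1;                      /* models -ENOMEM */
    }

    static int alloc_med(void)              /* shared: least-used wins */
    {
            int minidx = FIRST_MED;
            int i;

            for (i = FIRST_MED; i < FIRST_HI; i++) {
                    if (count[i] < count[minidx])
                            minidx = i;
                    if (!count[minidx])
                            break;          /* found a free one */
            }
            count[minidx]++;
            return minidx;
    }

    int main(void)
    {
            int n;

            for (n = 0; n < 5; n++)         /* prints 4, 5, 6, 7, -1 */
                    printf("high -> %d\n", alloc_high());
            for (n = 0; n < 5; n++)         /* prints 1, 2, 3, 1, 2  */
                    printf("med  -> %d\n", alloc_med());
            return 0;
    }

A -1 here corresponds to the -ENOMEM that drives the high-to-medium-to-low fallback in create_user_qp() further down.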
 
-static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
+static void free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
 {
-       int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
-       int high_uuar = nuuars - uuari->num_low_latency_uuars;
-
-       mutex_lock(&uuari->lock);
-       if (uuarn == 0) {
-               --uuari->count[uuarn];
-               goto out;
-       }
-
-       if (uuarn < high_uuar) {
-               free_med_class_uuar(uuari, uuarn);
-               goto out;
-       }
-
-       free_high_class_uuar(uuari, uuarn);
-
-out:
-       mutex_unlock(&uuari->lock);
+       mutex_lock(&bfregi->lock);
+       bfregi->count[bfregn]--;
+       mutex_unlock(&bfregi->lock);
 }
 
 static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
@@ -657,9 +624,20 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
                               struct mlx5_ib_cq *recv_cq);
 
-static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
+static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
+                              struct mlx5_bfreg_info *bfregi, int bfregn)
 {
-       return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
+       int bfregs_per_sys_page;
+       int index_of_sys_page;
+       int offset;
+
+       bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
+                               MLX5_NON_FP_BFREGS_PER_UAR;
+       index_of_sys_page = bfregn / bfregs_per_sys_page;
+
+       offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
+
+       return bfregi->sys_pages[index_of_sys_page] + offset;
 }
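
The arithmetic above maps a bfreg number to the UAR index user space programs. A quick user-space check of the mapping, assuming MLX5_NON_FP_BFREGS_PER_UAR == 2 as the companion header in this series defines, and a hypothetical 64 KiB system page holding sixteen 4 KiB UARs:

    #include <stdio.h>

    #define NON_FP_BFREGS_PER_UAR 2   /* assumed, per the series' headers */

    static int uar_index(const int *sys_pages, int uars_per_page, int bfregn)
    {
            int per_page = uars_per_page * NON_FP_BFREGS_PER_UAR;

            return sys_pages[bfregn / per_page] +
                   (bfregn % per_page) / NON_FP_BFREGS_PER_UAR;
    }

    int main(void)
    {
            /* pretend firmware handed out pages starting at UARs 100 and 116 */
            int sys_pages[] = { 100, 116 };
            int bfregn;

            /* 16 UARs per page -> 32 non-fast-path bfregs per page */
            for (bfregn = 0; bfregn < 64; bfregn += 17)
                    printf("bfregn %2d -> uar %d\n",
                           uar_index(sys_pages, 16, bfregn));
            return 0;
    }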
 
 static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
@@ -762,6 +740,13 @@ err_umem:
        return err;
 }
 
+static int adjust_bfregn(struct mlx5_ib_dev *dev,
+                        struct mlx5_bfreg_info *bfregi, int bfregn)
+{
+       return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR +
+                               bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
+}
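
adjust_bfregn() converts the driver's internal numbering, which counts only non-fast-path bfregs, into the user-visible numbering in which every UAR also carries its fast-path pair. Assuming MLX5_BFREGS_PER_UAR == 4 and MLX5_NON_FP_BFREGS_PER_UAR == 2 as elsewhere in the series, internal 0, 1, 2, 3, 4, 5 becomes 0, 1, 4, 5, 8, 9:

    #include <stdio.h>

    int main(void)
    {
            int n;

            /* 2 and 4 stand in for MLX5_NON_FP_BFREGS_PER_UAR and
             * MLX5_BFREGS_PER_UAR respectively */
            for (n = 0; n < 6; n++)
                    printf("internal %d -> user-visible %d\n",
                           n, n / 2 * 4 + n % 2);
            return 0;
    }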
+
 static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                          struct mlx5_ib_qp *qp, struct ib_udata *udata,
                          struct ib_qp_init_attr *attr,
@@ -776,7 +761,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
        int uar_index;
        int npages;
        u32 offset = 0;
-       int uuarn;
+       int bfregn;
        int ncont = 0;
        __be64 *pas;
        void *qpc;
@@ -794,27 +779,27 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
         */
        if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
                /* In CROSS_CHANNEL CQ and QP must use the same UAR */
-               uuarn = MLX5_CROSS_CHANNEL_UUAR;
+               bfregn = MLX5_CROSS_CHANNEL_BFREG;
        else {
-               uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
-               if (uuarn < 0) {
-                       mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
+               bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH);
+               if (bfregn < 0) {
+                       mlx5_ib_dbg(dev, "failed to allocate low latency BFREG\n");
                        mlx5_ib_dbg(dev, "reverting to medium latency\n");
-                       uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
-                       if (uuarn < 0) {
-                               mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
+                       bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_MEDIUM);
+                       if (bfregn < 0) {
+                               mlx5_ib_dbg(dev, "failed to allocate medium latency BFREG\n");
                                mlx5_ib_dbg(dev, "reverting to high latency\n");
-                               uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
-                               if (uuarn < 0) {
-                                       mlx5_ib_warn(dev, "uuar allocation failed\n");
-                                       return uuarn;
+                               bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_LOW);
+                               if (bfregn < 0) {
+                                       mlx5_ib_warn(dev, "bfreg allocation failed\n");
+                                       return bfregn;
                                }
                        }
                }
        }
 
-       uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
-       mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
+       uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn);
+       mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
 
        qp->rq.offset = 0;
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -822,7 +807,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
        err = set_user_buf_size(dev, qp, &ucmd, base, attr);
        if (err)
-               goto err_uuar;
+               goto err_bfreg;
 
        if (ucmd.buf_addr && ubuffer->buf_size) {
                ubuffer->buf_addr = ucmd.buf_addr;
@@ -831,7 +816,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                                       &ubuffer->umem, &npages, &page_shift,
                                       &ncont, &offset);
                if (err)
-                       goto err_uuar;
+                       goto err_bfreg;
        } else {
                ubuffer->umem = NULL;
        }
@@ -854,8 +839,8 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
        MLX5_SET(qpc, qpc, page_offset, offset);
 
        MLX5_SET(qpc, qpc, uar_page, uar_index);
-       resp->uuar_index = uuarn;
-       qp->uuarn = uuarn;
+       resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
+       qp->bfregn = bfregn;
 
        err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
        if (err) {
@@ -882,13 +867,13 @@ err_umem:
        if (ubuffer->umem)
                ib_umem_release(ubuffer->umem);
 
-err_uuar:
-       free_uuar(&context->uuari, uuarn);
+err_bfreg:
+       free_bfreg(dev, &context->bfregi, bfregn);
        return err;
 }
 
-static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
-                           struct mlx5_ib_qp_base *base)
+static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+                           struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base)
 {
        struct mlx5_ib_ucontext *context;
 
@@ -896,7 +881,7 @@ static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
        mlx5_ib_db_unmap_user(context, &qp->db);
        if (base->ubuffer.umem)
                ib_umem_release(base->ubuffer.umem);
-       free_uuar(&context->uuari, qp->uuarn);
+       free_bfreg(dev, &context->bfregi, qp->bfregn);
 }
 
 static int create_kernel_qp(struct mlx5_ib_dev *dev,
@@ -905,14 +890,10 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
                            u32 **in, int *inlen,
                            struct mlx5_ib_qp_base *base)
 {
-       enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
-       struct mlx5_uuar_info *uuari;
        int uar_index;
        void *qpc;
-       int uuarn;
        int err;
 
-       uuari = &dev->mdev->priv.uuari;
        if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
                                        IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
                                        IB_QP_CREATE_IPOIB_UD_LSO |
@@ -920,21 +901,17 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
                return -EINVAL;
 
        if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
-               lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
-
-       uuarn = alloc_uuar(uuari, lc);
-       if (uuarn < 0) {
-               mlx5_ib_dbg(dev, "\n");
-               return -ENOMEM;
-       }
+               qp->bf.bfreg = &dev->fp_bfreg;
+       else
+               qp->bf.bfreg = &dev->bfreg;
 
-       qp->bf = &uuari->bfs[uuarn];
-       uar_index = qp->bf->uar->index;
+       qp->bf.buf_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
+       uar_index = qp->bf.bfreg->index;
 
        err = calc_sq_size(dev, init_attr, qp);
        if (err < 0) {
                mlx5_ib_dbg(dev, "err %d\n", err);
-               goto err_uuar;
+               return err;
        }
 
        qp->rq.offset = 0;
@@ -944,7 +921,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
        err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
-               goto err_uuar;
+               return err;
        }
 
        qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
@@ -994,34 +971,30 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
        return 0;
 
 err_wrid:
-       mlx5_db_free(dev->mdev, &qp->db);
        kfree(qp->sq.wqe_head);
        kfree(qp->sq.w_list);
        kfree(qp->sq.wrid);
        kfree(qp->sq.wr_data);
        kfree(qp->rq.wrid);
+       mlx5_db_free(dev->mdev, &qp->db);
 
 err_free:
        kvfree(*in);
 
 err_buf:
        mlx5_buf_free(dev->mdev, &qp->buf);
-
-err_uuar:
-       free_uuar(&dev->mdev->priv.uuari, uuarn);
        return err;
 }
 
 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 {
-       mlx5_db_free(dev->mdev, &qp->db);
        kfree(qp->sq.wqe_head);
        kfree(qp->sq.w_list);
        kfree(qp->sq.wrid);
        kfree(qp->sq.wr_data);
        kfree(qp->rq.wrid);
+       mlx5_db_free(dev->mdev, &qp->db);
        mlx5_buf_free(dev->mdev, &qp->buf);
-       free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
 }
 
 static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
@@ -1353,7 +1326,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        if (init_attr->create_flags || init_attr->send_cq)
                return -EINVAL;
 
-       min_resp_len = offsetof(typeof(resp), uuar_index) + sizeof(resp.uuar_index);
+       min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index);
        if (udata->outlen < min_resp_len)
                return -EINVAL;
 
@@ -1526,9 +1499,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
               &qp->raw_packet_qp.rq.base :
               &qp->trans_qp.base;
 
-       if (init_attr->qp_type != IB_QPT_RAW_PACKET)
-               mlx5_ib_odp_create_qp(qp);
-
        mutex_init(&qp->mutex);
        spin_lock_init(&qp->sq.lock);
        spin_lock_init(&qp->rq.lock);
@@ -1795,7 +1765,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
 err_create:
        if (qp->create_type == MLX5_QP_USER)
-               destroy_qp_user(pd, qp, base);
+               destroy_qp_user(dev, pd, qp, base);
        else if (qp->create_type == MLX5_QP_KERNEL)
                destroy_qp_kernel(dev, qp);
 
@@ -1923,7 +1893,6 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 
        if (qp->state != IB_QPS_RESET) {
                if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) {
-                       mlx5_ib_qp_disable_pagefaults(qp);
                        err = mlx5_core_qp_modify(dev->mdev,
                                                  MLX5_CMD_OP_2RST_QP, 0,
                                                  NULL, &base->mqp);
@@ -1974,7 +1943,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
        if (qp->create_type == MLX5_QP_KERNEL)
                destroy_qp_kernel(dev, qp);
        else if (qp->create_type == MLX5_QP_USER)
-               destroy_qp_user(&get_pd(qp)->ibpd, qp, base);
+               destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base);
 }
 
 static const char *ib_qp_type_str(enum ib_qp_type type)
@@ -2823,16 +2792,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        if (mlx5_st < 0)
                goto out;
 
-       /* If moving to a reset or error state, we must disable page faults on
-        * this QP and flush all current page faults. Otherwise a stale page
-        * fault may attempt to work on this QP after it is reset and moved
-        * again to RTS, and may cause the driver and the device to get out of
-        * sync. */
-       if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
-           (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR) &&
-           (qp->ibqp.qp_type != IB_QPT_RAW_PACKET))
-               mlx5_ib_qp_disable_pagefaults(qp);
-
        if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
            !optab[mlx5_cur][mlx5_new])
                goto out;
@@ -2864,10 +2823,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        if (err)
                goto out;
 
-       if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT &&
-           (qp->ibqp.qp_type != IB_QPT_RAW_PACKET))
-               mlx5_ib_qp_enable_pagefaults(qp);
-
        qp->state = new_state;
 
        if (attr_mask & IB_QP_ACCESS_FLAGS)
@@ -3029,20 +2984,20 @@ static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
 
        if (wr->opcode == IB_WR_LSO) {
                struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
-               int size_of_inl_hdr_start = sizeof(eseg->inline_hdr_start);
+               int size_of_inl_hdr_start = sizeof(eseg->inline_hdr.start);
                u64 left, leftlen, copysz;
                void *pdata = ud_wr->header;
 
                left = ud_wr->hlen;
                eseg->mss = cpu_to_be16(ud_wr->mss);
-               eseg->inline_hdr_sz = cpu_to_be16(left);
+               eseg->inline_hdr.sz = cpu_to_be16(left);
 
                /*
                 * check if there is space till the end of queue, if yes,
                 * copy all in one shot, otherwise copy till the end of queue,
                 * rollback and then copy the rest
                 */
-               leftlen = qend - (void *)eseg->inline_hdr_start;
+               leftlen = qend - (void *)eseg->inline_hdr.start;
                copysz = min_t(u64, leftlen, left);
 
                memcpy(seg - size_of_inl_hdr_start, pdata, copysz);
@@ -3080,9 +3035,10 @@ static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
        dseg->addr       = cpu_to_be64(sg->addr);
 }
 
-static __be16 get_klm_octo(int npages)
+static u64 get_xlt_octo(u64 bytes)
 {
-       return cpu_to_be16(ALIGN(npages, 8) / 2);
+       return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
+              MLX5_IB_UMR_OCTOWORD;
 }
 
 static __be64 frwr_mkey_mask(void)
@@ -3127,18 +3083,14 @@ static __be64 sig_mkey_mask(void)
 }
 
 static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
-                               struct mlx5_ib_mr *mr)
+                           struct mlx5_ib_mr *mr)
 {
-       int ndescs = mr->ndescs;
+       int size = mr->ndescs * mr->desc_size;
 
        memset(umr, 0, sizeof(*umr));
 
-       if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
-               /* KLMs take twice the size of MTTs */
-               ndescs *= 2;
-
        umr->flags = MLX5_UMR_CHECK_NOT_FREE;
-       umr->klm_octowords = get_klm_octo(ndescs);
+       umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
        umr->mkey_mask = frwr_mkey_mask();
 }
 
@@ -3149,37 +3101,17 @@ static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
        umr->flags = MLX5_UMR_INLINE;
 }
 
-static __be64 get_umr_reg_mr_mask(int atomic)
+static __be64 get_umr_enable_mr_mask(void)
 {
        u64 result;
 
-       result = MLX5_MKEY_MASK_LEN             |
-                MLX5_MKEY_MASK_PAGE_SIZE       |
-                MLX5_MKEY_MASK_START_ADDR      |
-                MLX5_MKEY_MASK_PD              |
-                MLX5_MKEY_MASK_LR              |
-                MLX5_MKEY_MASK_LW              |
-                MLX5_MKEY_MASK_KEY             |
-                MLX5_MKEY_MASK_RR              |
-                MLX5_MKEY_MASK_RW              |
+       result = MLX5_MKEY_MASK_KEY |
                 MLX5_MKEY_MASK_FREE;
 
-       if (atomic)
-               result |= MLX5_MKEY_MASK_A;
-
        return cpu_to_be64(result);
 }
 
-static __be64 get_umr_unreg_mr_mask(void)
-{
-       u64 result;
-
-       result = MLX5_MKEY_MASK_FREE;
-
-       return cpu_to_be64(result);
-}
-
-static __be64 get_umr_update_mtt_mask(void)
+static __be64 get_umr_disable_mr_mask(void)
 {
        u64 result;
 
@@ -3194,23 +3126,22 @@ static __be64 get_umr_update_translation_mask(void)
 
        result = MLX5_MKEY_MASK_LEN |
                 MLX5_MKEY_MASK_PAGE_SIZE |
-                MLX5_MKEY_MASK_START_ADDR |
-                MLX5_MKEY_MASK_KEY |
-                MLX5_MKEY_MASK_FREE;
+                MLX5_MKEY_MASK_START_ADDR;
 
        return cpu_to_be64(result);
 }
 
-static __be64 get_umr_update_access_mask(void)
+static __be64 get_umr_update_access_mask(int atomic)
 {
        u64 result;
 
-       result = MLX5_MKEY_MASK_LW |
+       result = MLX5_MKEY_MASK_LR |
+                MLX5_MKEY_MASK_LW |
                 MLX5_MKEY_MASK_RR |
-                MLX5_MKEY_MASK_RW |
-                MLX5_MKEY_MASK_A |
-                MLX5_MKEY_MASK_KEY |
-                MLX5_MKEY_MASK_FREE;
+                MLX5_MKEY_MASK_RW;
+
+       if (atomic)
+               result |= MLX5_MKEY_MASK_A;
 
        return cpu_to_be64(result);
 }
@@ -3219,9 +3150,7 @@ static __be64 get_umr_update_pd_mask(void)
 {
        u64 result;
 
-       result = MLX5_MKEY_MASK_PD |
-                MLX5_MKEY_MASK_KEY |
-                MLX5_MKEY_MASK_FREE;
+       result = MLX5_MKEY_MASK_PD;
 
        return cpu_to_be64(result);
 }
@@ -3238,24 +3167,24 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
        else
                umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
 
-       if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
-               umr->klm_octowords = get_klm_octo(umrwr->npages);
-               if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
-                       umr->mkey_mask = get_umr_update_mtt_mask();
-                       umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
-                       umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
-               }
-               if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
-                       umr->mkey_mask |= get_umr_update_translation_mask();
-               if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_ACCESS)
-                       umr->mkey_mask |= get_umr_update_access_mask();
-               if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD)
-                       umr->mkey_mask |= get_umr_update_pd_mask();
-               if (!umr->mkey_mask)
-                       umr->mkey_mask = get_umr_reg_mr_mask(atomic);
-       } else {
-               umr->mkey_mask = get_umr_unreg_mr_mask();
+       umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
+       if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
+               u64 offset = get_xlt_octo(umrwr->offset);
+
+               umr->xlt_offset = cpu_to_be16(offset & 0xffff);
+               umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
+               umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
+       }
+       if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
+               umr->mkey_mask |= get_umr_update_translation_mask();
+       if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
+               umr->mkey_mask |= get_umr_update_access_mask(atomic);
+               umr->mkey_mask |= get_umr_update_pd_mask();
        }
+       if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
+               umr->mkey_mask |= get_umr_enable_mr_mask();
+       if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
+               umr->mkey_mask |= get_umr_disable_mr_mask();
 
        if (!wr->num_sge)
                umr->flags |= MLX5_UMR_INLINE;
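
Instead of the old all-or-nothing register/unregister masks, the rework lets each send flag contribute its own mkey-mask bits, which the function above simply ORs together. A condensed user-space sketch of that composition; the flag and mask values are invented stand-ins for the MLX5_IB_SEND_UMR_* and MLX5_MKEY_MASK_* constants, and the enable/disable masks are collapsed into one bit here:

    #include <stdio.h>
    #include <stdint.h>

    /* hypothetical stand-ins for MLX5_IB_SEND_UMR_* send flags */
    #define UMR_UPDATE_TRANSLATION (1u << 0)
    #define UMR_UPDATE_PD_ACCESS   (1u << 1)
    #define UMR_ENABLE_MR          (1u << 2)
    #define UMR_DISABLE_MR         (1u << 3)

    /* hypothetical stand-ins for MLX5_MKEY_MASK_* bits */
    #define MASK_TRANSLATION 0x01u
    #define MASK_ACCESS      0x02u
    #define MASK_PD          0x04u
    #define MASK_FREE        0x08u

    static uint32_t mkey_mask(uint32_t send_flags)
    {
            uint32_t mask = 0;

            if (send_flags & UMR_UPDATE_TRANSLATION)
                    mask |= MASK_TRANSLATION;
            if (send_flags & UMR_UPDATE_PD_ACCESS)
                    mask |= MASK_ACCESS | MASK_PD;
            if (send_flags & (UMR_ENABLE_MR | UMR_DISABLE_MR))
                    mask |= MASK_FREE;
            return mask;
    }

    int main(void)
    {
            printf("full reg: %#x\n", mkey_mask(UMR_UPDATE_TRANSLATION |
                                                UMR_UPDATE_PD_ACCESS |
                                                UMR_ENABLE_MR));   /* 0xf */
            printf("unreg:    %#x\n", mkey_mask(UMR_DISABLE_MR));  /* 0x8 */
            return 0;
    }
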
@@ -3303,17 +3232,17 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *w
        struct mlx5_umr_wr *umrwr = umr_wr(wr);
 
        memset(seg, 0, sizeof(*seg));
-       if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
+       if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
                seg->status = MLX5_MKEY_STATUS_FREE;
-               return;
-       }
 
        seg->flags = convert_access(umrwr->access_flags);
-       if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
-               if (umrwr->pd)
-                       seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
-               seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
-       }
+       if (umrwr->pd)
+               seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
+       if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
+           !umrwr->length)
+               seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);
+
+       seg->start_addr = cpu_to_be64(umrwr->virt_addr);
        seg->len = cpu_to_be64(umrwr->length);
        seg->log2_page_size = umrwr->page_shift;
        seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
@@ -3611,7 +3540,7 @@ static int set_sig_data_segment(struct ib_sig_handover_wr *wr,
 }
 
 static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
-                                struct ib_sig_handover_wr *wr, u32 nelements,
+                                struct ib_sig_handover_wr *wr, u32 size,
                                 u32 length, u32 pdn)
 {
        struct ib_mr *sig_mr = wr->sig_mr;
@@ -3626,17 +3555,17 @@ static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
        seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
                                    MLX5_MKEY_BSF_EN | pdn);
        seg->len = cpu_to_be64(length);
-       seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
+       seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
        seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
 }
 
 static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
-                               u32 nelements)
+                               u32 size)
 {
        memset(umr, 0, sizeof(*umr));
 
        umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
-       umr->klm_octowords = get_klm_octo(nelements);
+       umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
        umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
        umr->mkey_mask = sig_mkey_mask();
 }
@@ -3648,7 +3577,7 @@ static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
        struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
        struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
        u32 pdn = get_pd(qp)->pdn;
-       u32 klm_oct_size;
+       u32 xlt_size;
        int region_len, ret;
 
        if (unlikely(wr->wr.num_sge != 1) ||
@@ -3670,15 +3599,15 @@ static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
         * then we use strided block format (3 octowords),
         * else we use single KLM (1 octoword)
         **/
-       klm_oct_size = wr->prot ? 3 : 1;
+       xlt_size = wr->prot ? 0x30 : sizeof(struct mlx5_klm);
 
-       set_sig_umr_segment(*seg, klm_oct_size);
+       set_sig_umr_segment(*seg, xlt_size);
        *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
        *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
        if (unlikely((*seg == qp->sq.qend)))
                *seg = mlx5_get_send_wqe(qp, 0);
 
-       set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
+       set_sig_mkey_segment(*seg, wr, xlt_size, region_len, pdn);
        *seg += sizeof(struct mlx5_mkey_seg);
        *size += sizeof(struct mlx5_mkey_seg) / 16;
        if (unlikely((*seg == qp->sq.qend)))
@@ -3784,24 +3713,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
        }
 }
 
-static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
-                        unsigned bytecnt, struct mlx5_ib_qp *qp)
-{
-       while (bytecnt > 0) {
-               __iowrite64_copy(dst++, src++, 8);
-               __iowrite64_copy(dst++, src++, 8);
-               __iowrite64_copy(dst++, src++, 8);
-               __iowrite64_copy(dst++, src++, 8);
-               __iowrite64_copy(dst++, src++, 8);
-               __iowrite64_copy(dst++, src++, 8);
-               __iowrite64_copy(dst++, src++, 8);
-               __iowrite64_copy(dst++, src++, 8);
-               bytecnt -= 64;
-               if (unlikely(src == qp->sq.qend))
-                       src = mlx5_get_send_wqe(qp, 0);
-       }
-}
-
 static u8 get_fence(u8 fence, struct ib_send_wr *wr)
 {
        if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
@@ -3897,7 +3808,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
 
        qp = to_mqp(ibqp);
-       bf = qp->bf;
+       bf = &qp->bf;
        qend = qp->sq.qend;
 
        spin_lock_irqsave(&qp->sq.lock, flags);
@@ -4170,28 +4081,13 @@ out:
                 * we hit doorbell */
                wmb();
 
-               if (bf->need_lock)
-                       spin_lock(&bf->lock);
-               else
-                       __acquire(&bf->lock);
-
-               /* TBD enable WC */
-               if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
-                       mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
-                       /* wc_wmb(); */
-               } else {
-                       mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
-                                    MLX5_GET_DOORBELL_LOCK(&bf->lock32));
-                       /* Make sure doorbells don't leak out of SQ spinlock
-                        * and reach the HCA out of order.
-                        */
-                       mmiowb();
-               }
+               /* currently we support only regular doorbells */
+               mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL);
+               /* Make sure doorbells don't leak out of SQ spinlock
+                * and reach the HCA out of order.
+                */
+               mmiowb();
                bf->offset ^= bf->buf_size;
-               if (bf->need_lock)
-                       spin_unlock(&bf->lock);
-               else
-                       __release(&bf->lock);
        }
 
        spin_unlock_irqrestore(&qp->sq.lock, flags);
@@ -4559,14 +4455,6 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
                return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
                                            qp_init_attr);
 
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-       /*
-        * Wait for any outstanding page faults, in case the user frees memory
-        * based upon this query's result.
-        */
-       flush_workqueue(mlx5_ib_page_fault_wq);
-#endif
-
        mutex_lock(&qp->mutex);
 
        if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
index 4abdeb359fb4f52cbacde5c3e1f488296c6064c5..d9d15561eb5d13ceea3472c9e3c943b42333f473 100644
@@ -118,7 +118,7 @@ static struct device *dma_device(struct rxe_dev *rxe)
 
        ndev = rxe->ndev;
 
-       if (ndev->priv_flags & IFF_802_1Q_VLAN)
+       if (is_vlan_dev(ndev))
                ndev = vlan_dev_real_dev(ndev);
 
        return ndev->dev.parent;
index 296f1411fe84208d8c2511b8866d3f6936f8254a..3b11422b1ccead13edafb6b5406910f2eda50c87 100644
@@ -147,7 +147,7 @@ static word plci_remove_check(PLCI *);
 static void listen_check(DIVA_CAPI_ADAPTER *);
 static byte AddInfo(byte **, byte **, byte *, byte *);
 static byte getChannel(API_PARSE *);
-static void IndParse(PLCI *, word *, byte **, byte);
+static void IndParse(PLCI *, const word *, byte **, byte);
 static byte ie_compare(byte *, byte *);
 static word find_cip(DIVA_CAPI_ADAPTER *, byte *, byte *);
 static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *, word);
@@ -4858,7 +4858,7 @@ static void sig_ind(PLCI *plci)
        /* included before the ESC_MSGTYPE and MAXPARMSIDS has to be incremented */
        /* SMSG is situated at the end because it's 0 (for compatibility reasons */
        /* (see Info_Mask Bit 4, first IE. then the message type)           */
-       word parms_id[] =
+       static const word parms_id[] =
                {MAXPARMSIDS, CPN, 0xff, DSA, OSA, BC, LLC, HLC, ESC_CAUSE, DSP, DT, CHA,
                 UUI, CONG_RR, CONG_RNR, ESC_CHI, KEY, CHI, CAU, ESC_LAW,
                 RDN, RDX, CONN_NR, RIN, NI, CAI, ESC_CR,
@@ -4866,12 +4866,12 @@ static void sig_ind(PLCI *plci)
        /* 14 FTY repl by ESC_CHI */
        /* 18 PI  repl by ESC_LAW */
        /* removed OAD changed to 0xff for future use, OAD is multiIE now */
-       word multi_fac_id[] = {1, FTY};
-       word multi_pi_id[]  = {1, PI};
-       word multi_CiPN_id[]  = {1, OAD};
-       word multi_ssext_id[]  = {1, ESC_SSEXT};
+       static const word multi_fac_id[] = {1, FTY};
+       static const word multi_pi_id[]  = {1, PI};
+       static const word multi_CiPN_id[]  = {1, OAD};
+       static const word multi_ssext_id[]  = {1, ESC_SSEXT};
 
-       word multi_vswitch_id[]  = {1, ESC_VSWITCH};
+       static const word multi_vswitch_id[]  = {1, ESC_VSWITCH};
 
        byte *cau;
        word ncci;
@@ -8924,7 +8924,7 @@ static void listen_check(DIVA_CAPI_ADAPTER *a)
 /* functions for all parameters sent in INDs                        */
 /*------------------------------------------------------------------*/
 
-static void IndParse(PLCI *plci, word *parms_id, byte **parms, byte multiIEsize)
+static void IndParse(PLCI *plci, const word *parms_id, byte **parms, byte multiIEsize)
 {
        word ploc;            /* points to current location within packet */
        byte w;
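
The `static const` conversion above moves these tables out of sig_ind()'s stack frame into read-only data built once at compile time, which the constified IndParse() prototype then accepts. A small demonstration of the storage difference (the typedef and names are hypothetical):

    #include <stdio.h>

    typedef unsigned short word;

    static void show(int depth)
    {
            static const word shared[] = { 3, 0x10, 0x20, 0x30 };
            const word local[]         = { 3, 0x10, 0x20, 0x30 };

            /* shared lives in .rodata at one fixed address;
             * local is re-created on the stack at every call */
            printf("depth %d: shared %p, local %p\n",
                   depth, (const void *)shared, (const void *)local);
            if (depth)
                    show(depth - 1);
    }

    int main(void)
    {
            show(2);
            return 0;
    }
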
index 95c32f2d7601b9180d43b77e1c143d4988f56962..100fbdc9b95c8475a2ab42928254996b1b2beef6 100644
@@ -135,6 +135,7 @@ config MACVTAP
        tristate "MAC-VLAN based tap driver"
        depends on MACVLAN
        depends on INET
+       select TAP
        help
          This adds a specialized tap character device driver that is based
          on the MAC-VLAN network interface, called macvtap. A macvtap device
@@ -165,11 +166,25 @@ config IPVLAN
       To compile this driver as a module, choose M here: the module
       will be called ipvlan.
 
+config IPVTAP
+       tristate "IP-VLAN based tap driver"
+       depends on IPVLAN
+       depends on INET
+       select TAP
+       ---help---
+         This adds a specialized tap character device driver that is based
+         on the IP-VLAN network interface, called ipvtap. An ipvtap device
+         can be added in the same way as an ipvlan device, using 'type
+         ipvtap', and then be accessed through the tap user space interface.
+
+         To compile this driver as a module, choose M here: the module
+         will be called ipvtap.
 
 config VXLAN
        tristate "Virtual eXtensible Local Area Network (VXLAN)"
        depends on INET
        select NET_UDP_TUNNEL
+       select GRO_CELLS
        ---help---
          This allows one to create vxlan virtual interfaces that provide
          Layer 2 Networks over Layer 3 Networks. VXLAN is often used
@@ -184,6 +199,7 @@ config GENEVE
        tristate "Generic Network Virtualization Encapsulation"
        depends on INET && NET_UDP_TUNNEL
        select NET_IP_TUNNEL
+       select GRO_CELLS
        ---help---
          This allows one to create geneve virtual interfaces that provide
          Layer 2 Networks over Layer 3 Networks. GENEVE is often used
@@ -216,6 +232,7 @@ config MACSEC
        select CRYPTO
        select CRYPTO_AES
        select CRYPTO_GCM
+       select GRO_CELLS
        ---help---
           MACsec is an encryption standard for Ethernet.
 
@@ -284,6 +301,12 @@ config TUN
 
          If you don't know what to use this for, you don't need it.
 
+config TAP
+       tristate
+       ---help---
+         This option is selected by any driver that implements a tap user
+         space interface for a virtual interface, to re-use the core tap
+         functionality.
+
 config TUN_VNET_CROSS_LE
        bool "Support for cross-endian vnet headers on little-endian kernels"
        default n
@@ -437,6 +460,9 @@ config XEN_NETDEV_BACKEND
 config VMXNET3
        tristate "VMware VMXNET3 ethernet driver"
        depends on PCI && INET
+       depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \
+                    IA64_PAGE_SIZE_64KB || MICROBLAZE_64K_PAGES || \
+                    PARISC_PAGE_SIZE_64KB || PPC_64K_PAGES)
        help
          This driver supports VMware's vmxnet3 virtual ethernet NIC.
          To compile this driver as a module, choose M here: the
index 7336cbd3ef5d94d4d28a6e2913e0e8439b205bf3..98ed4d96987c87fda074a219a234cbf8e48f2b9a 100644
@@ -7,6 +7,7 @@
 #
 obj-$(CONFIG_BONDING) += bonding/
 obj-$(CONFIG_IPVLAN) += ipvlan/
+obj-$(CONFIG_IPVTAP) += ipvlan/
 obj-$(CONFIG_DUMMY) += dummy.o
 obj-$(CONFIG_EQUALIZER) += eql.o
 obj-$(CONFIG_IFB) += ifb.o
@@ -21,6 +22,7 @@ obj-$(CONFIG_PHYLIB) += phy/
 obj-$(CONFIG_RIONET) += rionet.o
 obj-$(CONFIG_NET_TEAM) += team/
 obj-$(CONFIG_TUN) += tun.o
+obj-$(CONFIG_TAP) += tap.o
 obj-$(CONFIG_VETH) += veth.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
 obj-$(CONFIG_VXLAN) += vxlan.o
index 8029dd4912b6f950e8ab5f06d5de9747d6832a02..6321f12630c8c5fb6277097dea1a847549f92d4f 100644
@@ -211,8 +211,8 @@ static int lacp_fast;
 
 static int bond_init(struct net_device *bond_dev);
 static void bond_uninit(struct net_device *bond_dev);
-static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
-                                               struct rtnl_link_stats64 *stats);
+static void bond_get_stats(struct net_device *bond_dev,
+                          struct rtnl_link_stats64 *stats);
 static void bond_slave_arr_handler(struct work_struct *work);
 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
                                  int mod);
@@ -1993,11 +1993,10 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
        return ret;
 }
 
-static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
+static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        bond_fill_ifbond(bond, info);
-       return 0;
 }
 
 static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
@@ -3337,8 +3336,8 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
        }
 }
 
-static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
-                                               struct rtnl_link_stats64 *stats)
+static void bond_get_stats(struct net_device *bond_dev,
+                          struct rtnl_link_stats64 *stats)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct rtnl_link_stats64 temp;
@@ -3362,8 +3361,6 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
 
        memcpy(&bond->bond_stats, stats, sizeof(*stats));
        spin_unlock(&bond->stats_lock);
-
-       return stats;
 }
 
 static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
@@ -3411,12 +3408,11 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
                if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
                        return -EFAULT;
 
-               res = bond_info_query(bond_dev, &k_binfo);
-               if (res == 0 &&
-                   copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
+               bond_info_query(bond_dev, &k_binfo);
+               if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
                        return -EFAULT;
 
-               return res;
+               return 0;
        case BOND_SLAVE_INFO_QUERY_OLD:
        case SIOCBONDSLAVEINFOQUERY:
                u_sinfo = (struct ifslave __user *)ifr->ifr_data;
@@ -4149,8 +4145,6 @@ static const struct net_device_ops bond_netdev_ops = {
        .ndo_add_slave          = bond_enslave,
        .ndo_del_slave          = bond_release,
        .ndo_fix_features       = bond_fix_features,
-       .ndo_neigh_construct    = netdev_default_l2upper_neigh_construct,
-       .ndo_neigh_destroy      = netdev_default_l2upper_neigh_destroy,
        .ndo_bridge_setlink     = switchdev_port_bridge_setlink,
        .ndo_bridge_getlink     = switchdev_port_bridge_getlink,
        .ndo_bridge_dellink     = switchdev_port_bridge_dellink,
index 7a85495dbb0c436f1f8ba30285c2f304c08a21ac..0da4f2f5c7e3eaef08f9a3ee3e5a0ab8081a80a9 100644
@@ -6,7 +6,8 @@ obj-$(CONFIG_CAN_VCAN)          += vcan.o
 obj-$(CONFIG_CAN_SLCAN)                += slcan.o
 
 obj-$(CONFIG_CAN_DEV)          += can-dev.o
-can-dev-y                      := dev.o
+can-dev-y                      += dev.o
+can-dev-y                      += rx-offload.o
 
 can-dev-$(CONFIG_CAN_LEDS)     += led.o
 
index 8f5e93cb79752703c141704011490535664b941e..0e0df0ba288cadf9cd3cde474284f14ee791157c 100644
@@ -813,7 +813,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
                u32 reg_ier = AT91_IRQ_ERR_FRAME;
                reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);
 
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                at91_write(priv, AT91_IER, reg_ier);
        }
 
index e3dccd3200d5d834f13ad036c290c51e3091052e..606b7d8ffe138f880bfc830eff10b84ba6a01cfe 100644
@@ -1070,7 +1070,7 @@ static int c_can_poll(struct napi_struct *napi, int quota)
 
 end:
        if (work_done < quota) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                /* enable all IRQs if we are not in bus off state */
                if (priv->can.state != CAN_STATE_BUS_OFF)
                        c_can_irq_control(priv, true);
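
Both the at91 and c_can conversions above follow the same pattern: report the work actually done via napi_complete_done() instead of the bare napi_complete(), so the core can make better polling decisions (for example the gro_flush_timeout based polling mode). A sketch of the resulting idiom, with hypothetical foo_* names standing in for a driver's own state and helpers:

    #include <linux/netdevice.h>

    struct foo_priv {
            struct napi_struct napi;
            /* ... device state ... */
    };

    static int foo_process_rx(struct foo_priv *priv, int budget);
    static void foo_irq_enable(struct foo_priv *priv);

    static int foo_poll(struct napi_struct *napi, int budget)
    {
            struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
            int work_done = foo_process_rx(priv, budget);

            if (work_done < budget) {
                    /* done for now: hand the count to the core, re-arm IRQs */
                    napi_complete_done(napi, work_done);
                    foo_irq_enable(priv);
            }
            return work_done;
    }
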
index 8d6208c0b4003a9b19646e7d653a9501b6660e3c..611d16a7061de5cb45f5e7bf7903b45472657965 100644
@@ -279,25 +279,45 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
        return 0;
 }
 
+/* Checks the validity of predefined bitrate settings */
+static int can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
+                               const u32 *bitrate_const,
+                               const unsigned int bitrate_const_cnt)
+{
+       unsigned int i;
+
+       for (i = 0; i < bitrate_const_cnt; i++) {
+               if (bt->bitrate == bitrate_const[i])
+                       break;
+       }
+
+       if (i >= bitrate_const_cnt)
+               return -EINVAL;
+
+       return 0;
+}
+
 static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
-                            const struct can_bittiming_const *btc)
+                            const struct can_bittiming_const *btc,
+                            const u32 *bitrate_const,
+                            const unsigned int bitrate_const_cnt)
 {
        int err;
 
-       /* Check if the CAN device has bit-timing parameters */
-       if (!btc)
-               return -EOPNOTSUPP;
-
        /*
         * Depending on the given can_bittiming parameter structure the CAN
         * timing parameters are calculated based on the provided bitrate OR
         * alternatively the CAN timing parameters (tq, prop_seg, etc.) are
         * provided directly which are then checked and fixed up.
         */
-       if (!bt->tq && bt->bitrate)
+       if (!bt->tq && bt->bitrate && btc)
                err = can_calc_bittiming(dev, bt, btc);
-       else if (bt->tq && !bt->bitrate)
+       else if (bt->tq && !bt->bitrate && btc)
                err = can_fixup_bittiming(dev, bt, btc);
+       else if (!bt->tq && bt->bitrate && bitrate_const)
+               err = can_validate_bitrate(dev, bt, bitrate_const,
+                                          bitrate_const_cnt);
        else
                err = -EINVAL;
 
@@ -872,8 +892,20 @@ static int can_changelink(struct net_device *dev,
                /* Do not allow changing bittiming while running */
                if (dev->flags & IFF_UP)
                        return -EBUSY;
+
+               /* Calculate bittiming parameters based on
+                * bittiming_const if set, otherwise pass bitrate
+                * directly via do_set_bitrate(). Bail out if neither
+                * is given.
+                */
+               if (!priv->bittiming_const && !priv->do_set_bittiming)
+                       return -EOPNOTSUPP;
+
                memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
-               err = can_get_bittiming(dev, &bt, priv->bittiming_const);
+               err = can_get_bittiming(dev, &bt,
+                                       priv->bittiming_const,
+                                       priv->bitrate_const,
+                                       priv->bitrate_const_cnt);
                if (err)
                        return err;
                memcpy(&priv->bittiming, &bt, sizeof(bt));
@@ -943,9 +975,21 @@ static int can_changelink(struct net_device *dev,
                /* Do not allow changing bittiming while running */
                if (dev->flags & IFF_UP)
                        return -EBUSY;
+
+               /* Calculate bittiming parameters based on
+                * data_bittiming_const if set, otherwise pass bitrate
+                * directly via do_set_bitrate(). Bail out if neither
+                * is given.
+                */
+               if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
+                       return -EOPNOTSUPP;
+
                memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
                       sizeof(dbt));
-               err = can_get_bittiming(dev, &dbt, priv->data_bittiming_const);
+               err = can_get_bittiming(dev, &dbt,
+                                       priv->data_bittiming_const,
+                                       priv->data_bitrate_const,
+                                       priv->data_bitrate_const_cnt);
                if (err)
                        return err;
                memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
@@ -958,6 +1002,30 @@ static int can_changelink(struct net_device *dev,
                }
        }
 
+       if (data[IFLA_CAN_TERMINATION]) {
+               const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
+               const unsigned int num_term = priv->termination_const_cnt;
+               unsigned int i;
+
+               if (!priv->do_set_termination)
+                       return -EOPNOTSUPP;
+
+               /* check whether given value is supported by the interface */
+               for (i = 0; i < num_term; i++) {
+                       if (termval == priv->termination_const[i])
+                               break;
+               }
+               if (i >= num_term)
+                       return -EINVAL;
+
+               /* Finally, set the termination value */
+               err = priv->do_set_termination(dev, termval);
+               if (err)
+                       return err;
+
+               priv->termination = termval;
+       }
+
        return 0;
 }
 
@@ -980,6 +1048,17 @@ static size_t can_get_size(const struct net_device *dev)
                size += nla_total_size(sizeof(struct can_bittiming));
        if (priv->data_bittiming_const)                         /* IFLA_CAN_DATA_BITTIMING_CONST */
                size += nla_total_size(sizeof(struct can_bittiming_const));
+       if (priv->termination_const) {
+               size += nla_total_size(sizeof(priv->termination));              /* IFLA_CAN_TERMINATION */
+               size += nla_total_size(sizeof(*priv->termination_const) *       /* IFLA_CAN_TERMINATION_CONST */
+                                      priv->termination_const_cnt);
+       }
+       if (priv->bitrate_const)                                /* IFLA_CAN_BITRATE_CONST */
+               size += nla_total_size(sizeof(*priv->bitrate_const) *
+                                      priv->bitrate_const_cnt);
+       if (priv->data_bitrate_const)                           /* IFLA_CAN_DATA_BITRATE_CONST */
+               size += nla_total_size(sizeof(*priv->data_bitrate_const) *
+                                      priv->data_bitrate_const_cnt);
 
        return size;
 }
@@ -1018,7 +1097,28 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
            (priv->data_bittiming_const &&
             nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
                     sizeof(*priv->data_bittiming_const),
-                    priv->data_bittiming_const)))
+                    priv->data_bittiming_const)) ||
+
+           (priv->termination_const &&
+            (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
+             nla_put(skb, IFLA_CAN_TERMINATION_CONST,
+                     sizeof(*priv->termination_const) *
+                     priv->termination_const_cnt,
+                     priv->termination_const))) ||
+
+           (priv->bitrate_const &&
+            nla_put(skb, IFLA_CAN_BITRATE_CONST,
+                    sizeof(*priv->bitrate_const) *
+                    priv->bitrate_const_cnt,
+                    priv->bitrate_const)) ||
+
+           (priv->data_bitrate_const &&
+            nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
+                    sizeof(*priv->data_bitrate_const) *
+                    priv->data_bitrate_const_cnt,
+                    priv->data_bitrate_const))
+           )
+
                return -EMSGSIZE;
 
        return 0;
@@ -1073,6 +1173,22 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
  */
 int register_candev(struct net_device *dev)
 {
+       struct can_priv *priv = netdev_priv(dev);
+
+       /* Ensure termination_const, termination_const_cnt and
+        * do_set_termination consistency. All must be either set or
+        * unset.
+        */
+       if ((!priv->termination_const != !priv->termination_const_cnt) ||
+           (!priv->termination_const != !priv->do_set_termination))
+               return -EINVAL;
+
+       if (!priv->bitrate_const != !priv->bitrate_const_cnt)
+               return -EINVAL;
+
+       if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
+               return -EINVAL;
+
        dev->rtnl_link_ops = &can_link_ops;
        return register_netdev(dev);
 }
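
The `!a != !b` comparisons in register_candev() are the usual C idiom for "exactly one of the two is set": logical negation collapses each operand to 0 or 1 before the inequality. A standalone check:

    #include <assert.h>
    #include <stddef.h>

    static int inconsistent(const void *table, unsigned int cnt)
    {
            return !table != !cnt;  /* true iff exactly one is provided */
    }

    int main(void)
    {
            static const unsigned int bitrates[] = { 125000, 250000, 500000 };

            assert(!inconsistent(NULL, 0));     /* neither set: consistent */
            assert(!inconsistent(bitrates, 3)); /* both set:    consistent */
            assert(inconsistent(bitrates, 0));  /* table without a count   */
            assert(inconsistent(NULL, 3));      /* count without a table   */
            return 0;
    }
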
index 16f7cadda5c32b430c0d25aea9746a93a063bd39..ea57fed375c634cdc9a913a1d471fb14a13b1cb4 100644
@@ -3,7 +3,8 @@
  *
  * Copyright (c) 2005-2006 Varma Electronics Oy
  * Copyright (c) 2009 Sascha Hauer, Pengutronix
- * Copyright (c) 2010 Marc Kleine-Budde, Pengutronix
+ * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2014 David Jander, Protonic Holland
  *
  * Based on code originally by Andrey Volkov <avolkov@varma-el.com>
  *
@@ -24,6 +25,7 @@
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
 #include <linux/can/led.h>
+#include <linux/can/rx-offload.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #define FLEXCAN_MCR_WAK_SRC            BIT(19)
 #define FLEXCAN_MCR_DOZE               BIT(18)
 #define FLEXCAN_MCR_SRX_DIS            BIT(17)
-#define FLEXCAN_MCR_BCC                        BIT(16)
+#define FLEXCAN_MCR_IRMQ               BIT(16)
 #define FLEXCAN_MCR_LPRIO_EN           BIT(13)
 #define FLEXCAN_MCR_AEN                        BIT(12)
+/* MCR_MAXMB: maximum used MBs is MAXMB + 1 */
 #define FLEXCAN_MCR_MAXMB(x)           ((x) & 0x7f)
 #define FLEXCAN_MCR_IDAM_A             (0x0 << 8)
 #define FLEXCAN_MCR_IDAM_B             (0x1 << 8)
 
 /* FLEXCAN interrupt flag register (IFLAG) bits */
 /* Errata ERR005829 step7: Reserve first valid MB */
-#define FLEXCAN_TX_BUF_RESERVED                8
-#define FLEXCAN_TX_BUF_ID              9
-#define FLEXCAN_IFLAG_BUF(x)           BIT(x)
+#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO        8
+#define FLEXCAN_TX_MB_OFF_FIFO         9
+#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP   0
+#define FLEXCAN_TX_MB_OFF_TIMESTAMP            1
+#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST      (FLEXCAN_TX_MB_OFF_TIMESTAMP + 1)
+#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST       63
+#define FLEXCAN_IFLAG_MB(x)            BIT(x)
 #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
 #define FLEXCAN_IFLAG_RX_FIFO_WARN     BIT(6)
 #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE        BIT(5)
-#define FLEXCAN_IFLAG_DEFAULT \
-       (FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | FLEXCAN_IFLAG_RX_FIFO_AVAILABLE | \
-        FLEXCAN_IFLAG_BUF(FLEXCAN_TX_BUF_ID))
 
 /* FLEXCAN message buffers */
+#define FLEXCAN_MB_CODE_MASK           (0xf << 24)
+#define FLEXCAN_MB_CODE_RX_BUSY_BIT    (0x1 << 24)
 #define FLEXCAN_MB_CODE_RX_INACTIVE    (0x0 << 24)
 #define FLEXCAN_MB_CODE_RX_EMPTY       (0x4 << 24)
 #define FLEXCAN_MB_CODE_RX_FULL                (0x2 << 24)
  */
 #define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */
 #define FLEXCAN_QUIRK_DISABLE_RXFG     BIT(2) /* Disable RX FIFO Global mask */
-#define FLEXCAN_QUIRK_DISABLE_MECR     BIT(3) /* Disble Memory error detection */
+#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
+#define FLEXCAN_QUIRK_DISABLE_MECR     BIT(4) /* Disable Memory error detection */
+#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP        BIT(5) /* Use timestamp based offloading */
 
 /* Structure of the message buffer */
 struct flexcan_mb {
@@ -213,7 +221,10 @@ struct flexcan_regs {
        u32 imask1;             /* 0x28 */
        u32 iflag2;             /* 0x2c */
        u32 iflag1;             /* 0x30 */
-       u32 ctrl2;              /* 0x34 */
+       union {                 /* 0x34 */
+               u32 gfwr_mx28;  /* MX28, MX53 */
+               u32 ctrl2;      /* MX6, VF610 */
+       };
        u32 esr2;               /* 0x38 */
        u32 imeur;              /* 0x3c */
        u32 lrfr;               /* 0x40 */
@@ -232,7 +243,11 @@ struct flexcan_regs {
         *                              size conf'ed via ctrl2::RFFN
         *                              (mx6, vf610)
         */
-       u32 _reserved4[408];
+       u32 _reserved4[256];    /* 0x480 */
+       u32 rximr[64];          /* 0x880 */
+       u32 _reserved5[24];     /* 0x980 */
+       u32 gfwr_mx6;           /* 0x9e0 - MX6 */
+       u32 _reserved6[63];     /* 0x9e4 */
        u32 mecr;               /* 0xae0 */
        u32 erriar;             /* 0xae4 */
        u32 erridpr;            /* 0xae8 */
@@ -249,31 +264,36 @@ struct flexcan_devtype_data {
 
 struct flexcan_priv {
        struct can_priv can;
-       struct napi_struct napi;
+       struct can_rx_offload offload;
 
        struct flexcan_regs __iomem *regs;
-       u32 reg_esr;
+       struct flexcan_mb __iomem *tx_mb;
+       struct flexcan_mb __iomem *tx_mb_reserved;
+       u8 tx_mb_idx;
        u32 reg_ctrl_default;
+       u32 reg_imask1_default;
+       u32 reg_imask2_default;
 
        struct clk *clk_ipg;
        struct clk *clk_per;
-       struct flexcan_platform_data *pdata;
        const struct flexcan_devtype_data *devtype_data;
        struct regulator *reg_xceiver;
 };
 
-static struct flexcan_devtype_data fsl_p1010_devtype_data = {
+static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
        .quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE,
 };
 
-static struct flexcan_devtype_data fsl_imx28_devtype_data;
+static const struct flexcan_devtype_data fsl_imx28_devtype_data;
 
-static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
-       .quirks = FLEXCAN_QUIRK_DISABLE_RXFG,
+static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
+       .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+               FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
 };
 
-static struct flexcan_devtype_data fsl_vf610_devtype_data = {
-       .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_DISABLE_MECR,
+static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
+       .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+               FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
 };
 
 static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -331,13 +351,6 @@ static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
        return regulator_disable(priv->reg_xceiver);
 }
 
-static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
-                                             u32 reg_esr)
-{
-       return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
-               (reg_esr & FLEXCAN_ESR_ERR_BUS);
-}
-
 static int flexcan_chip_enable(struct flexcan_priv *priv)
 {
        struct flexcan_regs __iomem *regs = priv->regs;
@@ -468,7 +481,6 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
 static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        const struct flexcan_priv *priv = netdev_priv(dev);
-       struct flexcan_regs __iomem *regs = priv->regs;
        struct can_frame *cf = (struct can_frame *)skb->data;
        u32 can_id;
        u32 data;
@@ -491,68 +503,73 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        if (cf->can_dlc > 0) {
                data = be32_to_cpup((__be32 *)&cf->data[0]);
-               flexcan_write(data, &regs->mb[FLEXCAN_TX_BUF_ID].data[0]);
+               flexcan_write(data, &priv->tx_mb->data[0]);
        }
        if (cf->can_dlc > 3) {
                data = be32_to_cpup((__be32 *)&cf->data[4]);
-               flexcan_write(data, &regs->mb[FLEXCAN_TX_BUF_ID].data[1]);
+               flexcan_write(data, &priv->tx_mb->data[1]);
        }
 
        can_put_echo_skb(skb, dev, 0);
 
-       flexcan_write(can_id, &regs->mb[FLEXCAN_TX_BUF_ID].can_id);
-       flexcan_write(ctrl, &regs->mb[FLEXCAN_TX_BUF_ID].can_ctrl);
+       flexcan_write(can_id, &priv->tx_mb->can_id);
+       flexcan_write(ctrl, &priv->tx_mb->can_ctrl);
 
        /* Errata ERR005829 step8:
         * Write twice INACTIVE(0x8) code to first MB.
         */
        flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-                     &regs->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+                     &priv->tx_mb_reserved->can_ctrl);
        flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-                     &regs->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+                     &priv->tx_mb_reserved->can_ctrl);
 
        return NETDEV_TX_OK;
 }
 
-static void do_bus_err(struct net_device *dev,
-                      struct can_frame *cf, u32 reg_esr)
+static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
 {
        struct flexcan_priv *priv = netdev_priv(dev);
-       int rx_errors = 0, tx_errors = 0;
+       struct sk_buff *skb;
+       struct can_frame *cf;
+       bool rx_errors = false, tx_errors = false;
+
+       skb = alloc_can_err_skb(dev, &cf);
+       if (unlikely(!skb))
+               return;
 
        cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
 
        if (reg_esr & FLEXCAN_ESR_BIT1_ERR) {
                netdev_dbg(dev, "BIT1_ERR irq\n");
                cf->data[2] |= CAN_ERR_PROT_BIT1;
-               tx_errors = 1;
+               tx_errors = true;
        }
        if (reg_esr & FLEXCAN_ESR_BIT0_ERR) {
                netdev_dbg(dev, "BIT0_ERR irq\n");
                cf->data[2] |= CAN_ERR_PROT_BIT0;
-               tx_errors = 1;
+               tx_errors = true;
        }
        if (reg_esr & FLEXCAN_ESR_ACK_ERR) {
                netdev_dbg(dev, "ACK_ERR irq\n");
                cf->can_id |= CAN_ERR_ACK;
                cf->data[3] = CAN_ERR_PROT_LOC_ACK;
-               tx_errors = 1;
+               tx_errors = true;
        }
        if (reg_esr & FLEXCAN_ESR_CRC_ERR) {
                netdev_dbg(dev, "CRC_ERR irq\n");
                cf->data[2] |= CAN_ERR_PROT_BIT;
                cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
-               rx_errors = 1;
+               rx_errors = true;
        }
        if (reg_esr & FLEXCAN_ESR_FRM_ERR) {
                netdev_dbg(dev, "FRM_ERR irq\n");
                cf->data[2] |= CAN_ERR_PROT_FORM;
-               rx_errors = 1;
+               rx_errors = true;
        }
        if (reg_esr & FLEXCAN_ESR_STF_ERR) {
                netdev_dbg(dev, "STF_ERR irq\n");
                cf->data[2] |= CAN_ERR_PROT_STUFF;
-               rx_errors = 1;
+               rx_errors = true;
        }
 
        priv->can.can_stats.bus_error++;
@@ -560,32 +577,16 @@ static void do_bus_err(struct net_device *dev,
                dev->stats.rx_errors++;
        if (tx_errors)
                dev->stats.tx_errors++;
-}
-
-static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
-{
-       struct sk_buff *skb;
-       struct can_frame *cf;
-
-       skb = alloc_can_err_skb(dev, &cf);
-       if (unlikely(!skb))
-               return 0;
-
-       do_bus_err(dev, cf, reg_esr);
-
-       dev->stats.rx_packets++;
-       dev->stats.rx_bytes += cf->can_dlc;
-       netif_receive_skb(skb);
 
-       return 1;
+       can_rx_offload_irq_queue_err_skb(&priv->offload, skb);
 }
 
-static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
+static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
 {
        struct flexcan_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        struct can_frame *cf;
-       enum can_state new_state = 0, rx_state = 0, tx_state = 0;
+       enum can_state new_state, rx_state, tx_state;
        int flt;
        struct can_berr_counter bec;
 
@@ -606,33 +607,63 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
 
        /* state hasn't changed */
        if (likely(new_state == priv->can.state))
-               return 0;
+               return;
 
        skb = alloc_can_err_skb(dev, &cf);
        if (unlikely(!skb))
-               return 0;
+               return;
 
        can_change_state(dev, cf, tx_state, rx_state);
 
        if (unlikely(new_state == CAN_STATE_BUS_OFF))
                can_bus_off(dev);
 
-       dev->stats.rx_packets++;
-       dev->stats.rx_bytes += cf->can_dlc;
-       netif_receive_skb(skb);
+       can_rx_offload_irq_queue_err_skb(&priv->offload, skb);
+}
 
-       return 1;
+static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
+{
+       return container_of(offload, struct flexcan_priv, offload);
 }
 
-static void flexcan_read_fifo(const struct net_device *dev,
-                             struct can_frame *cf)
+static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
+                                        struct can_frame *cf,
+                                        u32 *timestamp, unsigned int n)
 {
-       const struct flexcan_priv *priv = netdev_priv(dev);
+       struct flexcan_priv *priv = rx_offload_to_priv(offload);
        struct flexcan_regs __iomem *regs = priv->regs;
-       struct flexcan_mb __iomem *mb = &regs->mb[0];
-       u32 reg_ctrl, reg_id;
+       struct flexcan_mb __iomem *mb = &regs->mb[n];
+       u32 reg_ctrl, reg_id, reg_iflag1;
+
+       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+               u32 code;
+
+               do {
+                       reg_ctrl = flexcan_read(&mb->can_ctrl);
+               } while (reg_ctrl & FLEXCAN_MB_CODE_RX_BUSY_BIT);
+
+               /* is this MB empty? */
+               code = reg_ctrl & FLEXCAN_MB_CODE_MASK;
+               if ((code != FLEXCAN_MB_CODE_RX_FULL) &&
+                   (code != FLEXCAN_MB_CODE_RX_OVERRUN))
+                       return 0;
+
+               if (code == FLEXCAN_MB_CODE_RX_OVERRUN) {
+                       /* This MB was overrun; we lost data */
+                       offload->dev->stats.rx_over_errors++;
+                       offload->dev->stats.rx_errors++;
+               }
+       } else {
+               reg_iflag1 = flexcan_read(&regs->iflag1);
+               if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
+                       return 0;
+
+               reg_ctrl = flexcan_read(&mb->can_ctrl);
+       }
+
+       /* expand the 16-bit timestamp to full 32 bit */
+       *timestamp = reg_ctrl << 16;
 
-       reg_ctrl = flexcan_read(&mb->can_ctrl);
        reg_id = flexcan_read(&mb->can_id);
        if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
                cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@ -647,69 +678,31 @@ static void flexcan_read_fifo(const struct net_device *dev,
        *(__be32 *)(cf->data + 4) = cpu_to_be32(flexcan_read(&mb->data[1]));
 
        /* mark as read */
-       flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
-       flexcan_read(&regs->timer);
-}
-
-static int flexcan_read_frame(struct net_device *dev)
-{
-       struct net_device_stats *stats = &dev->stats;
-       struct can_frame *cf;
-       struct sk_buff *skb;
-
-       skb = alloc_can_skb(dev, &cf);
-       if (unlikely(!skb)) {
-               stats->rx_dropped++;
-               return 0;
+       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+               /* Clear IRQ */
+               if (n < 32)
+                       flexcan_write(BIT(n), &regs->iflag1);
+               else
+                       flexcan_write(BIT(n - 32), &regs->iflag2);
+       } else {
+               flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
+               flexcan_read(&regs->timer);
        }
 
-       flexcan_read_fifo(dev, cf);
-
-       stats->rx_packets++;
-       stats->rx_bytes += cf->can_dlc;
-       netif_receive_skb(skb);
-
-       can_led_event(dev, CAN_LED_EVENT_RX);
-
        return 1;
 }
 
-static int flexcan_poll(struct napi_struct *napi, int quota)
+static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
 {
-       struct net_device *dev = napi->dev;
-       const struct flexcan_priv *priv = netdev_priv(dev);
        struct flexcan_regs __iomem *regs = priv->regs;
-       u32 reg_iflag1, reg_esr;
-       int work_done = 0;
-
-       /* The error bits are cleared on read,
-        * use saved value from irq handler.
-        */
-       reg_esr = flexcan_read(&regs->esr) | priv->reg_esr;
-
-       /* handle state changes */
-       work_done += flexcan_poll_state(dev, reg_esr);
+       u32 iflag1, iflag2;
 
-       /* handle RX-FIFO */
-       reg_iflag1 = flexcan_read(&regs->iflag1);
-       while (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE &&
-              work_done < quota) {
-               work_done += flexcan_read_frame(dev);
-               reg_iflag1 = flexcan_read(&regs->iflag1);
-       }
-
-       /* report bus errors */
-       if (flexcan_has_and_handle_berr(priv, reg_esr) && work_done < quota)
-               work_done += flexcan_poll_bus_err(dev, reg_esr);
+       iflag2 = flexcan_read(&regs->iflag2) & priv->reg_imask2_default;
+       iflag1 = flexcan_read(&regs->iflag1) & priv->reg_imask1_default &
+               ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
 
-       if (work_done < quota) {
-               napi_complete(napi);
-               /* enable IRQs */
-               flexcan_write(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
-               flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
-       }
-
-       return work_done;
+       return (u64)iflag2 << 32 | iflag1;
 }
 
 static irqreturn_t flexcan_irq(int irq, void *dev_id)
@@ -718,55 +711,70 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
        struct net_device_stats *stats = &dev->stats;
        struct flexcan_priv *priv = netdev_priv(dev);
        struct flexcan_regs __iomem *regs = priv->regs;
+       irqreturn_t handled = IRQ_NONE;
        u32 reg_iflag1, reg_esr;
 
        reg_iflag1 = flexcan_read(&regs->iflag1);
-       reg_esr = flexcan_read(&regs->esr);
 
-       /* ACK all bus error and state change IRQ sources */
-       if (reg_esr & FLEXCAN_ESR_ALL_INT)
-               flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
-
-       /* schedule NAPI in case of:
-        * - rx IRQ
-        * - state change IRQ
-        * - bus error IRQ and bus error reporting is activated
-        */
-       if ((reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) ||
-           (reg_esr & FLEXCAN_ESR_ERR_STATE) ||
-           flexcan_has_and_handle_berr(priv, reg_esr)) {
-               /* The error bits are cleared on read,
-                * save them for later use.
-                */
-               priv->reg_esr = reg_esr & FLEXCAN_ESR_ERR_BUS;
-               flexcan_write(FLEXCAN_IFLAG_DEFAULT &
-                             ~FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->imask1);
-               flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
-                             &regs->ctrl);
-               napi_schedule(&priv->napi);
-       }
+       /* reception interrupt */
+       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+               u64 reg_iflag;
+               int ret;
+
+               while ((reg_iflag = flexcan_read_reg_iflag_rx(priv))) {
+                       handled = IRQ_HANDLED;
+                       ret = can_rx_offload_irq_offload_timestamp(&priv->offload,
+                                                                  reg_iflag);
+                       if (!ret)
+                               break;
+               }
+       } else {
+               if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) {
+                       handled = IRQ_HANDLED;
+                       can_rx_offload_irq_offload_fifo(&priv->offload);
+               }
 
-       /* FIFO overflow */
-       if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
-               flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
-               dev->stats.rx_over_errors++;
-               dev->stats.rx_errors++;
+               /* FIFO overflow interrupt */
+               if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
+                       handled = IRQ_HANDLED;
+                       flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
+                       dev->stats.rx_over_errors++;
+                       dev->stats.rx_errors++;
+               }
        }
 
        /* transmission complete interrupt */
-       if (reg_iflag1 & (1 << FLEXCAN_TX_BUF_ID)) {
+       if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) {
+               handled = IRQ_HANDLED;
                stats->tx_bytes += can_get_echo_skb(dev, 0);
                stats->tx_packets++;
                can_led_event(dev, CAN_LED_EVENT_TX);
 
                /* after sending a RTR frame MB is in RX mode */
                flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-                             &regs->mb[FLEXCAN_TX_BUF_ID].can_ctrl);
-               flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
+                             &priv->tx_mb->can_ctrl);
+               flexcan_write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
                netif_wake_queue(dev);
        }
 
-       return IRQ_HANDLED;
+       reg_esr = flexcan_read(&regs->esr);
+
+       /* ACK all bus error and state change IRQ sources */
+       if (reg_esr & FLEXCAN_ESR_ALL_INT) {
+               handled = IRQ_HANDLED;
+               flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
+       }
+
+       /* state change interrupt */
+       if (reg_esr & FLEXCAN_ESR_ERR_STATE)
+               flexcan_irq_state(dev, reg_esr);
+
+       /* bus error IRQ - handle if bus error reporting is activated */
+       if ((reg_esr & FLEXCAN_ESR_ERR_BUS) &&
+           (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+               flexcan_irq_bus_err(dev, reg_esr);
+
+       return handled;
 }
 
 static void flexcan_set_bittiming(struct net_device *dev)
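
/* A sketch (demo_source_pending() is a hypothetical helper, not driver
 * API) of the IRQ_NONE/IRQ_HANDLED discipline the rework above adopts:
 * on shared or misrouted interrupt lines the genirq core uses the
 * return value to detect spurious interrupts, so the handler reports
 * IRQ_HANDLED only when some source actually fired.
 */
static irqreturn_t demo_irq(int irq, void *dev_id)
{
        irqreturn_t handled = IRQ_NONE;

        if (demo_source_pending(dev_id)) {
                /* ... service the source ... */
                handled = IRQ_HANDLED;
        }

        return handled;
}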
@@ -839,14 +847,23 @@ static int flexcan_chip_start(struct net_device *dev)
         * only supervisor access
         * enable warning int
         * disable local echo
+        * enable individual RX masking
         * choose format C
         * set max mailbox number
         */
        reg_mcr = flexcan_read(&regs->mcr);
        reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
-       reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_FEN | FLEXCAN_MCR_HALT |
-               FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS |
-               FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_BUF_ID);
+       reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
+               FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
+               FLEXCAN_MCR_IDAM_C;
+
+       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+               reg_mcr &= ~FLEXCAN_MCR_FEN;
+               reg_mcr |= FLEXCAN_MCR_MAXMB(priv->offload.mb_last);
+       } else {
+               reg_mcr |= FLEXCAN_MCR_FEN |
+                       FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
+       }
        netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
        flexcan_write(reg_mcr, &regs->mcr);
 
@@ -883,19 +900,31 @@ static int flexcan_chip_start(struct net_device *dev)
        netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
        flexcan_write(reg_ctrl, &regs->ctrl);
 
+       if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
+               reg_ctrl2 = flexcan_read(&regs->ctrl2);
+               reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS;
+               flexcan_write(reg_ctrl2, &regs->ctrl2);
+       }
+
        /* clear and invalidate all mailboxes first */
-       for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->mb); i++) {
+       for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
                flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
                              &regs->mb[i].can_ctrl);
        }
 
+       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+               for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++)
+                       flexcan_write(FLEXCAN_MB_CODE_RX_EMPTY,
+                                     &regs->mb[i].can_ctrl);
+       }
+
        /* Errata ERR005829: mark first TX mailbox as INACTIVE */
        flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-                     &regs->mb[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+                     &priv->tx_mb_reserved->can_ctrl);
 
        /* mark TX mailbox as INACTIVE */
        flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
-                     &regs->mb[FLEXCAN_TX_BUF_ID].can_ctrl);
+                     &priv->tx_mb->can_ctrl);
 
        /* acceptance mask/acceptance code (accept everything) */
        flexcan_write(0x0, &regs->rxgmask);
@@ -905,6 +934,10 @@ static int flexcan_chip_start(struct net_device *dev)
        if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
                flexcan_write(0x0, &regs->rxfgmask);
 
+       /* clear acceptance filters */
+       for (i = 0; i < ARRAY_SIZE(regs->mb); i++)
+               flexcan_write(0, &regs->rximr[i]);
+
        /* On Vybrid, disable memory error detection interrupts
         * and freeze mode.
         * This also works around errata e5295 which generates
@@ -942,7 +975,8 @@ static int flexcan_chip_start(struct net_device *dev)
        /* enable interrupts atomically */
        disable_irq(dev->irq);
        flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
-       flexcan_write(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
+       flexcan_write(priv->reg_imask1_default, &regs->imask1);
+       flexcan_write(priv->reg_imask2_default, &regs->imask2);
        enable_irq(dev->irq);
 
        /* print chip status */
@@ -972,6 +1006,7 @@ static void flexcan_chip_stop(struct net_device *dev)
        flexcan_chip_disable(priv);
 
        /* Disable all interrupts */
+       flexcan_write(0, &regs->imask2);
        flexcan_write(0, &regs->imask1);
        flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
                      &regs->ctrl);
@@ -1008,7 +1043,7 @@ static int flexcan_open(struct net_device *dev)
 
        can_led_event(dev, CAN_LED_EVENT_OPEN);
 
-       napi_enable(&priv->napi);
+       can_rx_offload_enable(&priv->offload);
        netif_start_queue(dev);
 
        return 0;
@@ -1030,7 +1065,7 @@ static int flexcan_close(struct net_device *dev)
        struct flexcan_priv *priv = netdev_priv(dev);
 
        netif_stop_queue(dev);
-       napi_disable(&priv->napi);
+       can_rx_offload_disable(&priv->offload);
        flexcan_chip_stop(dev);
 
        free_irq(dev->irq, dev);
@@ -1104,8 +1139,9 @@ static int register_flexcandev(struct net_device *dev)
        flexcan_write(reg, &regs->mcr);
 
        /* Currently we only support newer versions of this core
-        * featuring a RX FIFO. Older cores found on some Coldfire
-        * derivates are not yet supported.
+        * featuring an RX hardware FIFO (although this driver doesn't
+        * make use of it on some cores). Older cores, found on some
+        * Coldfire derivatives, are not tested.
         */
        reg = flexcan_read(&regs->mcr);
        if (!(reg & FLEXCAN_MCR_FEN)) {
@@ -1208,6 +1244,9 @@ static int flexcan_probe(struct platform_device *pdev)
        if (!dev)
                return -ENOMEM;
 
+       platform_set_drvdata(pdev, dev);
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
        dev->netdev_ops = &flexcan_netdev_ops;
        dev->irq = irq;
        dev->flags |= IFF_ECHO;
@@ -1223,14 +1262,41 @@ static int flexcan_probe(struct platform_device *pdev)
        priv->regs = regs;
        priv->clk_ipg = clk_ipg;
        priv->clk_per = clk_per;
-       priv->pdata = dev_get_platdata(&pdev->dev);
        priv->devtype_data = devtype_data;
        priv->reg_xceiver = reg_xceiver;
 
-       netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
+       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+               priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP;
+               priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP];
+       } else {
+               priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO;
+               priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO];
+       }
+       priv->tx_mb = &regs->mb[priv->tx_mb_idx];
 
-       platform_set_drvdata(pdev, dev);
-       SET_NETDEV_DEV(dev, &pdev->dev);
+       priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+       priv->reg_imask2_default = 0;
+
+       priv->offload.mailbox_read = flexcan_mailbox_read;
+
+       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+               u64 imask;
+
+               priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
+               priv->offload.mb_last = FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST;
+
+               imask = GENMASK_ULL(priv->offload.mb_last, priv->offload.mb_first);
+               priv->reg_imask1_default |= imask;
+               priv->reg_imask2_default |= imask >> 32;
+
+               err = can_rx_offload_add_timestamp(dev, &priv->offload);
+       } else {
+               priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW |
+                       FLEXCAN_IFLAG_RX_FIFO_AVAILABLE;
+               err = can_rx_offload_add_fifo(dev, &priv->offload, FLEXCAN_NAPI_WEIGHT);
+       }
+       if (err)
+               goto failed_offload;
 
        err = register_flexcandev(dev);
        if (err) {
@@ -1245,6 +1311,7 @@ static int flexcan_probe(struct platform_device *pdev)
 
        return 0;
 
+ failed_offload:
  failed_register:
        free_candev(dev);
        return err;
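
/* A worked example of the interrupt-mask split computed in
 * flexcan_probe() above, using a hypothetical mailbox range
 * first = 0, last = 38 (the real bounds come from the
 * FLEXCAN_RX_MB_OFF_TIMESTAMP_* constants):
 */
static void demo_imask_split(void)
{
        u64 imask = GENMASK_ULL(38, 0); /* 0x0000007fffffffff */
        u32 imask1 = imask;             /* 0xffffffff: MBs  0..31 -> IMASK1 */
        u32 imask2 = imask >> 32;       /* 0x0000007f: MBs 32..38 -> IMASK2 */
}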
@@ -1256,7 +1323,7 @@ static int flexcan_remove(struct platform_device *pdev)
        struct flexcan_priv *priv = netdev_priv(dev);
 
        unregister_flexcandev(dev);
-       netif_napi_del(&priv->napi);
+       can_rx_offload_del(&priv->offload);
        free_candev(dev);
 
        return 0;
index 368bb0710d8f2157f4d94bd5d332adf9290cc53f..138f5ae75c0bc6fcf912d30975197caedb0e0b3e 100644 (file)
@@ -578,7 +578,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
                work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done);
 
        if (work_done < quota) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                ifi_canfd_irq_enable(ndev, 1);
        }
 
index f13bb8d9bb8429e46be9d950df27e016ce450d26..2ba1a81500c126da0a9aee4333786ec5b9f289c3 100644 (file)
@@ -1475,7 +1475,7 @@ static int ican3_napi(struct napi_struct *napi, int budget)
        /* We have processed all packets that the adapter had, but it
         * was less than our budget, stop polling */
        if (received < budget)
-               napi_complete(napi);
+               napi_complete_done(napi, received);
 
        spin_lock_irqsave(&mod->lock, flags);
 
index 195f15edb32e3cf03b552d7cdb9a98d76f565591..7a6554efd42bfb10dbbfdf67d405507fa46488e8 100644 (file)
@@ -730,7 +730,7 @@ static int m_can_poll(struct napi_struct *napi, int quota)
                work_done += m_can_do_rx_poll(dev, (quota - work_done));
 
        if (work_done < quota) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                m_can_enable_all_interrupts(priv);
        }
 
index 788459f6bf5cc6a71c5e914d282a0e7bd5542791..caed4e6960f8c77fdca254f5ed4e7ccbe69792fc 100644 (file)
@@ -695,7 +695,7 @@ static int rcar_can_rx_poll(struct napi_struct *napi, int quota)
        }
        /* All packets processed */
        if (num_pkts < quota) {
-               napi_complete(napi);
+               napi_complete_done(napi, num_pkts);
                priv->ier |= RCAR_CAN_IER_RXFIE;
                writeb(priv->ier, &priv->regs->ier);
        }
index 43cdd5544b0c46bfa0741ea6d4977ea7d069c16c..4ef07d97156da838edaad4661f094d2e38d9111f 100644 (file)
@@ -1512,7 +1512,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
 
        /* All packets processed */
        if (num_pkts < quota) {
-               napi_complete(napi);
+               napi_complete_done(napi, num_pkts);
                /* Enable Rx FIFO interrupts */
                rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
                                   RCANFD_RFCC_RFIE);
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
new file mode 100644 (file)
index 0000000..f394f77
--- /dev/null
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2014 David Jander, Protonic Holland
+ * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/can/dev.h>
+#include <linux/can/rx-offload.h>
+
+struct can_rx_offload_cb {
+       u32 timestamp;
+};
+
+static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
+{
+       BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
+
+       return (struct can_rx_offload_cb *)skb->cb;
+}
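
/* A short sketch of the skb->cb[] idiom above: the 48-byte control
 * buffer is scratch space for the skb's current owner, and the
 * BUILD_BUG_ON turns an oversized private struct into a compile-time
 * error instead of silent memory corruption.
 */
static void demo_stamp_skb(struct sk_buff *skb, u32 ts)
{
        can_rx_offload_get_cb(skb)->timestamp = ts;
}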
+
+static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
+{
+       if (offload->inc)
+               return a <= b;
+       else
+               return a >= b;
+}
+
+static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
+{
+       if (offload->inc)
+               return (*val)++;
+       else
+               return (*val)--;
+}
+
+static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
+{
+       struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
+       struct net_device *dev = offload->dev;
+       struct net_device_stats *stats = &dev->stats;
+       struct sk_buff *skb;
+       int work_done = 0;
+
+       while ((work_done < quota) &&
+              (skb = skb_dequeue(&offload->skb_queue))) {
+               struct can_frame *cf = (struct can_frame *)skb->data;
+
+               work_done++;
+               stats->rx_packets++;
+               stats->rx_bytes += cf->can_dlc;
+               netif_receive_skb(skb);
+       }
+
+       if (work_done < quota) {
+               napi_complete_done(napi, work_done);
+
+               /* Check if there was another interrupt */
+               if (!skb_queue_empty(&offload->skb_queue))
+                       napi_reschedule(&offload->napi);
+       }
+
+       can_led_event(offload->dev, CAN_LED_EVENT_RX);
+
+       return work_done;
+}
+
+static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
+                                       int (*compare)(struct sk_buff *a, struct sk_buff *b))
+{
+       struct sk_buff *pos, *insert = (struct sk_buff *)head;
+
+       skb_queue_reverse_walk(head, pos) {
+               const struct can_rx_offload_cb *cb_pos, *cb_new;
+
+               cb_pos = can_rx_offload_get_cb(pos);
+               cb_new = can_rx_offload_get_cb(new);
+
+               netdev_dbg(new->dev,
+                          "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
+                          __func__,
+                          cb_pos->timestamp, cb_new->timestamp,
+                          cb_new->timestamp - cb_pos->timestamp,
+                          skb_queue_len(head));
+
+               if (compare(pos, new) < 0)
+                       continue;
+               insert = pos;
+               break;
+       }
+
+       __skb_queue_after(head, insert, new);
+}
+
+static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
+{
+       const struct can_rx_offload_cb *cb_a, *cb_b;
+
+       cb_a = can_rx_offload_get_cb(a);
+       cb_b = can_rx_offload_get_cb(b);
+
+       /* Subtract two u32 timestamps and return the result as int, so
+        * the sign of the difference stays meaningful across the u32
+        * overflow.
+        */
+       return cb_b->timestamp - cb_a->timestamp;
+}
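
/* Why this works, in numbers: flexcan_mailbox_read() shifts the 16-bit
 * hardware timestamp into the upper half of the u32, so the counter
 * wrap coincides with the u32 wrap and the signed difference keeps
 * ordering frames correctly across it. A sketch:
 */
static void demo_timestamp_order(void)
{
        u32 a = 0xfffeU << 16;  /* captured just before the counter wraps */
        u32 b = 0x0001U << 16;  /* captured just after the wrap */
        int diff = (int)(b - a);        /* 0x00030000 > 0: b sorts as newer */
}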
+
+static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
+{
+       struct sk_buff *skb = NULL;
+       struct can_rx_offload_cb *cb;
+       struct can_frame *cf;
+       int ret;
+
+       /* If the queue is full or no skb can be allocated, still read
+        * the mailbox to release it, discarding the frame.
+        */
+       if (likely(skb_queue_len(&offload->skb_queue) <=
+                  offload->skb_queue_len_max))
+               skb = alloc_can_skb(offload->dev, &cf);
+
+       if (!skb) {
+               struct can_frame cf_overflow;
+               u32 timestamp;
+
+               ret = offload->mailbox_read(offload, &cf_overflow,
+                                           &timestamp, n);
+               if (ret)
+                       offload->dev->stats.rx_dropped++;
+
+               return NULL;
+       }
+
+       cb = can_rx_offload_get_cb(skb);
+       ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
+       if (!ret) {
+               kfree_skb(skb);
+               return NULL;
+       }
+
+       return skb;
+}
+
+int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
+{
+       struct sk_buff_head skb_queue;
+       unsigned int i;
+
+       __skb_queue_head_init(&skb_queue);
+
+       for (i = offload->mb_first;
+            can_rx_offload_le(offload, i, offload->mb_last);
+            can_rx_offload_inc(offload, &i)) {
+               struct sk_buff *skb;
+
+               if (!(pending & BIT_ULL(i)))
+                       continue;
+
+               skb = can_rx_offload_offload_one(offload, i);
+               if (!skb)
+                       break;
+
+               __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
+       }
+
+       if (!skb_queue_empty(&skb_queue)) {
+               unsigned long flags;
+               u32 queue_len;
+
+               spin_lock_irqsave(&offload->skb_queue.lock, flags);
+               skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
+               spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+               if ((queue_len = skb_queue_len(&offload->skb_queue)) >
+                   (offload->skb_queue_len_max / 8))
+                       netdev_dbg(offload->dev, "%s: queue_len=%d\n",
+                                  __func__, queue_len);
+
+               can_rx_offload_schedule(offload);
+       }
+
+       return skb_queue_len(&skb_queue);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
+
+int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
+{
+       struct sk_buff *skb;
+       int received = 0;
+
+       while ((skb = can_rx_offload_offload_one(offload, 0))) {
+               skb_queue_tail(&offload->skb_queue, skb);
+               received++;
+       }
+
+       if (received)
+               can_rx_offload_schedule(offload);
+
+       return received;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
+
+int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb)
+{
+       if (skb_queue_len(&offload->skb_queue) >
+           offload->skb_queue_len_max)
+               return -ENOMEM;
+
+       skb_queue_tail(&offload->skb_queue, skb);
+       can_rx_offload_schedule(offload);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb);
+
+static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
+{
+       offload->dev = dev;
+
+       /* Limit queue len to 4x the weight (rounded to next power of two) */
+       offload->skb_queue_len_max = 2 << fls(weight);
+       offload->skb_queue_len_max *= 4;
+       skb_queue_head_init(&offload->skb_queue);
+
+       can_rx_offload_reset(offload);
+       netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
+
+       dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
+               __func__, offload->skb_queue_len_max);
+
+       return 0;
+}
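
/* A worked example of the limit above: for a weight of 16 (an
 * arbitrary value), fls(16) == 5, so skb_queue_len_max ends up as
 * (2 << 5) * 4 == 256.
 */
static unsigned int demo_queue_len_max(unsigned int weight)
{
        return (2 << fls(weight)) * 4;  /* weight 16 -> 256 */
}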
+
+int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
+{
+       unsigned int weight;
+
+       if (offload->mb_first >= BITS_PER_LONG_LONG ||
+           offload->mb_last >= BITS_PER_LONG_LONG || !offload->mailbox_read)
+               return -EINVAL;
+
+       if (offload->mb_first < offload->mb_last) {
+               offload->inc = true;
+               weight = offload->mb_last - offload->mb_first;
+       } else {
+               offload->inc = false;
+               weight = offload->mb_first - offload->mb_last;
+       }
+
+       return can_rx_offload_init_queue(dev, offload, weight);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
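
/* A sketch of the direction handling above, with hypothetical mailbox
 * numbers mb_first = 47 and mb_last = 16: inc ends up false and
 * weight == 31, so the pending-bit scan walks downwards.
 */
static void demo_walk_down(struct can_rx_offload *offload)
{
        unsigned int i;

        /* with inc == false this visits 47, 46, ..., 16 */
        for (i = 47; can_rx_offload_le(offload, i, 16);
             can_rx_offload_inc(offload, &i))
                ;
}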
+
+int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
+{
+       if (!offload->mailbox_read)
+               return -EINVAL;
+
+       return can_rx_offload_init_queue(dev, offload, weight);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
+
+void can_rx_offload_enable(struct can_rx_offload *offload)
+{
+       can_rx_offload_reset(offload);
+       napi_enable(&offload->napi);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_enable);
+
+void can_rx_offload_del(struct can_rx_offload *offload)
+{
+       netif_napi_del(&offload->napi);
+       skb_queue_purge(&offload->skb_queue);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_del);
+
+void can_rx_offload_reset(struct can_rx_offload *offload)
+{
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_reset);
index cdc0c7433a4b534255b01cdbafa43c0cca831aac..4d4492884e0b003cc5c8c0a1067f7c1a5acc85a3 100644 (file)
@@ -310,7 +310,7 @@ pcmcia_bad:
 pcmcia_failed:
        pcmcia_disable_device(pcmcia);
        pcmcia->priv = NULL;
-       return ret ?: -ENODEV;
+       return ret;
 }
 
 static const struct pcmcia_device_id softingcs_ids[] = {
index c71a03593595adb24a11e13053e2b3d26be8a4ec..89aec07c225f58d26a80ce4795afbaa6c19d9d84 100644 (file)
@@ -726,7 +726,7 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
                can_led_event(ndev, CAN_LED_EVENT_RX);
 
        if (work_done < quota) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                ier = priv->read_reg(priv, XCAN_IER_OFFSET);
                ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
                priv->write_reg(priv, XCAN_IER_OFFSET, ier);
index 8346e4f9737af50cfacae7e2639649d2255796cf..a3c94163221723c610791bd0e0a98df05c7dc221 100644 (file)
@@ -1,5 +1,6 @@
 obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_BCM_SF2)  += bcm_sf2.o
+obj-$(CONFIG_NET_DSA_BCM_SF2)  += bcm-sf2.o
+bcm-sf2-objs                   := bcm_sf2.o bcm_sf2_cfp.o
 obj-$(CONFIG_NET_DSA_QCA8K)    += qca8k.o
 
 obj-y                          += b53/
index 947adda3397d64ce9e86f5cfe8e300b4e8650827..8cf4801994e883be64010934a0413cfbdb86ed16 100644 (file)
@@ -712,7 +712,7 @@ static unsigned int b53_get_mib_size(struct b53_device *dev)
                return B53_MIBS_SIZE;
 }
 
-static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
 {
        struct b53_device *dev = ds->priv;
        const struct b53_mib_desc *mibs = b53_get_mib(dev);
@@ -723,9 +723,9 @@ static void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
                memcpy(data + i * ETH_GSTRING_LEN,
                       mibs[i].name, ETH_GSTRING_LEN);
 }
+EXPORT_SYMBOL(b53_get_strings);
 
-static void b53_get_ethtool_stats(struct dsa_switch *ds, int port,
-                                 uint64_t *data)
+void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
 {
        struct b53_device *dev = ds->priv;
        const struct b53_mib_desc *mibs = b53_get_mib(dev);
@@ -756,13 +756,15 @@ static void b53_get_ethtool_stats(struct dsa_switch *ds, int port,
 
        mutex_unlock(&dev->stats_mutex);
 }
+EXPORT_SYMBOL(b53_get_ethtool_stats);
 
-static int b53_get_sset_count(struct dsa_switch *ds)
+int b53_get_sset_count(struct dsa_switch *ds)
 {
        struct b53_device *dev = ds->priv;
 
        return b53_get_mib_size(dev);
 }
+EXPORT_SYMBOL(b53_get_sset_count);
 
 static int b53_setup(struct dsa_switch *ds)
 {
@@ -921,15 +923,15 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
        }
 }
 
-static int b53_vlan_filtering(struct dsa_switch *ds, int port,
-                             bool vlan_filtering)
+int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
 {
        return 0;
 }
+EXPORT_SYMBOL(b53_vlan_filtering);
 
-static int b53_vlan_prepare(struct dsa_switch *ds, int port,
-                           const struct switchdev_obj_port_vlan *vlan,
-                           struct switchdev_trans *trans)
+int b53_vlan_prepare(struct dsa_switch *ds, int port,
+                    const struct switchdev_obj_port_vlan *vlan,
+                    struct switchdev_trans *trans)
 {
        struct b53_device *dev = ds->priv;
 
@@ -943,10 +945,11 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,
 
        return 0;
 }
+EXPORT_SYMBOL(b53_vlan_prepare);
 
-static void b53_vlan_add(struct dsa_switch *ds, int port,
-                        const struct switchdev_obj_port_vlan *vlan,
-                        struct switchdev_trans *trans)
+void b53_vlan_add(struct dsa_switch *ds, int port,
+                 const struct switchdev_obj_port_vlan *vlan,
+                 struct switchdev_trans *trans)
 {
        struct b53_device *dev = ds->priv;
        bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -977,9 +980,10 @@ static void b53_vlan_add(struct dsa_switch *ds, int port,
                b53_fast_age_vlan(dev, vid);
        }
 }
+EXPORT_SYMBOL(b53_vlan_add);
 
-static int b53_vlan_del(struct dsa_switch *ds, int port,
-                       const struct switchdev_obj_port_vlan *vlan)
+int b53_vlan_del(struct dsa_switch *ds, int port,
+                const struct switchdev_obj_port_vlan *vlan)
 {
        struct b53_device *dev = ds->priv;
        bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1015,10 +1019,11 @@ static int b53_vlan_del(struct dsa_switch *ds, int port,
 
        return 0;
 }
+EXPORT_SYMBOL(b53_vlan_del);
 
-static int b53_vlan_dump(struct dsa_switch *ds, int port,
-                        struct switchdev_obj_port_vlan *vlan,
-                        int (*cb)(struct switchdev_obj *obj))
+int b53_vlan_dump(struct dsa_switch *ds, int port,
+                 struct switchdev_obj_port_vlan *vlan,
+                 int (*cb)(struct switchdev_obj *obj))
 {
        struct b53_device *dev = ds->priv;
        u16 vid, vid_start = 0, pvid;
@@ -1057,6 +1062,7 @@ static int b53_vlan_dump(struct dsa_switch *ds, int port,
 
        return err;
 }
+EXPORT_SYMBOL(b53_vlan_dump);
 
 /* Address Resolution Logic routines */
 static int b53_arl_op_wait(struct b53_device *dev)
@@ -1137,7 +1143,7 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
        int ret;
 
        /* Convert the array into a 64-bit MAC */
-       mac = b53_mac_to_u64(addr);
+       mac = ether_addr_to_u64(addr);
 
        /* Perform a read for the given MAC and VID */
        b53_write48(dev, B53_ARLIO_PAGE, B53_MAC_ADDR_IDX, mac);
@@ -1175,9 +1181,9 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
        return b53_arl_rw_op(dev, 0);
 }
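
/* ether_addr_to_u64() (a linux/etherdevice.h helper) packs the address
 * most-significant-byte first, exactly what the open-coded helper this
 * patch removes from b53_priv.h produced:
 */
static void demo_mac_pack(void)
{
        u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        u64 v = ether_addr_to_u64(mac); /* v == 0x001122334455ULL */
}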
 
-static int b53_fdb_prepare(struct dsa_switch *ds, int port,
-                          const struct switchdev_obj_port_fdb *fdb,
-                          struct switchdev_trans *trans)
+int b53_fdb_prepare(struct dsa_switch *ds, int port,
+                   const struct switchdev_obj_port_fdb *fdb,
+                   struct switchdev_trans *trans)
 {
        struct b53_device *priv = ds->priv;
 
@@ -1189,24 +1195,27 @@ static int b53_fdb_prepare(struct dsa_switch *ds, int port,
 
        return 0;
 }
+EXPORT_SYMBOL(b53_fdb_prepare);
 
-static void b53_fdb_add(struct dsa_switch *ds, int port,
-                       const struct switchdev_obj_port_fdb *fdb,
-                       struct switchdev_trans *trans)
+void b53_fdb_add(struct dsa_switch *ds, int port,
+                const struct switchdev_obj_port_fdb *fdb,
+                struct switchdev_trans *trans)
 {
        struct b53_device *priv = ds->priv;
 
        if (b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
                pr_err("%s: failed to add MAC address\n", __func__);
 }
+EXPORT_SYMBOL(b53_fdb_add);
 
-static int b53_fdb_del(struct dsa_switch *ds, int port,
-                      const struct switchdev_obj_port_fdb *fdb)
+int b53_fdb_del(struct dsa_switch *ds, int port,
+               const struct switchdev_obj_port_fdb *fdb)
 {
        struct b53_device *priv = ds->priv;
 
        return b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, false);
 }
+EXPORT_SYMBOL(b53_fdb_del);
 
 static int b53_arl_search_wait(struct b53_device *dev)
 {
@@ -1258,9 +1267,9 @@ static int b53_fdb_copy(struct net_device *dev, int port,
        return cb(&fdb->obj);
 }
 
-static int b53_fdb_dump(struct dsa_switch *ds, int port,
-                       struct switchdev_obj_port_fdb *fdb,
-                       int (*cb)(struct switchdev_obj *obj))
+int b53_fdb_dump(struct dsa_switch *ds, int port,
+                struct switchdev_obj_port_fdb *fdb,
+                int (*cb)(struct switchdev_obj *obj))
 {
        struct b53_device *priv = ds->priv;
        struct net_device *dev = ds->ports[port].netdev;
@@ -1297,9 +1306,9 @@ static int b53_fdb_dump(struct dsa_switch *ds, int port,
 
        return 0;
 }
+EXPORT_SYMBOL(b53_fdb_dump);
 
-static int b53_br_join(struct dsa_switch *ds, int port,
-                      struct net_device *bridge)
+int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
 {
        struct b53_device *dev = ds->priv;
        s8 cpu_port = ds->dst->cpu_port;
@@ -1317,11 +1326,10 @@ static int b53_br_join(struct dsa_switch *ds, int port,
                b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
        }
 
-       dev->ports[port].bridge_dev = bridge;
        b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
 
        b53_for_each_port(dev, i) {
-               if (dev->ports[i].bridge_dev != bridge)
+               if (ds->ports[i].bridge_dev != br)
                        continue;
 
                /* Add this local port to the remote port VLAN control
@@ -1343,11 +1351,11 @@ static int b53_br_join(struct dsa_switch *ds, int port,
 
        return 0;
 }
+EXPORT_SYMBOL(b53_br_join);
 
-static void b53_br_leave(struct dsa_switch *ds, int port)
+void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
 {
        struct b53_device *dev = ds->priv;
-       struct net_device *bridge = dev->ports[port].bridge_dev;
        struct b53_vlan *vl = &dev->vlans[0];
        s8 cpu_port = ds->dst->cpu_port;
        unsigned int i;
@@ -1357,7 +1365,7 @@ static void b53_br_leave(struct dsa_switch *ds, int port)
 
        b53_for_each_port(dev, i) {
                /* Don't touch the remaining ports */
-               if (dev->ports[i].bridge_dev != bridge)
+               if (ds->ports[i].bridge_dev != br)
                        continue;
 
                b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
@@ -1372,7 +1380,6 @@ static void b53_br_leave(struct dsa_switch *ds, int port)
 
        b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
        dev->ports[port].vlan_ctl_mask = pvlan;
-       dev->ports[port].bridge_dev = NULL;
 
        if (is5325(dev) || is5365(dev))
                pvid = 1;
@@ -1393,8 +1400,9 @@ static void b53_br_leave(struct dsa_switch *ds, int port)
                b53_set_vlan_entry(dev, pvid, vl);
        }
 }
+EXPORT_SYMBOL(b53_br_leave);
 
-static void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
+void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
 {
        struct b53_device *dev = ds->priv;
        u8 hw_state;
@@ -1426,21 +1434,88 @@ static void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state)
        reg |= hw_state;
        b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg);
 }
+EXPORT_SYMBOL(b53_br_set_stp_state);
 
-static void b53_br_fast_age(struct dsa_switch *ds, int port)
+void b53_br_fast_age(struct dsa_switch *ds, int port)
 {
        struct b53_device *dev = ds->priv;
 
        if (b53_fast_age_port(dev, port))
                dev_err(ds->dev, "fast ageing failed\n");
 }
+EXPORT_SYMBOL(b53_br_fast_age);
 
 static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds)
 {
        return DSA_TAG_PROTO_NONE;
 }
 
-static struct dsa_switch_ops b53_switch_ops = {
+int b53_mirror_add(struct dsa_switch *ds, int port,
+                  struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
+{
+       struct b53_device *dev = ds->priv;
+       u16 reg, loc;
+
+       if (ingress)
+               loc = B53_IG_MIR_CTL;
+       else
+               loc = B53_EG_MIR_CTL;
+
+       b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
+       reg &= ~MIRROR_MASK;
+       reg |= BIT(port);
+       b53_write16(dev, B53_MGMT_PAGE, loc, reg);
+
+       b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
+       reg &= ~CAP_PORT_MASK;
+       reg |= mirror->to_local_port;
+       reg |= MIRROR_EN;
+       b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
+
+       return 0;
+}
+EXPORT_SYMBOL(b53_mirror_add);
+
+void b53_mirror_del(struct dsa_switch *ds, int port,
+                   struct dsa_mall_mirror_tc_entry *mirror)
+{
+       struct b53_device *dev = ds->priv;
+       bool loc_disable = false, other_loc_disable = false;
+       u16 reg, loc;
+
+       if (mirror->ingress)
+               loc = B53_IG_MIR_CTL;
+       else
+               loc = B53_EG_MIR_CTL;
+
+       /* Update the desired ingress/egress register */
+       b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
+       reg &= ~BIT(port);
+       if (!(reg & MIRROR_MASK))
+               loc_disable = true;
+       b53_write16(dev, B53_MGMT_PAGE, loc, reg);
+
+       /* Now look at the other one to know if we can disable mirroring
+        * entirely
+        */
+       if (mirror->ingress)
+               b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
+       else
+               b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
+       if (!(reg & MIRROR_MASK))
+               other_loc_disable = true;
+
+       b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
+       /* Neither direction has ports left; disable mirroring entirely */
+       if (loc_disable && other_loc_disable) {
+               reg &= ~MIRROR_EN;
+               reg &= ~mirror->to_local_port;
+       }
+       b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
+}
+EXPORT_SYMBOL(b53_mirror_del);
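
/* A sketch of the register writes b53_mirror_add() performs, with
 * hypothetical ports (mirror ingress of port 1 to capture port 7);
 * the field names are the B53_* definitions added to b53_regs.h below.
 */
static void demo_mirror_setup(struct b53_device *dev)
{
        u16 reg;

        b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
        reg = (reg & ~MIRROR_MASK) | BIT(1);    /* mirror port 1 ingress */
        b53_write16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, reg);

        b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
        reg = (reg & ~CAP_PORT_MASK) | 7 | MIRROR_EN;   /* capture on 7 */
        b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
}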
+
+static const struct dsa_switch_ops b53_switch_ops = {
        .get_tag_protocol       = b53_get_tag_protocol,
        .setup                  = b53_setup,
        .get_strings            = b53_get_strings,
@@ -1464,6 +1539,8 @@ static struct dsa_switch_ops b53_switch_ops = {
        .port_fdb_dump          = b53_fdb_dump,
        .port_fdb_add           = b53_fdb_add,
        .port_fdb_del           = b53_fdb_del,
+       .port_mirror_add        = b53_mirror_add,
+       .port_mirror_del        = b53_mirror_del,
 };
 
 struct b53_chip_data {
@@ -1672,6 +1749,18 @@ static const struct b53_chip_data b53_switch_chips[] = {
                .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
                .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
        },
+       {
+               .chip_id = BCM7278_DEVICE_ID,
+               .dev_name = "BCM7278",
+               .vlans = 4096,
+               .enabled_ports = 0x1ff,
+               .arl_entries = 4,
+               .cpu_port = B53_CPU_PORT,
+               .vta_regs = B53_VTA_REGS,
+               .duplex_reg = B53_DUPLEX_STAT_GE,
+               .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+               .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+       },
 };
 
 static int b53_switch_init(struct b53_device *dev)
@@ -1765,14 +1854,15 @@ struct b53_device *b53_switch_alloc(struct device *base,
        struct dsa_switch *ds;
        struct b53_device *dev;
 
-       ds = devm_kzalloc(base, sizeof(*ds) + sizeof(*dev), GFP_KERNEL);
+       ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
        if (!ds)
                return NULL;
 
-       dev = (struct b53_device *)(ds + 1);
+       dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return NULL;
 
        ds->priv = dev;
-       ds->dev = base;
        dev->dev = base;
 
        dev->ds = ds;
@@ -1869,7 +1959,7 @@ int b53_switch_register(struct b53_device *dev)
 
        pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);
 
-       return dsa_register_switch(dev->ds, dev->ds->dev->of_node);
+       return dsa_register_switch(dev->ds, dev->ds->dev);
 }
 EXPORT_SYMBOL(b53_switch_register);
 
index 477a16b5660ab61f7bda3dce2438d95b5806513c..fa7556f5d4fb1b98a1da56c18bccad65f020171f 100644 (file)
@@ -375,18 +375,7 @@ static struct mdio_driver b53_mdio_driver = {
                .of_match_table = b53_of_match,
        },
 };
-
-static int __init b53_mdio_driver_register(void)
-{
-       return mdio_driver_register(&b53_mdio_driver);
-}
-module_init(b53_mdio_driver_register);
-
-static void __exit b53_mdio_driver_unregister(void)
-{
-       mdio_driver_unregister(&b53_mdio_driver);
-}
-module_exit(b53_mdio_driver_unregister);
+mdio_module_driver(b53_mdio_driver);
 
 MODULE_DESCRIPTION("B53 MDIO access driver");
 MODULE_LICENSE("Dual BSD/GPL");
index f192a673caba4f0c687216934dbbb78190f5d0c8..a9dc90a01438d26ca1d974c5150416988a67a7ba 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/kernel.h>
 #include <linux/mutex.h>
 #include <linux/phy.h>
+#include <linux/etherdevice.h>
 #include <net/dsa.h>
 
 #include "b53_regs.h"
@@ -61,6 +62,7 @@ enum {
        BCM53019_DEVICE_ID = 0x53019,
        BCM58XX_DEVICE_ID = 0x5800,
        BCM7445_DEVICE_ID = 0x7445,
+       BCM7278_DEVICE_ID = 0x7278,
 };
 
 #define B53_N_PORTS    9
@@ -68,7 +70,6 @@ enum {
 
 struct b53_port {
        u16             vlan_ctl_mask;
-       struct net_device *bridge_dev;
 };
 
 struct b53_vlan {
@@ -178,7 +179,8 @@ static inline int is5301x(struct b53_device *dev)
 static inline int is58xx(struct b53_device *dev)
 {
        return dev->chip_id == BCM58XX_DEVICE_ID ||
-               dev->chip_id == BCM7445_DEVICE_ID;
+               dev->chip_id == BCM7445_DEVICE_ID ||
+               dev->chip_id == BCM7278_DEVICE_ID;
 }
 
 #define B53_CPU_PORT_25        5
@@ -325,25 +327,6 @@ struct b53_arl_entry {
        u8 is_static:1;
 };
 
-static inline void b53_mac_from_u64(u64 src, u8 *dst)
-{
-       unsigned int i;
-
-       for (i = 0; i < ETH_ALEN; i++)
-               dst[ETH_ALEN - 1 - i] = (src >> (8 * i)) & 0xff;
-}
-
-static inline u64 b53_mac_to_u64(const u8 *src)
-{
-       unsigned int i;
-       u64 dst = 0;
-
-       for (i = 0; i < ETH_ALEN; i++)
-               dst |= (u64)src[ETH_ALEN - 1 - i] << (8 * i);
-
-       return dst;
-}
-
 static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
                                    u64 mac_vid, u32 fwd_entry)
 {
@@ -352,14 +335,14 @@ static inline void b53_arl_to_entry(struct b53_arl_entry *ent,
        ent->is_valid = !!(fwd_entry & ARLTBL_VALID);
        ent->is_age = !!(fwd_entry & ARLTBL_AGE);
        ent->is_static = !!(fwd_entry & ARLTBL_STATIC);
-       b53_mac_from_u64(mac_vid, ent->mac);
+       u64_to_ether_addr(mac_vid, ent->mac);
        ent->vid = mac_vid >> ARLTBL_VID_S;
 }
 
 static inline void b53_arl_from_entry(u64 *mac_vid, u32 *fwd_entry,
                                      const struct b53_arl_entry *ent)
 {
-       *mac_vid = b53_mac_to_u64(ent->mac);
+       *mac_vid = ether_addr_to_u64(ent->mac);
        *mac_vid |= (u64)(ent->vid & ARLTBL_VID_MASK) << ARLTBL_VID_S;
        *fwd_entry = ent->port & ARLTBL_DATA_PORT_ID_MASK;
        if (ent->is_valid)
@@ -392,4 +375,41 @@ static inline int b53_switch_get_reset_gpio(struct b53_device *dev)
        return -ENOENT;
 }
 #endif
+
+/* Exported functions towards other drivers */
+void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
+void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
+int b53_get_sset_count(struct dsa_switch *ds);
+int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge);
+void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
+void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
+void b53_br_fast_age(struct dsa_switch *ds, int port);
+int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
+int b53_vlan_prepare(struct dsa_switch *ds, int port,
+                    const struct switchdev_obj_port_vlan *vlan,
+                    struct switchdev_trans *trans);
+void b53_vlan_add(struct dsa_switch *ds, int port,
+                 const struct switchdev_obj_port_vlan *vlan,
+                 struct switchdev_trans *trans);
+int b53_vlan_del(struct dsa_switch *ds, int port,
+                const struct switchdev_obj_port_vlan *vlan);
+int b53_vlan_dump(struct dsa_switch *ds, int port,
+                 struct switchdev_obj_port_vlan *vlan,
+                 int (*cb)(struct switchdev_obj *obj));
+int b53_fdb_prepare(struct dsa_switch *ds, int port,
+                   const struct switchdev_obj_port_fdb *fdb,
+                   struct switchdev_trans *trans);
+void b53_fdb_add(struct dsa_switch *ds, int port,
+                const struct switchdev_obj_port_fdb *fdb,
+                struct switchdev_trans *trans);
+int b53_fdb_del(struct dsa_switch *ds, int port,
+               const struct switchdev_obj_port_fdb *fdb);
+int b53_fdb_dump(struct dsa_switch *ds, int port,
+                struct switchdev_obj_port_fdb *fdb,
+                int (*cb)(struct switchdev_obj *obj));
+int b53_mirror_add(struct dsa_switch *ds, int port,
+                  struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
+void b53_mirror_del(struct dsa_switch *ds, int port,
+                   struct dsa_mall_mirror_tc_entry *mirror);
+
 #endif
index dac0af4e2cd010c98dd05e9c328ee03b1fb583d6..9fd24c418fa4256e8517d5dec1dfa97681ba72db 100644 (file)
 #define   BRCM_HDR_P8_EN               BIT(0) /* Enable tagging on port 8 */
 #define   BRCM_HDR_P5_EN               BIT(1) /* Enable tagging on port 5 */
 
+/* Mirror capture control register (16 bit) */
+#define B53_MIR_CAP_CTL                        0x10
+#define  CAP_PORT_MASK                 0xf
+#define  BLK_NOT_MIR                   BIT(14)
+#define  MIRROR_EN                     BIT(15)
+
+/* Ingress mirror control register (16 bit) */
+#define B53_IG_MIR_CTL                 0x12
+#define  MIRROR_MASK                   0x1ff
+#define  DIV_EN                                BIT(13)
+#define  MIRROR_FILTER_MASK            0x3
+#define  MIRROR_FILTER_SHIFT           14
+#define  MIRROR_ALL                    0
+#define  MIRROR_DA                     1
+#define  MIRROR_SA                     2
+
+/* Ingress mirror divider register (16 bit) */
+#define B53_IG_MIR_DIV                 0x14
+#define  IN_MIRROR_DIV_MASK            0x3ff
+
+/* Ingress mirror MAC address register (48 bit) */
+#define B53_IG_MIR_MAC                 0x16
+
+/* Egress mirror control register (16 bit) */
+#define B53_EG_MIR_CTL                 0x1C
+
+/* Egress mirror divider register (16 bit) */
+#define B53_EG_MIR_DIV                 0x1E
+
+/* Egress mirror MAC address register (48 bit) */
+#define B53_EG_MIR_MAC                 0x20
+
 /* Device ID register (8 or 32 bit) */
 #define B53_DEVICE_ID                  0x30
 
index 2ce7ae97ac9148d39137eff66954dfc3b4cf239b..2be963252ca56a6caeb64434a694f08e1cc7719f 100644 (file)
@@ -61,30 +61,10 @@ static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
        }
 }
 
-static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
+static void bcm_sf2_brcm_hdr_setup(struct bcm_sf2_priv *priv, int port)
 {
-       struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        u32 reg, val;
 
-       /* Enable the port memories */
-       reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
-       reg &= ~P_TXQ_PSM_VDD(port);
-       core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
-
-       /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
-       reg = core_readl(priv, CORE_IMP_CTL);
-       reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
-       reg &= ~(RX_DIS | TX_DIS);
-       core_writel(priv, reg, CORE_IMP_CTL);
-
-       /* Enable forwarding */
-       core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
-
-       /* Enable IMP port in dumb mode */
-       reg = core_readl(priv, CORE_SWITCH_CTRL);
-       reg |= MII_DUMB_FWDG_EN;
-       core_writel(priv, reg, CORE_SWITCH_CTRL);
-
        /* Resolve which bit controls the Broadcom tag */
        switch (port) {
        case 8:
@@ -119,11 +99,43 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
        reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
        reg &= ~(1 << port);
        core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);
+}
+
+static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
+{
+       struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+       u32 reg, offset;
+
+       if (priv->type == BCM7445_DEVICE_ID)
+               offset = CORE_STS_OVERRIDE_IMP;
+       else
+               offset = CORE_STS_OVERRIDE_IMP2;
+
+       /* Enable the port memories */
+       reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
+       reg &= ~P_TXQ_PSM_VDD(port);
+       core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+
+       /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+       reg = core_readl(priv, CORE_IMP_CTL);
+       reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
+       reg &= ~(RX_DIS | TX_DIS);
+       core_writel(priv, reg, CORE_IMP_CTL);
+
+       /* Enable forwarding */
+       core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
+
+       /* Enable IMP port in dumb mode */
+       reg = core_readl(priv, CORE_SWITCH_CTRL);
+       reg |= MII_DUMB_FWDG_EN;
+       core_writel(priv, reg, CORE_SWITCH_CTRL);
+
+       bcm_sf2_brcm_hdr_setup(priv, port);
 
        /* Force link status for IMP port */
-       reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
+       reg = core_readl(priv, offset);
        reg |= (MII_SW_OR | LINK_STS);
-       core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
+       core_writel(priv, reg, offset);
 }
 
 static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
@@ -217,6 +229,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
 {
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        s8 cpu_port = ds->dst[ds->index].cpu_port;
+       unsigned int i;
        u32 reg;
 
        /* Clear the memory power down */
@@ -224,6 +237,18 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
        reg &= ~P_TXQ_PSM_VDD(port);
        core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
 
+       /* Enable Broadcom tags for that port if requested */
+       if (priv->brcm_tag_mask & BIT(port))
+               bcm_sf2_brcm_hdr_setup(priv, port);
+
+       /* Configure Traffic Class to QoS mapping, allowing each priority to
+        * map to a different queue number
+        */
+       reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
+       for (i = 0; i < 8; i++)
+               reg |= i << (PRT_TO_QID_SHIFT * i);
+       core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
+
        /* Clear the Rx and Tx disable bits and set to no spanning tree */
        core_writel(priv, 0, CORE_G_PCTL_PORT(port));
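
The new TC2QOS loop above builds an identity traffic-class-to-queue map: each 3-bit field i (PRT_TO_QID_SHIFT is 3, per bcm_sf2_regs.h further down) carries queue number i. A standalone sketch, for illustration only, that prints the value the loop ORs in:

#include <stdio.h>

#define PRT_TO_QID_SHIFT	3	/* from bcm_sf2_regs.h */

int main(void)
{
	unsigned int i, reg = 0;

	/* Identity mapping: traffic class i egresses on queue i */
	for (i = 0; i < 8; i++)
		reg |= i << (PRT_TO_QID_SHIFT * i);

	printf("TC2QOS map: 0x%08x\n", reg);	/* prints 0x00fac688 */
	return 0;
}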
 
@@ -503,6 +528,9 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
 
                if (mode == PHY_INTERFACE_MODE_MOCA)
                        priv->moca_port = port_num;
+
+               if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
+                       priv->brcm_tag_mask |= 1 << port_num;
        }
 }
 
@@ -591,7 +619,12 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
        struct ethtool_eee *p = &priv->port_sts[port].eee;
        u32 id_mode_dis = 0, port_mode;
        const char *str = NULL;
-       u32 reg;
+       u32 reg, offset;
+
+       if (priv->type == BCM7445_DEVICE_ID)
+               offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
+       else
+               offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
 
        switch (phydev->interface) {
        case PHY_INTERFACE_MODE_RGMII:
@@ -662,7 +695,7 @@ force_link:
        if (phydev->duplex == DUPLEX_FULL)
                reg |= DUPLX_MODE;
 
-       core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+       core_writel(priv, reg, offset);
 
        if (!phydev->is_pseudo_fixed_link)
                p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
@@ -672,9 +705,14 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
                                         struct fixed_phy_status *status)
 {
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
-       u32 duplex, pause;
+       u32 duplex, pause, offset;
        u32 reg;
 
+       if (priv->type == BCM7445_DEVICE_ID)
+               offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
+       else
+               offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+
        duplex = core_readl(priv, CORE_DUPSTS);
        pause = core_readl(priv, CORE_PAUSESTS);
 
@@ -703,13 +741,13 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
                status->duplex = !!(duplex & (1 << port));
        }
 
-       reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+       reg = core_readl(priv, offset);
        reg |= SW_OVERRIDE;
        if (status->link)
                reg |= LINK_STS;
        else
                reg &= ~LINK_STS;
-       core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+       core_writel(priv, reg, offset);
 
        if ((pause & (1 << port)) &&
            (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
@@ -977,10 +1015,110 @@ static struct b53_io_ops bcm_sf2_io_ops = {
        .write64 = bcm_sf2_core_write64,
 };
 
+static const struct dsa_switch_ops bcm_sf2_ops = {
+       .get_tag_protocol       = bcm_sf2_sw_get_tag_protocol,
+       .setup                  = bcm_sf2_sw_setup,
+       .get_strings            = b53_get_strings,
+       .get_ethtool_stats      = b53_get_ethtool_stats,
+       .get_sset_count         = b53_get_sset_count,
+       .get_phy_flags          = bcm_sf2_sw_get_phy_flags,
+       .adjust_link            = bcm_sf2_sw_adjust_link,
+       .fixed_link_update      = bcm_sf2_sw_fixed_link_update,
+       .suspend                = bcm_sf2_sw_suspend,
+       .resume                 = bcm_sf2_sw_resume,
+       .get_wol                = bcm_sf2_sw_get_wol,
+       .set_wol                = bcm_sf2_sw_set_wol,
+       .port_enable            = bcm_sf2_port_setup,
+       .port_disable           = bcm_sf2_port_disable,
+       .get_eee                = bcm_sf2_sw_get_eee,
+       .set_eee                = bcm_sf2_sw_set_eee,
+       .port_bridge_join       = b53_br_join,
+       .port_bridge_leave      = b53_br_leave,
+       .port_stp_state_set     = b53_br_set_stp_state,
+       .port_fast_age          = b53_br_fast_age,
+       .port_vlan_filtering    = b53_vlan_filtering,
+       .port_vlan_prepare      = b53_vlan_prepare,
+       .port_vlan_add          = b53_vlan_add,
+       .port_vlan_del          = b53_vlan_del,
+       .port_vlan_dump         = b53_vlan_dump,
+       .port_fdb_prepare       = b53_fdb_prepare,
+       .port_fdb_dump          = b53_fdb_dump,
+       .port_fdb_add           = b53_fdb_add,
+       .port_fdb_del           = b53_fdb_del,
+       .get_rxnfc              = bcm_sf2_get_rxnfc,
+       .set_rxnfc              = bcm_sf2_set_rxnfc,
+       .port_mirror_add        = b53_mirror_add,
+       .port_mirror_del        = b53_mirror_del,
+};
+
+struct bcm_sf2_of_data {
+       u32 type;
+       const u16 *reg_offsets;
+       unsigned int core_reg_align;
+};
+
+/* Register offsets for the SWITCH_REG_* block */
+static const u16 bcm_sf2_7445_reg_offsets[] = {
+       [REG_SWITCH_CNTRL]      = 0x00,
+       [REG_SWITCH_STATUS]     = 0x04,
+       [REG_DIR_DATA_WRITE]    = 0x08,
+       [REG_DIR_DATA_READ]     = 0x0C,
+       [REG_SWITCH_REVISION]   = 0x18,
+       [REG_PHY_REVISION]      = 0x1C,
+       [REG_SPHY_CNTRL]        = 0x2C,
+       [REG_RGMII_0_CNTRL]     = 0x34,
+       [REG_RGMII_1_CNTRL]     = 0x40,
+       [REG_RGMII_2_CNTRL]     = 0x4c,
+       [REG_LED_0_CNTRL]       = 0x90,
+       [REG_LED_1_CNTRL]       = 0x94,
+       [REG_LED_2_CNTRL]       = 0x98,
+};
+
+static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
+       .type           = BCM7445_DEVICE_ID,
+       .core_reg_align = 0,
+       .reg_offsets    = bcm_sf2_7445_reg_offsets,
+};
+
+static const u16 bcm_sf2_7278_reg_offsets[] = {
+       [REG_SWITCH_CNTRL]      = 0x00,
+       [REG_SWITCH_STATUS]     = 0x04,
+       [REG_DIR_DATA_WRITE]    = 0x08,
+       [REG_DIR_DATA_READ]     = 0x0c,
+       [REG_SWITCH_REVISION]   = 0x10,
+       [REG_PHY_REVISION]      = 0x14,
+       [REG_SPHY_CNTRL]        = 0x24,
+       [REG_RGMII_0_CNTRL]     = 0xe0,
+       [REG_RGMII_1_CNTRL]     = 0xec,
+       [REG_RGMII_2_CNTRL]     = 0xf8,
+       [REG_LED_0_CNTRL]       = 0x40,
+       [REG_LED_1_CNTRL]       = 0x4c,
+       [REG_LED_2_CNTRL]       = 0x58,
+};
+
+static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
+       .type           = BCM7278_DEVICE_ID,
+       .core_reg_align = 1,
+       .reg_offsets    = bcm_sf2_7278_reg_offsets,
+};
+
+static const struct of_device_id bcm_sf2_of_match[] = {
+       { .compatible = "brcm,bcm7445-switch-v4.0",
+         .data = &bcm_sf2_7445_data
+       },
+       { .compatible = "brcm,bcm7278-switch-v4.0",
+         .data = &bcm_sf2_7278_data
+       },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
+
 static int bcm_sf2_sw_probe(struct platform_device *pdev)
 {
        const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
        struct device_node *dn = pdev->dev.of_node;
+       const struct of_device_id *of_id = NULL;
+       const struct bcm_sf2_of_data *data;
        struct b53_platform_data *pdata;
        struct dsa_switch_ops *ops;
        struct bcm_sf2_priv *priv;
@@ -1008,42 +1146,38 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
        if (!pdata)
                return -ENOMEM;
 
+       of_id = of_match_node(bcm_sf2_of_match, dn);
+       if (!of_id || !of_id->data)
+               return -EINVAL;
+
+       data = of_id->data;
+
+       /* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
+       priv->type = data->type;
+       priv->reg_offsets = data->reg_offsets;
+       priv->core_reg_align = data->core_reg_align;
+
        /* Auto-detection using standard registers will not work, so
         * provide an indication of what kind of device we are for
         * b53_common to work with
         */
-       pdata->chip_id = BCM7445_DEVICE_ID;
+       pdata->chip_id = priv->type;
        dev->pdata = pdata;
 
        priv->dev = dev;
        ds = dev->ds;
-
-       /* Override the parts that are non-standard wrt. normal b53 devices */
-       memcpy(ops, ds->ops, sizeof(*ops));
-       ds->ops = ops;
-       ds->ops->get_tag_protocol = bcm_sf2_sw_get_tag_protocol;
-       ds->ops->setup = bcm_sf2_sw_setup;
-       ds->ops->get_phy_flags = bcm_sf2_sw_get_phy_flags;
-       ds->ops->adjust_link = bcm_sf2_sw_adjust_link;
-       ds->ops->fixed_link_update = bcm_sf2_sw_fixed_link_update;
-       ds->ops->suspend = bcm_sf2_sw_suspend;
-       ds->ops->resume = bcm_sf2_sw_resume;
-       ds->ops->get_wol = bcm_sf2_sw_get_wol;
-       ds->ops->set_wol = bcm_sf2_sw_set_wol;
-       ds->ops->port_enable = bcm_sf2_port_setup;
-       ds->ops->port_disable = bcm_sf2_port_disable;
-       ds->ops->get_eee = bcm_sf2_sw_get_eee;
-       ds->ops->set_eee = bcm_sf2_sw_set_eee;
-
-       /* Avoid having DSA free our slave MDIO bus (checking for
-        * ds->slave_mii_bus and ds->ops->phy_read being non-NULL)
-        */
-       ds->ops->phy_read = NULL;
+       ds->ops = &bcm_sf2_ops;
 
        dev_set_drvdata(&pdev->dev, priv);
 
        spin_lock_init(&priv->indir_lock);
        mutex_init(&priv->stats_mutex);
+       mutex_init(&priv->cfp.lock);
+
+       /* CFP rule #0 cannot be used for specific classifications, flag it as
+        * permanently used
+        */
+       set_bit(0, priv->cfp.used);
 
        bcm_sf2_identify_ports(priv, dn->child);
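
Flagging rule #0 as used means the slot allocator in bcm_sf2_cfp_rule_set() (in the new bcm_sf2_cfp.c below), which calls find_first_zero_bit(), can never hand out the reserved entry. A minimal host-side sketch of that bitmap behavior, with simplified stand-ins for the kernel helpers:

#include <stdio.h>

#define CFP_NUM_RULES	256
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static unsigned long used[CFP_NUM_RULES / 32];	/* large enough on any arch */

static void set_bit(unsigned int nr, unsigned long *map)
{
	map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static unsigned int find_first_zero_bit(const unsigned long *map,
					unsigned int size)
{
	unsigned int nr;

	for (nr = 0; nr < size; nr++)
		if (!(map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG))))
			return nr;
	return size;
}

int main(void)
{
	set_bit(0, used);	/* reserved rule, as in probe */
	printf("first free rule: %u\n",
	       find_first_zero_bit(used, CFP_NUM_RULES - 1));	/* prints 1 */
	return 0;
}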
 
@@ -1073,6 +1207,12 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
                return ret;
        }
 
+       ret = bcm_sf2_cfp_rst(priv);
+       if (ret) {
+               pr_err("failed to reset CFP\n");
+               goto out_mdio;
+       }
+
        /* Disable all interrupts and request them */
        bcm_sf2_intr_disable(priv);
 
@@ -1179,11 +1319,6 @@ static int bcm_sf2_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
                         bcm_sf2_suspend, bcm_sf2_resume);
 
-static const struct of_device_id bcm_sf2_of_match[] = {
-       { .compatible = "brcm,bcm7445-switch-v4.0" },
-       { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
 
 static struct platform_driver bcm_sf2_driver = {
        .probe  = bcm_sf2_sw_probe,
index 44692673e1d5612588c957c82f2ad83b5ea5ff30..7d3030e04f1127bcfd8e98d32d0b0837093f52da 100644 (file)
@@ -52,6 +52,13 @@ struct bcm_sf2_port_status {
        struct ethtool_eee eee;
 };
 
+struct bcm_sf2_cfp_priv {
+       /* Mutex protecting concurrent accesses to the CFP registers */
+       struct mutex lock;
+       DECLARE_BITMAP(used, CFP_NUM_RULES);
+       unsigned int rules_cnt;
+};
+
 struct bcm_sf2_priv {
        /* Base registers, keep those in order with BCM_SF2_REGS_NAME */
        void __iomem                    *core;
@@ -61,6 +68,11 @@ struct bcm_sf2_priv {
        void __iomem                    *fcb;
        void __iomem                    *acb;
 
+       /* Register offsets indirection tables */
+       u32                             type;
+       const u16                       *reg_offsets;
+       unsigned int                    core_reg_align;
+
        /* spinlock protecting access to the indirect registers */
        spinlock_t                      indir_lock;
 
@@ -95,6 +107,12 @@ struct bcm_sf2_priv {
        struct device_node              *master_mii_dn;
        struct mii_bus                  *slave_mii_bus;
        struct mii_bus                  *master_mii_bus;
+
+       /* Bitmask of ports needing BRCM tags */
+       unsigned int                    brcm_tag_mask;
+
+       /* CFP rules context */
+       struct bcm_sf2_cfp_priv         cfp;
 };
 
 static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds)
@@ -104,6 +122,11 @@ static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds)
        return dev->priv;
 }
 
+static inline u32 bcm_sf2_mangle_addr(struct bcm_sf2_priv *priv, u32 off)
+{
+       return off << priv->core_reg_align;
+}
+
 #define SF2_IO_MACRO(name) \
 static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off)     \
 {                                                                      \
@@ -125,7 +148,7 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off)  \
 {                                                                      \
        u32 indir, dir;                                                 \
        spin_lock(&priv->indir_lock);                                   \
-       dir = __raw_readl(priv->name + off);                            \
+       dir = name##_readl(priv, off);                                  \
        indir = reg_readl(priv, REG_DIR_DATA_READ);                     \
        spin_unlock(&priv->indir_lock);                                 \
        return (u64)indir << 32 | dir;                                  \
@@ -135,7 +158,7 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val,        \
 {                                                                      \
        spin_lock(&priv->indir_lock);                                   \
        reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE);       \
-       __raw_writel(lower_32_bits(val), priv->name + off);             \
+       name##_writel(priv, lower_32_bits(val), off);                   \
        spin_unlock(&priv->indir_lock);                                 \
 }
 
@@ -153,8 +176,28 @@ static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
        priv->irq##which##_mask |= (mask);                              \
 }                                                                      \
 
-SF2_IO_MACRO(core);
-SF2_IO_MACRO(reg);
+static inline u32 core_readl(struct bcm_sf2_priv *priv, u32 off)
+{
+       u32 tmp = bcm_sf2_mangle_addr(priv, off);
+       return __raw_readl(priv->core + tmp);
+}
+
+static inline void core_writel(struct bcm_sf2_priv *priv, u32 val, u32 off)
+{
+       u32 tmp = bcm_sf2_mangle_addr(priv, off);
+       __raw_writel(val, priv->core + tmp);
+}
+
+static inline u32 reg_readl(struct bcm_sf2_priv *priv, u16 off)
+{
+       return __raw_readl(priv->reg + priv->reg_offsets[off]);
+}
+
+static inline void reg_writel(struct bcm_sf2_priv *priv, u32 val, u16 off)
+{
+       __raw_writel(val, priv->reg + priv->reg_offsets[off]);
+}
+
 SF2_IO64_MACRO(core);
 SF2_IO_MACRO(intrl2_0);
 SF2_IO_MACRO(intrl2_1);
@@ -164,4 +207,11 @@ SF2_IO_MACRO(acb);
 SWITCH_INTR_L2(0);
 SWITCH_INTR_L2(1);
 
+/* RXNFC */
+int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
+                     struct ethtool_rxnfc *nfc, u32 *rule_locs);
+int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
+                     struct ethtool_rxnfc *nfc);
+int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv);
+
 #endif /* __BCM_SF2_H */
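
Worked example for the indirection above: with core_reg_align set to 1 (the 7278 data in bcm_sf2.c), every SWITCH_CORE offset is shifted left once, i.e. doubled, before hitting the bus, while SWITCH_REG accesses instead go through the per-chip reg_offsets table. A quick host-side check of the core mangling:

#include <stdio.h>

#define CORE_NEW_CTRL	0x00084	/* from bcm_sf2_regs.h */

static unsigned int mangle(unsigned int off, unsigned int core_reg_align)
{
	return off << core_reg_align;
}

int main(void)
{
	printf("7445: 0x%x, 7278: 0x%x\n",
	       mangle(CORE_NEW_CTRL, 0), mangle(CORE_NEW_CTRL, 1));
	/* prints 7445: 0x84, 7278: 0x108 */
	return 0;
}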
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
new file mode 100644 (file)
index 0000000..346dd9a
--- /dev/null
@@ -0,0 +1,613 @@
+/*
+ * Broadcom Starfighter 2 DSA switch CFP support
+ *
+ * Copyright (C) 2016, Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <net/dsa.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/bitmap.h>
+
+#include "bcm_sf2.h"
+#include "bcm_sf2_regs.h"
+
+struct cfp_udf_layout {
+       u8 slices[UDF_NUM_SLICES];
+       u32 mask_value;
+};
+
+/* UDF slices layout for a TCPv4/UDPv4 specification */
+static const struct cfp_udf_layout udf_tcpip4_layout = {
+       .slices = {
+               /* End of L2, byte offset 12, src IP[0:15] */
+               CFG_UDF_EOL2 | 6,
+               /* End of L2, byte offset 14, src IP[16:31] */
+               CFG_UDF_EOL2 | 7,
+               /* End of L2, byte offset 16, dst IP[0:15] */
+               CFG_UDF_EOL2 | 8,
+               /* End of L2, byte offset 18, dst IP[16:31] */
+               CFG_UDF_EOL2 | 9,
+               /* End of L3, byte offset 0, src port */
+               CFG_UDF_EOL3 | 0,
+               /* End of L3, byte offset 2, dst port */
+               CFG_UDF_EOL3 | 1,
+               0, 0, 0
+       },
+       .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
+};
+
+static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
+{
+       unsigned int i, count = 0;
+
+       for (i = 0; i < UDF_NUM_SLICES; i++) {
+               if (layout[i] != 0)
+                       count++;
+       }
+
+       return count;
+}
+
+static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
+                               unsigned int slice_num,
+                               const u8 *layout)
+{
+       u32 offset = CORE_UDF_0_A_0_8_PORT_0 + slice_num * UDF_SLICE_OFFSET;
+       unsigned int i;
+
+       for (i = 0; i < UDF_NUM_SLICES; i++)
+               core_writel(priv, layout[i], offset + i * 4);
+}
+
+static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
+{
+       unsigned int timeout = 1000;
+       u32 reg;
+
+       reg = core_readl(priv, CORE_CFP_ACC);
+       reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
+       reg |= OP_STR_DONE | op;
+       core_writel(priv, reg, CORE_CFP_ACC);
+
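+       /* OP_STR_DONE is self-clearing: it drops once the requested
+        * RAM operation has completed
+        */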
+       do {
+               reg = core_readl(priv, CORE_CFP_ACC);
+               if (!(reg & OP_STR_DONE))
+                       break;
+
+               cpu_relax();
+       } while (--timeout);
+
+       if (!timeout)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
+static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
+                                            unsigned int addr)
+{
+       u32 reg;
+
+       WARN_ON(addr >= CFP_NUM_RULES);
+
+       reg = core_readl(priv, CORE_CFP_ACC);
+       reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
+       reg |= addr << XCESS_ADDR_SHIFT;
+       core_writel(priv, reg, CORE_CFP_ACC);
+}
+
+static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
+{
+       /* Entry #0 is reserved */
+       return CFP_NUM_RULES - 1;
+}
+
+static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
+                               struct ethtool_rx_flow_spec *fs)
+{
+       struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+       struct ethtool_tcpip4_spec *v4_spec;
+       const struct cfp_udf_layout *layout;
+       unsigned int slice_num, rule_index;
+       unsigned int queue_num, port_num;
+       u8 ip_proto, ip_frag;
+       u8 num_udf;
+       u32 reg;
+       int ret;
+
+       /* Check for unsupported extensions */
+       if ((fs->flow_type & FLOW_EXT) &&
+           (fs->m_ext.vlan_etype || fs->m_ext.data[1]))
+               return -EINVAL;
+
+       if (fs->location != RX_CLS_LOC_ANY &&
+           test_bit(fs->location, priv->cfp.used))
+               return -EBUSY;
+
+       if (fs->location != RX_CLS_LOC_ANY &&
+           fs->location > bcm_sf2_cfp_rule_size(priv))
+               return -EINVAL;
+
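+       /* The first extended data word carries the user's "match IP
+        * fragments" flag, applied to the IP_FRAG bit below
+        */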
+       ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+
+       /* We do not support discarding packets; check that the
+        * destination port is enabled and that we are within the
+        * number of ports supported by the switch
+        */
+       port_num = fs->ring_cookie / 8;
+
+       if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
+           !(BIT(port_num) & ds->enabled_port_mask) ||
+           port_num >= priv->hw_params.num_ports)
+               return -EINVAL;
+
+       switch (fs->flow_type & ~FLOW_EXT) {
+       case TCP_V4_FLOW:
+               ip_proto = IPPROTO_TCP;
+               v4_spec = &fs->h_u.tcp_ip4_spec;
+               break;
+       case UDP_V4_FLOW:
+               ip_proto = IPPROTO_UDP;
+               v4_spec = &fs->h_u.udp_ip4_spec;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* We only use one UDF slice for now */
+       slice_num = 1;
+       layout = &udf_tcpip4_layout;
+       num_udf = bcm_sf2_get_num_udf_slices(layout->slices);
+
+       /* Apply the UDF layout for this filter */
+       bcm_sf2_cfp_udf_set(priv, slice_num, layout->slices);
+
+       /* Apply to all packets received through this port */
+       core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));
+
+       /* S-Tag status         [31:30]
+        * C-Tag status         [29:28]
+        * L2 framing           [27:26]
+        * L3 framing           [25:24]
+        * IP ToS               [23:16]
+        * IP proto             [15:08]
+        * IP Fragm             [7]
+        * Non 1st frag         [6]
+        * IP Authen            [5]
+        * TTL range            [4:3]
+        * PPPoE session        [2]
+        * Reserved             [1]
+        * UDF_Valid[8]         [0]
+        */
+       core_writel(priv, v4_spec->tos << 16 | ip_proto << 8 | ip_frag << 7,
+                   CORE_CFP_DATA_PORT(6));
+
+       /* UDF_Valid[7:0]       [31:24]
+        * S-Tag                [23:8]
+        * C-Tag                [7:0]
+        */
+       core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_DATA_PORT(5));
+
+       /* C-Tag                [31:24]
+        * UDF_n_A8             [23:8]
+        * UDF_n_A7             [7:0]
+        */
+       core_writel(priv, 0, CORE_CFP_DATA_PORT(4));
+
+       /* UDF_n_A7             [31:24]
+        * UDF_n_A6             [23:8]
+        * UDF_n_A5             [7:0]
+        */
+       core_writel(priv, be16_to_cpu(v4_spec->pdst) >> 8,
+                   CORE_CFP_DATA_PORT(3));
+
+       /* UDF_n_A5             [31:24]
+        * UDF_n_A4             [23:8]
+        * UDF_n_A3             [7:0]
+        */
+       reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
+             (u32)be16_to_cpu(v4_spec->psrc) << 8 |
+             (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
+       core_writel(priv, reg, CORE_CFP_DATA_PORT(2));
+
+       /* UDF_n_A3             [31:24]
+        * UDF_n_A2             [23:8]
+        * UDF_n_A1             [7:0]
+        */
+       reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
+             (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
+             (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
+       core_writel(priv, reg, CORE_CFP_DATA_PORT(1));
+
+       /* UDF_n_A1             [31:24]
+        * UDF_n_A0             [23:8]
+        * Reserved             [7:4]
+        * Slice ID             [3:2]
+        * Slice valid          [1:0]
+        */
+       reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
+             (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
+             SLICE_NUM(slice_num) | SLICE_VALID;
+       core_writel(priv, reg, CORE_CFP_DATA_PORT(0));
+
+       /* Source port map match */
+       core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));
+
+       /* Mask with the specific layout for IPv4 packets */
+       core_writel(priv, layout->mask_value, CORE_CFP_MASK_PORT(6));
+
+       /* Mask all but valid UDFs */
+       core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_MASK_PORT(5));
+
+       /* Mask all */
+       core_writel(priv, 0, CORE_CFP_MASK_PORT(4));
+
+       /* All other UDFs should be matched with the filter */
+       core_writel(priv, 0xff, CORE_CFP_MASK_PORT(3));
+       core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(2));
+       core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(1));
+       core_writel(priv, 0xffffff0f, CORE_CFP_MASK_PORT(0));
+
+       /* Locate the first rule available */
+       if (fs->location == RX_CLS_LOC_ANY)
+               rule_index = find_first_zero_bit(priv->cfp.used,
+                                                bcm_sf2_cfp_rule_size(priv));
+       else
+               rule_index = fs->location;
+
+       /* Insert into TCAM now */
+       bcm_sf2_cfp_rule_addr_set(priv, rule_index);
+
+       ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
+       if (ret) {
+               pr_err("TCAM entry at addr %d failed\n", rule_index);
+               return ret;
+       }
+
+       /* Replace the ARL-derived destination with the DST_MAP-derived one,
+        * and define which port and queue this should be forwarded to.
+        *
+        * There is a small oddity: port 6 does not have a valid bit here,
+        * so we subtract one from any higher port number.
+        */
+       queue_num = fs->ring_cookie % 8;
+       if (port_num >= 7)
+               port_num -= 1;
+
+       reg = CHANGE_FWRD_MAP_IB_REP_ARL | BIT(port_num + DST_MAP_IB_SHIFT) |
+               CHANGE_TC | queue_num << NEW_TC_SHIFT;
+
+       core_writel(priv, reg, CORE_ACT_POL_DATA0);
+
+       /* Set classification ID that needs to be put in Broadcom tag */
+       core_writel(priv, rule_index << CHAIN_ID_SHIFT,
+                   CORE_ACT_POL_DATA1);
+
+       core_writel(priv, 0, CORE_ACT_POL_DATA2);
+
+       /* Configure policer RAM now */
+       ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
+       if (ret) {
+               pr_err("Policer entry at %d failed\n", rule_index);
+               return ret;
+       }
+
+       /* Disable the policer */
+       core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);
+
+       /* Now the rate meter */
+       ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
+       if (ret) {
+               pr_err("Meter entry at %d failed\n", rule_index);
+               return ret;
+       }
+
+       /* Turn on CFP for this rule now */
+       reg = core_readl(priv, CORE_CFP_CTL_REG);
+       reg |= BIT(port);
+       core_writel(priv, reg, CORE_CFP_CTL_REG);
+
+       /* Flag the rule as being used and return it */
+       set_bit(rule_index, priv->cfp.used);
+       fs->location = rule_index;
+
+       return 0;
+}
+
+static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
+                               u32 loc)
+{
+       int ret;
+       u32 reg;
+
+       /* Refuse deletion of unused rules, and the default reserved rule */
+       if (!test_bit(loc, priv->cfp.used) || loc == 0)
+               return -EINVAL;
+
+       /* Indicate which rule we want to read */
+       bcm_sf2_cfp_rule_addr_set(priv, loc);
+
+       ret =  bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
+       if (ret)
+               return ret;
+
+       /* Clear its valid bits */
+       reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
+       reg &= ~SLICE_VALID;
+       core_writel(priv, reg, CORE_CFP_DATA_PORT(0));
+
+       /* Write back this entry into the TCAM now */
+       ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
+       if (ret)
+               return ret;
+
+       clear_bit(loc, priv->cfp.used);
+
+       return 0;
+}
+
+static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
+{
+       unsigned int i;
+
+       for (i = 0; i < sizeof(flow->m_u); i++)
+               flow->m_u.hdata[i] ^= 0xff;
+
+       flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
+       flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
+       flow->m_ext.data[0] ^= cpu_to_be32(~0);
+       flow->m_ext.data[1] ^= cpu_to_be32(~0);
+}
+
+static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
+                               struct ethtool_rxnfc *nfc, bool search)
+{
+       struct ethtool_tcpip4_spec *v4_spec;
+       unsigned int queue_num;
+       u16 src_dst_port;
+       u32 reg, ipv4;
+       int ret;
+
+       if (!search) {
+               bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);
+
+               ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
+               if (ret)
+                       return ret;
+
+               reg = core_readl(priv, CORE_ACT_POL_DATA0);
+
+               ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
+               if (ret)
+                       return ret;
+       } else {
+               reg = core_readl(priv, CORE_ACT_POL_DATA0);
+       }
+
+       /* Extract the destination port */
+       nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
+                                 DST_MAP_IB_MASK) - 1;
+
+       /* There is no Port 6, so we compensate for that here */
+       if (nfc->fs.ring_cookie >= 6)
+               nfc->fs.ring_cookie++;
+       nfc->fs.ring_cookie *= 8;
+
+       /* Extract the destination queue */
+       queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
+       nfc->fs.ring_cookie += queue_num;
+
+       /* Extract the IP protocol */
+       reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
+       switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
+       case IPPROTO_TCP:
+               nfc->fs.flow_type = TCP_V4_FLOW;
+               v4_spec = &nfc->fs.h_u.tcp_ip4_spec;
+               break;
+       case IPPROTO_UDP:
+               nfc->fs.flow_type = UDP_V4_FLOW;
+               v4_spec = &nfc->fs.h_u.udp_ip4_spec;
+               break;
+       default:
+               /* Read the last word to clear this result and move the search on */
+               if (search)
+                       core_readl(priv, CORE_CFP_DATA_PORT(7));
+               return -EINVAL;
+       }
+
+       v4_spec->tos = (reg >> 16) & 0xff;
+       nfc->fs.m_ext.data[0] = cpu_to_be32((reg >> 7) & 1);
+
+       reg = core_readl(priv, CORE_CFP_DATA_PORT(3));
+       /* src port [15:8] */
+       src_dst_port = reg << 8;
+
+       reg = core_readl(priv, CORE_CFP_DATA_PORT(2));
+       /* src port [7:0] */
+       src_dst_port |= (reg >> 24);
+
+       v4_spec->pdst = cpu_to_be16(src_dst_port);
+       nfc->fs.m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+       v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
+       nfc->fs.m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+
+       /* IPv4 dst [15:8] */
+       ipv4 = (reg & 0xff) << 8;
+       reg = core_readl(priv, CORE_CFP_DATA_PORT(1));
+       /* IPv4 dst [31:16] */
+       ipv4 |= ((reg >> 8) & 0xffff) << 16;
+       /* IPv4 dst [7:0] */
+       ipv4 |= (reg >> 24) & 0xff;
+       v4_spec->ip4dst = cpu_to_be32(ipv4);
+       nfc->fs.m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+
+       /* IPv4 src [15:8] */
+       ipv4 = (reg & 0xff) << 8;
+       reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
+
+       if (!(reg & SLICE_VALID))
+               return -EINVAL;
+
+       /* IPv4 src [7:0] */
+       ipv4 |= (reg >> 24) & 0xff;
+       /* IPv4 src [31:16] */
+       ipv4 |= ((reg >> 8) & 0xffff) << 16;
+       v4_spec->ip4src = cpu_to_be32(ipv4);
+       nfc->fs.m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+
+       /* Read last to avoid next entry clobbering the results during search
+        * operations
+        */
+       reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
+       if (!(reg & 1 << port))
+               return -EINVAL;
+
+       bcm_sf2_invert_masks(&nfc->fs);
+
+       /* Put the TCAM size here */
+       nfc->data = bcm_sf2_cfp_rule_size(priv);
+
+       return 0;
+}
+
+/* We implement the search using the hardware's TCAM search operation */
+static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
+                                   int port, struct ethtool_rxnfc *nfc,
+                                   u32 *rule_locs)
+{
+       unsigned int index = 1, rules_cnt = 0;
+       int ret;
+       u32 reg;
+
+       /* Do not poll on OP_STR_DONE to self-clear for search operations;
+        * we cannot use bcm_sf2_cfp_op() here because it completes only
+        * once OP_STR_DONE clears, which will not happen until the entire
+        * search operation is over.
+        */
+       reg = core_readl(priv, CORE_CFP_ACC);
+       reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
+       reg |= index << XCESS_ADDR_SHIFT;
+       reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
+       reg |= OP_SEL_SEARCH | TCAM_SEL | OP_STR_DONE;
+       core_writel(priv, reg, CORE_CFP_ACC);
+
+       do {
+               /* Wait for results to be ready */
+               reg = core_readl(priv, CORE_CFP_ACC);
+
+               /* Extract the address we are searching */
+               index = reg >> XCESS_ADDR_SHIFT;
+               index &= XCESS_ADDR_MASK;
+
+               /* We have a valid search result, so flag it accordingly */
+               if (reg & SEARCH_STS) {
+                       ret = bcm_sf2_cfp_rule_get(priv, port, nfc, true);
+                       if (ret)
+                               continue;
+
+                       rule_locs[rules_cnt] = index;
+                       rules_cnt++;
+               }
+
+               /* Search is over, break out */
+               if (!(reg & OP_STR_DONE))
+                       break;
+
+       } while (index < CFP_NUM_RULES);
+
+       /* Put the TCAM size here */
+       nfc->data = bcm_sf2_cfp_rule_size(priv);
+       nfc->rule_cnt = rules_cnt;
+
+       return 0;
+}
+
+int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
+                     struct ethtool_rxnfc *nfc, u32 *rule_locs)
+{
+       struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+       int ret = 0;
+
+       mutex_lock(&priv->cfp.lock);
+
+       switch (nfc->cmd) {
+       case ETHTOOL_GRXCLSRLCNT:
+               /* Subtract the default, unusable rule */
+               nfc->rule_cnt = bitmap_weight(priv->cfp.used,
+                                             CFP_NUM_RULES) - 1;
+               /* We support specifying rule locations */
+               nfc->data |= RX_CLS_LOC_SPECIAL;
+               break;
+       case ETHTOOL_GRXCLSRULE:
+               ret = bcm_sf2_cfp_rule_get(priv, port, nfc, false);
+               break;
+       case ETHTOOL_GRXCLSRLALL:
+               ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       mutex_unlock(&priv->cfp.lock);
+
+       return ret;
+}
+
+int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
+                     struct ethtool_rxnfc *nfc)
+{
+       struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+       int ret = 0;
+
+       mutex_lock(&priv->cfp.lock);
+
+       switch (nfc->cmd) {
+       case ETHTOOL_SRXCLSRLINS:
+               ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
+               break;
+
+       case ETHTOOL_SRXCLSRLDEL:
+               ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       mutex_unlock(&priv->cfp.lock);
+
+       return ret;
+}
+
+int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
+{
+       unsigned int timeout = 1000;
+       u32 reg;
+
+       reg = core_readl(priv, CORE_CFP_ACC);
+       reg |= TCAM_RESET;
+       core_writel(priv, reg, CORE_CFP_ACC);
+
+       do {
+               reg = core_readl(priv, CORE_CFP_ACC);
+               if (!(reg & TCAM_RESET))
+                       break;
+
+               cpu_relax();
+       } while (--timeout);
+
+       if (!timeout)
+               return -ETIMEDOUT;
+
+       return 0;
+}
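
For context, a hedged userspace sketch (not part of this patch; the interface name, address, destination port and switch port are placeholders) of inserting a rule through the ETHTOOL_SRXCLSRLINS path served by bcm_sf2_set_rxnfc() above. Note how ring_cookie encodes port * 8 + queue, matching the driver's decoding; the driver programs an exact match on the supplied header fields regardless of the m_u masks:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.168.1.2");
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
	/* port 1, egress queue 2; the driver splits this with / 8 and % 8 */
	nfc.fs.ring_cookie = 1 * 8 + 2;
	nfc.fs.location = RX_CLS_LOC_ANY;	/* let the driver pick a slot */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "lan1", IFNAMSIZ - 1);	/* placeholder netdev */
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("SIOCETHTOOL");

	close(fd);
	return 0;
}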
index 838fe373cd6f73c61d98ceaab92bacc77af0d97a..26052450091ecf33111e587bbd86c4f629ea9f56 100644 (file)
 #define __BCM_SF2_REGS_H
 
 /* Register set relative to 'REG' */
-#define REG_SWITCH_CNTRL               0x00
-#define  MDIO_MASTER_SEL               (1 << 0)
 
-#define REG_SWITCH_STATUS              0x04
-#define REG_DIR_DATA_WRITE             0x08
-#define REG_DIR_DATA_READ              0x0C
+enum bcm_sf2_reg_offs {
+       REG_SWITCH_CNTRL = 0,
+       REG_SWITCH_STATUS,
+       REG_DIR_DATA_WRITE,
+       REG_DIR_DATA_READ,
+       REG_SWITCH_REVISION,
+       REG_PHY_REVISION,
+       REG_SPHY_CNTRL,
+       REG_RGMII_0_CNTRL,
+       REG_RGMII_1_CNTRL,
+       REG_RGMII_2_CNTRL,
+       REG_LED_0_CNTRL,
+       REG_LED_1_CNTRL,
+       REG_LED_2_CNTRL,
+       REG_SWITCH_REG_MAX,
+};
+
+/* Relative to REG_SWITCH_CNTRL */
+#define  MDIO_MASTER_SEL               (1 << 0)
 
-#define REG_SWITCH_REVISION            0x18
+/* Relative to REG_SWITCH_REVISION */
 #define  SF2_REV_MASK                  0xffff
 #define  SWITCH_TOP_REV_SHIFT          16
 #define  SWITCH_TOP_REV_MASK           0xffff
 
-#define REG_PHY_REVISION               0x1C
+/* Relative to REG_PHY_REVISION */
 #define  PHY_REVISION_MASK             0xffff
 
-#define REG_SPHY_CNTRL                 0x2C
+/* Relative to REG_SPHY_CNTRL */
 #define  IDDQ_BIAS                     (1 << 0)
 #define  EXT_PWR_DOWN                  (1 << 1)
 #define  FORCE_DLL_EN                  (1 << 2)
 #define  PHY_PHYAD_SHIFT               8
 #define  PHY_PHYAD_MASK                        0x1F
 
-#define REG_RGMII_0_BASE               0x34
-#define REG_RGMII_CNTRL                        0x00
-#define REG_RGMII_IB_STATUS            0x04
-#define REG_RGMII_RX_CLOCK_DELAY_CNTRL 0x08
-#define REG_RGMII_CNTRL_SIZE           0x0C
-#define REG_RGMII_CNTRL_P(x)           (REG_RGMII_0_BASE + \
-                                       ((x) * REG_RGMII_CNTRL_SIZE))
+#define REG_RGMII_CNTRL_P(x)           (REG_RGMII_0_CNTRL + (x))
+
 /* Relative to REG_RGMII_CNTRL */
 #define  RGMII_MODE_EN                 (1 << 0)
 #define  ID_MODE_DIS                   (1 << 1)
@@ -61,8 +70,8 @@
 #define  LPI_COUNT_SHIFT               9
 #define  LPI_COUNT_MASK                        0x3F
 
-#define REG_LED_CNTRL_BASE             0x90
-#define REG_LED_CNTRL(x)               (REG_LED_CNTRL_BASE + (x) * 4)
+#define REG_LED_CNTRL(x)               (REG_LED_0_CNTRL + (x))
+
 #define  SPDLNK_SRC_SEL                        (1 << 24)
 
 /* Register set relative to 'INTRL2_0' and 'INTRL2_1' */
 #define  GMII_SPEED_UP_2G              (1 << 6)
 #define  MII_SW_OR                     (1 << 7)
 
+/* Alternate layout for e.g: 7278 */
+#define CORE_STS_OVERRIDE_IMP2         0x39040
+
 #define CORE_NEW_CTRL                  0x00084
 #define  IP_MC                         (1 << 0)
 #define  OUTRANGEERR_DISCARD           (1 << 1)
 #define  SW_LEARN_CNTL(x)              (1 << (x))
 
 #define CORE_STS_OVERRIDE_GMIIP_PORT(x)        (0x160 + (x) * 4)
+#define CORE_STS_OVERRIDE_GMIIP2_PORT(x) (0x39000 + (x) * 8)
 #define  LINK_STS                      (1 << 0)
 #define  DUPLX_MODE                    (1 << 1)
 #define  SPEED_SHIFT                   2
 #define  P_TXQ_PSM_VDD(x)              (P_TXQ_PSM_VDD_MASK << \
                                        ((x) * P_TXQ_PSM_VDD_SHIFT))
 
+#define CORE_PORT_TC2_QOS_MAP_PORT(x)  (0xc1c0 + ((x) * 0x10))
+#define  PRT_TO_QID_MASK               0x3
+#define  PRT_TO_QID_SHIFT              3
+
 #define CORE_PORT_VLAN_CTL_PORT(x)     (0xc400 + ((x) * 0x8))
 #define  PORT_VLAN_CTRL_MASK           0x1ff
 
 #define CORE_EEE_EN_CTRL               0x24800
 #define CORE_EEE_LPI_INDICATE          0x24810
 
+#define CORE_CFP_ACC                   0x28000
+#define  OP_STR_DONE                   (1 << 0)
+#define  OP_SEL_SHIFT                  1
+#define  OP_SEL_READ                   (1 << OP_SEL_SHIFT)
+#define  OP_SEL_WRITE                  (2 << OP_SEL_SHIFT)
+#define  OP_SEL_SEARCH                 (4 << OP_SEL_SHIFT)
+#define  OP_SEL_MASK                   (7 << OP_SEL_SHIFT)
+#define  CFP_RAM_CLEAR                 (1 << 4)
+#define  RAM_SEL_SHIFT                 10
+#define  TCAM_SEL                      (1 << RAM_SEL_SHIFT)
+#define  ACT_POL_RAM                   (2 << RAM_SEL_SHIFT)
+#define  RATE_METER_RAM                        (4 << RAM_SEL_SHIFT)
+#define  GREEN_STAT_RAM                        (8 << RAM_SEL_SHIFT)
+#define  YELLOW_STAT_RAM               (16 << RAM_SEL_SHIFT)
+#define  RED_STAT_RAM                  (24 << RAM_SEL_SHIFT)
+#define  RAM_SEL_MASK                  (0x1f << RAM_SEL_SHIFT)
+#define  TCAM_RESET                    (1 << 15)
+#define  XCESS_ADDR_SHIFT              16
+#define  XCESS_ADDR_MASK               0xff
+#define  SEARCH_STS                    (1 << 27)
+#define  RD_STS_SHIFT                  28
+#define  RD_STS_TCAM                   (1 << RD_STS_SHIFT)
+#define  RD_STS_ACT_POL_RAM            (2 << RD_STS_SHIFT)
+#define  RD_STS_RATE_METER_RAM         (4 << RD_STS_SHIFT)
+#define  RD_STS_STAT_RAM               (8 << RD_STS_SHIFT)
+
+#define CORE_CFP_RATE_METER_GLOBAL_CTL 0x28010
+
+#define CORE_CFP_DATA_PORT_0           0x28040
+#define CORE_CFP_DATA_PORT(x)          (CORE_CFP_DATA_PORT_0 + \
+                                       (x) * 0x10)
+
+/* UDF_DATA7 */
+#define L3_FRAMING_SHIFT               24
+#define L3_FRAMING_MASK                        (0x3 << L3_FRAMING_SHIFT)
+#define IPPROTO_SHIFT                  8
+#define IPPROTO_MASK                   (0xff << IPPROTO_SHIFT)
+#define IP_FRAG                                (1 << 7)
+
+/* UDF_DATA0 */
+#define  SLICE_VALID                   3
+#define  SLICE_NUM_SHIFT               2
+#define  SLICE_NUM(x)                  ((x) << SLICE_NUM_SHIFT)
+
+#define CORE_CFP_MASK_PORT_0           0x280c0
+
+#define CORE_CFP_MASK_PORT(x)          (CORE_CFP_MASK_PORT_0 + \
+                                       (x) * 0x10)
+
+#define CORE_ACT_POL_DATA0             0x28140
+#define  VLAN_BYP                      (1 << 0)
+#define  EAP_BYP                       (1 << 1)
+#define  STP_BYP                       (1 << 2)
+#define  REASON_CODE_SHIFT             3
+#define  REASON_CODE_MASK              0x3f
+#define  LOOP_BK_EN                    (1 << 9)
+#define  NEW_TC_SHIFT                  10
+#define  NEW_TC_MASK                   0x7
+#define  CHANGE_TC                     (1 << 13)
+#define  DST_MAP_IB_SHIFT              14
+#define  DST_MAP_IB_MASK               0x1ff
+#define  CHANGE_FWRD_MAP_IB_SHIFT      24
+#define  CHANGE_FWRD_MAP_IB_MASK       0x3
+#define  CHANGE_FWRD_MAP_IB_NO_DEST    (0 << CHANGE_FWRD_MAP_IB_SHIFT)
+#define  CHANGE_FWRD_MAP_IB_REM_ARL    (1 << CHANGE_FWRD_MAP_IB_SHIFT)
+#define  CHANGE_FWRD_MAP_IB_REP_ARL    (2 << CHANGE_FWRD_MAP_IB_SHIFT)
+#define  CHANGE_FWRD_MAP_IB_ADD_DST    (3 << CHANGE_FWRD_MAP_IB_SHIFT)
+#define  NEW_DSCP_IB_SHIFT             26
+#define  NEW_DSCP_IB_MASK              0x3f
+
+#define CORE_ACT_POL_DATA1             0x28150
+#define  CHANGE_DSCP_IB                        (1 << 0)
+#define  DST_MAP_OB_SHIFT              1
+#define  DST_MAP_OB_MASK               0x3ff
+#define  CHANGE_FWRD_MAP_OB_SHIFT      11
+#define  CHANGE_FWRD_MAP_OB_MASK       0x3
+#define  NEW_DSCP_OB_SHIFT             13
+#define  NEW_DSCP_OB_MASK              0x3f
+#define  CHANGE_DSCP_OB                        (1 << 19)
+#define  CHAIN_ID_SHIFT                        20
+#define  CHAIN_ID_MASK                 0xff
+#define  CHANGE_COLOR                  (1 << 28)
+#define  NEW_COLOR_SHIFT               29
+#define  NEW_COLOR_MASK                        0x3
+#define  NEW_COLOR_GREEN               (0 << NEW_COLOR_SHIFT)
+#define  NEW_COLOR_YELLOW              (1 << NEW_COLOR_SHIFT)
+#define  NEW_COLOR_RED                 (2 << NEW_COLOR_SHIFT)
+#define  RED_DEFAULT                   (1 << 31)
+
+#define CORE_ACT_POL_DATA2             0x28160
+#define  MAC_LIMIT_BYPASS              (1 << 0)
+#define  CHANGE_TC_O                   (1 << 1)
+#define  NEW_TC_O_SHIFT                        2
+#define  NEW_TC_O_MASK                 0x7
+#define  SPCP_RMK_DISABLE              (1 << 5)
+#define  CPCP_RMK_DISABLE              (1 << 6)
+#define  DEI_RMK_DISABLE               (1 << 7)
+
+#define CORE_RATE_METER0               0x28180
+#define  COLOR_MODE                    (1 << 0)
+#define  POLICER_ACTION                        (1 << 1)
+#define  COUPLING_FLAG                 (1 << 2)
+#define  POLICER_MODE_SHIFT            3
+#define  POLICER_MODE_MASK             0x3
+#define  POLICER_MODE_RFC2698          (0 << POLICER_MODE_SHIFT)
+#define  POLICER_MODE_RFC4115          (1 << POLICER_MODE_SHIFT)
+#define  POLICER_MODE_MEF              (2 << POLICER_MODE_SHIFT)
+#define  POLICER_MODE_DISABLE          (3 << POLICER_MODE_SHIFT)
+
+#define CORE_RATE_METER1               0x28190
+#define  EIR_TK_BKT_MASK               0x7fffff
+
+#define CORE_RATE_METER2               0x281a0
+#define  EIR_BKT_SIZE_MASK             0xfffff
+
+#define CORE_RATE_METER3               0x281b0
+#define  EIR_REF_CNT_MASK              0x7ffff
+
+#define CORE_RATE_METER4               0x281c0
+#define  CIR_TK_BKT_MASK               0x7fffff
+
+#define CORE_RATE_METER5               0x281d0
+#define  CIR_BKT_SIZE_MASK             0xfffff
+
+#define CORE_RATE_METER6               0x281e0
+#define  CIR_REF_CNT_MASK              0x7ffff
+
+#define CORE_CFP_CTL_REG               0x28400
+#define  CFP_EN_MAP_MASK               0x1ff
+
+/* IPv4 slices, 3 of them */
+#define CORE_UDF_0_A_0_8_PORT_0                0x28440
+#define  CFG_UDF_OFFSET_MASK           0x1f
+#define  CFG_UDF_OFFSET_BASE_SHIFT     5
+#define  CFG_UDF_SOF                   (0 << CFG_UDF_OFFSET_BASE_SHIFT)
+#define  CFG_UDF_EOL2                  (2 << CFG_UDF_OFFSET_BASE_SHIFT)
+#define  CFG_UDF_EOL3                  (3 << CFG_UDF_OFFSET_BASE_SHIFT)
+
+/* Number of slices for IPv4, IPv6 and non-IP */
+#define UDF_NUM_SLICES                 9
+
+/* Spacing between different slices */
+#define UDF_SLICE_OFFSET               0x40
+
+#define CFP_NUM_RULES                  256
+
 #endif /* __BCM_SF2_REGS_H */
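
A quick host-side check of the per-port strides defined above: the 7445 GMII override block steps by 4 bytes, the 7278 one by 8, and the CFP data words sit 0x10 apart:

#include <stdio.h>

#define CORE_STS_OVERRIDE_GMIIP_PORT(x)		(0x160 + (x) * 4)
#define CORE_STS_OVERRIDE_GMIIP2_PORT(x)	(0x39000 + (x) * 8)
#define CORE_CFP_DATA_PORT(x)			(0x28040 + (x) * 0x10)

int main(void)
{
	printf("port 5 override: 0x%x (7445), 0x%x (7278)\n",
	       CORE_STS_OVERRIDE_GMIIP_PORT(5),
	       CORE_STS_OVERRIDE_GMIIP2_PORT(5));
	printf("CFP data word 7: 0x%x\n", CORE_CFP_DATA_PORT(7));
	/* prints 0x174, 0x39028 and 0x280b0 */
	return 0;
}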
index 7ce36dbd9b624ef0846c26ef8c1a19da299688da..5934b7a4c448e9654c1a8ea5ef147756d5d56fe1 100644 (file)
@@ -252,7 +252,7 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
        return reg_write(ds, addr, regnum, val);
 }
 
-static struct dsa_switch_ops mv88e6060_switch_ops = {
+static const struct dsa_switch_ops mv88e6060_switch_ops = {
        .get_tag_protocol = mv88e6060_get_tag_protocol,
        .probe          = mv88e6060_drv_probe,
        .setup          = mv88e6060_setup,
@@ -261,16 +261,20 @@ static struct dsa_switch_ops mv88e6060_switch_ops = {
        .phy_write      = mv88e6060_phy_write,
 };
 
+static struct dsa_switch_driver mv88e6060_switch_drv = {
+       .ops            = &mv88e6060_switch_ops,
+};
+
 static int __init mv88e6060_init(void)
 {
-       register_switch_driver(&mv88e6060_switch_ops);
+       register_switch_driver(&mv88e6060_switch_drv);
        return 0;
 }
 module_init(mv88e6060_init);
 
 static void __exit mv88e6060_cleanup(void)
 {
-       unregister_switch_driver(&mv88e6060_switch_ops);
+       unregister_switch_driver(&mv88e6060_switch_drv);
 }
 module_exit(mv88e6060_cleanup);
 
index f7222dc6581de1e75d439550520f8108b1d2744e..03dc886ed3d6be1747d5cef7616f2eb3074a5492 100644 (file)
@@ -222,26 +222,62 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
        return 0;
 }
 
+static int mv88e6165_phy_read(struct mv88e6xxx_chip *chip,
+                             struct mii_bus *bus,
+                             int addr, int reg, u16 *val)
+{
+       return mv88e6xxx_read(chip, addr, reg, val);
+}
+
+static int mv88e6165_phy_write(struct mv88e6xxx_chip *chip,
+                              struct mii_bus *bus,
+                              int addr, int reg, u16 val)
+{
+       return mv88e6xxx_write(chip, addr, reg, val);
+}
+
+static struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
+{
+       struct mv88e6xxx_mdio_bus *mdio_bus;
+
+       mdio_bus = list_first_entry_or_null(&chip->mdios,
+                                           struct mv88e6xxx_mdio_bus, list);
+       if (!mdio_bus)
+               return NULL;
+
+       return mdio_bus->bus;
+}
+
 static int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy,
                              int reg, u16 *val)
 {
        int addr = phy; /* PHY device addresses start at 0x0 */
+       struct mii_bus *bus;
+
+       bus = mv88e6xxx_default_mdio_bus(chip);
+       if (!bus)
+               return -EOPNOTSUPP;
 
        if (!chip->info->ops->phy_read)
                return -EOPNOTSUPP;
 
-       return chip->info->ops->phy_read(chip, addr, reg, val);
+       return chip->info->ops->phy_read(chip, bus, addr, reg, val);
 }
 
 static int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy,
                               int reg, u16 val)
 {
        int addr = phy; /* PHY device addresses start at 0x0 */
+       struct mii_bus *bus;
+
+       bus = mv88e6xxx_default_mdio_bus(chip);
+       if (!bus)
+               return -EOPNOTSUPP;
 
        if (!chip->info->ops->phy_write)
                return -EOPNOTSUPP;
 
-       return chip->info->ops->phy_write(chip, addr, reg, val);
+       return chip->info->ops->phy_write(chip, bus, addr, reg, val);
 }
 
 static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
@@ -611,8 +647,9 @@ static void mv88e6xxx_ppu_state_destroy(struct mv88e6xxx_chip *chip)
        del_timer_sync(&chip->ppu_timer);
 }
 
-static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, int addr,
-                                 int reg, u16 *val)
+static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip,
+                                 struct mii_bus *bus,
+                                 int addr, int reg, u16 *val)
 {
        int err;
 
@@ -625,8 +662,9 @@ static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, int addr,
        return err;
 }
 
-static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, int addr,
-                                  int reg, u16 val)
+static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip,
+                                  struct mii_bus *bus,
+                                  int addr, int reg, u16 val)
 {
        int err;
 
@@ -639,11 +677,6 @@ static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, int addr,
        return err;
 }
 
-static bool mv88e6xxx_6095_family(struct mv88e6xxx_chip *chip)
-{
-       return chip->info->family == MV88E6XXX_FAMILY_6095;
-}
-
 static bool mv88e6xxx_6097_family(struct mv88e6xxx_chip *chip)
 {
        return chip->info->family == MV88E6XXX_FAMILY_6097;
@@ -654,14 +687,14 @@ static bool mv88e6xxx_6165_family(struct mv88e6xxx_chip *chip)
        return chip->info->family == MV88E6XXX_FAMILY_6165;
 }
 
-static bool mv88e6xxx_6185_family(struct mv88e6xxx_chip *chip)
+static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip)
 {
-       return chip->info->family == MV88E6XXX_FAMILY_6185;
+       return chip->info->family == MV88E6XXX_FAMILY_6320;
 }
 
-static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip)
+static bool mv88e6xxx_6341_family(struct mv88e6xxx_chip *chip)
 {
-       return chip->info->family == MV88E6XXX_FAMILY_6320;
+       return chip->info->family == MV88E6XXX_FAMILY_6341;
 }
 
 static bool mv88e6xxx_6351_family(struct mv88e6xxx_chip *chip)
@@ -706,6 +739,12 @@ static int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port,
                        goto restore_link;
        }
 
+       if (chip->info->ops->port_set_cmode) {
+               err = chip->info->ops->port_set_cmode(chip, port, mode);
+               if (err && err != -EOPNOTSUPP)
+                       goto restore_link;
+       }
+
        err = 0;
 restore_link:
        if (chip->info->ops->port_set_link(chip, port, link))
@@ -1209,8 +1248,8 @@ static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid,
 
 static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
 {
-       struct net_device *bridge = chip->ports[port].bridge_dev;
        struct dsa_switch *ds = chip->ds;
+       struct net_device *bridge = ds->ports[port].bridge_dev;
        u16 output_ports = 0;
        int i;
 
@@ -1220,7 +1259,7 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
        } else {
                for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
                        /* allow sending frames to every group member */
-                       if (bridge && chip->ports[i].bridge_dev == bridge)
+                       if (bridge && ds->ports[i].bridge_dev == bridge)
                                output_ports |= BIT(i);
 
                        /* allow sending frames to CPU port and DSA link(s) */
@@ -1688,7 +1727,8 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
                        : GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
 
        if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) ||
-           mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) {
+           mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip) ||
+           mv88e6xxx_6341_family(chip)) {
                struct mv88e6xxx_vtu_entry vstp;
 
                /* Adding a VTU entry requires a valid STU entry. As VSTP is not
@@ -1782,17 +1822,17 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
                            GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
                                continue;
 
-                       if (chip->ports[i].bridge_dev ==
-                           chip->ports[port].bridge_dev)
+                       if (ds->ports[i].bridge_dev ==
+                           ds->ports[port].bridge_dev)
                                break; /* same bridge, check next VLAN */
 
-                       if (!chip->ports[i].bridge_dev)
+                       if (!ds->ports[i].bridge_dev)
                                continue;
 
                        netdev_warn(ds->ports[port].netdev,
                                    "hardware VLAN %d already used by %s\n",
                                    vlan.vid,
-                                   netdev_name(chip->ports[i].bridge_dev));
+                                   netdev_name(ds->ports[i].bridge_dev));
                        err = -EOPNOTSUPP;
                        goto unlock;
                }
@@ -2023,7 +2063,8 @@ static int mv88e6xxx_atu_get(struct mv88e6xxx_chip *chip, int fid,
        struct mv88e6xxx_atu_entry next;
        int err;
 
-       eth_broadcast_addr(next.mac);
+       memcpy(next.mac, addr, ETH_ALEN);
+       eth_addr_dec(next.mac);
 
        err = _mv88e6xxx_atu_mac_write(chip, next.mac);
        if (err)
@@ -2041,7 +2082,7 @@ static int mv88e6xxx_atu_get(struct mv88e6xxx_chip *chip, int fid,
                        *entry = next;
                        return 0;
                }
-       } while (!is_broadcast_ether_addr(next.mac));
+       } while (ether_addr_greater(addr, next.mac));
 
        memset(entry, 0, sizeof(*entry));
        entry->fid = fid;
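
The ATU lookup above now seeds GetNext with the address just below the target, so the first returned entry can be the target itself, and stops once the returned MAC sorts at or above it. A host-side sketch mirroring the kernel's eth_addr_dec() (a 48-bit decrement with borrow):

#include <stdio.h>

/* Mirrors eth_addr_dec(): treat the MAC as a 48-bit integer, subtract one */
static void eth_addr_dec(unsigned char *addr)
{
	int i;

	for (i = 5; i >= 0; i--)
		if (addr[i]-- != 0)	/* stop once no borrow is needed */
			break;
}

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x00 };

	eth_addr_dec(mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 00:11:22:33:43:ff */
	return 0;
}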
@@ -2281,18 +2322,16 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
 }
 
 static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
-                                     struct net_device *bridge)
+                                     struct net_device *br)
 {
        struct mv88e6xxx_chip *chip = ds->priv;
        int i, err = 0;
 
        mutex_lock(&chip->reg_lock);
 
-       /* Assign the bridge and remap each port's VLANTable */
-       chip->ports[port].bridge_dev = bridge;
-
+       /* Remap each port's VLANTable */
        for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
-               if (chip->ports[i].bridge_dev == bridge) {
+               if (ds->ports[i].bridge_dev == br) {
                        err = _mv88e6xxx_port_based_vlan_map(chip, i);
                        if (err)
                                break;
@@ -2304,19 +2343,17 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
        return err;
 }
 
-static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
+static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
+                                       struct net_device *br)
 {
        struct mv88e6xxx_chip *chip = ds->priv;
-       struct net_device *bridge = chip->ports[port].bridge_dev;
        int i;
 
        mutex_lock(&chip->reg_lock);
 
-       /* Unassign the bridge and remap each port's VLANTable */
-       chip->ports[port].bridge_dev = NULL;
-
+       /* Remap each port's VLANTable */
        for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
-               if (i == port || chip->ports[i].bridge_dev == bridge)
+               if (i == port || ds->ports[i].bridge_dev == br)
                        if (_mv88e6xxx_port_based_vlan_map(chip, i))
                                netdev_warn(ds->ports[i].netdev,
                                            "failed to remap\n");
@@ -2538,31 +2575,23 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
         * received packets as usual, disable ARP mirroring and don't send a
         * copy of all transmitted/received frames on this port to the CPU.
         */
-       reg = 0;
-       if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
-           mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
-           mv88e6xxx_6095_family(chip) || mv88e6xxx_6320_family(chip) ||
-           mv88e6xxx_6185_family(chip))
-               reg = PORT_CONTROL_2_MAP_DA;
-
-       if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) {
-               /* Set the upstream port this port should use */
-               reg |= dsa_upstream_port(ds);
-               /* enable forwarding of unknown multicast addresses to
-                * the upstream port
-                */
-               if (port == dsa_upstream_port(ds))
-                       reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
-       }
-
-       reg |= PORT_CONTROL_2_8021Q_DISABLED;
+       err = mv88e6xxx_port_set_map_da(chip, port);
+       if (err)
+               return err;
 
-       if (reg) {
-               err = mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+       reg = 0;
+       if (chip->info->ops->port_set_upstream_port) {
+               err = chip->info->ops->port_set_upstream_port(
+                       chip, port, dsa_upstream_port(ds));
                if (err)
                        return err;
        }
 
+       err = mv88e6xxx_port_set_8021q_mode(chip, port,
+                                           PORT_CONTROL_2_8021Q_DISABLED);
+       if (err)
+               return err;
+
        if (chip->info->ops->port_jumbo_config) {
                err = chip->info->ops->port_jumbo_config(chip, port);
                if (err)
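
This hunk replaces the old family-checking if-ladder with NULL-checked calls through the per-chip ops table, so optional features are probed per chip rather than per family. A standalone sketch of that dispatch pattern, with all names illustrative rather than taken from the driver:

#include <stddef.h>
#include <stdio.h>

struct ops_sketch {
        int (*port_set_upstream_port)(int port, int upstream);
};

static int set_upstream_sketch(int port, int upstream)
{
        printf("port %d -> upstream %d\n", port, upstream);
        return 0;
}

static const struct ops_sketch has_feature = {
        .port_set_upstream_port = set_upstream_sketch,
};

static const struct ops_sketch no_feature = { NULL };

int main(void)
{
        const struct ops_sketch *tables[] = { &no_feature, &has_feature };
        size_t i;

        for (i = 0; i < 2; i++)
                if (tables[i]->port_set_upstream_port)  /* optional op */
                        tables[i]->port_set_upstream_port(1, 0);
        return 0;
}
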
@@ -2596,7 +2625,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
 
        if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
            mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
-           mv88e6xxx_6320_family(chip)) {
+           mv88e6xxx_6320_family(chip) || mv88e6xxx_6341_family(chip)) {
                /* Port ATU control: disable limiting the number of
                 * address database entries that this port is allowed
                 * to use.
@@ -2820,7 +2849,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
        int i;
 
        chip->ds = ds;
-       ds->slave_mii_bus = chip->mdio_bus;
+       ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
 
        mutex_lock(&chip->reg_lock);
 
@@ -2877,50 +2906,64 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
 
 static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
 {
-       struct mv88e6xxx_chip *chip = bus->priv;
+       struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+       struct mv88e6xxx_chip *chip = mdio_bus->chip;
        u16 val;
        int err;
 
-       if (phy >= mv88e6xxx_num_ports(chip))
-               return 0xffff;
+       if (!chip->info->ops->phy_read)
+               return -EOPNOTSUPP;
 
        mutex_lock(&chip->reg_lock);
-       err = mv88e6xxx_phy_read(chip, phy, reg, &val);
+       err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
        mutex_unlock(&chip->reg_lock);
 
+       if (reg == MII_PHYSID2) {
+               /* Some internal PHYs don't have a model number.  Use
+                * the mv88e6390 family model number instead.
+                */
+               if (!(val & 0x3f0))
+                       val |= PORT_SWITCH_ID_PROD_NUM_6390;
+       }
+
        return err ? err : val;
 }
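
The MII_PHYSID2 fixup above relies on bits 9:4 of PHYSID2 carrying the PHY model number, which the 0x3f0 mask selects. PORT_SWITCH_ID_PROD_NUM_6390 is not visible in this excerpt; the sketch below assumes 0x390 from the file's product-number naming pattern:

#include <stdint.h>
#include <stdio.h>

#define MODEL_MASK      0x3f0   /* PHYSID2 bits 9:4 */
#define PROD_NUM_6390   0x390   /* assumption, not from this excerpt */

int main(void)
{
        uint16_t val = 0x0c00;  /* internal PHY reporting model 0 */

        if (!(val & MODEL_MASK))
                val |= PROD_NUM_6390;
        printf("physid2 = 0x%04x\n", val);      /* 0x0f90 */
        return 0;
}
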
 
 static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
 {
-       struct mv88e6xxx_chip *chip = bus->priv;
+       struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+       struct mv88e6xxx_chip *chip = mdio_bus->chip;
        int err;
 
-       if (phy >= mv88e6xxx_num_ports(chip))
-               return 0xffff;
+       if (!chip->info->ops->phy_write)
+               return -EOPNOTSUPP;
 
        mutex_lock(&chip->reg_lock);
-       err = mv88e6xxx_phy_write(chip, phy, reg, val);
+       err = chip->info->ops->phy_write(chip, bus, phy, reg, val);
        mutex_unlock(&chip->reg_lock);
 
        return err;
 }
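
Both accessors now reach the chip through a small wrapper stored in bus->priv rather than the chip pointer itself, which is what lets one chip own several MDIO buses. A standalone sketch of that container pattern (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct chip_sketch { const char *name; };

struct mdio_bus_sketch {
        struct chip_sketch *chip;       /* back-pointer to the owner */
        bool external;
};

static int mdio_read_sketch(void *bus_priv, int phy, int reg)
{
        struct mdio_bus_sketch *mdio_bus = bus_priv;

        printf("%s bus of %s: phy %d reg %d\n",
               mdio_bus->external ? "external" : "internal",
               mdio_bus->chip->name, phy, reg);
        return 0;
}

int main(void)
{
        struct chip_sketch chip = { "chip0" };
        struct mdio_bus_sketch bus = { &chip, true };

        return mdio_read_sketch(&bus, 0, 2);
}
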
 
 static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
-                                  struct device_node *np)
+                                  struct device_node *np,
+                                  bool external)
 {
        static int index;
+       struct mv88e6xxx_mdio_bus *mdio_bus;
        struct mii_bus *bus;
        int err;
 
-       if (np)
-               chip->mdio_np = of_get_child_by_name(np, "mdio");
-
-       bus = devm_mdiobus_alloc(chip->dev);
+       bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus));
        if (!bus)
                return -ENOMEM;
 
-       bus->priv = (void *)chip;
+       mdio_bus = bus->priv;
+       mdio_bus->bus = bus;
+       mdio_bus->chip = chip;
+       INIT_LIST_HEAD(&mdio_bus->list);
+       mdio_bus->external = external;
+
        if (np) {
                bus->name = np->full_name;
                snprintf(bus->id, MII_BUS_ID_SIZE, "%s", np->full_name);
@@ -2933,183 +2976,73 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
        bus->write = mv88e6xxx_mdio_write;
        bus->parent = chip->dev;
 
-       if (chip->mdio_np)
-               err = of_mdiobus_register(bus, chip->mdio_np);
+       if (np)
+               err = of_mdiobus_register(bus, np);
        else
                err = mdiobus_register(bus);
        if (err) {
                dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
-               goto out;
+               return err;
        }
-       chip->mdio_bus = bus;
-
-       return 0;
-
-out:
-       if (chip->mdio_np)
-               of_node_put(chip->mdio_np);
-
-       return err;
-}
-
-static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_chip *chip)
-
-{
-       struct mii_bus *bus = chip->mdio_bus;
-
-       mdiobus_unregister(bus);
-
-       if (chip->mdio_np)
-               of_node_put(chip->mdio_np);
-}
-
-#ifdef CONFIG_NET_DSA_HWMON
-
-static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
-{
-       struct mv88e6xxx_chip *chip = ds->priv;
-       u16 val;
-       int ret;
-
-       *temp = 0;
-
-       mutex_lock(&chip->reg_lock);
-
-       ret = mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x6);
-       if (ret < 0)
-               goto error;
-
-       /* Enable temperature sensor */
-       ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val);
-       if (ret < 0)
-               goto error;
-
-       ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val | (1 << 5));
-       if (ret < 0)
-               goto error;
-
-       /* Wait for temperature to stabilize */
-       usleep_range(10000, 12000);
 
-       ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val);
-       if (ret < 0)
-               goto error;
-
-       /* Disable temperature sensor */
-       ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val & ~(1 << 5));
-       if (ret < 0)
-               goto error;
-
-       *temp = ((val & 0x1f) - 5) * 5;
-
-error:
-       mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x0);
-       mutex_unlock(&chip->reg_lock);
-       return ret;
-}
-
-static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
-{
-       struct mv88e6xxx_chip *chip = ds->priv;
-       int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
-       u16 val;
-       int ret;
-
-       *temp = 0;
-
-       mutex_lock(&chip->reg_lock);
-       ret = mv88e6xxx_phy_page_read(chip, phy, 6, 27, &val);
-       mutex_unlock(&chip->reg_lock);
-       if (ret < 0)
-               return ret;
-
-       *temp = (val & 0xff) - 25;
+       if (external)
+               list_add_tail(&mdio_bus->list, &chip->mdios);
+       else
+               list_add(&mdio_bus->list, &chip->mdios);
 
        return 0;
 }
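
The list_add/list_add_tail split above guarantees the internal bus always sits at the head of chip->mdios, so a default-bus helper can simply take the first entry. A user-space sketch of that ordering, modeled with a plain singly linked list:

#include <stdbool.h>
#include <stdio.h>

struct bus_sketch {
        bool external;
        struct bus_sketch *next;
};

static struct bus_sketch *head;

static void register_bus_sketch(struct bus_sketch *b)
{
        struct bus_sketch **p = &head;

        if (b->external)                /* like list_add_tail() */
                while (*p)
                        p = &(*p)->next;
        b->next = *p;                   /* like list_add() when internal */
        *p = b;
}

int main(void)
{
        struct bus_sketch ext = { true, NULL };
        struct bus_sketch internal = { false, NULL };

        register_bus_sketch(&ext);
        register_bus_sketch(&internal);
        printf("default bus is %s\n",
               head->external ? "external" : "internal");
        return 0;
}
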
 
-static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
-{
-       struct mv88e6xxx_chip *chip = ds->priv;
-
-       if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP))
-               return -EOPNOTSUPP;
-
-       if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip))
-               return mv88e63xx_get_temp(ds, temp);
-
-       return mv88e61xx_get_temp(ds, temp);
-}
+static const struct of_device_id mv88e6xxx_mdio_external_match[] = {
+       { .compatible = "marvell,mv88e6xxx-mdio-external",
+         .data = (void *)true },
+       { },
+};
 
-static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
+static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
+                                   struct device_node *np)
 {
-       struct mv88e6xxx_chip *chip = ds->priv;
-       int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
-       u16 val;
-       int ret;
-
-       if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
-               return -EOPNOTSUPP;
-
-       *temp = 0;
+       const struct of_device_id *match;
+       struct device_node *child;
+       int err;
 
-       mutex_lock(&chip->reg_lock);
-       ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val);
-       mutex_unlock(&chip->reg_lock);
-       if (ret < 0)
-               return ret;
+       /* Always register one MDIO bus for the internal/default bus.
+        * It may be represented in the device tree, but that is
+        * optional.
+        */
+       child = of_get_child_by_name(np, "mdio");
+       err = mv88e6xxx_mdio_register(chip, child, false);
+       if (err)
+               return err;
 
-       *temp = (((val >> 8) & 0x1f) * 5) - 25;
+       /* Walk the device tree and register any other child nodes
+        * that declare themselves compatible with the external MDIO
+        * bus.
+        */
+       for_each_available_child_of_node(np, child) {
+               match = of_match_node(mv88e6xxx_mdio_external_match, child);
+               if (match) {
+                       err = mv88e6xxx_mdio_register(chip, child, true);
+                       if (err)
+                               return err;
+               }
+       }
 
        return 0;
 }
 
-static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
-{
-       struct mv88e6xxx_chip *chip = ds->priv;
-       int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
-       u16 val;
-       int err;
-
-       if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
-               return -EOPNOTSUPP;
-
-       mutex_lock(&chip->reg_lock);
-       err = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val);
-       if (err)
-               goto unlock;
-       temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
-       err = mv88e6xxx_phy_page_write(chip, phy, 6, 26,
-                                      (val & 0xe0ff) | (temp << 8));
-unlock:
-       mutex_unlock(&chip->reg_lock);
-
-       return err;
-}
+static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
 
-static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
 {
-       struct mv88e6xxx_chip *chip = ds->priv;
-       int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
-       u16 val;
-       int ret;
-
-       if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
-               return -EOPNOTSUPP;
-
-       *alarm = false;
-
-       mutex_lock(&chip->reg_lock);
-       ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val);
-       mutex_unlock(&chip->reg_lock);
-       if (ret < 0)
-               return ret;
+       struct mv88e6xxx_mdio_bus *mdio_bus;
+       struct mii_bus *bus;
 
-       *alarm = !!(val & 0x40);
+       list_for_each_entry(mdio_bus, &chip->mdios, list) {
+               bus = mdio_bus->bus;
 
-       return 0;
+               mdiobus_unregister(bus);
+       }
 }
-#endif /* CONFIG_NET_DSA_HWMON */
 
 static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
 {
@@ -3178,6 +3111,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
        .stats_get_stats = mv88e6095_stats_get_stats,
        .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .ppu_enable = mv88e6185_g1_ppu_enable,
        .ppu_disable = mv88e6185_g1_ppu_disable,
@@ -3193,7 +3127,8 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
        .port_set_duplex = mv88e6xxx_port_set_duplex,
        .port_set_speed = mv88e6185_port_set_speed,
        .port_set_frame_mode = mv88e6085_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6085_port_set_egress_unknowns,
+       .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns,
+       .port_set_upstream_port = mv88e6095_port_set_upstream_port,
        .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
@@ -3225,6 +3160,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
        .stats_get_stats = mv88e6095_stats_get_stats,
        .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
@@ -3232,8 +3168,8 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
 static const struct mv88e6xxx_ops mv88e6123_ops = {
        /* MV88E6XXX_FAMILY_6165 */
        .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
-       .phy_read = mv88e6xxx_read,
-       .phy_write = mv88e6xxx_write,
+       .phy_read = mv88e6165_phy_read,
+       .phy_write = mv88e6165_phy_write,
        .port_set_link = mv88e6xxx_port_set_link,
        .port_set_duplex = mv88e6xxx_port_set_duplex,
        .port_set_speed = mv88e6185_port_set_speed,
@@ -3245,6 +3181,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
        .stats_get_stats = mv88e6095_stats_get_stats,
        .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
@@ -3259,8 +3196,9 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
        .port_set_speed = mv88e6185_port_set_speed,
        .port_tag_remap = mv88e6095_port_tag_remap,
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
+       .port_set_upstream_port = mv88e6095_port_set_upstream_port,
        .port_jumbo_config = mv88e6165_port_jumbo_config,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_config = mv88e6097_port_pause_config,
@@ -3270,6 +3208,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
        .stats_get_stats = mv88e6095_stats_get_stats,
        .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .ppu_enable = mv88e6185_g1_ppu_enable,
        .ppu_disable = mv88e6185_g1_ppu_disable,
@@ -3279,8 +3218,8 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
 static const struct mv88e6xxx_ops mv88e6161_ops = {
        /* MV88E6XXX_FAMILY_6165 */
        .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
-       .phy_read = mv88e6xxx_read,
-       .phy_write = mv88e6xxx_write,
+       .phy_read = mv88e6165_phy_read,
+       .phy_write = mv88e6165_phy_write,
        .port_set_link = mv88e6xxx_port_set_link,
        .port_set_duplex = mv88e6xxx_port_set_duplex,
        .port_set_speed = mv88e6185_port_set_speed,
@@ -3297,6 +3236,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
        .stats_get_stats = mv88e6095_stats_get_stats,
        .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
@@ -3304,8 +3244,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
 static const struct mv88e6xxx_ops mv88e6165_ops = {
        /* MV88E6XXX_FAMILY_6165 */
        .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
-       .phy_read = mv88e6xxx_read,
-       .phy_write = mv88e6xxx_write,
+       .phy_read = mv88e6165_phy_read,
+       .phy_write = mv88e6165_phy_write,
        .port_set_link = mv88e6xxx_port_set_link,
        .port_set_duplex = mv88e6xxx_port_set_duplex,
        .port_set_speed = mv88e6185_port_set_speed,
@@ -3315,6 +3255,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = {
        .stats_get_stats = mv88e6095_stats_get_stats,
        .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
@@ -3341,6 +3282,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
        .stats_get_stats = mv88e6095_stats_get_stats,
        .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
@@ -3369,6 +3311,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
        .stats_get_stats = mv88e6095_stats_get_stats,
        .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
@@ -3395,6 +3338,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
        .stats_get_stats = mv88e6095_stats_get_stats,
        .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
@@ -3423,6 +3367,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
        .stats_get_stats = mv88e6095_stats_get_stats,
        .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
@@ -3436,14 +3381,16 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
        .port_set_duplex = mv88e6xxx_port_set_duplex,
        .port_set_speed = mv88e6185_port_set_speed,
        .port_set_frame_mode = mv88e6085_port_set_frame_mode,
-       .port_set_egress_unknowns = mv88e6085_port_set_egress_unknowns,
+       .port_set_egress_unknowns = mv88e6095_port_set_egress_unknowns,
        .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
+       .port_set_upstream_port = mv88e6095_port_set_upstream_port,
        .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
        .stats_get_sset_count = mv88e6095_stats_get_sset_count,
        .stats_get_strings = mv88e6095_stats_get_strings,
        .stats_get_stats = mv88e6095_stats_get_stats,
        .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .ppu_enable = mv88e6185_g1_ppu_enable,
        .ppu_disable = mv88e6185_g1_ppu_disable,
@@ -3452,6 +3399,8 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
 
 static const struct mv88e6xxx_ops mv88e6190_ops = {
        /* MV88E6XXX_FAMILY_6390 */
+       .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+       .set_eeprom = mv88e6xxx_g2_set_eeprom8,
        .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
        .phy_read = mv88e6xxx_g2_smi_phy_read,
        .phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3471,12 +3420,15 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
        .stats_get_stats = mv88e6390_stats_get_stats,
        .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .watchdog_ops = &mv88e6390_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
 
 static const struct mv88e6xxx_ops mv88e6190x_ops = {
        /* MV88E6XXX_FAMILY_6390 */
+       .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+       .set_eeprom = mv88e6xxx_g2_set_eeprom8,
        .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
        .phy_read = mv88e6xxx_g2_smi_phy_read,
        .phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3496,12 +3448,15 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
        .stats_get_stats = mv88e6390_stats_get_stats,
        .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .watchdog_ops = &mv88e6390_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
 
 static const struct mv88e6xxx_ops mv88e6191_ops = {
        /* MV88E6XXX_FAMILY_6390 */
+       .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+       .set_eeprom = mv88e6xxx_g2_set_eeprom8,
        .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
        .phy_read = mv88e6xxx_g2_smi_phy_read,
        .phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3521,6 +3476,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
        .stats_get_stats = mv88e6390_stats_get_stats,
        .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .watchdog_ops = &mv88e6390_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
@@ -3549,12 +3505,15 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
        .stats_get_stats = mv88e6095_stats_get_stats,
        .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
 
 static const struct mv88e6xxx_ops mv88e6290_ops = {
        /* MV88E6XXX_FAMILY_6390 */
+       .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+       .set_eeprom = mv88e6xxx_g2_set_eeprom8,
        .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
        .phy_read = mv88e6xxx_g2_smi_phy_read,
        .phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3567,6 +3526,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
        .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
        .port_pause_config = mv88e6390_port_pause_config,
+       .port_set_cmode = mv88e6390x_port_set_cmode,
        .stats_snapshot = mv88e6390_g1_stats_snapshot,
        .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3574,6 +3534,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
        .stats_get_stats = mv88e6390_stats_get_stats,
        .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .watchdog_ops = &mv88e6390_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
@@ -3653,6 +3614,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
        .stats_get_stats = mv88e6095_stats_get_stats,
        .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
@@ -3679,6 +3641,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
        .stats_get_stats = mv88e6095_stats_get_stats,
        .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
@@ -3707,12 +3670,73 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
        .stats_get_stats = mv88e6095_stats_get_stats,
        .g1_set_cpu_port = mv88e6095_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6095_g1_set_egress_port,
+       .watchdog_ops = &mv88e6097_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
 
+static const struct mv88e6xxx_ops mv88e6141_ops = {
+       /* MV88E6XXX_FAMILY_6341 */
+       .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+       .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+       .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+       .phy_read = mv88e6xxx_g2_smi_phy_read,
+       .phy_write = mv88e6xxx_g2_smi_phy_write,
+       .port_set_link = mv88e6xxx_port_set_link,
+       .port_set_duplex = mv88e6xxx_port_set_duplex,
+       .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+       .port_set_speed = mv88e6390_port_set_speed,
+       .port_tag_remap = mv88e6095_port_tag_remap,
+       .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_ether_type = mv88e6351_port_set_ether_type,
+       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+       .port_pause_config = mv88e6097_port_pause_config,
+       .stats_snapshot = mv88e6390_g1_stats_snapshot,
+       .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+       .stats_get_strings = mv88e6320_stats_get_strings,
+       .stats_get_stats = mv88e6390_stats_get_stats,
+       .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+       .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .watchdog_ops = &mv88e6390_watchdog_ops,
+       .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+       .reset = mv88e6352_g1_reset,
+};
+
+static const struct mv88e6xxx_ops mv88e6341_ops = {
+       /* MV88E6XXX_FAMILY_6341 */
+       .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+       .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+       .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+       .phy_read = mv88e6xxx_g2_smi_phy_read,
+       .phy_write = mv88e6xxx_g2_smi_phy_write,
+       .port_set_link = mv88e6xxx_port_set_link,
+       .port_set_duplex = mv88e6xxx_port_set_duplex,
+       .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+       .port_set_speed = mv88e6390_port_set_speed,
+       .port_tag_remap = mv88e6095_port_tag_remap,
+       .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+       .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+       .port_set_ether_type = mv88e6351_port_set_ether_type,
+       .port_jumbo_config = mv88e6165_port_jumbo_config,
+       .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+       .port_pause_config = mv88e6097_port_pause_config,
+       .stats_snapshot = mv88e6390_g1_stats_snapshot,
+       .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+       .stats_get_strings = mv88e6320_stats_get_strings,
+       .stats_get_stats = mv88e6390_stats_get_stats,
+       .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+       .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .watchdog_ops = &mv88e6390_watchdog_ops,
+       .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+       .reset = mv88e6352_g1_reset,
+};
+
 static const struct mv88e6xxx_ops mv88e6390_ops = {
        /* MV88E6XXX_FAMILY_6390 */
+       .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+       .set_eeprom = mv88e6xxx_g2_set_eeprom8,
        .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
        .phy_read = mv88e6xxx_g2_smi_phy_read,
        .phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3727,6 +3751,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
        .port_jumbo_config = mv88e6165_port_jumbo_config,
        .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
        .port_pause_config = mv88e6390_port_pause_config,
+       .port_set_cmode = mv88e6390x_port_set_cmode,
        .stats_snapshot = mv88e6390_g1_stats_snapshot,
        .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
        .stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3734,12 +3759,15 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
        .stats_get_stats = mv88e6390_stats_get_stats,
        .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .watchdog_ops = &mv88e6390_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
 
 static const struct mv88e6xxx_ops mv88e6390x_ops = {
        /* MV88E6XXX_FAMILY_6390 */
+       .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+       .set_eeprom = mv88e6xxx_g2_set_eeprom8,
        .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
        .phy_read = mv88e6xxx_g2_smi_phy_read,
        .phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3761,12 +3789,15 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
        .stats_get_stats = mv88e6390_stats_get_stats,
        .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .watchdog_ops = &mv88e6390_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
 
 static const struct mv88e6xxx_ops mv88e6391_ops = {
        /* MV88E6XXX_FAMILY_6390 */
+       .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+       .set_eeprom = mv88e6xxx_g2_set_eeprom8,
        .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
        .phy_read = mv88e6xxx_g2_smi_phy_read,
        .phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3786,6 +3817,7 @@ static const struct mv88e6xxx_ops mv88e6391_ops = {
        .stats_get_stats = mv88e6390_stats_get_stats,
        .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
        .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+       .watchdog_ops = &mv88e6390_watchdog_ops,
        .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
        .reset = mv88e6352_g1_reset,
 };
@@ -3996,7 +4028,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .port_base_addr = 0x0,
                .global1_addr = 0x1b,
                .tag_protocol = DSA_TAG_PROTO_DSA,
-               .age_time_coeff = 15000,
+               .age_time_coeff = 3750,
                .g1_irqs = 9,
                .flags = MV88E6XXX_FLAGS_FAMILY_6390,
                .ops = &mv88e6190_ops,
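
age_time_coeff is the ATU age-time granularity in milliseconds per register step; the change from 15000 to 3750 reflects the 6390 family stepping in 3.75 s rather than the 15 s of older parts. A quick arithmetic check:

#include <stdio.h>

int main(void)
{
        unsigned int coeff_ms = 3750;           /* 6390-family step */
        unsigned int wanted_ms = 300000;        /* 300 s bridge ageing */

        printf("register steps = %u\n", wanted_ms / coeff_ms);  /* 80 */
        return 0;
}
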
@@ -4010,7 +4042,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 11,        /* 10 + Z80 */
                .port_base_addr = 0x0,
                .global1_addr = 0x1b,
-               .age_time_coeff = 15000,
+               .age_time_coeff = 3750,
                .g1_irqs = 9,
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6390,
@@ -4025,7 +4057,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 11,        /* 10 + Z80 */
                .port_base_addr = 0x0,
                .global1_addr = 0x1b,
-               .age_time_coeff = 15000,
+               .age_time_coeff = 3750,
                .g1_irqs = 9,
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6390,
@@ -4055,7 +4087,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 11,        /* 10 + Z80 */
                .port_base_addr = 0x0,
                .global1_addr = 0x1b,
-               .age_time_coeff = 15000,
+               .age_time_coeff = 3750,
                .g1_irqs = 9,
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6390,
@@ -4092,6 +4124,34 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .ops = &mv88e6321_ops,
        },
 
+       [MV88E6141] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6141,
+               .family = MV88E6XXX_FAMILY_6341,
+               .name = "Marvell 88E6341",
+               .num_databases = 4096,
+               .num_ports = 6,
+               .port_base_addr = 0x10,
+               .global1_addr = 0x1b,
+               .age_time_coeff = 3750,
+               .tag_protocol = DSA_TAG_PROTO_EDSA,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6341,
+               .ops = &mv88e6141_ops,
+       },
+
+       [MV88E6341] = {
+               .prod_num = PORT_SWITCH_ID_PROD_NUM_6341,
+               .family = MV88E6XXX_FAMILY_6341,
+               .name = "Marvell 88E6341",
+               .num_databases = 4096,
+               .num_ports = 6,
+               .port_base_addr = 0x10,
+               .global1_addr = 0x1b,
+               .age_time_coeff = 3750,
+               .tag_protocol = DSA_TAG_PROTO_EDSA,
+               .flags = MV88E6XXX_FLAGS_FAMILY_6341,
+               .ops = &mv88e6341_ops,
+       },
+
        [MV88E6350] = {
                .prod_num = PORT_SWITCH_ID_PROD_NUM_6350,
                .family = MV88E6XXX_FAMILY_6351,
@@ -4144,7 +4204,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 11,        /* 10 + Z80 */
                .port_base_addr = 0x0,
                .global1_addr = 0x1b,
-               .age_time_coeff = 15000,
+               .age_time_coeff = 3750,
                .g1_irqs = 9,
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6390,
@@ -4158,7 +4218,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .num_ports = 11,        /* 10 + Z80 */
                .port_base_addr = 0x0,
                .global1_addr = 0x1b,
-               .age_time_coeff = 15000,
+               .age_time_coeff = 3750,
                .g1_irqs = 9,
                .tag_protocol = DSA_TAG_PROTO_DSA,
                .flags = MV88E6XXX_FLAGS_FAMILY_6390,
@@ -4221,6 +4281,7 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
        chip->dev = dev;
 
        mutex_init(&chip->reg_lock);
+       INIT_LIST_HEAD(&chip->mdios);
 
        return chip;
 }
@@ -4240,10 +4301,6 @@ static void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip)
 static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
                              struct mii_bus *bus, int sw_addr)
 {
-       /* ADDR[0] pin is unavailable externally and considered zero */
-       if (sw_addr & 0x1)
-               return -EINVAL;
-
        if (sw_addr == 0)
                chip->smi_ops = &mv88e6xxx_smi_single_chip_ops;
        else if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_MULTI_CHIP))
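
With the ADDR[0] guard removed above, odd SMI addresses are now accepted, and mode selection reduces to: address 0 means single-chip (direct register) access, anything else means multi-chip (indirect) access. A trivial sketch of that rule:

#include <stdio.h>

int main(void)
{
        int sw_addr;

        for (sw_addr = 0; sw_addr < 4; sw_addr++)
                printf("SMI address %d: %s-chip mode\n", sw_addr,
                       sw_addr ? "multi" : "single");
        return 0;
}
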
@@ -4299,7 +4356,7 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
 
        mv88e6xxx_phy_init(chip);
 
-       err = mv88e6xxx_mdio_register(chip, NULL);
+       err = mv88e6xxx_mdios_register(chip, NULL);
        if (err)
                goto free;
 
@@ -4364,7 +4421,7 @@ static int mv88e6xxx_port_mdb_dump(struct dsa_switch *ds, int port,
        return err;
 }
 
-static struct dsa_switch_ops mv88e6xxx_switch_ops = {
+static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
        .probe                  = mv88e6xxx_drv_probe,
        .get_tag_protocol       = mv88e6xxx_get_tag_protocol,
        .setup                  = mv88e6xxx_setup,
@@ -4375,12 +4432,6 @@ static struct dsa_switch_ops mv88e6xxx_switch_ops = {
        .get_sset_count         = mv88e6xxx_get_sset_count,
        .set_eee                = mv88e6xxx_set_eee,
        .get_eee                = mv88e6xxx_get_eee,
-#ifdef CONFIG_NET_DSA_HWMON
-       .get_temp               = mv88e6xxx_get_temp,
-       .get_temp_limit         = mv88e6xxx_get_temp_limit,
-       .set_temp_limit         = mv88e6xxx_set_temp_limit,
-       .get_temp_alarm         = mv88e6xxx_get_temp_alarm,
-#endif
        .get_eeprom_len         = mv88e6xxx_get_eeprom_len,
        .get_eeprom             = mv88e6xxx_get_eeprom,
        .set_eeprom             = mv88e6xxx_set_eeprom,
@@ -4406,23 +4457,25 @@ static struct dsa_switch_ops mv88e6xxx_switch_ops = {
        .port_mdb_dump          = mv88e6xxx_port_mdb_dump,
 };
 
-static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip,
-                                    struct device_node *np)
+static struct dsa_switch_driver mv88e6xxx_switch_drv = {
+       .ops                    = &mv88e6xxx_switch_ops,
+};
+
+static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
 {
        struct device *dev = chip->dev;
        struct dsa_switch *ds;
 
-       ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+       ds = dsa_switch_alloc(dev, DSA_MAX_PORTS);
        if (!ds)
                return -ENOMEM;
 
-       ds->dev = dev;
        ds->priv = chip;
        ds->ops = &mv88e6xxx_switch_ops;
 
        dev_set_drvdata(dev, ds);
 
-       return dsa_register_switch(ds, np);
+       return dsa_register_switch(ds, dev);
 }
 
 static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip)
@@ -4502,18 +4555,18 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
                }
        }
 
-       err = mv88e6xxx_mdio_register(chip, np);
+       err = mv88e6xxx_mdios_register(chip, np);
        if (err)
                goto out_g2_irq;
 
-       err = mv88e6xxx_register_switch(chip, np);
+       err = mv88e6xxx_register_switch(chip);
        if (err)
                goto out_mdio;
 
        return 0;
 
 out_mdio:
-       mv88e6xxx_mdio_unregister(chip);
+       mv88e6xxx_mdios_unregister(chip);
 out_g2_irq:
        if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT) && chip->irq > 0)
                mv88e6xxx_g2_irq_free(chip);
@@ -4534,7 +4587,7 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
 
        mv88e6xxx_phy_destroy(chip);
        mv88e6xxx_unregister_switch(chip);
-       mv88e6xxx_mdio_unregister(chip);
+       mv88e6xxx_mdios_unregister(chip);
 
        if (chip->irq > 0) {
                if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT))
@@ -4568,7 +4621,7 @@ static struct mdio_driver mv88e6xxx_driver = {
 
 static int __init mv88e6xxx_init(void)
 {
-       register_switch_driver(&mv88e6xxx_switch_ops);
+       register_switch_driver(&mv88e6xxx_switch_drv);
        return mdio_driver_register(&mv88e6xxx_driver);
 }
 module_init(mv88e6xxx_init);
@@ -4576,7 +4629,7 @@ module_init(mv88e6xxx_init);
 static void __exit mv88e6xxx_cleanup(void)
 {
        mdio_driver_unregister(&mv88e6xxx_driver);
-       unregister_switch_driver(&mv88e6xxx_switch_ops);
+       unregister_switch_driver(&mv88e6xxx_switch_drv);
 }
 module_exit(mv88e6xxx_cleanup);
 
drivers/net/dsa/mv88e6xxx/global2.c
index 3e77071949ab0ebba03237e0dfb87249fea8b3e4..8f15bc7b1f5f88d2e8150f78cdf79b9be28cb07c 100644
@@ -218,7 +218,8 @@ static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip)
 }
 
 /* Offset 0x14: EEPROM Command
- * Offset 0x15: EEPROM Data
+ * Offset 0x15: EEPROM Data (for 16-bit data access)
+ * Offset 0x15: EEPROM Addr (for 8-bit data access)
  */
 
 static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
@@ -239,6 +240,50 @@ static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
        return mv88e6xxx_g2_eeprom_wait(chip);
 }
 
+static int mv88e6xxx_g2_eeprom_read8(struct mv88e6xxx_chip *chip,
+                                    u16 addr, u8 *data)
+{
+       u16 cmd = GLOBAL2_EEPROM_CMD_OP_READ;
+       int err;
+
+       err = mv88e6xxx_g2_eeprom_wait(chip);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_ADDR, addr);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g2_eeprom_cmd(chip, cmd);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_CMD, &cmd);
+       if (err)
+               return err;
+
+       *data = cmd & 0xff;
+
+       return 0;
+}
+
+static int mv88e6xxx_g2_eeprom_write8(struct mv88e6xxx_chip *chip,
+                                     u16 addr, u8 data)
+{
+       u16 cmd = GLOBAL2_EEPROM_CMD_OP_WRITE | GLOBAL2_EEPROM_CMD_WRITE_EN;
+       int err;
+
+       err = mv88e6xxx_g2_eeprom_wait(chip);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_ADDR, addr);
+       if (err)
+               return err;
+
+       return mv88e6xxx_g2_eeprom_cmd(chip, cmd | data);
+}
+
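
In the 8-bit EEPROM flavour above, the byte being written rides in the low 8 bits of the command word (and a read hands it back the same way), while the EEPROM offset goes through the separate GLOBAL2_EEPROM_ADDR register. A sketch of the packing; CMD_WRITE_EN's bit position matches GLOBAL2_EEPROM_CMD_WRITE_EN from this patch, but the opcode placement is illustrative:

#include <stdint.h>
#include <stdio.h>

#define CMD_OP_WRITE    (0x3 << 12)     /* illustrative placement */
#define CMD_WRITE_EN    (1 << 10)       /* GLOBAL2_EEPROM_CMD_WRITE_EN */

int main(void)
{
        uint8_t data = 0xa5;
        uint16_t cmd = CMD_OP_WRITE | CMD_WRITE_EN | data;

        printf("cmd = 0x%04x, data field = 0x%02x\n", cmd, cmd & 0xff);
        return 0;
}
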
 static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip,
                                      u8 addr, u16 *data)
 {
@@ -273,6 +318,52 @@ static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip,
        return mv88e6xxx_g2_eeprom_cmd(chip, cmd);
 }
 
+int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
+                            struct ethtool_eeprom *eeprom, u8 *data)
+{
+       unsigned int offset = eeprom->offset;
+       unsigned int len = eeprom->len;
+       int err;
+
+       eeprom->len = 0;
+
+       while (len) {
+               err = mv88e6xxx_g2_eeprom_read8(chip, offset, data);
+               if (err)
+                       return err;
+
+               eeprom->len++;
+               offset++;
+               data++;
+               len--;
+       }
+
+       return 0;
+}
+
+int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip,
+                            struct ethtool_eeprom *eeprom, u8 *data)
+{
+       unsigned int offset = eeprom->offset;
+       unsigned int len = eeprom->len;
+       int err;
+
+       eeprom->len = 0;
+
+       while (len) {
+               err = mv88e6xxx_g2_eeprom_write8(chip, offset, *data);
+               if (err)
+                       return err;
+
+               eeprom->len++;
+               offset++;
+               data++;
+               len--;
+       }
+
+       return 0;
+}
+
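
Both loops zero eeprom->len up front and bump it per byte, so a mid-transfer failure still tells ethtool how many bytes actually completed. A standalone sketch of that progress accounting:

#include <stdio.h>

static int xfer_byte_sketch(unsigned int offset)
{
        return offset == 3 ? -1 : 0;    /* simulate failure on byte 4 */
}

int main(void)
{
        unsigned int offset, done = 0;

        for (offset = 0; offset < 8; offset++) {
                if (xfer_byte_sketch(offset))
                        break;
                done++;                 /* mirrors eeprom->len++ */
        }
        printf("completed %u of 8 bytes\n", done);      /* 3 */
        return 0;
}
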
 int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
                              struct ethtool_eeprom *eeprom, u8 *data)
 {
@@ -410,12 +501,67 @@ static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
        return mv88e6xxx_g2_smi_phy_wait(chip);
 }
 
-int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg,
-                             u16 *val)
+static int mv88e6xxx_g2_smi_phy_write_addr(struct mv88e6xxx_chip *chip,
+                                          int addr, int device, int reg,
+                                          bool external)
+{
+       int cmd = SMI_CMD_OP_45_WRITE_ADDR | (addr << 5) | device;
+       int err;
+
+       if (external)
+               cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
+
+       err = mv88e6xxx_g2_smi_phy_wait(chip);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_DATA, reg);
+       if (err)
+               return err;
+
+       return mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
+}
+
+int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip, int addr,
+                                 int reg_c45, u16 *val, bool external)
+{
+       int device = (reg_c45 >> 16) & 0x1f;
+       int reg = reg_c45 & 0xffff;
+       int err;
+       u16 cmd;
+
+       err = mv88e6xxx_g2_smi_phy_write_addr(chip, addr, device, reg,
+                                             external);
+       if (err)
+               return err;
+
+       cmd = GLOBAL2_SMI_PHY_CMD_OP_45_READ_DATA | (addr << 5) | device;
+
+       if (external)
+               cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
+
+       err = mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val);
+       if (err)
+               return err;
+
+       err = *val;
+
+       return 0;
+}
+
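
Clause-45 accesses arrive from the PHY library with the MMD device packed into bits 20:16 of the register argument and MII_ADDR_C45 set, which the first two lines of the function above undo. A standalone sketch of the unpacking (register 0x0834 is an arbitrary example):

#include <stdio.h>

#define MII_ADDR_C45    (1 << 30)       /* as in the kernel PHY library */

int main(void)
{
        int reg_c45 = MII_ADDR_C45 | (1 << 16) | 0x0834;
        int device = (reg_c45 >> 16) & 0x1f;    /* MMD 1: PMA/PMD */
        int reg = reg_c45 & 0xffff;

        printf("devad %d, reg 0x%04x\n", device, reg);
        return 0;
}
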
+int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip, int addr,
+                                 int reg, u16 *val, bool external)
 {
        u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg;
        int err;
 
+       if (external)
+               cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
+
        err = mv88e6xxx_g2_smi_phy_wait(chip);
        if (err)
                return err;
@@ -427,12 +573,57 @@ int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg,
        return mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val);
 }
 
-int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg,
-                              u16 val)
+int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
+                             struct mii_bus *bus,
+                             int addr, int reg, u16 *val)
+{
+       struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+       bool external = mdio_bus->external;
+
+       if (reg & MII_ADDR_C45)
+               return mv88e6xxx_g2_smi_phy_read_c45(chip, addr, reg, val,
+                                                    external);
+       return mv88e6xxx_g2_smi_phy_read_c22(chip, addr, reg, val, external);
+}
+
+int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip, int addr,
+                                  int reg_c45, u16 val, bool external)
+{
+       int device = (reg_c45 >> 16) & 0x1f;
+       int reg = reg_c45 & 0xffff;
+       int err;
+       u16 cmd;
+
+       err = mv88e6xxx_g2_smi_phy_write_addr(chip, addr, device, reg,
+                                             external);
+       if (err)
+               return err;
+
+       cmd = GLOBAL2_SMI_PHY_CMD_OP_45_WRITE_DATA | (addr << 5) | device;
+
+       if (external)
+               cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
+
+       err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_DATA, val);
+       if (err)
+               return err;
+
+       err = mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip, int addr,
+                                  int reg, u16 val, bool external)
 {
        u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA | (addr << 5) | reg;
        int err;
 
+       if (external)
+               cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
+
        err = mv88e6xxx_g2_smi_phy_wait(chip);
        if (err)
                return err;
@@ -444,6 +635,153 @@ int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg,
        return mv88e6xxx_g2_smi_phy_cmd(chip, cmd);
 }
 
+int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
+                              struct mii_bus *bus,
+                              int addr, int reg, u16 val)
+{
+       struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+       bool external = mdio_bus->external;
+
+       if (reg & MII_ADDR_C45)
+               return mv88e6xxx_g2_smi_phy_write_c45(chip, addr, reg, val,
+                                                     external);
+
+       return mv88e6xxx_g2_smi_phy_write_c22(chip, addr, reg, val, external);
+}
+
+static int mv88e6097_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
+{
+       u16 reg;
+
+       mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
+
+       dev_info(chip->dev, "Watchdog event: 0x%04x\n", reg);
+
+       return IRQ_HANDLED;
+}
+
+static void mv88e6097_watchdog_free(struct mv88e6xxx_chip *chip)
+{
+       u16 reg;
+
+       mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
+
+       reg &= ~(GLOBAL2_WDOG_CONTROL_EGRESS_ENABLE |
+                GLOBAL2_WDOG_CONTROL_QC_ENABLE);
+
+       mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, reg);
+}
+
+static int mv88e6097_watchdog_setup(struct mv88e6xxx_chip *chip)
+{
+       return mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL,
+                                 GLOBAL2_WDOG_CONTROL_EGRESS_ENABLE |
+                                 GLOBAL2_WDOG_CONTROL_QC_ENABLE |
+                                 GLOBAL2_WDOG_CONTROL_SWRESET);
+}
+
+const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {
+       .irq_action = mv88e6097_watchdog_action,
+       .irq_setup = mv88e6097_watchdog_setup,
+       .irq_free = mv88e6097_watchdog_free,
+};
+
+static int mv88e6390_watchdog_setup(struct mv88e6xxx_chip *chip)
+{
+       return mv88e6xxx_g2_update(chip, GLOBAL2_WDOG_CONTROL,
+                                  GLOBAL2_WDOG_INT_ENABLE |
+                                  GLOBAL2_WDOG_CUT_THROUGH |
+                                  GLOBAL2_WDOG_QUEUE_CONTROLLER |
+                                  GLOBAL2_WDOG_EGRESS |
+                                  GLOBAL2_WDOG_FORCE_IRQ);
+}
+
+static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
+{
+       int err;
+       u16 reg;
+
+       mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, GLOBAL2_WDOG_EVENT);
+       err = mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
+
+       dev_info(chip->dev, "Watchdog event: 0x%04x\n",
+                reg & GLOBAL2_WDOG_DATA_MASK);
+
+       mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, GLOBAL2_WDOG_HISTORY);
+       err = mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
+
+       dev_info(chip->dev, "Watchdog history: 0x%04x\n",
+                reg & GLOBAL2_WDOG_DATA_MASK);
+
+       /* Trigger a software reset to try to recover the switch */
+       if (chip->info->ops->reset)
+               chip->info->ops->reset(chip);
+
+       mv88e6390_watchdog_setup(chip);
+
+       return IRQ_HANDLED;
+}
+
+static void mv88e6390_watchdog_free(struct mv88e6xxx_chip *chip)
+{
+       mv88e6xxx_g2_update(chip, GLOBAL2_WDOG_CONTROL,
+                           GLOBAL2_WDOG_INT_ENABLE);
+}
+
+const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {
+       .irq_action = mv88e6390_watchdog_action,
+       .irq_setup = mv88e6390_watchdog_setup,
+       .irq_free = mv88e6390_watchdog_free,
+};
+
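
Judging by the GLOBAL2_WDOG_* defines later in this patch, the 6390-style watchdog control register appears to multiplex several internal registers: pointer in bits 14:8, data in bits 7:0, and an update (write) flag in bit 15. A sketch of composing a write to the interrupt-enable register under that reading:

#include <stdint.h>
#include <stdio.h>

#define WDOG_UPDATE     (1u << 15)      /* GLOBAL2_WDOG_UPDATE */
#define WDOG_PTR(r)     ((uint16_t)((r) << 8))
#define WDOG_INT_ENABLE 0x11            /* GLOBAL2_WDOG_INT_ENABLE >> 8 */

int main(void)
{
        uint16_t cmd = WDOG_UPDATE | WDOG_PTR(WDOG_INT_ENABLE) | 0x0f;

        printf("cmd 0x%04x: ptr 0x%02x data 0x%02x\n",
               cmd, (cmd >> 8) & 0x7f, cmd & 0xff);     /* 0x910f */
        return 0;
}
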
+static irqreturn_t mv88e6xxx_g2_watchdog_thread_fn(int irq, void *dev_id)
+{
+       struct mv88e6xxx_chip *chip = dev_id;
+       irqreturn_t ret = IRQ_NONE;
+
+       mutex_lock(&chip->reg_lock);
+       if (chip->info->ops->watchdog_ops->irq_action)
+               ret = chip->info->ops->watchdog_ops->irq_action(chip, irq);
+       mutex_unlock(&chip->reg_lock);
+
+       return ret;
+}
+
+static void mv88e6xxx_g2_watchdog_free(struct mv88e6xxx_chip *chip)
+{
+       mutex_lock(&chip->reg_lock);
+       if (chip->info->ops->watchdog_ops->irq_free)
+               chip->info->ops->watchdog_ops->irq_free(chip);
+       mutex_unlock(&chip->reg_lock);
+
+       free_irq(chip->watchdog_irq, chip);
+       irq_dispose_mapping(chip->watchdog_irq);
+}
+
+static int mv88e6xxx_g2_watchdog_setup(struct mv88e6xxx_chip *chip)
+{
+       int err;
+
+       chip->watchdog_irq = irq_find_mapping(chip->g2_irq.domain,
+                                             GLOBAL2_INT_SOURCE_WATCHDOG);
+       if (chip->watchdog_irq < 0)
+               return chip->watchdog_irq;
+
+       err = request_threaded_irq(chip->watchdog_irq, NULL,
+                                  mv88e6xxx_g2_watchdog_thread_fn,
+                                  IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+                                  "mv88e6xxx-watchdog", chip);
+       if (err)
+               return err;
+
+       mutex_lock(&chip->reg_lock);
+       if (chip->info->ops->watchdog_ops->irq_setup)
+               err = chip->info->ops->watchdog_ops->irq_setup(chip);
+       mutex_unlock(&chip->reg_lock);
+
+       return err;
+}
+
 static void mv88e6xxx_g2_irq_mask(struct irq_data *d)
 {
        struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
@@ -532,6 +870,8 @@ void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip)
 {
        int irq, virq;
 
+       mv88e6xxx_g2_watchdog_free(chip);
+
        free_irq(chip->device_irq, chip);
        irq_dispose_mapping(chip->device_irq);
 
@@ -574,7 +914,7 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
        if (err)
                goto out;
 
-       return 0;
+       return mv88e6xxx_g2_watchdog_setup(chip);
 
 out:
        for (irq = 0; irq < 16; irq++) {
drivers/net/dsa/mv88e6xxx/global2.h
index 9aefb7d8b0ad78dbec865593e0842b91ffc2062b..a8b2f9486a4abad1227030f57ce00b6d8cbb1fbb 100644
@@ -23,20 +23,32 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
        return 0;
 }
 
-int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg,
-                             u16 *val);
-int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg,
-                              u16 val);
+int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
+                             struct mii_bus *bus,
+                             int addr, int reg, u16 *val);
+int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
+                              struct mii_bus *bus,
+                              int addr, int reg, u16 val);
 int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
+
+int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
+                            struct ethtool_eeprom *eeprom, u8 *data);
+int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip,
+                            struct ethtool_eeprom *eeprom, u8 *data);
+
 int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
                              struct ethtool_eeprom *eeprom, u8 *data);
 int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
                              struct ethtool_eeprom *eeprom, u8 *data);
+
 int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip);
 int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip);
 void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip);
 int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
 
+extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
+extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
+
 #else /* !CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
 
 static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
@@ -50,12 +62,14 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
 }
 
 static inline int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
+                                           struct mii_bus *bus,
                                            int addr, int reg, u16 *val)
 {
        return -EOPNOTSUPP;
 }
 
 static inline int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
+                                            struct mii_bus *bus,
                                             int addr, int reg, u16 val)
 {
        return -EOPNOTSUPP;
@@ -67,6 +81,20 @@ static inline int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip,
        return -EOPNOTSUPP;
 }
 
+static inline int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
+                                          struct ethtool_eeprom *eeprom,
+                                          u8 *data)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip,
+                                          struct ethtool_eeprom *eeprom,
+                                          u8 *data)
+{
+       return -EOPNOTSUPP;
+}
+
 static inline int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
                                            struct ethtool_eeprom *eeprom,
                                            u8 *data)
@@ -100,6 +128,9 @@ static inline int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
        return -EOPNOTSUPP;
 }
 
+static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {};
+static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {};
+
 #endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
 
 #endif /* _MV88E6XXX_GLOBAL2_H */
drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
index af54baea47cff5614743dd28862542d297c32dc8..6033f2f6260a464418fcc981e5430e155e836bc3 100644
@@ -15,6 +15,7 @@
 #include <linux/if_vlan.h>
 #include <linux/irq.h>
 #include <linux/gpio/consumer.h>
+#include <linux/phy.h>
 
 #ifndef UINT64_MAX
 #define UINT64_MAX             (u64)(~((u64)0))
@@ -58,6 +59,9 @@
 #define PORT_STATUS_CMODE_100BASE_X    0x8
 #define PORT_STATUS_CMODE_1000BASE_X   0x9
 #define PORT_STATUS_CMODE_SGMII                0xa
+#define PORT_STATUS_CMODE_2500BASEX    0xb
+#define PORT_STATUS_CMODE_XAUI         0xc
+#define PORT_STATUS_CMODE_RXAUI                0xd
 #define PORT_PCS_CTRL          0x01
 #define PORT_PCS_CTRL_RGMII_DELAY_RXCLK        BIT(15)
 #define PORT_PCS_CTRL_RGMII_DELAY_TXCLK        BIT(14)
@@ -87,6 +91,7 @@
 #define PORT_SWITCH_ID_PROD_NUM_6131   0x106
 #define PORT_SWITCH_ID_PROD_NUM_6320   0x115
 #define PORT_SWITCH_ID_PROD_NUM_6123   0x121
+#define PORT_SWITCH_ID_PROD_NUM_6141   0x340
 #define PORT_SWITCH_ID_PROD_NUM_6161   0x161
 #define PORT_SWITCH_ID_PROD_NUM_6165   0x165
 #define PORT_SWITCH_ID_PROD_NUM_6171   0x171
 #define PORT_SWITCH_ID_PROD_NUM_6240   0x240
 #define PORT_SWITCH_ID_PROD_NUM_6290   0x290
 #define PORT_SWITCH_ID_PROD_NUM_6321   0x310
+#define PORT_SWITCH_ID_PROD_NUM_6341   0x341
 #define PORT_SWITCH_ID_PROD_NUM_6352   0x352
 #define PORT_SWITCH_ID_PROD_NUM_6350   0x371
 #define PORT_SWITCH_ID_PROD_NUM_6351   0x375
 #define PORT_CONTROL_2_FORWARD_UNKNOWN BIT(6)
 #define PORT_CONTROL_2_EGRESS_MONITOR  BIT(5)
 #define PORT_CONTROL_2_INGRESS_MONITOR BIT(4)
+#define PORT_CONTROL_2_UPSTREAM_MASK   0x0f
 #define PORT_RATE_CONTROL      0x09
 #define PORT_RATE_CONTROL_2    0x0a
 #define PORT_ASSOC_VECTOR      0x0b
 #define GLOBAL_STATS_COUNTER_01        0x1f
 
 #define GLOBAL2_INT_SOURCE     0x00
+#define GLOBAL2_INT_SOURCE_WATCHDOG    15
 #define GLOBAL2_INT_MASK       0x01
 #define GLOBAL2_MGMT_EN_2X     0x02
 #define GLOBAL2_MGMT_EN_0X     0x03
 #define GLOBAL2_EEPROM_CMD_WRITE_EN    BIT(10)
 #define GLOBAL2_EEPROM_CMD_ADDR_MASK   0xff
 #define GLOBAL2_EEPROM_DATA    0x15
+#define GLOBAL2_EEPROM_ADDR    0x15 /* 6390, 6341 */
 #define GLOBAL2_PTP_AVB_OP     0x16
 #define GLOBAL2_PTP_AVB_DATA   0x17
 #define GLOBAL2_SMI_PHY_CMD                    0x18
 #define GLOBAL2_SMI_PHY_CMD_BUSY               BIT(15)
+#define GLOBAL2_SMI_PHY_CMD_EXTERNAL           BIT(13)
 #define GLOBAL2_SMI_PHY_CMD_MODE_22            BIT(12)
 #define GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA   ((0x1 << 10) | \
                                                 GLOBAL2_SMI_PHY_CMD_MODE_22 | \
 #define GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA    ((0x2 << 10) | \
                                                 GLOBAL2_SMI_PHY_CMD_MODE_22 | \
                                                 GLOBAL2_SMI_PHY_CMD_BUSY)
+#define GLOBAL2_SMI_PHY_CMD_OP_45_WRITE_ADDR   ((0x0 << 10) | \
+                                                GLOBAL2_SMI_PHY_CMD_BUSY)
+#define GLOBAL2_SMI_PHY_CMD_OP_45_WRITE_DATA   ((0x1 << 10) | \
+                                                GLOBAL2_SMI_PHY_CMD_BUSY)
+#define GLOBAL2_SMI_PHY_CMD_OP_45_READ_DATA    ((0x3 << 10) | \
+                                                GLOBAL2_SMI_PHY_CMD_BUSY)
+
 #define GLOBAL2_SMI_PHY_DATA                   0x19
 #define GLOBAL2_SCRATCH_MISC   0x1a
 #define GLOBAL2_SCRATCH_BUSY           BIT(15)
 #define GLOBAL2_SCRATCH_REGISTER_SHIFT 8
 #define GLOBAL2_SCRATCH_VALUE_MASK     0xff
 #define GLOBAL2_WDOG_CONTROL   0x1b
+#define GLOBAL2_WDOG_CONTROL_EGRESS_EVENT      BIT(7)
+#define GLOBAL2_WDOG_CONTROL_RMU_TIMEOUT       BIT(6)
+#define GLOBAL2_WDOG_CONTROL_QC_ENABLE         BIT(5)
+#define GLOBAL2_WDOG_CONTROL_EGRESS_HISTORY    BIT(4)
+#define GLOBAL2_WDOG_CONTROL_EGRESS_ENABLE     BIT(3)
+#define GLOBAL2_WDOG_CONTROL_FORCE_IRQ         BIT(2)
+#define GLOBAL2_WDOG_CONTROL_HISTORY           BIT(1)
+#define GLOBAL2_WDOG_CONTROL_SWRESET           BIT(0)
+#define GLOBAL2_WDOG_UPDATE                    BIT(15)
+#define GLOBAL2_WDOG_INT_SOURCE                        (0x00 << 8)
+#define GLOBAL2_WDOG_INT_STATUS                        (0x10 << 8)
+#define GLOBAL2_WDOG_INT_ENABLE                        (0x11 << 8)
+#define GLOBAL2_WDOG_EVENT                     (0x12 << 8)
+#define GLOBAL2_WDOG_HISTORY                   (0x13 << 8)
+#define GLOBAL2_WDOG_DATA_MASK                 0xff
+#define GLOBAL2_WDOG_CUT_THROUGH               BIT(3)
+#define GLOBAL2_WDOG_QUEUE_CONTROLLER          BIT(2)
+#define GLOBAL2_WDOG_EGRESS                    BIT(1)
+#define GLOBAL2_WDOG_FORCE_IRQ                 BIT(0)
 #define GLOBAL2_QOS_WEIGHT     0x1c
 #define GLOBAL2_MISC           0x1d
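
The three GLOBAL2_SMI_PHY_CMD_OP_45_* encodings above implement IEEE 802.3 Clause 45 access, a two-step protocol: the register address is latched with a WRITE_ADDR operation before the data moves in a separate READ_DATA or WRITE_DATA operation. A rough sketch of a Clause 45 read follows; the mv88e6xxx_g2_read()/mv88e6xxx_g2_write() helper names, the busy-wait steps, and the exact port/device-address bit layout are assumptions:

	static int g2_smi_phy_read_c45_sketch(struct mv88e6xxx_chip *chip,
					      int port, int devad, int reg,
					      u16 *val)
	{
		u16 cmd = (port << 5) | devad;	/* assumed field layout */
		int err;

		/* Step 1: latch the target register number in the MMD */
		err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_DATA, reg);
		if (err)
			return err;
		err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_CMD,
					 GLOBAL2_SMI_PHY_CMD_OP_45_WRITE_ADDR | cmd);
		if (err)
			return err;
		/* ... wait for GLOBAL2_SMI_PHY_CMD_BUSY to clear ... */

		/* Step 2: issue the data read against the latched address */
		err = mv88e6xxx_g2_write(chip, GLOBAL2_SMI_PHY_CMD,
					 GLOBAL2_SMI_PHY_CMD_OP_45_READ_DATA | cmd);
		if (err)
			return err;
		/* ... wait for BUSY again, then fetch the data ... */

		return mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val);
	}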
 
@@ -418,6 +454,7 @@ enum mv88e6xxx_model {
        MV88E6097,
        MV88E6123,
        MV88E6131,
+       MV88E6141,
        MV88E6161,
        MV88E6165,
        MV88E6171,
@@ -432,6 +469,7 @@ enum mv88e6xxx_model {
        MV88E6290,
        MV88E6320,
        MV88E6321,
+       MV88E6341,
        MV88E6350,
        MV88E6351,
        MV88E6352,
@@ -447,6 +485,7 @@ enum mv88e6xxx_family {
        MV88E6XXX_FAMILY_6165,  /* 6123 6161 6165 */
        MV88E6XXX_FAMILY_6185,  /* 6108 6121 6122 6131 6152 6155 6182 6185 */
        MV88E6XXX_FAMILY_6320,  /* 6320 6321 */
+       MV88E6XXX_FAMILY_6341,  /* 6141 6341 */
        MV88E6XXX_FAMILY_6351,  /* 6171 6175 6350 6351 */
        MV88E6XXX_FAMILY_6352,  /* 6172 6176 6240 6352 */
        MV88E6XXX_FAMILY_6390,  /* 6190 6190X 6191 6290 6390 6390X */
@@ -496,12 +535,6 @@ enum mv88e6xxx_cap {
         */
        MV88E6XXX_CAP_STU,
 
-       /* Internal temperature sensor.
-        * Available from any enabled port's PHY register 26, page 6.
-        */
-       MV88E6XXX_CAP_TEMP,
-       MV88E6XXX_CAP_TEMP_LIMIT,
-
        /* VLAN Table Unit.
         * The VTU is used to program 802.1Q VLANs. See GLOBAL_VTU_OP.
         */
@@ -532,8 +565,6 @@ enum mv88e6xxx_cap {
 #define MV88E6XXX_FLAG_G2_POT          BIT_ULL(MV88E6XXX_CAP_G2_POT)
 
 #define MV88E6XXX_FLAG_STU             BIT_ULL(MV88E6XXX_CAP_STU)
-#define MV88E6XXX_FLAG_TEMP            BIT_ULL(MV88E6XXX_CAP_TEMP)
-#define MV88E6XXX_FLAG_TEMP_LIMIT      BIT_ULL(MV88E6XXX_CAP_TEMP_LIMIT)
 #define MV88E6XXX_FLAG_VTU             BIT_ULL(MV88E6XXX_CAP_VTU)
 
 /* Ingress Rate Limit unit */
@@ -566,6 +597,7 @@ enum mv88e6xxx_cap {
        (MV88E6XXX_FLAG_G1_ATU_FID |    \
         MV88E6XXX_FLAG_G1_VTU_FID |    \
         MV88E6XXX_FLAG_GLOBAL2 |       \
+        MV88E6XXX_FLAG_G2_INT |        \
         MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
         MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
         MV88E6XXX_FLAG_G2_POT |        \
@@ -584,7 +616,6 @@ enum mv88e6xxx_cap {
         MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
         MV88E6XXX_FLAG_G2_POT |        \
         MV88E6XXX_FLAG_STU |           \
-        MV88E6XXX_FLAG_TEMP |          \
         MV88E6XXX_FLAG_VTU |           \
         MV88E6XXX_FLAGS_IRL |          \
         MV88E6XXX_FLAGS_MULTI_CHIP |   \
@@ -603,13 +634,25 @@ enum mv88e6xxx_cap {
         MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
         MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
         MV88E6XXX_FLAG_G2_POT |        \
-        MV88E6XXX_FLAG_TEMP |          \
-        MV88E6XXX_FLAG_TEMP_LIMIT |    \
         MV88E6XXX_FLAG_VTU |           \
         MV88E6XXX_FLAGS_IRL |          \
         MV88E6XXX_FLAGS_MULTI_CHIP |   \
         MV88E6XXX_FLAGS_PVT)
 
+#define MV88E6XXX_FLAGS_FAMILY_6341    \
+       (MV88E6XXX_FLAG_EEE |           \
+        MV88E6XXX_FLAG_G1_ATU_FID |    \
+        MV88E6XXX_FLAG_G1_VTU_FID |    \
+        MV88E6XXX_FLAG_GLOBAL2 |       \
+        MV88E6XXX_FLAG_G2_INT |        \
+        MV88E6XXX_FLAG_G2_POT |        \
+        MV88E6XXX_FLAG_STU |           \
+        MV88E6XXX_FLAG_VTU |           \
+        MV88E6XXX_FLAGS_IRL |          \
+        MV88E6XXX_FLAGS_MULTI_CHIP |   \
+        MV88E6XXX_FLAGS_PVT |          \
+        MV88E6XXX_FLAGS_SERDES)
+
 #define MV88E6XXX_FLAGS_FAMILY_6351    \
        (MV88E6XXX_FLAG_G1_ATU_FID |    \
         MV88E6XXX_FLAG_G1_VTU_FID |    \
@@ -619,7 +662,6 @@ enum mv88e6xxx_cap {
         MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
         MV88E6XXX_FLAG_G2_POT |        \
         MV88E6XXX_FLAG_STU |           \
-        MV88E6XXX_FLAG_TEMP |          \
         MV88E6XXX_FLAG_VTU |           \
         MV88E6XXX_FLAGS_IRL |          \
         MV88E6XXX_FLAGS_MULTI_CHIP |   \
@@ -635,27 +677,24 @@ enum mv88e6xxx_cap {
         MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
         MV88E6XXX_FLAG_G2_POT |        \
         MV88E6XXX_FLAG_STU |           \
-        MV88E6XXX_FLAG_TEMP |          \
-        MV88E6XXX_FLAG_TEMP_LIMIT |    \
         MV88E6XXX_FLAG_VTU |           \
         MV88E6XXX_FLAGS_IRL |          \
         MV88E6XXX_FLAGS_MULTI_CHIP |   \
         MV88E6XXX_FLAGS_PVT |          \
         MV88E6XXX_FLAGS_SERDES)
 
-struct mv88e6xxx_ops;
-
 #define MV88E6XXX_FLAGS_FAMILY_6390    \
        (MV88E6XXX_FLAG_EEE |           \
         MV88E6XXX_FLAG_GLOBAL2 |       \
+        MV88E6XXX_FLAG_G2_INT |        \
         MV88E6XXX_FLAG_STU |           \
-        MV88E6XXX_FLAG_TEMP |          \
-        MV88E6XXX_FLAG_TEMP_LIMIT |    \
         MV88E6XXX_FLAG_VTU |           \
         MV88E6XXX_FLAGS_IRL |          \
         MV88E6XXX_FLAGS_MULTI_CHIP |   \
         MV88E6XXX_FLAGS_PVT)
 
+struct mv88e6xxx_ops;
+
 struct mv88e6xxx_info {
        enum mv88e6xxx_family family;
        u16 prod_num;
@@ -688,10 +727,7 @@ struct mv88e6xxx_vtu_entry {
 };
 
 struct mv88e6xxx_bus_ops;
-
-struct mv88e6xxx_priv_port {
-       struct net_device *bridge_dev;
-};
+struct mv88e6xxx_irq_ops;
 
 struct mv88e6xxx_irq {
        u16 masked;
@@ -733,8 +769,6 @@ struct mv88e6xxx_chip {
         */
        struct mutex    stats_mutex;
 
-       struct mv88e6xxx_priv_port      ports[DSA_MAX_PORTS];
-
        /* A switch may have a GPIO line tied to its reset pin. Parse
         * this from the device tree, and use it before performing
         * switch soft reset.
@@ -744,11 +778,8 @@ struct mv88e6xxx_chip {
        /* set to size of eeprom if supported by the switch */
        int             eeprom_len;
 
-       /* Device node for the MDIO bus */
-       struct device_node *mdio_np;
-
-       /* And the MDIO bus itself */
-       struct mii_bus *mdio_bus;
+       /* List of mdio busses */
+       struct list_head mdios;
 
        /* There can be two interrupt controllers, which are chained
         * off a GPIO as interrupt source
@@ -757,6 +788,7 @@ struct mv88e6xxx_chip {
        struct mv88e6xxx_irq g2_irq;
        int irq;
        int device_irq;
+       int watchdog_irq;
 };
 
 struct mv88e6xxx_bus_ops {
@@ -764,6 +796,13 @@ struct mv88e6xxx_bus_ops {
        int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
 };
 
+struct mv88e6xxx_mdio_bus {
+       struct mii_bus *bus;
+       struct mv88e6xxx_chip *chip;
+       struct list_head list;
+       bool external;
+};
+
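Replacing the single mdio_bus pointer with the chip->mdios list (see the struct just above) lets one switch expose both its internal bus and an external one, selected through GLOBAL2_SMI_PHY_CMD_EXTERNAL. A sketch of picking the internal bus off the list (the helper name is hypothetical):

	static struct mii_bus *
	mv88e6xxx_internal_mdio_bus_sketch(struct mv88e6xxx_chip *chip)
	{
		struct mv88e6xxx_mdio_bus *mdio_bus;

		list_for_each_entry(mdio_bus, &chip->mdios, list)
			if (!mdio_bus->external)
				return mdio_bus->bus;

		return NULL;
	}
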
 struct mv88e6xxx_ops {
        int (*get_eeprom)(struct mv88e6xxx_chip *chip,
                          struct ethtool_eeprom *eeprom, u8 *data);
@@ -772,10 +811,12 @@ struct mv88e6xxx_ops {
 
        int (*set_switch_mac)(struct mv88e6xxx_chip *chip, u8 *addr);
 
-       int (*phy_read)(struct mv88e6xxx_chip *chip, int addr, int reg,
-                       u16 *val);
-       int (*phy_write)(struct mv88e6xxx_chip *chip, int addr, int reg,
-                        u16 val);
+       int (*phy_read)(struct mv88e6xxx_chip *chip,
+                       struct mii_bus *bus,
+                       int addr, int reg, u16 *val);
+       int (*phy_write)(struct mv88e6xxx_chip *chip,
+                        struct mii_bus *bus,
+                        int addr, int reg, u16 val);
 
        /* PHY Polling Unit (PPU) operations */
        int (*ppu_enable)(struct mv88e6xxx_chip *chip);
@@ -832,6 +873,18 @@ struct mv88e6xxx_ops {
        int (*port_egress_rate_limiting)(struct mv88e6xxx_chip *chip, int port);
        int (*port_pause_config)(struct mv88e6xxx_chip *chip, int port);
 
+       /* CMODE controls which PHY mode the MAC will use, e.g. SGMII, RGMII, etc.
+        * Some chips allow this to be configured on specific ports.
+        */
+       int (*port_set_cmode)(struct mv88e6xxx_chip *chip, int port,
+                             phy_interface_t mode);
+
+       /* Some devices have a per-port register indicating which
+        * upstream port this port should forward to.
+        */
+       int (*port_set_upstream_port)(struct mv88e6xxx_chip *chip, int port,
+                                     int upstream_port);
+
        /* Snapshot the statistics for a port. The statistics can then
         * be read back at leisure but still with a consistent view.
         */
@@ -849,11 +902,21 @@ struct mv88e6xxx_ops {
                                uint64_t *data);
        int (*g1_set_cpu_port)(struct mv88e6xxx_chip *chip, int port);
        int (*g1_set_egress_port)(struct mv88e6xxx_chip *chip, int port);
+       const struct mv88e6xxx_irq_ops *watchdog_ops;
 
        /* Can be either in g1 or g2, so don't use a prefix */
        int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip);
 };
 
+struct mv88e6xxx_irq_ops {
+       /* Action to be performed when the interrupt happens */
+       int (*irq_action)(struct mv88e6xxx_chip *chip, int irq);
+       /* Setup the hardware to generate the interrupt */
+       int (*irq_setup)(struct mv88e6xxx_chip *chip);
+       /* Reset the hardware to stop generating the interrupt */
+       void (*irq_free)(struct mv88e6xxx_chip *chip);
+};
+
 #define STATS_TYPE_PORT                BIT(0)
 #define STATS_TYPE_BANK0       BIT(1)
 #define STATS_TYPE_BANK1       BIT(2)
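
The mv88e6xxx_irq_ops hook just above, together with the GLOBAL2_WDOG_* bits earlier in this header, lets each chip family supply its own watchdog handling. A hypothetical 6097-style implementation; the g2 register helper names and the exact register choreography are assumptions based on the definitions above:

	static int mv88e6097_watchdog_action_sketch(struct mv88e6xxx_chip *chip,
						    int irq)
	{
		u16 reg = 0;

		/* Read and report what tripped the watchdog */
		mv88e6xxx_g2_read(chip, GLOBAL2_WDOG_CONTROL, &reg);
		if (reg & GLOBAL2_WDOG_CONTROL_EGRESS_EVENT)
			dev_info(chip->dev, "watchdog: egress queue stall\n");

		return IRQ_HANDLED;
	}

	static int mv88e6097_watchdog_setup_sketch(struct mv88e6xxx_chip *chip)
	{
		/* Arm the egress and queue-controller watchdogs */
		return mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL,
					  GLOBAL2_WDOG_CONTROL_EGRESS_ENABLE |
					  GLOBAL2_WDOG_CONTROL_QC_ENABLE);
	}

	static void mv88e6097_watchdog_free_sketch(struct mv88e6xxx_chip *chip)
	{
		mv88e6xxx_g2_write(chip, GLOBAL2_WDOG_CONTROL, 0);
	}

	static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops_sketch = {
		.irq_action = mv88e6097_watchdog_action_sketch,
		.irq_setup = mv88e6097_watchdog_setup_sketch,
		.irq_free = mv88e6097_watchdog_free_sketch,
	};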
index 0db7fa0373ae29ae03607c79a83dd4e40e582e23..8875784c4718feee699355f91c2092622417cce1 100644 (file)
@@ -11,6 +11,7 @@
  * (at your option) any later version.
  */
 
+#include <linux/phy.h>
 #include "mv88e6xxx.h"
 #include "port.h"
 
@@ -193,7 +194,7 @@ static int mv88e6xxx_port_set_speed(struct mv88e6xxx_chip *chip, int port,
                ctrl = PORT_PCS_CTRL_SPEED_1000;
                break;
        case 2500:
-               ctrl = PORT_PCS_CTRL_SPEED_1000 | PORT_PCS_CTRL_ALTSPEED;
+               ctrl = PORT_PCS_CTRL_SPEED_10000 | PORT_PCS_CTRL_ALTSPEED;
                break;
        case 10000:
                /* all bits set, fall through... */
@@ -304,6 +305,69 @@ int mv88e6390x_port_set_speed(struct mv88e6xxx_chip *chip, int port, int speed)
        return mv88e6xxx_port_set_speed(chip, port, speed, true, true);
 }
 
+int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+                             phy_interface_t mode)
+{
+       u16 reg;
+       u16 cmode;
+       int err;
+
+       if (mode == PHY_INTERFACE_MODE_NA)
+               return 0;
+
+       if (port != 9 && port != 10)
+               return -EOPNOTSUPP;
+
+       switch (mode) {
+       case PHY_INTERFACE_MODE_1000BASEX:
+               cmode = PORT_STATUS_CMODE_1000BASE_X;
+               break;
+       case PHY_INTERFACE_MODE_SGMII:
+               cmode = PORT_STATUS_CMODE_SGMII;
+               break;
+       case PHY_INTERFACE_MODE_2500BASEX:
+               cmode = PORT_STATUS_CMODE_2500BASEX;
+               break;
+       case PHY_INTERFACE_MODE_XGMII:
+               cmode = PORT_STATUS_CMODE_XAUI;
+               break;
+       case PHY_INTERFACE_MODE_RXAUI:
+               cmode = PORT_STATUS_CMODE_RXAUI;
+               break;
+       default:
+               cmode = 0;
+       }
+
+       if (cmode) {
+               err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg);
+               if (err)
+                       return err;
+
+               reg &= ~PORT_STATUS_CMODE_MASK;
+               reg |= cmode;
+
+               err = mv88e6xxx_port_write(chip, port, PORT_STATUS, reg);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
+{
+       int err;
+       u16 reg;
+
+       err = mv88e6xxx_port_read(chip, port, PORT_STATUS, &reg);
+       if (err)
+               return err;
+
+       *cmode = reg & PORT_STATUS_CMODE_MASK;
+
+       return 0;
+}
+
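The set/get pair above would typically be used together: program the serdes mode during port setup, then read it back for diagnostics. A sketch of the ops-level dispatch, assuming the chip's ops table is reachable as chip->info->ops and treating -EOPNOTSUPP as benign (one possible policy):

	static int port_setup_cmode_sketch(struct mv88e6xxx_chip *chip, int port,
					   phy_interface_t mode)
	{
		u8 cmode;
		int err;

		if (chip->info->ops->port_set_cmode) {
			err = chip->info->ops->port_set_cmode(chip, port, mode);
			if (err && err != -EOPNOTSUPP)
				return err;
		}

		err = mv88e6xxx_port_get_cmode(chip, port, &cmode);
		if (err)
			return err;

		dev_dbg(chip->dev, "port %d cmode 0x%x\n", port, cmode);

		return 0;
	}
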
 /* Offset 0x02: Pause Control
  *
  * Do not limit the period of time that this port can be paused for by
@@ -608,6 +672,40 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = {
        [PORT_CONTROL_2_8021Q_SECURE] = "Secure",
 };
 
+int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
+                                      bool on)
+{
+       int err;
+       u16 reg;
+
+       err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
+       if (err)
+               return err;
+
+       if (on)
+               reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
+       else
+               reg &= ~PORT_CONTROL_2_FORWARD_UNKNOWN;
+
+       return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+}
+
+int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
+                                    int upstream_port)
+{
+       int err;
+       u16 reg;
+
+       err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
+       if (err)
+               return err;
+
+       reg &= ~PORT_CONTROL_2_UPSTREAM_MASK;
+       reg |= upstream_port;
+
+       return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+}
+
 int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
                                  u16 mode)
 {
@@ -631,6 +729,20 @@ int mv88e6xxx_port_set_8021q_mode(struct mv88e6xxx_chip *chip, int port,
        return 0;
 }
 
+int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port)
+{
+       u16 reg;
+       int err;
+
+       err = mv88e6xxx_port_read(chip, port, PORT_CONTROL_2, &reg);
+       if (err)
+               return err;
+
+       reg |= PORT_CONTROL_2_MAP_DA;
+
+       return mv88e6xxx_port_write(chip, port, PORT_CONTROL_2, reg);
+}
+
 int mv88e6165_port_jumbo_config(struct mv88e6xxx_chip *chip, int port)
 {
        u16 reg;
index 7b3bacaacbfe128fe7b6269fe5336f457358186e..c83cbb3f449182317a21c9fb894a114d99a08ebb 100644 (file)
@@ -58,6 +58,8 @@ int mv88e6351_port_set_frame_mode(struct mv88e6xxx_chip *chip, int port,
                                  enum mv88e6xxx_frame_mode mode);
 int mv88e6085_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
                                       bool on);
+int mv88e6095_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
+                                      bool on);
 int mv88e6351_port_set_egress_unknowns(struct mv88e6xxx_chip *chip, int port,
                                       bool on);
 int mv88e6351_port_set_ether_type(struct mv88e6xxx_chip *chip, int port,
@@ -67,5 +69,10 @@ int mv88e6095_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
 int mv88e6097_port_egress_rate_limiting(struct mv88e6xxx_chip *chip, int port);
 int mv88e6097_port_pause_config(struct mv88e6xxx_chip *chip, int port);
 int mv88e6390_port_pause_config(struct mv88e6xxx_chip *chip, int port);
-
+int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+                             phy_interface_t mode);
+int mv88e6xxx_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
+int mv88e6xxx_port_set_map_da(struct mv88e6xxx_chip *chip, int port);
+int mv88e6095_port_set_upstream_port(struct mv88e6xxx_chip *chip, int port,
+                                    int upstream_port);
 #endif /* _MV88E6XXX_PORT_H */
index b3df70d07ff632f85517968716248f751ce59836..a4fd4ccf7b67794e22fc3259d96cc2f9aa6b8723 100644 (file)
@@ -746,17 +746,14 @@ qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
 }
 
 static int
-qca8k_port_bridge_join(struct dsa_switch *ds, int port,
-                      struct net_device *bridge)
+qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
 {
        struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
        int port_mask = BIT(QCA8K_CPU_PORT);
        int i;
 
-       priv->port_sts[port].bridge_dev = bridge;
-
        for (i = 1; i < QCA8K_NUM_PORTS; i++) {
-               if (priv->port_sts[i].bridge_dev != bridge)
+               if (ds->ports[i].bridge_dev != br)
                        continue;
                /* Add this port to the portvlan mask of the other ports
                 * in the bridge
@@ -775,14 +772,13 @@ qca8k_port_bridge_join(struct dsa_switch *ds, int port,
 }
 
 static void
-qca8k_port_bridge_leave(struct dsa_switch *ds, int port)
+qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
 {
        struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
        int i;
 
        for (i = 1; i < QCA8K_NUM_PORTS; i++) {
-               if (priv->port_sts[i].bridge_dev !=
-                   priv->port_sts[port].bridge_dev)
+               if (ds->ports[i].bridge_dev != br)
                        continue;
                /* Remove this port from the portvlan mask of the other ports
                 * in the bridge
@@ -791,7 +787,7 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port)
                                QCA8K_PORT_LOOKUP_CTRL(i),
                                BIT(port));
        }
-       priv->port_sts[port].bridge_dev = NULL;
+
        /* Set the cpu port to be the only one in the portvlan mask of
         * this port
         */
@@ -911,7 +907,7 @@ qca8k_get_tag_protocol(struct dsa_switch *ds)
        return DSA_TAG_PROTO_QCA;
 }
 
-static struct dsa_switch_ops qca8k_switch_ops = {
+static const struct dsa_switch_ops qca8k_switch_ops = {
        .get_tag_protocol       = qca8k_get_tag_protocol,
        .setup                  = qca8k_setup,
        .get_strings            = qca8k_get_strings,
@@ -954,17 +950,16 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
        if (id != QCA8K_ID_QCA8337)
                return -ENODEV;
 
-       priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
+       priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
        if (!priv->ds)
                return -ENOMEM;
 
        priv->ds->priv = priv;
-       priv->ds->dev = &mdiodev->dev;
        priv->ds->ops = &qca8k_switch_ops;
        mutex_init(&priv->reg_mutex);
        dev_set_drvdata(&mdiodev->dev, priv);
 
-       return dsa_register_switch(priv->ds, priv->ds->dev->of_node);
+       return dsa_register_switch(priv->ds, &mdiodev->dev);
 }
 
 static void
index 2014647195316977b57bf4c495149694a7afb392..1ed4fac6cd6d5bc3cf05fa8cb03b513f178aa07e 100644 (file)
@@ -157,7 +157,6 @@ enum qca8k_fdb_cmd {
 
 struct ar8xxx_port_status {
        struct ethtool_eee eee;
-       struct net_device *bridge_dev;
        int enabled;
 };
 
index 6421835f11b7eef6de593a07210a144c1e4c3ce4..2c80611b94aef3c5ce9d0f98e8d92e497542123a 100644 (file)
 #define DRV_NAME       "dummy"
 #define DRV_VERSION    "1.0"
 
+#undef pr_fmt
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
 static int numdummies = 1;
+static int num_vfs;
+
+struct vf_data_storage {
+       u8      vf_mac[ETH_ALEN];
+       u16     pf_vlan; /* When set, guest VLAN config not allowed. */
+       u16     pf_qos;
+       __be16  vlan_proto;
+       u16     min_tx_rate;
+       u16     max_tx_rate;
+       u8      spoofchk_enabled;
+       bool    rss_query_enabled;
+       u8      trusted;
+       int     link_state;
+};
+
+struct dummy_priv {
+       struct vf_data_storage  *vfinfo;
+};
+
+static int dummy_num_vf(struct device *dev)
+{
+       return num_vfs;
+}
+
+static struct bus_type dummy_bus = {
+       .name   = "dummy",
+       .num_vf = dummy_num_vf,
+};
+
+static void release_dummy_parent(struct device *dev)
+{
+}
+
+static struct device dummy_parent = {
+       .init_name      = "dummy",
+       .bus            = &dummy_bus,
+       .release        = release_dummy_parent,
+};
 
 /* fake multicast ability */
 static void set_multicast_list(struct net_device *dev)
@@ -54,8 +95,8 @@ struct pcpu_dstats {
        struct u64_stats_sync   syncp;
 };
 
-static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev,
-                                                  struct rtnl_link_stats64 *stats)
+static void dummy_get_stats64(struct net_device *dev,
+                             struct rtnl_link_stats64 *stats)
 {
        int i;
 
@@ -73,7 +114,6 @@ static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev,
                stats->tx_bytes += tbytes;
                stats->tx_packets += tpackets;
        }
-       return stats;
 }
 
 static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -91,10 +131,25 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static int dummy_dev_init(struct net_device *dev)
 {
+       struct dummy_priv *priv = netdev_priv(dev);
+
        dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
        if (!dev->dstats)
                return -ENOMEM;
 
+       priv->vfinfo = NULL;
+
+       if (!num_vfs)
+               return 0;
+
+       dev->dev.parent = &dummy_parent;
+       priv->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
+                              GFP_KERNEL);
+       if (!priv->vfinfo) {
+               free_percpu(dev->dstats);
+               return -ENOMEM;
+       }
+
        return 0;
 }
 
@@ -112,6 +167,117 @@ static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
        return 0;
 }
 
+static int dummy_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+       struct dummy_priv *priv = netdev_priv(dev);
+
+       if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+               return -EINVAL;
+
+       memcpy(priv->vfinfo[vf].vf_mac, mac, ETH_ALEN);
+
+       return 0;
+}
+
+static int dummy_set_vf_vlan(struct net_device *dev, int vf,
+                            u16 vlan, u8 qos, __be16 vlan_proto)
+{
+       struct dummy_priv *priv = netdev_priv(dev);
+
+       if ((vf >= num_vfs) || (vlan > 4095) || (qos > 7))
+               return -EINVAL;
+
+       priv->vfinfo[vf].pf_vlan = vlan;
+       priv->vfinfo[vf].pf_qos = qos;
+       priv->vfinfo[vf].vlan_proto = vlan_proto;
+
+       return 0;
+}
+
+static int dummy_set_vf_rate(struct net_device *dev, int vf, int min, int max)
+{
+       struct dummy_priv *priv = netdev_priv(dev);
+
+       if (vf >= num_vfs)
+               return -EINVAL;
+
+       priv->vfinfo[vf].min_tx_rate = min;
+       priv->vfinfo[vf].max_tx_rate = max;
+
+       return 0;
+}
+
+static int dummy_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
+{
+       struct dummy_priv *priv = netdev_priv(dev);
+
+       if (vf >= num_vfs)
+               return -EINVAL;
+
+       priv->vfinfo[vf].spoofchk_enabled = val;
+
+       return 0;
+}
+
+static int dummy_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
+{
+       struct dummy_priv *priv = netdev_priv(dev);
+
+       if (vf >= num_vfs)
+               return -EINVAL;
+
+       priv->vfinfo[vf].rss_query_enabled = val;
+
+       return 0;
+}
+
+static int dummy_set_vf_trust(struct net_device *dev, int vf, bool val)
+{
+       struct dummy_priv *priv = netdev_priv(dev);
+
+       if (vf >= num_vfs)
+               return -EINVAL;
+
+       priv->vfinfo[vf].trusted = val;
+
+       return 0;
+}
+
+static int dummy_get_vf_config(struct net_device *dev,
+                              int vf, struct ifla_vf_info *ivi)
+{
+       struct dummy_priv *priv = netdev_priv(dev);
+
+       if (vf >= num_vfs)
+               return -EINVAL;
+
+       ivi->vf = vf;
+       memcpy(&ivi->mac, priv->vfinfo[vf].vf_mac, ETH_ALEN);
+       ivi->vlan = priv->vfinfo[vf].pf_vlan;
+       ivi->qos = priv->vfinfo[vf].pf_qos;
+       ivi->spoofchk = priv->vfinfo[vf].spoofchk_enabled;
+       ivi->linkstate = priv->vfinfo[vf].link_state;
+       ivi->min_tx_rate = priv->vfinfo[vf].min_tx_rate;
+       ivi->max_tx_rate = priv->vfinfo[vf].max_tx_rate;
+       ivi->rss_query_en = priv->vfinfo[vf].rss_query_enabled;
+       ivi->trusted = priv->vfinfo[vf].trusted;
+       ivi->vlan_proto = priv->vfinfo[vf].vlan_proto;
+
+       return 0;
+}
+
+static int dummy_set_vf_link_state(struct net_device *dev, int vf, int state)
+{
+       struct dummy_priv *priv = netdev_priv(dev);
+
+       if (vf >= num_vfs)
+               return -EINVAL;
+
+       priv->vfinfo[vf].link_state = state;
+
+       return 0;
+}
+
 static const struct net_device_ops dummy_netdev_ops = {
        .ndo_init               = dummy_dev_init,
        .ndo_uninit             = dummy_dev_uninit,
@@ -121,6 +287,14 @@ static const struct net_device_ops dummy_netdev_ops = {
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_get_stats64        = dummy_get_stats64,
        .ndo_change_carrier     = dummy_change_carrier,
+       .ndo_set_vf_mac         = dummy_set_vf_mac,
+       .ndo_set_vf_vlan        = dummy_set_vf_vlan,
+       .ndo_set_vf_rate        = dummy_set_vf_rate,
+       .ndo_set_vf_spoofchk    = dummy_set_vf_spoofchk,
+       .ndo_set_vf_trust       = dummy_set_vf_trust,
+       .ndo_get_vf_config      = dummy_get_vf_config,
+       .ndo_set_vf_link_state  = dummy_set_vf_link_state,
+       .ndo_set_vf_rss_query_en = dummy_set_vf_rss_query_en,
 };
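
With num_vfs set, the dummy device now exercises the full ndo_{set,get}_vf_* surface from user space without real hardware: loading the module with "modprobe dummy num_vfs=4" and then running "ip link set dummy0 vf 0 mac 00:11:22:33:44:55" stores the address in vfinfo, and "ip link show dummy0" reads the state back through ndo_get_vf_config. (The command lines are illustrative; dummy0 is the default name from the driver's "dummy%d" template.)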
 
 static void dummy_get_drvinfo(struct net_device *dev,
@@ -134,6 +308,14 @@ static const struct ethtool_ops dummy_ethtool_ops = {
        .get_drvinfo            = dummy_get_drvinfo,
 };
 
+static void dummy_free_netdev(struct net_device *dev)
+{
+       struct dummy_priv *priv = netdev_priv(dev);
+
+       kfree(priv->vfinfo);
+       free_netdev(dev);
+}
+
 static void dummy_setup(struct net_device *dev)
 {
        ether_setup(dev);
@@ -141,7 +323,7 @@ static void dummy_setup(struct net_device *dev)
        /* Initialize the device structure. */
        dev->netdev_ops = &dummy_netdev_ops;
        dev->ethtool_ops = &dummy_ethtool_ops;
-       dev->destructor = free_netdev;
+       dev->destructor = dummy_free_netdev;
 
        /* Fill in device structure with ethernet-generic values. */
        dev->flags |= IFF_NOARP;
@@ -172,6 +354,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
 
 static struct rtnl_link_ops dummy_link_ops __read_mostly = {
        .kind           = DRV_NAME,
+       .priv_size      = sizeof(struct dummy_priv),
        .setup          = dummy_setup,
        .validate       = dummy_validate,
 };
@@ -180,12 +363,16 @@ static struct rtnl_link_ops dummy_link_ops __read_mostly = {
 module_param(numdummies, int, 0);
 MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices");
 
+module_param(num_vfs, int, 0);
+MODULE_PARM_DESC(num_vfs, "Number of dummy VFs per dummy device");
+
 static int __init dummy_init_one(void)
 {
        struct net_device *dev_dummy;
        int err;
 
-       dev_dummy = alloc_netdev(0, "dummy%d", NET_NAME_UNKNOWN, dummy_setup);
+       dev_dummy = alloc_netdev(sizeof(struct dummy_priv),
+                                "dummy%d", NET_NAME_UNKNOWN, dummy_setup);
        if (!dev_dummy)
                return -ENOMEM;
 
@@ -204,6 +391,21 @@ static int __init dummy_init_module(void)
 {
        int i, err = 0;
 
+       if (num_vfs) {
+               err = bus_register(&dummy_bus);
+               if (err < 0) {
+                       pr_err("registering dummy bus failed\n");
+                       return err;
+               }
+
+               err = device_register(&dummy_parent);
+               if (err < 0) {
+                       pr_err("registering dummy parent device failed\n");
+                       bus_unregister(&dummy_bus);
+                       return err;
+               }
+       }
+
        rtnl_lock();
        err = __rtnl_link_register(&dummy_link_ops);
        if (err < 0)
@@ -219,12 +421,22 @@ static int __init dummy_init_module(void)
 out:
        rtnl_unlock();
 
+       if (err && num_vfs) {
+               device_unregister(&dummy_parent);
+               bus_unregister(&dummy_bus);
+       }
+
        return err;
 }
 
 static void __exit dummy_cleanup_module(void)
 {
        rtnl_link_unregister(&dummy_link_ops);
+
+       if (num_vfs) {
+               device_unregister(&dummy_parent);
+               bus_unregister(&dummy_bus);
+       }
 }
 
 module_init(dummy_init_module);
index 9fe3990319ecd771b103f62f79ffc27a5256cb80..084a6d58543a71b35e0aa184cf03a9f9e5af3044 100644 (file)
@@ -1753,7 +1753,7 @@ typhoon_poll(struct napi_struct *napi, int budget)
        }
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                iowrite32(TYPHOON_INTR_NONE,
                                tp->ioaddr + TYPHOON_REG_INTR_MASK);
                typhoon_post_pci_writes(tp->ioaddr);
@@ -2370,9 +2370,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         * 4) Get the hardware address.
         * 5) Put the card to sleep.
         */
-       if (typhoon_reset(ioaddr, WaitSleep) < 0) {
+       err = typhoon_reset(ioaddr, WaitSleep);
+       if (err < 0) {
                err_msg = "could not reset 3XP";
-               err = -EIO;
                goto error_out_dma;
        }
 
@@ -2386,24 +2386,25 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        typhoon_init_interface(tp);
        typhoon_init_rings(tp);
 
-       if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
+       err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST);
+       if (err < 0) {
                err_msg = "cannot boot 3XP sleep image";
-               err = -EIO;
                goto error_out_reset;
        }
 
        INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
-       if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
+       err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp);
+       if (err < 0) {
                err_msg = "cannot read MAC address";
-               err = -EIO;
                goto error_out_reset;
        }
 
        *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
        *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
 
-       if(!is_valid_ether_addr(dev->dev_addr)) {
+       if (!is_valid_ether_addr(dev->dev_addr)) {
                err_msg = "Could not obtain valid ethernet address, aborting";
+               err = -EIO;
                goto error_out_reset;
        }
 
@@ -2411,7 +2412,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         * later when we print out the version reported.
         */
        INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
-       if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
+       err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp);
+       if (err < 0) {
                err_msg = "Could not get Sleep Image version";
                goto error_out_reset;
        }
@@ -2428,9 +2430,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if(xp_resp[0].numDesc != 0)
                tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
 
-       if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
+       err = typhoon_sleep(tp, PCI_D3hot, 0);
+       if (err < 0) {
                err_msg = "cannot put adapter to sleep";
-               err = -EIO;
                goto error_out_reset;
        }
 
@@ -2453,7 +2455,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->features = dev->hw_features |
                NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
 
-       if(register_netdev(dev) < 0) {
+       err = register_netdev(dev);
+       if (err < 0) {
                err_msg = "unable to register netdev";
                goto error_out_reset;
        }
index e4c28fed61d50866c5a7955d8de6ee458eae7834..8c08f9deef9268e4cacc939a2534110a42be6c3b 100644 (file)
@@ -29,6 +29,7 @@ source "drivers/net/ethernet/amazon/Kconfig"
 source "drivers/net/ethernet/amd/Kconfig"
 source "drivers/net/ethernet/apm/Kconfig"
 source "drivers/net/ethernet/apple/Kconfig"
+source "drivers/net/ethernet/aquantia/Kconfig"
 source "drivers/net/ethernet/arc/Kconfig"
 source "drivers/net/ethernet/atheros/Kconfig"
 source "drivers/net/ethernet/aurora/Kconfig"
@@ -170,7 +171,6 @@ source "drivers/net/ethernet/sgi/Kconfig"
 source "drivers/net/ethernet/smsc/Kconfig"
 source "drivers/net/ethernet/stmicro/Kconfig"
 source "drivers/net/ethernet/sun/Kconfig"
-source "drivers/net/ethernet/synopsys/Kconfig"
 source "drivers/net/ethernet/tehuti/Kconfig"
 source "drivers/net/ethernet/ti/Kconfig"
 source "drivers/net/ethernet/tile/Kconfig"
index 24330f4885a92e278d3d339d0828300583e30ac7..26dce5bf2c18c966c5b378cf79b385aa726f9b4f 100644 (file)
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_VENDOR_AMAZON) += amazon/
 obj-$(CONFIG_NET_VENDOR_AMD) += amd/
 obj-$(CONFIG_NET_XGENE) += apm/
 obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
+obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
 obj-$(CONFIG_NET_VENDOR_ARC) += arc/
 obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
 obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
@@ -81,7 +82,6 @@ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
 obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
 obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
 obj-$(CONFIG_NET_VENDOR_SUN) += sun/
-obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
 obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
 obj-$(CONFIG_NET_VENDOR_TI) += ti/
 obj-$(CONFIG_TILE_NET) += tile/
index 88164529b52a2a44207b71b49c86297067b71718..a8173130373056b3a245b241f834312ded0f8fed 100644 (file)
@@ -1274,7 +1274,7 @@ static int bfin_mac_poll(struct napi_struct *napi, int budget)
        }
 
        if (i < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, i);
                if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags))
                        enable_irq(IRQ_MAC_RX);
        }
index 93def92f999703d234b44508725f45863ef39a18..9f7422ada704e9484b79d336ece46a0547667abb 100644 (file)
@@ -1008,7 +1008,7 @@ restart_txrx_poll:
                        spin_unlock_irqrestore(&greth->devlock, flags);
                        goto restart_txrx_poll;
                } else {
-                       __napi_complete(napi);
+                       napi_complete_done(napi, work_done);
                        spin_unlock_irqrestore(&greth->devlock, flags);
                }
        }
index 831bab352f8e6a99b0ee3ce519e8b7f7cbd466d0..87a11b9f0ea5beab9a97493e23ae901f6d390346 100644 (file)
@@ -3575,7 +3575,7 @@ static int et131x_poll(struct napi_struct *napi, int budget)
        et131x_handle_send_pkts(adapter);
 
        if (work_done < budget) {
-               napi_complete(&adapter->napi);
+               napi_complete_done(&adapter->napi, work_done);
                et131x_enable_interrupts(adapter);
        }
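
Several drivers in this merge are converted from napi_complete() to napi_complete_done(), which reports how much work the poll actually did so the core can account for it in busy polling, and whose boolean return says whether NAPI really left polled mode (the ena hunks further down rely on that return value). The canonical poll tail looks like the sketch below; the clean and irq-enable helpers are hypothetical:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = example_clean_rx(napi, budget);	/* assumed helper */

		if (work_done < budget) {
			/* Re-enable device interrupts only if NAPI really
			 * completed; busy polling may still own the context.
			 */
			if (napi_complete_done(napi, work_done))
				example_enable_irq(napi);	/* assumed helper */
		}

		return work_done;
	}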
 
index b21d8aa8d6533352e1486440a83a55af325ab452..15a8096c60dfe51b9e5cf4929689be68b73e9745 100644 (file)
@@ -1471,8 +1471,8 @@ drop_skb:
        return NETDEV_TX_OK;
 }
 
-static struct rtnl_link_stats64 *slic_get_stats(struct net_device *dev,
-                                               struct rtnl_link_stats64 *lst)
+static void slic_get_stats(struct net_device *dev,
+                          struct rtnl_link_stats64 *lst)
 {
        struct slic_device *sdev = netdev_priv(dev);
        struct slic_stats *stats = &sdev->stats;
@@ -1489,8 +1489,6 @@ static struct rtnl_link_stats64 *slic_get_stats(struct net_device *dev,
        SLIC_GET_STATS_COUNTER(lst->rx_crc_errors, stats, rx_crc);
        SLIC_GET_STATS_COUNTER(lst->rx_fifo_errors, stats, rx_oflow802);
        SLIC_GET_STATS_COUNTER(lst->tx_carrier_errors, stats, tx_carrier);
-
-       return lst;
 }
 
 static int slic_get_sset_count(struct net_device *dev, int sset)
index 25864bff25eee4b1038fd7520499ac47ca3f45a0..527908c7e3845d57c2418c8ab17f3c445a094856 100644 (file)
@@ -513,7 +513,7 @@ static int tse_poll(struct napi_struct *napi, int budget)
 
        if (rxcomplete < budget) {
 
-               napi_complete(napi);
+               napi_complete_done(napi, rxcomplete);
 
                netdev_dbg(priv->dev,
                           "NAPI Complete, did %d packets with budget %d\n",
index a46e749bf2268776dc9db9405eb3cec5b83f06a0..5b6509d59716241505095f86d5b391f818ac9584 100644 (file)
@@ -631,22 +631,22 @@ enum ena_admin_flow_hash_proto {
 /* RSS flow hash fields */
 enum ena_admin_flow_hash_fields {
        /* Ethernet Dest Addr */
-       ENA_ADMIN_RSS_L2_DA     = 0,
+       ENA_ADMIN_RSS_L2_DA     = BIT(0),
 
        /* Ethernet Src Addr */
-       ENA_ADMIN_RSS_L2_SA     = 1,
+       ENA_ADMIN_RSS_L2_SA     = BIT(1),
 
        /* ipv4/6 Dest Addr */
-       ENA_ADMIN_RSS_L3_DA     = 2,
+       ENA_ADMIN_RSS_L3_DA     = BIT(2),
 
        /* ipv4/6 Src Addr */
-       ENA_ADMIN_RSS_L3_SA     = 5,
+       ENA_ADMIN_RSS_L3_SA     = BIT(3),
 
        /* tcp/udp Dest Port */
-       ENA_ADMIN_RSS_L4_DP     = 6,
+       ENA_ADMIN_RSS_L4_DP     = BIT(4),
 
        /* tcp/udp Src Port */
-       ENA_ADMIN_RSS_L4_SP     = 7,
+       ENA_ADMIN_RSS_L4_SP     = BIT(5),
 };
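
The switch from plain indices to BIT() values matters because these enum members are OR-ed together into hash-field masks, and with the old index encoding the combinations collided. For instance:

	/* Old values: ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA == 0 | 1 == 1,
	 * indistinguishable from ENA_ADMIN_RSS_L2_SA alone.  As BIT() masks
	 * the fields compose unambiguously:
	 */
	u16 fields = ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP; /* BIT(4) | BIT(5) == 0x30 */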
 
 struct ena_admin_proto_input {
@@ -873,6 +873,14 @@ struct ena_admin_aenq_link_change_desc {
        u32 flags;
 };
 
+struct ena_admin_aenq_keep_alive_desc {
+       struct ena_admin_aenq_common_desc aenq_common_desc;
+
+       u32 rx_drops_low;
+
+       u32 rx_drops_high;
+};
+
 struct ena_admin_ena_mmio_req_read_less_resp {
        u16 req_id;
 
index 3066d9c999841033d1f02b85480fb19f0cb112f6..08d11cede9c972596ee683c5d255fe143b76b9b8 100644 (file)
@@ -36,9 +36,9 @@
 /*****************************************************************************/
 
 /* Timeout in micro-sec */
-#define ADMIN_CMD_TIMEOUT_US (1000000)
+#define ADMIN_CMD_TIMEOUT_US (3000000)
 
-#define ENA_ASYNC_QUEUE_DEPTH 4
+#define ENA_ASYNC_QUEUE_DEPTH 16
 #define ENA_ADMIN_QUEUE_DEPTH 32
 
 #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
@@ -784,7 +784,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
        int ret;
 
        if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
-               pr_info("Feature %d isn't supported\n", feature_id);
+               pr_debug("Feature %d isn't supported\n", feature_id);
                return -EPERM;
        }
 
@@ -1126,7 +1126,13 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
        comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
                                            comp, comp_size);
        if (unlikely(IS_ERR(comp_ctx))) {
-               pr_err("Failed to submit command [%ld]\n", PTR_ERR(comp_ctx));
+               if (comp_ctx == ERR_PTR(-ENODEV))
+                       pr_debug("Failed to submit command [%ld]\n",
+                                PTR_ERR(comp_ctx));
+               else
+                       pr_err("Failed to submit command [%ld]\n",
+                              PTR_ERR(comp_ctx));
+
                return PTR_ERR(comp_ctx);
        }
 
@@ -1895,7 +1901,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
        int ret;
 
        if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
-               pr_info("Feature %d isn't supported\n", ENA_ADMIN_MTU);
+               pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
                return -EPERM;
        }
 
@@ -1948,8 +1954,8 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
 
        if (!ena_com_check_supported_feature_id(ena_dev,
                                                ENA_ADMIN_RSS_HASH_FUNCTION)) {
-               pr_info("Feature %d isn't supported\n",
-                       ENA_ADMIN_RSS_HASH_FUNCTION);
+               pr_debug("Feature %d isn't supported\n",
+                        ENA_ADMIN_RSS_HASH_FUNCTION);
                return -EPERM;
        }
 
@@ -2112,7 +2118,8 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
 
        if (!ena_com_check_supported_feature_id(ena_dev,
                                                ENA_ADMIN_RSS_HASH_INPUT)) {
-               pr_info("Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_INPUT);
+               pr_debug("Feature %d isn't supported\n",
+                        ENA_ADMIN_RSS_HASH_INPUT);
                return -EPERM;
        }
 
@@ -2184,7 +2191,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
        hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
 
-       hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
+       hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
                ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
 
        for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
@@ -2270,8 +2277,8 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
 
        if (!ena_com_check_supported_feature_id(
                    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
-               pr_info("Feature %d isn't supported\n",
-                       ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+               pr_debug("Feature %d isn't supported\n",
+                        ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
                return -EPERM;
        }
 
@@ -2444,11 +2451,9 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
 
        int ret;
 
-       if (!ena_com_check_supported_feature_id(ena_dev,
-                                               ENA_ADMIN_HOST_ATTR_CONFIG)) {
-               pr_warn("Set host attribute isn't supported\n");
-               return -EPERM;
-       }
+       /* Host attribute config is called before ena_com_get_dev_attr_feat,
+        * so ena_com can't check whether the feature is supported.
+        */
 
        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;
@@ -2542,8 +2547,8 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
 
        if (rc) {
                if (rc == -EPERM) {
-                       pr_info("Feature %d isn't supported\n",
-                               ENA_ADMIN_INTERRUPT_MODERATION);
+                       pr_debug("Feature %d isn't supported\n",
+                                ENA_ADMIN_INTERRUPT_MODERATION);
                        rc = 0;
                } else {
                        pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
index 509d7b8e15ab9b96f067629455535052f68522d4..c9b33ee5f258d85ff94685275b9bcf0c76c1002e 100644 (file)
@@ -33,6 +33,7 @@
 #ifndef ENA_COM
 #define ENA_COM
 
+#include <linux/compiler.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/gfp.h>
index 539c536464a5c0212b88239ad8702882b7990f2c..f999305e13630f15b35c4c0fc34571b69b7594d4 100644 (file)
@@ -45,7 +45,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
        cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
                        + (head_masked * io_cq->cdesc_entry_size_in_bytes));
 
-       desc_phase = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+       desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
                        ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
 
        if (desc_phase != expected_phase)
@@ -141,7 +141,7 @@ static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
 
                ena_com_cq_inc_head(io_cq);
                count++;
-               last = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+               last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
                        ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
        } while (!last);
 
@@ -489,13 +489,13 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
         * expected, it means that the device has not yet updated
         * this completion.
         */
-       cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+       cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
        if (cdesc_phase != expected_phase)
                return -EAGAIN;
 
        ena_com_cq_inc_head(io_cq);
 
-       *req_id = cdesc->req_id;
+       *req_id = READ_ONCE(cdesc->req_id);
 
        return 0;
 }
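
The READ_ONCE() annotations matter because the completion descriptors are written by the device via DMA: without them the compiler may merge, tear, or re-issue the loads, so the phase check and a later use of the same field could observe different values. A defensive variant (a sketch, not the driver's exact structure) snapshots the status once and derives every bit from that snapshot:

	u8 status = READ_ONCE(cdesc->status);
	u8 phase = (status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
	u8 last = (status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
		  ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;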
index cc8b13ebfa75a8b7c0757ce861589c7b7b075149..d8c920be5e916d2e2d2a1dcf9f35105a24a06d18 100644 (file)
@@ -80,14 +80,18 @@ static void ena_tx_timeout(struct net_device *dev)
 {
        struct ena_adapter *adapter = netdev_priv(dev);
 
+       /* Change the state of the device to trigger a reset.
+        * Bail out if a reset has already been triggered.
+        */
+
+       if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
+               return;
+
        u64_stats_update_begin(&adapter->syncp);
        adapter->dev_stats.tx_timeout++;
        u64_stats_update_end(&adapter->syncp);
 
        netif_err(adapter, tx_err, dev, "Transmit time out\n");
-
-       /* Change the state of the device to trigger reset */
-       set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 }
 
 static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
@@ -559,6 +563,7 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
  */
 static void ena_free_tx_bufs(struct ena_ring *tx_ring)
 {
+       bool print_once = true;
        u32 i;
 
        for (i = 0; i < tx_ring->ring_size; i++) {
@@ -570,9 +575,16 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
                if (!tx_info->skb)
                        continue;
 
-               netdev_notice(tx_ring->netdev,
-                             "free uncompleted tx skb qid %d idx 0x%x\n",
-                             tx_ring->qid, i);
+               if (print_once) {
+                       netdev_notice(tx_ring->netdev,
+                                     "free uncompleted tx skb qid %d idx 0x%x\n",
+                                     tx_ring->qid, i);
+                       print_once = false;
+               } else {
+                       netdev_dbg(tx_ring->netdev,
+                                  "free uncompleted tx skb qid %d idx 0x%x\n",
+                                  tx_ring->qid, i);
+               }
 
                ena_buf = tx_info->bufs;
                dma_unmap_single(tx_ring->dev,
@@ -1109,7 +1121,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 
        tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
 
-       if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
+       if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
+           test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
                napi_complete_done(napi, 0);
                return 0;
        }
@@ -1117,26 +1130,40 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
        tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
        rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
 
-       if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
-               napi_complete_done(napi, rx_work_done);
+       /* If the device is about to reset or is down, avoid unmasking
+        * the interrupt; return 0 so NAPI won't reschedule.
+        */
+       if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
+                    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
+               napi_complete_done(napi, 0);
+               ret = 0;
 
+       } else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
                napi_comp_call = 1;
-               /* Tx and Rx share the same interrupt vector */
-               if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
-                       ena_adjust_intr_moderation(rx_ring, tx_ring);
 
-               /* Update intr register: rx intr delay, tx intr delay and
-                * interrupt unmask
+               /* Update NUMA placement and unmask the interrupt only when
+                * scheduled from interrupt context (vs. from sk_busy_loop).
                 */
-               ena_com_update_intr_reg(&intr_reg,
-                                       rx_ring->smoothed_interval,
-                                       tx_ring->smoothed_interval,
-                                       true);
+               if (napi_complete_done(napi, rx_work_done)) {
+                       /* Tx and Rx share the same interrupt vector */
+                       if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
+                               ena_adjust_intr_moderation(rx_ring, tx_ring);
+
+                       /* Update intr register: rx intr delay,
+                        * tx intr delay and interrupt unmask
+                        */
+                       ena_com_update_intr_reg(&intr_reg,
+                                               rx_ring->smoothed_interval,
+                                               tx_ring->smoothed_interval,
+                                               true);
+
+                       /* The MSI-X vector is shared; both the Tx and Rx
+                        * CQs hold a pointer to it, so use either one to
+                        * reach the interrupt register.
+                        */
+                       ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+               }
 
-               /* It is a shared MSI-X. Tx and Rx CQ have pointer to it.
-                * So we use one of them to reach the intr reg
-                */
-               ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
 
                ena_update_ring_numa_node(tx_ring, rx_ring);
 
@@ -1698,12 +1725,22 @@ static void ena_down(struct ena_adapter *adapter)
        adapter->dev_stats.interface_down++;
        u64_stats_update_end(&adapter->syncp);
 
-       /* After this point the napi handler won't enable the tx queue */
-       ena_napi_disable_all(adapter);
        netif_carrier_off(adapter->netdev);
        netif_tx_disable(adapter->netdev);
 
+       /* After this point the napi handler won't enable the tx queue */
+       ena_napi_disable_all(adapter);
+
        /* After destroying the queues there won't be any new interrupts */
+
+       if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
+               int rc;
+
+               rc = ena_com_dev_reset(adapter->ena_dev);
+               if (rc)
+                       dev_err(&adapter->pdev->dev, "Device reset failed\n");
+       }
+
        ena_destroy_all_io_queues(adapter);
 
        ena_disable_io_intr_sync(adapter);
@@ -2065,6 +2102,14 @@ static void ena_netpoll(struct net_device *netdev)
        struct ena_adapter *adapter = netdev_priv(netdev);
        int i;
 
+       /* Don't schedule NAPI if the driver is in the middle of a reset
+        * or the netdev is down.
+        */
+
+       if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
+           test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
+               return;
+
        for (i = 0; i < adapter->num_queues; i++)
                napi_schedule(&adapter->ena_napi[i].napi);
 }
@@ -2165,32 +2210,50 @@ err:
        ena_com_delete_debug_area(adapter->ena_dev);
 }
 
-static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev,
-                                                struct rtnl_link_stats64 *stats)
+static void ena_get_stats64(struct net_device *netdev,
+                           struct rtnl_link_stats64 *stats)
 {
        struct ena_adapter *adapter = netdev_priv(netdev);
-       struct ena_admin_basic_stats ena_stats;
-       int rc;
+       struct ena_ring *rx_ring, *tx_ring;
+       unsigned int start;
+       u64 rx_drops;
+       int i;
 
        if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
-               return NULL;
+               return;
 
-       rc = ena_com_get_dev_basic_stats(adapter->ena_dev, &ena_stats);
-       if (rc)
-               return NULL;
+       for (i = 0; i < adapter->num_queues; i++) {
+               u64 bytes, packets;
+
+               tx_ring = &adapter->tx_ring[i];
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
+                       packets = tx_ring->tx_stats.cnt;
+                       bytes = tx_ring->tx_stats.bytes;
+               } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
+
+               stats->tx_packets += packets;
+               stats->tx_bytes += bytes;
+
+               rx_ring = &adapter->rx_ring[i];
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
+                       packets = rx_ring->rx_stats.cnt;
+                       bytes = rx_ring->rx_stats.bytes;
+               } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
 
-       stats->tx_bytes = ((u64)ena_stats.tx_bytes_high << 32) |
-               ena_stats.tx_bytes_low;
-       stats->rx_bytes = ((u64)ena_stats.rx_bytes_high << 32) |
-               ena_stats.rx_bytes_low;
+               stats->rx_packets += packets;
+               stats->rx_bytes += bytes;
+       }
 
-       stats->rx_packets = ((u64)ena_stats.rx_pkts_high << 32) |
-               ena_stats.rx_pkts_low;
-       stats->tx_packets = ((u64)ena_stats.tx_pkts_high << 32) |
-               ena_stats.tx_pkts_low;
+       do {
+               start = u64_stats_fetch_begin_irq(&adapter->syncp);
+               rx_drops = adapter->dev_stats.rx_drops;
+       } while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
 
-       stats->rx_dropped = ((u64)ena_stats.rx_drops_high << 32) |
-               ena_stats.rx_drops_low;
+       stats->rx_dropped = rx_drops;
 
        stats->multicast = 0;
        stats->collisions = 0;
@@ -2204,8 +2267,6 @@ static struct rtnl_link_stats64 *ena_get_stats64(struct net_device *netdev,
 
        stats->rx_errors = 0;
        stats->tx_errors = 0;
-
-       return stats;
 }
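
The reader loops above only work because every writer brackets its updates with the matching u64_stats helpers on the same syncp; that is what lets the retry loop detect a torn 64-bit update on 32-bit hosts. The producer-side counterpart, using the ring fields read above:

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->tx_stats.cnt++;
	tx_ring->tx_stats.bytes += bytes;
	u64_stats_update_end(&tx_ring->syncp);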
 
 static const struct net_device_ops ena_netdev_ops = {
@@ -2353,6 +2414,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
         */
        ena_com_set_admin_polling_mode(ena_dev, true);
 
+       ena_config_host_info(ena_dev);
+
        /* Get Device Attributes*/
        rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
        if (rc) {
@@ -2377,11 +2440,10 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 
        *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
 
-       ena_config_host_info(ena_dev);
-
        return 0;
 
 err_admin_init:
+       ena_com_delete_host_info(ena_dev);
        ena_com_admin_destroy(ena_dev);
 err_mmio_read_less:
        ena_com_mmio_reg_read_request_destroy(ena_dev);
@@ -2433,6 +2495,14 @@ static void ena_fw_reset_device(struct work_struct *work)
        bool dev_up, wd_state;
        int rc;
 
+       if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+               dev_err(&pdev->dev,
+                       "device reset schedule while reset bit is off\n");
+               return;
+       }
+
+       netif_carrier_off(netdev);
+
        del_timer_sync(&adapter->timer_service);
 
        rtnl_lock();
@@ -2446,12 +2516,6 @@ static void ena_fw_reset_device(struct work_struct *work)
         */
        ena_close(netdev);
 
-       rc = ena_com_dev_reset(ena_dev);
-       if (rc) {
-               dev_err(&pdev->dev, "Device reset failed\n");
-               goto err;
-       }
-
        ena_free_mgmnt_irq(adapter);
 
        ena_disable_msix(adapter);
@@ -2464,6 +2528,8 @@ static void ena_fw_reset_device(struct work_struct *work)
 
        ena_com_mmio_reg_read_request_destroy(ena_dev);
 
+       clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+
        /* Finish with the destroy part. Start the init part */
 
        rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
@@ -2509,6 +2575,8 @@ err_device_destroy:
 err:
        rtnl_unlock();
 
+       clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
+
        dev_err(&pdev->dev,
                "Reset attempt failed. Can not reset the device\n");
 }
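
The reset flow above keeps ENA_FLAG_TRIGGER_RESET set across the whole teardown and clears it only after the destroy phase, so the watchdog and data-path checks added earlier cannot race a reset already in progress. A condensed sketch of the flag protocol (work-item and helper names are illustrative):

	/* Requester side (watchdog, TX timeout, ...): arm the reset once. */
	if (!test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
		schedule_work(&adapter->reset_task);

	/* Worker side: ignore spurious runs, clear only after teardown. */
	static void reset_worker(struct work_struct *work)
	{
		struct ena_adapter *adapter =
			container_of(work, struct ena_adapter, reset_task);

		if (!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
			return;
		/* ... tear down queues, IRQs and the admin queue ... */
		clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
		/* ... re-initialize the device ... */
	}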
@@ -2527,6 +2595,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
        if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
                return;
 
+       if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
+               return;
+
        budget = ENA_MONITORED_TX_QUEUES;
 
        for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
@@ -2626,7 +2697,7 @@ static void ena_timer_service(unsigned long data)
        if (host_info)
                ena_update_host_info(host_info, adapter->netdev);
 
-       if (unlikely(test_and_clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+       if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
                netif_err(adapter, drv, adapter->netdev,
                          "Trigger reset is on\n");
                ena_dump_stats_to_dmesg(adapter);
@@ -2660,7 +2731,7 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
                io_sq_num = get_feat_ctx->max_queues.max_sq_num;
        }
 
-       io_queue_num = min_t(int, num_possible_cpus(), ENA_MAX_NUM_IO_QUEUES);
+       io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
        io_queue_num = min_t(int, io_queue_num, io_sq_num);
        io_queue_num = min_t(int, io_queue_num,
                             get_feat_ctx->max_queues.max_cq_num);
@@ -2722,7 +2793,6 @@ static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
        netdev->features =
                dev_features |
                NETIF_F_SG |
-               NETIF_F_NTUPLE |
                NETIF_F_RXHASH |
                NETIF_F_HIGHDMA;
 
@@ -3118,7 +3188,9 @@ static void ena_remove(struct pci_dev *pdev)
 
        cancel_work_sync(&adapter->resume_io_task);
 
-       ena_com_dev_reset(ena_dev);
+       /* Reset the device only if the device is running. */
+       if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+               ena_com_dev_reset(ena_dev);
 
        ena_free_mgmnt_irq(adapter);
 
index 69d7e9ed5bc8583ccc675f60b7a891191568a40e..ed62d8e231a155a693a1278862a967f3594635b4 100644 (file)
@@ -44,7 +44,7 @@
 #include "ena_eth_com.h"
 
 #define DRV_MODULE_VER_MAJOR   1
-#define DRV_MODULE_VER_MINOR   0
+#define DRV_MODULE_VER_MINOR   1
 #define DRV_MODULE_VER_SUBMINOR 2
 
 #define DRV_MODULE_NAME                "ena"
 /* Number of queues to check for missing queues per timer service */
 #define ENA_MONITORED_TX_QUEUES        4
 /* Max timeout packets before device reset */
-#define MAX_NUM_OF_TIMEOUTED_PACKETS 32
+#define MAX_NUM_OF_TIMEOUTED_PACKETS 128
 
 #define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
 
 #define ENA_IO_IRQ_IDX(q)              (ENA_IO_IRQ_FIRST_IDX + (q))
 
 /* ENA device should send keep alive msg every 1 sec.
- * We wait for 3 sec just to be on the safe side.
+ * We wait for 6 sec just to be on the safe side.
  */
-#define ENA_DEVICE_KALIVE_TIMEOUT      (3 * HZ)
+#define ENA_DEVICE_KALIVE_TIMEOUT      (6 * HZ)
 
 #define ENA_MMIO_DISABLE_REG_READ      BIT(0)
 
@@ -241,6 +241,7 @@ struct ena_stats_dev {
        u64 interface_up;
        u64 interface_down;
        u64 admin_q_pause;
+       u64 rx_drops;
 };
 
 enum ena_flags_t {
index 9595f1bc535b73306720e4d0005fe58d20256767..7b5df562f30f5625cdc2e1e5b6d59bff6919d564 100644 (file)
@@ -695,125 +695,105 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
        void __iomem *mmio = lp->mmio;
        struct sk_buff *skb,*new_skb;
        int min_pkt_len, status;
-       unsigned int intr0;
        int num_rx_pkt = 0;
        short pkt_len;
 #if AMD8111E_VLAN_TAG_USED
        short vtag;
 #endif
-       int rx_pkt_limit = budget;
-       unsigned long flags;
 
-       if (rx_pkt_limit <= 0)
-               goto rx_not_empty;
+       while (num_rx_pkt < budget) {
+               status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
+               if (status & OWN_BIT)
+                       break;
 
-       do{
-               /* process receive packets until we use the quota.
-                * If we own the next entry, it's a new packet. Send it up.
+               /* There is a tricky error noted by John Murphy,
+                * <murf@perftech.com> to Russ Nelson: Even with
+                * full-sized buffers it's possible for a
+                * jabber packet to use two buffers, with only
+                * the last correctly noting the error.
                 */
-               while(1) {
-                       status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
-                       if (status & OWN_BIT)
-                               break;
-
-                       /* There is a tricky error noted by John Murphy,
-                        * <murf@perftech.com> to Russ Nelson: Even with
-                        * full-sized * buffers it's possible for a
-                        * jabber packet to use two buffers, with only
-                        * the last correctly noting the error.
-                        */
-                       if(status & ERR_BIT) {
-                               /* resetting flags */
-                               lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-                               goto err_next_pkt;
-                       }
-                       /* check for STP and ENP */
-                       if(!((status & STP_BIT) && (status & ENP_BIT))){
-                               /* resetting flags */
-                               lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-                               goto err_next_pkt;
-                       }
-                       pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
+               if (status & ERR_BIT) {
+                       /* resetting flags */
+                       lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+                       goto err_next_pkt;
+               }
+               /* check for STP and ENP */
+               if (!((status & STP_BIT) && (status & ENP_BIT))){
+                       /* resetting flags */
+                       lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+                       goto err_next_pkt;
+               }
+               pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
 
 #if AMD8111E_VLAN_TAG_USED
-                       vtag = status & TT_MASK;
-                       /*MAC will strip vlan tag*/
-                       if (vtag != 0)
-                               min_pkt_len =MIN_PKT_LEN - 4;
+               vtag = status & TT_MASK;
+               /* MAC will strip vlan tag */
+               if (vtag != 0)
+                       min_pkt_len = MIN_PKT_LEN - 4;
-                       else
+               else
 #endif
-                               min_pkt_len =MIN_PKT_LEN;
+                       min_pkt_len = MIN_PKT_LEN;
 
-                       if (pkt_len < min_pkt_len) {
-                               lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-                               lp->drv_rx_errors++;
-                               goto err_next_pkt;
-                       }
-                       if(--rx_pkt_limit < 0)
-                               goto rx_not_empty;
-                       new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
-                       if (!new_skb) {
-                               /* if allocation fail,
-                                * ignore that pkt and go to next one
-                                */
-                               lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
-                               lp->drv_rx_errors++;
-                               goto err_next_pkt;
-                       }
+               if (pkt_len < min_pkt_len) {
+                       lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+                       lp->drv_rx_errors++;
+                       goto err_next_pkt;
+               }
+               new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
+               if (!new_skb) {
+                       /* if allocation fails,
+                        * ignore this pkt and go to the next one
+                        */
+                       lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
+                       lp->drv_rx_errors++;
+                       goto err_next_pkt;
+               }
 
-                       skb_reserve(new_skb, 2);
-                       skb = lp->rx_skbuff[rx_index];
-                       pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
-                                        lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
-                       skb_put(skb, pkt_len);
-                       lp->rx_skbuff[rx_index] = new_skb;
-                       lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
-                                                                  new_skb->data,
-                                                                  lp->rx_buff_len-2,
-                                                                  PCI_DMA_FROMDEVICE);
+               skb_reserve(new_skb, 2);
+               skb = lp->rx_skbuff[rx_index];
+               pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
+                                lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
+               skb_put(skb, pkt_len);
+               lp->rx_skbuff[rx_index] = new_skb;
+               lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
+                                                          new_skb->data,
+                                                          lp->rx_buff_len-2,
+                                                          PCI_DMA_FROMDEVICE);
 
-                       skb->protocol = eth_type_trans(skb, dev);
+               skb->protocol = eth_type_trans(skb, dev);
 
 #if AMD8111E_VLAN_TAG_USED
-                       if (vtag == TT_VLAN_TAGGED){
-                               u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
-                               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-                       }
-#endif
-                       netif_receive_skb(skb);
-                       /*COAL update rx coalescing parameters*/
-                       lp->coal_conf.rx_packets++;
-                       lp->coal_conf.rx_bytes += pkt_len;
-                       num_rx_pkt++;
-
-               err_next_pkt:
-                       lp->rx_ring[rx_index].buff_phy_addr
-                               = cpu_to_le32(lp->rx_dma_addr[rx_index]);
-                       lp->rx_ring[rx_index].buff_count =
-                               cpu_to_le16(lp->rx_buff_len-2);
-                       wmb();
-                       lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
-                       rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
+               if (vtag == TT_VLAN_TAGGED) {
+                       u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                }
-               /* Check the interrupt status register for more packets in the
-                * mean time. Process them since we have not used up our quota.
-                */
-               intr0 = readl(mmio + INT0);
-               /*Ack receive packets */
-               writel(intr0 & RINT0,mmio + INT0);
+#endif
+               napi_gro_receive(napi, skb);
+               /* COAL update rx coalescing parameters */
+               lp->coal_conf.rx_packets++;
+               lp->coal_conf.rx_bytes += pkt_len;
+               num_rx_pkt++;
+
+err_next_pkt:
+               lp->rx_ring[rx_index].buff_phy_addr
+                       = cpu_to_le32(lp->rx_dma_addr[rx_index]);
+               lp->rx_ring[rx_index].buff_count =
+                       cpu_to_le16(lp->rx_buff_len-2);
+               wmb();
+               lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
+               rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
+       }
 
-       } while(intr0 & RINT0);
+       if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) {
+               unsigned long flags;
 
-       if (rx_pkt_limit > 0) {
                /* Receive descriptor is empty now */
                spin_lock_irqsave(&lp->lock, flags);
-               __napi_complete(napi);
                writel(VAL0|RINTEN0, mmio + INTEN0);
                writel(VAL2 | RDMD0, mmio + CMD0);
                spin_unlock_irqrestore(&lp->lock, flags);
        }
 
-rx_not_empty:
        return num_rx_pkt;
 }
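
This rewrite, like the pcnet32 change further down, moves to napi_complete_done(), which reports how much work was done and returns true only when the NAPI context actually completed, so interrupts are re-enabled exactly once. The generic shape of the idiom, with hypothetical foo_* helpers:

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_priv *fp = container_of(napi, struct foo_priv, napi);
		int work_done = foo_process_rx(fp, budget);	/* hypothetical */

		if (work_done < budget && napi_complete_done(napi, work_done))
			foo_enable_rx_irq(fp);			/* hypothetical */

		return work_done;
	}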
 
index 41e58cca8feed284391d91f7469eaf4b79e52ca1..86369d7c9a0ff5221a424d44ed20022c3ea9cb44 100644 (file)
@@ -291,7 +291,10 @@ struct pcnet32_private {
        int                     options;
        unsigned int            shared_irq:1,   /* shared irq possible */
                                dxsuflo:1,   /* disable transmit stop on uflo */
-                               mii:1;          /* mii port available */
+                               mii:1,          /* mii port available */
+                               autoneg:1,      /* autoneg enabled */
+                               port_tp:1,      /* port set to TP */
+                               fdx:1;          /* full duplex enabled */
        struct net_device       *next;
        struct mii_if_info      mii_if;
        struct timer_list       watchdog_timer;
@@ -677,6 +680,52 @@ static void pcnet32_poll_controller(struct net_device *dev)
 }
 #endif
 
+/*
+ * lp->lock must be held.
+ */
+static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
+                          int can_sleep)
+{
+       int csr5;
+       struct pcnet32_private *lp = netdev_priv(dev);
+       const struct pcnet32_access *a = lp->a;
+       ulong ioaddr = dev->base_addr;
+       int ticks;
+
+       /* really old chips have to be stopped. */
+       if (lp->chip_version < PCNET32_79C970A)
+               return 0;
+
+       /* set SUSPEND (SPND) - CSR5 bit 0 */
+       csr5 = a->read_csr(ioaddr, CSR5);
+       a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
+
+       /* poll waiting for bit to be set */
+       ticks = 0;
+       while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
+               spin_unlock_irqrestore(&lp->lock, *flags);
+               if (can_sleep)
+                       msleep(1);
+               else
+                       mdelay(1);
+               spin_lock_irqsave(&lp->lock, *flags);
+               ticks++;
+               if (ticks > 200) {
+                       netif_printk(lp, hw, KERN_DEBUG, dev,
+                                    "Error getting into suspend!\n");
+                       return 0;
+               }
+       }
+       return 1;
+}
+
+static void pcnet32_clr_suspend(struct pcnet32_private *lp, ulong ioaddr)
+{
+       int csr5 = lp->a->read_csr(ioaddr, CSR5);
+       /* clear SUSPEND (SPND) - CSR5 bit 0 */
+       lp->a->write_csr(ioaddr, CSR5, csr5 & ~CSR5_SUSPEND);
+}
+
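
pcnet32_suspend() drops and re-acquires lp->lock while polling CSR5, so callers follow a fixed suspend/program/resume sequence. A condensed sketch of that calling convention, mirroring the set_link_ksettings path below:

	unsigned long flags;
	int suspended;

	spin_lock_irqsave(&lp->lock, flags);
	suspended = pcnet32_suspend(dev, &flags, 0);
	if (!suspended)			/* too old to suspend: stop it instead */
		lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);

	/* ... reprogram BCR/CSR registers here ... */

	if (suspended)
		pcnet32_clr_suspend(lp, ioaddr);
	else if (netif_running(dev))
		pcnet32_restart(dev, CSR0_NORMAL);
	spin_unlock_irqrestore(&lp->lock, flags);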
 static int pcnet32_get_link_ksettings(struct net_device *dev,
                                      struct ethtool_link_ksettings *cmd)
 {
@@ -684,12 +733,29 @@ static int pcnet32_get_link_ksettings(struct net_device *dev,
        unsigned long flags;
        int r = -EOPNOTSUPP;
 
+       spin_lock_irqsave(&lp->lock, flags);
        if (lp->mii) {
-               spin_lock_irqsave(&lp->lock, flags);
                mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
-               spin_unlock_irqrestore(&lp->lock, flags);
+               r = 0;
+       } else if (lp->chip_version == PCNET32_79C970A) {
+               if (lp->autoneg) {
+                       cmd->base.autoneg = AUTONEG_ENABLE;
+                       if (lp->a->read_bcr(dev->base_addr, 4) == 0xc0)
+                               cmd->base.port = PORT_AUI;
+                       else
+                               cmd->base.port = PORT_TP;
+               } else {
+                       cmd->base.autoneg = AUTONEG_DISABLE;
+                       cmd->base.port = lp->port_tp ? PORT_TP : PORT_AUI;
+               }
+               cmd->base.duplex = lp->fdx ? DUPLEX_FULL : DUPLEX_HALF;
+               cmd->base.speed = SPEED_10;
+               ethtool_convert_legacy_u32_to_link_mode(
+                                               cmd->link_modes.supported,
+                                               SUPPORTED_TP | SUPPORTED_AUI);
                r = 0;
        }
+       spin_unlock_irqrestore(&lp->lock, flags);
        return r;
 }
 
@@ -697,14 +763,46 @@ static int pcnet32_set_link_ksettings(struct net_device *dev,
                                      const struct ethtool_link_ksettings *cmd)
 {
        struct pcnet32_private *lp = netdev_priv(dev);
+       ulong ioaddr = dev->base_addr;
        unsigned long flags;
        int r = -EOPNOTSUPP;
+       int suspended, bcr2, bcr9, csr15;
 
+       spin_lock_irqsave(&lp->lock, flags);
        if (lp->mii) {
-               spin_lock_irqsave(&lp->lock, flags);
                r = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
-               spin_unlock_irqrestore(&lp->lock, flags);
+       } else if (lp->chip_version == PCNET32_79C970A) {
+               suspended = pcnet32_suspend(dev, &flags, 0);
+               if (!suspended)
+                       lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
+
+               lp->autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
+               bcr2 = lp->a->read_bcr(ioaddr, 2);
+               if (cmd->base.autoneg == AUTONEG_ENABLE) {
+                       lp->a->write_bcr(ioaddr, 2, bcr2 | 0x0002);
+               } else {
+                       lp->a->write_bcr(ioaddr, 2, bcr2 & ~0x0002);
+
+                       lp->port_tp = cmd->base.port == PORT_TP;
+                       csr15 = lp->a->read_csr(ioaddr, CSR15) & ~0x0180;
+                       if (cmd->base.port == PORT_TP)
+                               csr15 |= 0x0080;
+                       lp->a->write_csr(ioaddr, CSR15, csr15);
+                       lp->init_block->mode = cpu_to_le16(csr15);
+
+                       lp->fdx = cmd->base.duplex == DUPLEX_FULL;
+                       bcr9 = lp->a->read_bcr(ioaddr, 9) & ~0x0003;
+                       if (cmd->base.duplex == DUPLEX_FULL)
+                               bcr9 |= 0x0003;
+                       lp->a->write_bcr(ioaddr, 9, bcr9);
+               }
+               if (suspended)
+                       pcnet32_clr_suspend(lp, ioaddr);
+               else if (netif_running(dev))
+                       pcnet32_restart(dev, CSR0_NORMAL);
+               r = 0;
        }
+       spin_unlock_irqrestore(&lp->lock, flags);
        return r;
 }
 
@@ -732,7 +830,14 @@ static u32 pcnet32_get_link(struct net_device *dev)
        spin_lock_irqsave(&lp->lock, flags);
        if (lp->mii) {
                r = mii_link_ok(&lp->mii_if);
-       } else if (lp->chip_version >= PCNET32_79C970A) {
+       } else if (lp->chip_version == PCNET32_79C970A) {
+               ulong ioaddr = dev->base_addr;  /* card base I/O address */
+               /* only read link if port is set to TP */
+               if (!lp->autoneg && lp->port_tp)
+                       r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
+               else /* link always up for AUI port or port auto select */
+                       r = 1;
+       } else if (lp->chip_version > PCNET32_79C970A) {
                ulong ioaddr = dev->base_addr;  /* card base I/O address */
                r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
        } else {        /* can not detect link on really old chips */
@@ -1069,45 +1174,6 @@ static int pcnet32_set_phys_id(struct net_device *dev,
        return 0;
 }
 
-/*
- * lp->lock must be held.
- */
-static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
-               int can_sleep)
-{
-       int csr5;
-       struct pcnet32_private *lp = netdev_priv(dev);
-       const struct pcnet32_access *a = lp->a;
-       ulong ioaddr = dev->base_addr;
-       int ticks;
-
-       /* really old chips have to be stopped. */
-       if (lp->chip_version < PCNET32_79C970A)
-               return 0;
-
-       /* set SUSPEND (SPND) - CSR5 bit 0 */
-       csr5 = a->read_csr(ioaddr, CSR5);
-       a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
-
-       /* poll waiting for bit to be set */
-       ticks = 0;
-       while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
-               spin_unlock_irqrestore(&lp->lock, *flags);
-               if (can_sleep)
-                       msleep(1);
-               else
-                       mdelay(1);
-               spin_lock_irqsave(&lp->lock, *flags);
-               ticks++;
-               if (ticks > 200) {
-                       netif_printk(lp, hw, KERN_DEBUG, dev,
-                                    "Error getting into suspend!\n");
-                       return 0;
-               }
-       }
-       return 1;
-}
-
 /*
  * process one receive descriptor entry
  */
@@ -1350,13 +1416,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
                pcnet32_restart(dev, CSR0_START);
                netif_wake_queue(dev);
        }
-       spin_unlock_irqrestore(&lp->lock, flags);
-
-       if (work_done < budget) {
-               spin_lock_irqsave(&lp->lock, flags);
-
-               __napi_complete(napi);
 
+       if (work_done < budget && napi_complete_done(napi, work_done)) {
                /* clear interrupt masks */
                val = lp->a->read_csr(ioaddr, CSR3);
                val &= 0x00ff;
@@ -1364,9 +1425,9 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
 
                /* Set interrupt enable. */
                lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
-
-               spin_unlock_irqrestore(&lp->lock, flags);
        }
+
+       spin_unlock_irqrestore(&lp->lock, flags);
        return work_done;
 }
 
@@ -1430,13 +1491,8 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                }
        }
 
-       if (!(csr0 & CSR0_STOP)) {      /* If not stopped */
-               int csr5;
-
-               /* clear SUSPEND (SPND) - CSR5 bit 0 */
-               csr5 = a->read_csr(ioaddr, CSR5);
-               a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
-       }
+       if (!(csr0 & CSR0_STOP))        /* If not stopped */
+               pcnet32_clr_suspend(lp, ioaddr);
 
        spin_unlock_irqrestore(&lp->lock, flags);
 }
@@ -1817,6 +1873,9 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
                lp->options = PCNET32_PORT_ASEL;
        else
                lp->options = options_mapping[options[cards_found]];
+       /* force default port to TP on 79C970A so link detection can work */
+       if (lp->chip_version == PCNET32_79C970A)
+               lp->options = PCNET32_PORT_10BT;
        lp->mii_if.dev = dev;
        lp->mii_if.mdio_read = mdio_read;
        lp->mii_if.mdio_write = mdio_write;
@@ -2068,6 +2127,10 @@ static int pcnet32_open(struct net_device *dev)
                     (u32) (lp->rx_ring_dma_addr),
                     (u32) (lp->init_dma_addr));
 
+       lp->autoneg = !!(lp->options & PCNET32_PORT_ASEL);
+       lp->port_tp = !!(lp->options & PCNET32_PORT_10BT);
+       lp->fdx = !!(lp->options & PCNET32_PORT_FD);
+
        /* set/reset autoselect bit */
        val = lp->a->read_bcr(ioaddr, 2) & ~2;
        if (lp->options & PCNET32_PORT_ASEL)
@@ -2680,10 +2743,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
        }
 
        if (suspended) {
-               int csr5;
-               /* clear SUSPEND (SPND) - CSR5 bit 0 */
-               csr5 = lp->a->read_csr(ioaddr, CSR5);
-               lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
+               pcnet32_clr_suspend(lp, ioaddr);
        } else {
                lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
                pcnet32_restart(dev, CSR0_NORMAL);
@@ -2794,6 +2854,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
 
        if (lp->mii) {
                curr_link = mii_link_ok(&lp->mii_if);
+       } else if (lp->chip_version == PCNET32_79C970A) {
+               ulong ioaddr = dev->base_addr;  /* card base I/O address */
+               /* only read link if port is set to TP */
+               if (!lp->autoneg && lp->port_tp)
+                       curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
+               else /* link always up for AUI port or port auto select */
+                       curr_link = 1;
        } else {
                ulong ioaddr = dev->base_addr;  /* card base I/O address */
                curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
index 1c87cc20407590fc84710c9ecb4f8047cf811797..3aa457c8ca21d30f768eaf57b2f94eedeaadf40d 100644 (file)
@@ -1761,8 +1761,8 @@ static void xgbe_tx_timeout(struct net_device *netdev)
        schedule_work(&pdata->restart_work);
 }
 
-static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
-                                                 struct rtnl_link_stats64 *s)
+static void xgbe_get_stats64(struct net_device *netdev,
+                            struct rtnl_link_stats64 *s)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
@@ -1788,8 +1788,6 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
        s->tx_dropped = netdev->stats.tx_dropped;
 
        DBGPR("<--%s\n", __func__);
-
-       return s;
 }
 
 static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
index 523b8eff6d7be67dab281d6fbf23d2c994b41558..d0d0d12b531fc683613455fc3ab6cb9fa3b9fea9 100644 (file)
@@ -840,7 +840,7 @@ static int xgene_enet_napi(struct napi_struct *napi, const int budget)
        processed = xgene_enet_process_ring(ring, budget);
 
        if (processed != budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, processed);
                enable_irq(ring->irq);
        }
 
@@ -1453,7 +1453,7 @@ err:
        return ret;
 }
 
-static struct rtnl_link_stats64 *xgene_enet_get_stats64(
+static void xgene_enet_get_stats64(
                        struct net_device *ndev,
                        struct rtnl_link_stats64 *storage)
 {
@@ -1462,7 +1462,6 @@ static struct rtnl_link_stats64 *xgene_enet_get_stats64(
        struct xgene_enet_desc_ring *ring;
        int i;
 
-       memset(stats, 0, sizeof(struct rtnl_link_stats64));
        for (i = 0; i < pdata->txq_cnt; i++) {
                ring = pdata->tx_ring[i];
                if (ring) {
@@ -1484,8 +1483,6 @@ static struct rtnl_link_stats64 *xgene_enet_get_stats64(
                }
        }
        memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
-
-       return storage;
 }
 
 static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
@@ -1967,6 +1964,30 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
        }
 }
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_enet_acpi_match[] = {
+       { "APMC0D05", XGENE_ENET1},
+       { "APMC0D30", XGENE_ENET1},
+       { "APMC0D31", XGENE_ENET1},
+       { "APMC0D3F", XGENE_ENET1},
+       { "APMC0D26", XGENE_ENET2},
+       { "APMC0D25", XGENE_ENET2},
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
+#endif
+
+static const struct of_device_id xgene_enet_of_match[] = {
+       {.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
+       {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
+       {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
+       {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
+       {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
+
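Hoisting the match tables above xgene_enet_probe() avoids forward declarations; they are consumed by the platform driver definition, roughly as sketched here (the field wiring itself is outside the visible hunks):

	static struct platform_driver xgene_enet_driver = {
		.driver = {
			.name = "xgene-enet",
			.of_match_table = of_match_ptr(xgene_enet_of_match),
			.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
		},
		.probe = xgene_enet_probe,
	};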
 static int xgene_enet_probe(struct platform_device *pdev)
 {
        struct net_device *ndev;
@@ -2113,32 +2134,6 @@ static void xgene_enet_shutdown(struct platform_device *pdev)
        xgene_enet_remove(pdev);
 }
 
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_enet_acpi_match[] = {
-       { "APMC0D05", XGENE_ENET1},
-       { "APMC0D30", XGENE_ENET1},
-       { "APMC0D31", XGENE_ENET1},
-       { "APMC0D3F", XGENE_ENET1},
-       { "APMC0D26", XGENE_ENET2},
-       { "APMC0D25", XGENE_ENET2},
-       { }
-};
-MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id xgene_enet_of_match[] = {
-       {.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
-       {.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
-       {.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
-       {.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
-       {.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
-       {},
-};
-
-MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
-#endif
-
 static struct platform_driver xgene_enet_driver = {
        .driver = {
                   .name = "xgene-enet",
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
new file mode 100644 (file)
index 0000000..cdf78e0
--- /dev/null
@@ -0,0 +1,24 @@
+#
+# aQuantia device configuration
+#
+
+config NET_VENDOR_AQUANTIA
+       bool "aQuantia devices"
+       default y
+       ---help---
+         Set this to y if you have an Ethernet network card that uses the
+         aQuantia AQC107/AQC108 chipset.
+
+         This option does not build any drivers; it causes the aQuantia
+         drivers that can be built to appear in the list of Ethernet drivers.
+
+if NET_VENDOR_AQUANTIA
+
+config AQTION
+       tristate "aQuantia AQtion(tm) Support"
+       depends on PCI && X86_64
+       ---help---
+         This enables the support for the aQuantia AQtion(tm) Ethernet card.
+
+endif # NET_VENDOR_AQUANTIA
diff --git a/drivers/net/ethernet/aquantia/Makefile b/drivers/net/ethernet/aquantia/Makefile
new file mode 100644 (file)
index 0000000..4f4897b
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the aQuantia device drivers.
+#
+
+obj-$(CONFIG_AQTION) += atlantic/
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
new file mode 100644 (file)
index 0000000..e4ae696
--- /dev/null
@@ -0,0 +1,42 @@
+################################################################################
+#
+# aQuantia Ethernet Controller AQtion Linux Driver
+# Copyright(c) 2014-2017 aQuantia Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information: <rdc-drv@aquantia.com>
+# aQuantia Corporation, 105 E. Tasman Dr. San Jose, CA 95134, USA
+#
+################################################################################
+
+#
+# Makefile for the AQtion(tm) Ethernet driver
+#
+
+obj-$(CONFIG_AQTION) += atlantic.o
+
+atlantic-objs := aq_main.o \
+       aq_nic.o \
+       aq_pci_func.o \
+       aq_vec.o \
+       aq_ring.o \
+       aq_hw_utils.o \
+       aq_ethtool.o \
+       hw_atl/hw_atl_a0.o \
+       hw_atl/hw_atl_b0.o \
+       hw_atl/hw_atl_utils.o \
+       hw_atl/hw_atl_llh.o
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
new file mode 100644 (file)
index 0000000..5f99237
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_cfg.h: Definition of configuration parameters and constants. */
+
+#ifndef AQ_CFG_H
+#define AQ_CFG_H
+
+#define AQ_CFG_VECS_DEF   4U
+#define AQ_CFG_TCS_DEF    1U
+
+#define AQ_CFG_TXDS_DEF    4096U
+#define AQ_CFG_RXDS_DEF    1024U
+
+#define AQ_CFG_IS_POLLING_DEF 0U
+
+#define AQ_CFG_FORCE_LEGACY_INT 0U
+
+#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF   1U
+#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU
+#define AQ_CFG_IRQ_MASK                      0x1FFU
+
+#define AQ_CFG_VECS_MAX   8U
+#define AQ_CFG_TCS_MAX    8U
+
+#define AQ_CFG_TX_FRAME_MAX  (16U * 1024U)
+#define AQ_CFG_RX_FRAME_MAX  (4U * 1024U)
+
+/* LRO */
+#define AQ_CFG_IS_LRO_DEF           1U
+
+/* RSS */
+#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX  128U
+#define AQ_CFG_RSS_HASHKEY_SIZE           320U
+
+#define AQ_CFG_IS_RSS_DEF           1U
+#define AQ_CFG_NUM_RSS_QUEUES_DEF   AQ_CFG_VECS_DEF
+#define AQ_CFG_RSS_BASE_CPU_NUM_DEF 0U
+
+#define AQ_CFG_PCI_FUNC_MSIX_IRQS   9U
+#define AQ_CFG_PCI_FUNC_PORTS       2U
+
+#define AQ_CFG_SERVICE_TIMER_INTERVAL    (2 * HZ)
+#define AQ_CFG_POLLING_TIMER_INTERVAL   ((unsigned int)(2 * HZ))
+
+#define AQ_CFG_SKB_FRAGS_MAX   32U
+
+#define AQ_CFG_NAPI_WEIGHT     64U
+
+#define AQ_CFG_MULTICAST_ADDRESS_MAX     32U
+
+/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
+
+#define AQ_CFG_FC_MODE 3U
+
+#define AQ_CFG_SPEED_MSK  0xFFFFU      /* 0xFFFFU==auto_neg */
+
+#define AQ_CFG_IS_AUTONEG_DEF       1U
+#define AQ_CFG_MTU_DEF              1514U
+
+#define AQ_CFG_LOCK_TRYS   100U
+
+#define AQ_CFG_DRV_AUTHOR      "aQuantia"
+#define AQ_CFG_DRV_DESC        "aQuantia Corporation(R) Network Driver"
+#define AQ_CFG_DRV_NAME        "aquantia"
+#define AQ_CFG_DRV_VERSION     __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
+                               __stringify(NIC_MINOR_DRIVER_VERSION)"."\
+                               __stringify(NIC_BUILD_DRIVER_VERSION)"."\
+                               __stringify(NIC_REVISION_DRIVER_VERSION)
+
+#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
new file mode 100644 (file)
index 0000000..9eb5e22
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_common.h: Basic includes for all files in the project. */
+
+#ifndef AQ_COMMON_H
+#define AQ_COMMON_H
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "ver.h"
+#include "aq_nic.h"
+#include "aq_cfg.h"
+#include "aq_utils.h"
+
+#endif /* AQ_COMMON_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
new file mode 100644 (file)
index 0000000..a761e91
--- /dev/null
@@ -0,0 +1,262 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_ethtool.c: Definition of ethtool-related functions. */
+
+#include "aq_ethtool.h"
+#include "aq_nic.h"
+
+static void aq_ethtool_get_regs(struct net_device *ndev,
+                               struct ethtool_regs *regs, void *p)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       u32 regs_count = aq_nic_get_regs_count(aq_nic);
+
+       memset(p, 0, regs_count * sizeof(u32));
+       aq_nic_get_regs(aq_nic, regs, p);
+}
+
+static int aq_ethtool_get_regs_len(struct net_device *ndev)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       u32 regs_count = aq_nic_get_regs_count(aq_nic);
+
+       return regs_count * sizeof(u32);
+}
+
+static u32 aq_ethtool_get_link(struct net_device *ndev)
+{
+       return ethtool_op_get_link(ndev);
+}
+
+static int aq_ethtool_get_link_ksettings(struct net_device *ndev,
+                                        struct ethtool_link_ksettings *cmd)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+       aq_nic_get_link_ksettings(aq_nic, cmd);
+       cmd->base.speed = netif_carrier_ok(ndev) ?
+                               aq_nic_get_link_speed(aq_nic) : 0U;
+
+       return 0;
+}
+
+static int
+aq_ethtool_set_link_ksettings(struct net_device *ndev,
+                             const struct ethtool_link_ksettings *cmd)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+       return aq_nic_set_link_ksettings(aq_nic, cmd);
+}
+
+/* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */
+static const unsigned int aq_ethtool_stat_queue_lines = 5U;
+static const unsigned int aq_ethtool_stat_queue_chars =
+       5U * ETH_GSTRING_LEN;
+static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
+       "InPackets",
+       "InUCast",
+       "InMCast",
+       "InBCast",
+       "InErrors",
+       "OutPackets",
+       "OutUCast",
+       "OutMCast",
+       "OutBCast",
+       "InUCastOctects",
+       "OutUCastOctects",
+       "InMCastOctects",
+       "OutMCastOctects",
+       "InBCastOctects",
+       "OutBCastOctects",
+       "InOctects",
+       "OutOctects",
+       "InPacketsDma",
+       "OutPacketsDma",
+       "InOctetsDma",
+       "OutOctetsDma",
+       "InDroppedDma",
+       "Queue[0] InPackets",
+       "Queue[0] OutPackets",
+       "Queue[0] InJumboPackets",
+       "Queue[0] InLroPackets",
+       "Queue[0] InErrors",
+       "Queue[1] InPackets",
+       "Queue[1] OutPackets",
+       "Queue[1] InJumboPackets",
+       "Queue[1] InLroPackets",
+       "Queue[1] InErrors",
+       "Queue[2] InPackets",
+       "Queue[2] OutPackets",
+       "Queue[2] InJumboPackets",
+       "Queue[2] InLroPackets",
+       "Queue[2] InErrors",
+       "Queue[3] InPackets",
+       "Queue[3] OutPackets",
+       "Queue[3] InJumboPackets",
+       "Queue[3] InLroPackets",
+       "Queue[3] InErrors",
+       "Queue[4] InPackets",
+       "Queue[4] OutPackets",
+       "Queue[4] InJumboPackets",
+       "Queue[4] InLroPackets",
+       "Queue[4] InErrors",
+       "Queue[5] InPackets",
+       "Queue[5] OutPackets",
+       "Queue[5] InJumboPackets",
+       "Queue[5] InLroPackets",
+       "Queue[5] InErrors",
+       "Queue[6] InPackets",
+       "Queue[6] OutPackets",
+       "Queue[6] InJumboPackets",
+       "Queue[6] InLroPackets",
+       "Queue[6] InErrors",
+       "Queue[7] InPackets",
+       "Queue[7] OutPackets",
+       "Queue[7] InJumboPackets",
+       "Queue[7] InLroPackets",
+       "Queue[7] InErrors",
+};
+
+static void aq_ethtool_stats(struct net_device *ndev,
+                            struct ethtool_stats *stats, u64 *data)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+/* ASSERT: Need to add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */
+       BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8);
+       memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64));
+       aq_nic_get_stats(aq_nic, data);
+}
+
+static void aq_ethtool_get_drvinfo(struct net_device *ndev,
+                                  struct ethtool_drvinfo *drvinfo)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+       struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
+       u32 firmware_version = aq_nic_get_fw_version(aq_nic);
+       u32 regs_count = aq_nic_get_regs_count(aq_nic);
+
+       strlcat(drvinfo->driver, AQ_CFG_DRV_NAME, sizeof(drvinfo->driver));
+       strlcat(drvinfo->version, AQ_CFG_DRV_VERSION, sizeof(drvinfo->version));
+
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+                "%u.%u.%u", firmware_version >> 24,
+                (firmware_version >> 16) & 0xFFU, firmware_version & 0xFFFFU);
+
+       strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
+               sizeof(drvinfo->bus_info));
+       drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) -
+               (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines;
+       drvinfo->testinfo_len = 0;
+       drvinfo->regdump_len = regs_count;
+       drvinfo->eedump_len = 0;
+}
+
+static void aq_ethtool_get_strings(struct net_device *ndev,
+                                  u32 stringset, u8 *data)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+       if (stringset == ETH_SS_STATS)
+               memcpy(data, *aq_ethtool_stat_names,
+                      sizeof(aq_ethtool_stat_names) -
+                      (AQ_CFG_VECS_MAX - cfg->vecs) *
+                      aq_ethtool_stat_queue_chars);
+}
+
+static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
+{
+       int ret = 0;
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               ret = ARRAY_SIZE(aq_ethtool_stat_names) -
+                       (AQ_CFG_VECS_MAX - cfg->vecs) *
+                       aq_ethtool_stat_queue_lines;
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+       }
+       return ret;
+}
+
+static u32 aq_ethtool_get_rss_indir_size(struct net_device *ndev)
+{
+       return AQ_CFG_RSS_INDIRECTION_TABLE_MAX;
+}
+
+static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+       return sizeof(cfg->aq_rss.hash_secret_key);
+}
+
+static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
+                             u8 *hfunc)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+       unsigned int i = 0U;
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+       if (indir) {
+               for (i = 0; i < AQ_CFG_RSS_INDIRECTION_TABLE_MAX; i++)
+                       indir[i] = cfg->aq_rss.indirection_table[i];
+       }
+       if (key)
+               memcpy(key, cfg->aq_rss.hash_secret_key,
+                      sizeof(cfg->aq_rss.hash_secret_key));
+       return 0;
+}
+
+static int aq_ethtool_get_rxnfc(struct net_device *ndev,
+                               struct ethtool_rxnfc *cmd,
+                               u32 *rule_locs)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+       int err = 0;
+
+       switch (cmd->cmd) {
+       case ETHTOOL_GRXRINGS:
+               cmd->data = cfg->vecs;
+               break;
+
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       return err;
+}
+
+const struct ethtool_ops aq_ethtool_ops = {
+       .get_link            = aq_ethtool_get_link,
+       .get_regs_len        = aq_ethtool_get_regs_len,
+       .get_regs            = aq_ethtool_get_regs,
+       .get_drvinfo         = aq_ethtool_get_drvinfo,
+       .get_strings         = aq_ethtool_get_strings,
+       .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size,
+       .get_rxfh_key_size   = aq_ethtool_get_rss_key_size,
+       .get_rxfh            = aq_ethtool_get_rss,
+       .get_rxnfc           = aq_ethtool_get_rxnfc,
+       .get_sset_count      = aq_ethtool_get_sset_count,
+       .get_ethtool_stats   = aq_ethtool_stats,
+       .get_link_ksettings  = aq_ethtool_get_link_ksettings,
+       .set_link_ksettings  = aq_ethtool_set_link_ksettings,
+};
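
aq_ethtool_ops is exported through aq_ethtool.h; the attachment to the net_device is not part of this file, but presumably amounts to a single assignment in the netdev setup path, along these lines:

	ndev->ethtool_ops = &aq_ethtool_ops;	/* assumed to live in ndev init */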
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
new file mode 100644 (file)
index 0000000..21c126e
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_ethtool.h: Declaration of ethtool-related functions. */
+
+#ifndef AQ_ETHTOOL_H
+#define AQ_ETHTOOL_H
+
+#include "aq_common.h"
+
+extern const struct ethtool_ops aq_ethtool_ops;
+
+#endif /* AQ_ETHTOOL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
new file mode 100644 (file)
index 0000000..fce0fd3
--- /dev/null
@@ -0,0 +1,177 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_hw.h: Declaration of the abstract interface for NIC hardware-specific
+ * functions.
+ */
+
+#ifndef AQ_HW_H
+#define AQ_HW_H
+
+#include "aq_common.h"
+
+/* NIC H/W capabilities */
+struct aq_hw_caps_s {
+       u64 hw_features;
+       u64 link_speed_msk;
+       unsigned int hw_priv_flags;
+       u32 rxds;
+       u32 txds;
+       u32 txhwb_alignment;
+       u32 irq_mask;
+       u32 vecs;
+       u32 mtu;
+       u32 mac_regs_count;
+       u8 ports;
+       u8 msix_irqs;
+       u8 tcs;
+       u8 rxd_alignment;
+       u8 rxd_size;
+       u8 txd_alignment;
+       u8 txd_size;
+       u8 tx_rings;
+       u8 rx_rings;
+       bool flow_control;
+       bool is_64_dma;
+       u32 fw_ver_expected;
+};
+
+struct aq_hw_link_status_s {
+       unsigned int mbps;
+};
+
+#define AQ_HW_IRQ_INVALID 0U
+#define AQ_HW_IRQ_LEGACY  1U
+#define AQ_HW_IRQ_MSI     2U
+#define AQ_HW_IRQ_MSIX    3U
+
+#define AQ_HW_POWER_STATE_D0   0U
+#define AQ_HW_POWER_STATE_D3   3U
+
+#define AQ_HW_FLAG_STARTED     0x00000004U
+#define AQ_HW_FLAG_STOPPING    0x00000008U
+#define AQ_HW_FLAG_RESETTING   0x00000010U
+#define AQ_HW_FLAG_CLOSING     0x00000020U
+#define AQ_HW_LINK_DOWN        0x04000000U
+#define AQ_HW_FLAG_ERR_UNPLUG  0x40000000U
+#define AQ_HW_FLAG_ERR_HW      0x80000000U
+
+#define AQ_HW_FLAG_ERRORS      (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG)
+
+struct aq_hw_s {
+       struct aq_obj_s header;
+       struct aq_nic_cfg_s *aq_nic_cfg;
+       struct aq_pci_func_s *aq_pci_func;
+       void __iomem *mmio;
+       unsigned int not_ff_addr;
+       struct aq_hw_link_status_s aq_link_status;
+};
+
+struct aq_ring_s;
+struct aq_ring_param_s;
+struct aq_nic_cfg_s;
+struct sk_buff;
+
+struct aq_hw_ops {
+       struct aq_hw_s *(*create)(struct aq_pci_func_s *aq_pci_func,
+                                 unsigned int port, struct aq_hw_ops *ops);
+
+       void (*destroy)(struct aq_hw_s *self);
+
+       int (*get_hw_caps)(struct aq_hw_s *self,
+                          struct aq_hw_caps_s *aq_hw_caps);
+
+       int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+                              unsigned int frags);
+
+       int (*hw_ring_rx_receive)(struct aq_hw_s *self,
+                                 struct aq_ring_s *aq_ring);
+
+       int (*hw_ring_rx_fill)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+                              unsigned int sw_tail_old);
+
+       int (*hw_ring_tx_head_update)(struct aq_hw_s *self,
+                                     struct aq_ring_s *aq_ring);
+
+       int (*hw_get_mac_permanent)(struct aq_hw_s *self,
+                                   struct aq_hw_caps_s *aq_hw_caps,
+                                   u8 *mac);
+
+       int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
+
+       int (*hw_get_link_status)(struct aq_hw_s *self,
+                                 struct aq_hw_link_status_s *link_status);
+
+       int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed);
+
+       int (*hw_reset)(struct aq_hw_s *self);
+
+       int (*hw_init)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg,
+                      u8 *mac_addr);
+
+       int (*hw_start)(struct aq_hw_s *self);
+
+       int (*hw_stop)(struct aq_hw_s *self);
+
+       int (*hw_ring_tx_init)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+                              struct aq_ring_param_s *aq_ring_param);
+
+       int (*hw_ring_tx_start)(struct aq_hw_s *self,
+                               struct aq_ring_s *aq_ring);
+
+       int (*hw_ring_tx_stop)(struct aq_hw_s *self,
+                              struct aq_ring_s *aq_ring);
+
+       int (*hw_ring_rx_init)(struct aq_hw_s *self,
+                              struct aq_ring_s *aq_ring,
+                              struct aq_ring_param_s *aq_ring_param);
+
+       int (*hw_ring_rx_start)(struct aq_hw_s *self,
+                               struct aq_ring_s *aq_ring);
+
+       int (*hw_ring_rx_stop)(struct aq_hw_s *self,
+                              struct aq_ring_s *aq_ring);
+
+       int (*hw_irq_enable)(struct aq_hw_s *self, u64 mask);
+
+       int (*hw_irq_disable)(struct aq_hw_s *self, u64 mask);
+
+       int (*hw_irq_read)(struct aq_hw_s *self, u64 *mask);
+
+       int (*hw_packet_filter_set)(struct aq_hw_s *self,
+                                   unsigned int packet_filter);
+
+       int (*hw_multicast_list_set)(struct aq_hw_s *self,
+                                    u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX]
+                                    [ETH_ALEN],
+                                    u32 count);
+
+       int (*hw_interrupt_moderation_set)(struct aq_hw_s *self,
+                                          bool itr_enabled);
+
+       int (*hw_rss_set)(struct aq_hw_s *self,
+                         struct aq_rss_parameters *rss_params);
+
+       int (*hw_rss_hash_set)(struct aq_hw_s *self,
+                              struct aq_rss_parameters *rss_params);
+
+       int (*hw_get_regs)(struct aq_hw_s *self,
+                          struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
+
+       int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
+                              unsigned int *p_count);
+
+       int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
+
+       int (*hw_deinit)(struct aq_hw_s *self);
+
+       int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state);
+};
+
+#endif /* AQ_HW_H */
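
struct aq_hw_ops is a vtable: the generic aq_nic/aq_vec code binds either the A0 or the B0 implementation once and touches hardware only through these pointers. An illustrative call through the indirection, using the hw_ring_rx_fill signature declared above:

	/* Refill an RX ring through whichever hardware layer was bound. */
	static int refill_rx(struct aq_hw_ops *ops, struct aq_hw_s *hw,
			     struct aq_ring_s *ring, unsigned int sw_tail_old)
	{
		return ops->hw_ring_rx_fill(hw, ring, sw_tail_old);
	}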
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
new file mode 100644 (file)
index 0000000..5f13465
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_hw_utils.c: Definitions of helper functions used across
+ * the hardware layer.
+ */
+
+#include "aq_hw_utils.h"
+#include "aq_hw.h"
+
+void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
+                        u32 shift, u32 val)
+{
+       if (msk ^ ~0) {
+               u32 reg_old, reg_new;
+
+               reg_old = aq_hw_read_reg(aq_hw, addr);
+               reg_new = (reg_old & (~msk)) | (val << shift);
+
+               if (reg_old != reg_new)
+                       aq_hw_write_reg(aq_hw, addr, reg_new);
+       } else {
+               aq_hw_write_reg(aq_hw, addr, val);
+       }
+}
+
+u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift)
+{
+       return ((aq_hw_read_reg(aq_hw, addr) & msk) >> shift);
+}
+
+u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
+{
+       u32 value = readl(hw->mmio + reg);
+
+       if ((~0U) == value && (~0U) == readl(hw->mmio + hw->not_ff_addr))
+               aq_utils_obj_set(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG);
+
+       return value;
+}
+
+void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value)
+{
+       writel(value, hw->mmio + reg);
+}
+
+int aq_hw_err_from_flags(struct aq_hw_s *hw)
+{
+       int err = 0;
+
+       if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+               err = -ENXIO;
+               goto err_exit;
+       }
+       if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_HW)) {
+               err = -EIO;
+               goto err_exit;
+       }
+
+err_exit:
+       return err;
+}
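
Taken together, these helpers form one access pattern: the MMIO accessors latch AQ_HW_FLAG_ERR_UNPLUG when a read returns all-ones and the sanity register does too, and aq_hw_err_from_flags() turns the latched state into an errno. A hypothetical caller (the FOO_* register layout is invented for illustration):

	#define FOO_CTRL_REG	0x0100U		/* hypothetical */
	#define FOO_EN_MSK	0x00000001U	/* hypothetical */
	#define FOO_EN_SHIFT	0U

	static int foo_enable(struct aq_hw_s *hw)
	{
		aq_hw_write_reg_bit(hw, FOO_CTRL_REG, FOO_EN_MSK,
				    FOO_EN_SHIFT, 1U);
		return aq_hw_err_from_flags(hw);  /* -ENXIO if device vanished */
	}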
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
new file mode 100644 (file)
index 0000000..03b72dd
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_hw_utils.h: Declaration of helper functions used across the hardware
+ * layer.
+ */
+
+#ifndef AQ_HW_UTILS_H
+#define AQ_HW_UTILS_H
+
+#include "aq_common.h"
+
+#ifndef HIDWORD
+#define LODWORD(_qw)    ((u32)(_qw))
+#define HIDWORD(_qw)    ((u32)(((_qw) >> 32) & 0xffffffff))
+#endif
+
+#define AQ_HW_SLEEP(_US_) mdelay(_US_)
+
+#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \
+do { \
+       unsigned int AQ_HW_WAIT_FOR_i; \
+       for (AQ_HW_WAIT_FOR_i = _N_; (!(_B_)) && (AQ_HW_WAIT_FOR_i);\
+       --AQ_HW_WAIT_FOR_i) {\
+               udelay(_US_); \
+       } \
+       if (!AQ_HW_WAIT_FOR_i) {\
+               err = -ETIME; \
+       } \
+} while (0)
+
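AQ_HW_WAIT_FOR polls its condition up to _N_ times with udelay(_US_) between attempts and, on timeout, assigns -ETIME to a variable named err that must already exist in the caller's scope. A hypothetical use, with FOO_* standing in for real register bits:

	int err = 0;

	/* Poll a (hypothetical) ready bit for up to 1000 * 10us. */
	AQ_HW_WAIT_FOR(aq_hw_read_reg(hw, FOO_STATUS_REG) & FOO_READY_BIT,
		       10U, 1000U);
	if (err == -ETIME)
		return err;
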
+struct aq_hw_s;
+
+void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
+                        u32 shift, u32 val);
+u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift);
+u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
+void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
+int aq_hw_err_from_flags(struct aq_hw_s *hw);
+
+#endif /* AQ_HW_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
new file mode 100644 (file)
index 0000000..c17c70a
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_main.c: Main file for aQuantia Linux driver. */
+
+#include "aq_main.h"
+#include "aq_nic.h"
+#include "aq_pci_func.h"
+#include "aq_ethtool.h"
+#include "hw_atl/hw_atl_a0.h"
+#include "hw_atl/hw_atl_b0.h"
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+
+static const struct pci_device_id aq_pci_tbl[] = {
+       { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), },
+       { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), },
+       { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), },
+       { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), },
+       { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), },
+       {}
+};
+
+MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(AQ_CFG_DRV_VERSION);
+MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
+MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
+
+static struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev)
+{
+       struct aq_hw_ops *ops = NULL;
+
+       ops = hw_atl_a0_get_ops_by_id(pdev);
+       if (!ops)
+               ops = hw_atl_b0_get_ops_by_id(pdev);
+
+       return ops;
+}
+
+static int aq_ndev_open(struct net_device *ndev)
+{
+       struct aq_nic_s *aq_nic = NULL;
+       int err = 0;
+
+       aq_nic = aq_nic_alloc_hot(ndev);
+       if (!aq_nic) {
+               err = -ENOMEM;
+               goto err_exit;
+       }
+       err = aq_nic_init(aq_nic);
+       if (err < 0)
+               goto err_exit;
+       err = aq_nic_start(aq_nic);
+       if (err < 0)
+               goto err_exit;
+
+err_exit:
+       if (err < 0)
+               aq_nic_deinit(aq_nic);
+       return err;
+}
+
+static int aq_ndev_close(struct net_device *ndev)
+{
+       int err = 0;
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+       err = aq_nic_stop(aq_nic);
+       if (err < 0)
+               goto err_exit;
+       aq_nic_deinit(aq_nic);
+       aq_nic_free_hot_resources(aq_nic);
+
+err_exit:
+       return err;
+}
+
+static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       int err = 0;
+
+       err = aq_nic_xmit(aq_nic, skb);
+       if (err < 0)
+               goto err_exit;
+
+err_exit:
+       return err;
+}
+
+static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       int err = 0;
+
+       if (new_mtu == ndev->mtu) {
+               err = 0;
+               goto err_exit;
+       }
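+       /* 68 is the minimum MTU required for IPv4 (RFC 791) */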
+       if (new_mtu < 68) {
+               err = -EINVAL;
+               goto err_exit;
+       }
+       err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
+       if (err < 0)
+               goto err_exit;
+       ndev->mtu = new_mtu;
+
+       if (netif_running(ndev)) {
+               aq_ndev_close(ndev);
+               aq_ndev_open(ndev);
+       }
+
+err_exit:
+       return err;
+}
+
+static int aq_ndev_set_features(struct net_device *ndev,
+                               netdev_features_t features)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic);
+       bool is_lro = false;
+
+       if (aq_cfg->hw_features & NETIF_F_LRO) {
+               is_lro = features & NETIF_F_LRO;
+
+               if (aq_cfg->is_lro != is_lro) {
+                       aq_cfg->is_lro = is_lro;
+
+                       if (netif_running(ndev)) {
+                               aq_ndev_close(ndev);
+                               aq_ndev_open(ndev);
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       int err = 0;
+
+       err = eth_mac_addr(ndev, addr);
+       if (err < 0)
+               goto err_exit;
+       err = aq_nic_set_mac(aq_nic, ndev);
+       if (err < 0)
+               goto err_exit;
+
+err_exit:
+       return err;
+}
+
+static void aq_ndev_set_multicast_settings(struct net_device *ndev)
+{
+       struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       int err = 0;
+
+       err = aq_nic_set_packet_filter(aq_nic, ndev->flags);
+       if (err < 0)
+               goto err_exit;
+
+       if (netdev_mc_count(ndev)) {
+               err = aq_nic_set_multicast_list(aq_nic, ndev);
+               if (err < 0)
+                       goto err_exit;
+       }
+
+err_exit:;
+}
+
+static const struct net_device_ops aq_ndev_ops = {
+       .ndo_open = aq_ndev_open,
+       .ndo_stop = aq_ndev_close,
+       .ndo_start_xmit = aq_ndev_start_xmit,
+       .ndo_set_rx_mode = aq_ndev_set_multicast_settings,
+       .ndo_change_mtu = aq_ndev_change_mtu,
+       .ndo_set_mac_address = aq_ndev_set_mac_address,
+       .ndo_set_features = aq_ndev_set_features
+};
+
+static int aq_pci_probe(struct pci_dev *pdev,
+                       const struct pci_device_id *pci_id)
+{
+       struct aq_hw_ops *aq_hw_ops = NULL;
+       struct aq_pci_func_s *aq_pci_func = NULL;
+       int err = 0;
+
+       err = pci_enable_device(pdev);
+       if (err < 0)
+               goto err_exit;
+       aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev);
+       aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev,
+                                       &aq_ndev_ops, &aq_ethtool_ops);
+       if (!aq_pci_func) {
+               err = -ENOMEM;
+               goto err_exit;
+       }
+       err = aq_pci_func_init(aq_pci_func);
+       if (err < 0)
+               goto err_exit;
+
+err_exit:
+       if (err < 0) {
+               if (aq_pci_func)
+                       aq_pci_func_free(aq_pci_func);
+       }
+       return err;
+}
+
+static void aq_pci_remove(struct pci_dev *pdev)
+{
+       struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
+
+       aq_pci_func_deinit(aq_pci_func);
+       aq_pci_func_free(aq_pci_func);
+}
+
+static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
+{
+       struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
+
+       return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
+}
+
+static int aq_pci_resume(struct pci_dev *pdev)
+{
+       struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
+       pm_message_t pm_msg = PMSG_RESTORE;
+
+       return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
+}
+
+static struct pci_driver aq_pci_ops = {
+       .name = AQ_CFG_DRV_NAME,
+       .id_table = aq_pci_tbl,
+       .probe = aq_pci_probe,
+       .remove = aq_pci_remove,
+       .suspend = aq_pci_suspend,
+       .resume = aq_pci_resume,
+};
+
+static int __init aq_module_init(void)
+{
+       int err = 0;
+
+       err = pci_register_driver(&aq_pci_ops);
+       if (err < 0)
+               goto err_exit;
+
+err_exit:
+       return err;
+}
+
+static void __exit aq_module_exit(void)
+{
+       pci_unregister_driver(&aq_pci_ops);
+}
+
+module_init(aq_module_init);
+module_exit(aq_module_exit);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
new file mode 100644 (file)
index 0000000..9748e7e
--- /dev/null
@@ -0,0 +1,17 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_main.h: Main file for aQuantia Linux driver. */
+
+#ifndef AQ_MAIN_H
+#define AQ_MAIN_H
+
+#include "aq_common.h"
+
+#endif /* AQ_MAIN_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
new file mode 100644 (file)
index 0000000..aa22a7c
--- /dev/null
@@ -0,0 +1,974 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_nic.c: Definition of common code for NIC. */
+
+#include "aq_nic.h"
+#include "aq_ring.h"
+#include "aq_vec.h"
+#include "aq_hw.h"
+#include "aq_pci_func.h"
+#include "aq_nic_internal.h"
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/timer.h>
+#include <linux/cpu.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/ip.h>
+
+static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
+{
+       struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+       struct aq_rss_parameters *rss_params = &cfg->aq_rss;
+       int i = 0;
+
+       static u8 rss_key[40] = {
+               0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
+               0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
+               0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
+               0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
+               0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
+       };
+
+       rss_params->hash_secret_key_size = sizeof(rss_key);
+       memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
+       rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;
+
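+       /* i & (num_rss_queues - 1) is a cheap modulo that is only valid
+        * when num_rss_queues is a power of two.
+        */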
+       for (i = rss_params->indirection_table_size; i--;)
+               rss_params->indirection_table[i] = i & (num_rss_queues - 1);
+}
+
+/* Fills aq_nic_cfg with valid defaults */
+static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
+{
+       struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+       cfg->aq_hw_caps = &self->aq_hw_caps;
+
+       cfg->vecs = AQ_CFG_VECS_DEF;
+       cfg->tcs = AQ_CFG_TCS_DEF;
+
+       cfg->rxds = AQ_CFG_RXDS_DEF;
+       cfg->txds = AQ_CFG_TXDS_DEF;
+
+       cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
+
+       cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF;
+       cfg->itr = cfg->is_interrupt_moderation ?
+               AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U;
+
+       cfg->is_rss = AQ_CFG_IS_RSS_DEF;
+       cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
+       cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
+       cfg->flow_control = AQ_CFG_FC_MODE;
+
+       cfg->mtu = AQ_CFG_MTU_DEF;
+       cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
+       cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;
+
+       cfg->is_lro = AQ_CFG_IS_LRO_DEF;
+
+       cfg->vlan_id = 0U;
+
+       aq_nic_rss_init(self, cfg->num_rss_queues);
+}
+
+/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
+int aq_nic_cfg_start(struct aq_nic_s *self)
+{
+       struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+       /* descriptors */
+       cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds);
+       cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds);
+
+       /* RSS rings */
+       cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs);
+       cfg->vecs = min(cfg->vecs, num_online_cpus());
+       /* cfg->vecs should be power of 2 for RSS */
+       if (cfg->vecs >= 8U)
+               cfg->vecs = 8U;
+       else if (cfg->vecs >= 4U)
+               cfg->vecs = 4U;
+       else if (cfg->vecs >= 2U)
+               cfg->vecs = 2U;
+       else
+               cfg->vecs = 1U;
+
+       cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
+
+       if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
+           (self->aq_hw_caps.vecs == 1U) ||
+           (cfg->vecs == 1U)) {
+               cfg->is_rss = 0U;
+               cfg->vecs = 1U;
+       }
+
+       cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk;
+       cfg->hw_features = self->aq_hw_caps.hw_features;
+       return 0;
+}
+
+static void aq_nic_service_timer_cb(unsigned long param)
+{
+       struct aq_nic_s *self = (struct aq_nic_s *)param;
+       struct net_device *ndev = aq_nic_get_ndev(self);
+       int err = 0;
+       bool is_busy = false;
+       unsigned int i = 0U;
+       struct aq_hw_link_status_s link_status;
+       struct aq_ring_stats_rx_s stats_rx;
+       struct aq_ring_stats_tx_s stats_tx;
+
+       atomic_inc(&self->header.busy_count);
+       is_busy = true;
+       if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
+               goto err_exit;
+
+       err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status);
+       if (err < 0)
+               goto err_exit;
+
+       self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
+                           self->aq_nic_cfg.is_interrupt_moderation);
+
+       if (memcmp(&link_status, &self->link_status, sizeof(link_status))) {
+               if (link_status.mbps) {
+                       aq_utils_obj_set(&self->header.flags,
+                                        AQ_NIC_FLAG_STARTED);
+                       aq_utils_obj_clear(&self->header.flags,
+                                          AQ_NIC_LINK_DOWN);
+                       netif_carrier_on(self->ndev);
+               } else {
+                       netif_carrier_off(self->ndev);
+                       aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
+               }
+
+               self->link_status = link_status;
+       }
+
+       memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
+       memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
+       for (i = AQ_DIMOF(self->aq_vec); i--;) {
+               if (self->aq_vec[i])
+                       aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
+       }
+
+       ndev->stats.rx_packets = stats_rx.packets;
+       ndev->stats.rx_bytes = stats_rx.bytes;
+       ndev->stats.rx_errors = stats_rx.errors;
+       ndev->stats.tx_packets = stats_tx.packets;
+       ndev->stats.tx_bytes = stats_tx.bytes;
+       ndev->stats.tx_errors = stats_tx.errors;
+
+err_exit:
+       if (is_busy)
+               atomic_dec(&self->header.busy_count);
+       mod_timer(&self->service_timer,
+                 jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
+}
+
+static void aq_nic_polling_timer_cb(unsigned long param)
+{
+       struct aq_nic_s *self = (struct aq_nic_s *)param;
+       struct aq_vec_s *aq_vec = NULL;
+       unsigned int i = 0U;
+
+       for (i = 0U, aq_vec = self->aq_vec[0];
+               self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+               aq_vec_isr(i, (void *)aq_vec);
+
+       mod_timer(&self->polling_timer, jiffies +
+               AQ_CFG_POLLING_TIMER_INTERVAL);
+}
+
+static struct net_device *aq_nic_ndev_alloc(void)
+{
+       return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
+}
+
+struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
+                                  const struct ethtool_ops *et_ops,
+                                  struct device *dev,
+                                  struct aq_pci_func_s *aq_pci_func,
+                                  unsigned int port,
+                                  const struct aq_hw_ops *aq_hw_ops)
+{
+       struct net_device *ndev = NULL;
+       struct aq_nic_s *self = NULL;
+       int err = 0;
+
+       ndev = aq_nic_ndev_alloc();
+       if (!ndev) {
+               err = -ENOMEM;
+               goto err_exit;
+       }
+       self = netdev_priv(ndev);
+
+       ndev->netdev_ops = ndev_ops;
+       ndev->ethtool_ops = et_ops;
+
+       SET_NETDEV_DEV(ndev, dev);
+
+       ndev->if_port = port;
+       self->ndev = ndev;
+
+       self->aq_pci_func = aq_pci_func;
+
+       self->aq_hw_ops = *aq_hw_ops;
+       self->port = (u8)port;
+
+       self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
+                                               &self->aq_hw_ops);
+       err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps);
+       if (err < 0)
+               goto err_exit;
+
+       aq_nic_cfg_init_defaults(self);
+
+err_exit:
+       if (err < 0) {
+               aq_nic_free_hot_resources(self);
+               self = NULL;
+       }
+       return self;
+}
+
+int aq_nic_ndev_register(struct aq_nic_s *self)
+{
+       int err = 0;
+       unsigned int i = 0U;
+
+       if (!self->ndev) {
+               err = -EINVAL;
+               goto err_exit;
+       }
+       err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw,
+                           self->aq_nic_cfg.aq_hw_caps,
+                           self->ndev->dev_addr);
+       if (err < 0)
+               goto err_exit;
+
+#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
+       {
+               static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;
+
+               ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
+       }
+#endif
+       err = register_netdev(self->ndev);
+       if (err < 0)
+               goto err_exit;
+
+       self->is_ndev_registered = true;
+       netif_carrier_off(self->ndev);
+
+       for (i = AQ_CFG_VECS_MAX; i--;)
+               aq_nic_ndev_queue_stop(self, i);
+
+err_exit:
+       return err;
+}
+
+int aq_nic_ndev_init(struct aq_nic_s *self)
+{
+       struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
+       struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;
+
+       self->ndev->hw_features |= aq_hw_caps->hw_features;
+       self->ndev->features = aq_hw_caps->hw_features;
+       self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
+       self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
+
+       return 0;
+}
+
+void aq_nic_ndev_free(struct aq_nic_s *self)
+{
+       if (!self->ndev)
+               goto err_exit;
+
+       if (self->is_ndev_registered)
+               unregister_netdev(self->ndev);
+
+       if (self->aq_hw)
+               self->aq_hw_ops.destroy(self->aq_hw);
+
+       free_netdev(self->ndev);
+
+err_exit:;
+}
+
+struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
+{
+       struct aq_nic_s *self = NULL;
+       int err = 0;
+
+       if (!ndev) {
+               err = -EINVAL;
+               goto err_exit;
+       }
+       self = netdev_priv(ndev);
+
+       if (!self) {
+               err = -EINVAL;
+               goto err_exit;
+       }
+       if (netif_running(ndev)) {
+               unsigned int i;
+
+               for (i = AQ_CFG_VECS_MAX; i--;)
+                       netif_stop_subqueue(ndev, i);
+       }
+
+       for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
+               self->aq_vecs++) {
+               self->aq_vec[self->aq_vecs] =
+                   aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg);
+               if (!self->aq_vec[self->aq_vecs]) {
+                       err = -ENOMEM;
+                       goto err_exit;
+               }
+       }
+
+err_exit:
+       if (err < 0) {
+               aq_nic_free_hot_resources(self);
+               self = NULL;
+       }
+       return self;
+}
+
+void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
+                       struct aq_ring_s *ring)
+{
+       self->aq_ring_tx[idx] = ring;
+}
+
+struct device *aq_nic_get_dev(struct aq_nic_s *self)
+{
+       return self->ndev->dev.parent;
+}
+
+struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
+{
+       return self->ndev;
+}
+
+int aq_nic_init(struct aq_nic_s *self)
+{
+       struct aq_vec_s *aq_vec = NULL;
+       int err = 0;
+       unsigned int i = 0U;
+
+       self->power_state = AQ_HW_POWER_STATE_D0;
+       err = self->aq_hw_ops.hw_reset(self->aq_hw);
+       if (err < 0)
+               goto err_exit;
+
+       err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg,
+                           aq_nic_get_ndev(self)->dev_addr);
+       if (err < 0)
+               goto err_exit;
+
+       for (i = 0U, aq_vec = self->aq_vec[0];
+               self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+               aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw);
+
+err_exit:
+       return err;
+}
+
+void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
+{
+       netif_start_subqueue(self->ndev, idx);
+}
+
+void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
+{
+       netif_stop_subqueue(self->ndev, idx);
+}
+
+int aq_nic_start(struct aq_nic_s *self)
+{
+       struct aq_vec_s *aq_vec = NULL;
+       int err = 0;
+       unsigned int i = 0U;
+
+       err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+                                                   self->mc_list.ar,
+                                                   self->mc_list.count);
+       if (err < 0)
+               goto err_exit;
+
+       err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
+                                                  self->packet_filter);
+       if (err < 0)
+               goto err_exit;
+
+       for (i = 0U, aq_vec = self->aq_vec[0];
+               self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+               err = aq_vec_start(aq_vec);
+               if (err < 0)
+                       goto err_exit;
+       }
+
+       err = self->aq_hw_ops.hw_start(self->aq_hw);
+       if (err < 0)
+               goto err_exit;
+
+       err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
+                           self->aq_nic_cfg.is_interrupt_moderation);
+       if (err < 0)
+               goto err_exit;
+       setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
+                   (unsigned long)self);
+       mod_timer(&self->service_timer, jiffies +
+                       AQ_CFG_SERVICE_TIMER_INTERVAL);
+
+       if (self->aq_nic_cfg.is_polling) {
+               setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb,
+                           (unsigned long)self);
+               mod_timer(&self->polling_timer, jiffies +
+                         AQ_CFG_POLLING_TIMER_INTERVAL);
+       } else {
+               for (i = 0U, aq_vec = self->aq_vec[0];
+                       self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+                       err = aq_pci_func_alloc_irq(self->aq_pci_func, i,
+                                                   self->ndev->name, aq_vec,
+                                       aq_vec_get_affinity_mask(aq_vec));
+                       if (err < 0)
+                               goto err_exit;
+               }
+
+               err = self->aq_hw_ops.hw_irq_enable(self->aq_hw,
+                                   AQ_CFG_IRQ_MASK);
+               if (err < 0)
+                       goto err_exit;
+       }
+
+       for (i = 0U, aq_vec = self->aq_vec[0];
+               self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+               aq_nic_ndev_queue_start(self, i);
+
+       err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
+       if (err < 0)
+               goto err_exit;
+
+       err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
+       if (err < 0)
+               goto err_exit;
+
+err_exit:
+       return err;
+}
+
+static unsigned int aq_nic_map_skb_frag(struct aq_nic_s *self,
+                                       struct sk_buff *skb,
+                                       struct aq_ring_buff_s *dx)
+{
+       unsigned int ret = 0U;
+       unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+       unsigned int frag_count = 0U;
+
+       dx->flags = 0U;
+       dx->len = skb_headlen(skb);
+       dx->pa = dma_map_single(aq_nic_get_dev(self), skb->data, dx->len,
+                               DMA_TO_DEVICE);
+       dx->len_pkt = skb->len;
+       dx->is_sop = 1U;
+       dx->is_mapped = 1U;
+
+       ++ret;
+
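+       /* Offload flags are derived from the IPv4 header only; IPv6
+        * traffic is not distinguished on this path.
+        */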
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               dx->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 1U : 0U;
+               dx->is_tcp_cso =
+                       (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U;
+               dx->is_udp_cso =
+                       (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U;
+       }
+
+       for (; nr_frags--; ++frag_count) {
+               unsigned int frag_len;
+               dma_addr_t frag_pa;
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];
+
+               frag_len = skb_frag_size(frag);
+
+               frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
+                                          frag_len, DMA_TO_DEVICE);
+
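+               /* Descriptors carry at most AQ_CFG_TX_FRAME_MAX bytes, so
+                * larger fragments are split across several descriptors.
+                */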
+               while (frag_len > AQ_CFG_TX_FRAME_MAX) {
+                       ++dx;
+                       ++ret;
+                       dx->flags = 0U;
+                       dx->len = AQ_CFG_TX_FRAME_MAX;
+                       dx->pa = frag_pa;
+                       dx->is_mapped = 1U;
+
+                       frag_len -= AQ_CFG_TX_FRAME_MAX;
+                       frag_pa += AQ_CFG_TX_FRAME_MAX;
+               }
+
+               ++dx;
+               ++ret;
+
+               dx->flags = 0U;
+               dx->len = frag_len;
+               dx->pa = frag_pa;
+               dx->is_mapped = 1U;
+       }
+
+       dx->is_eop = 1U;
+       dx->skb = skb;
+
+       return ret;
+}
+
+static unsigned int aq_nic_map_skb_lso(struct aq_nic_s *self,
+                                      struct sk_buff *skb,
+                                      struct aq_ring_buff_s *dx)
+{
+       dx->flags = 0U;
+       dx->len_pkt = skb->len;
+       dx->len_l2 = ETH_HLEN;
+       dx->len_l3 = ip_hdrlen(skb);
+       dx->len_l4 = tcp_hdrlen(skb);
+       dx->mss = skb_shinfo(skb)->gso_size;
+       dx->is_txc = 1U;
+       return 1U;
+}
+
+static unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
+                                  struct aq_ring_buff_s *dx)
+{
+       unsigned int ret = 0U;
+
+       if (unlikely(skb_is_gso(skb))) {
+               ret = aq_nic_map_skb_lso(self, skb, dx);
+               ++dx;
+       }
+
+       ret += aq_nic_map_skb_frag(self, skb, dx);
+
+       return ret;
+}
+
+int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
+__releases(&ring->lock)
+__acquires(&ring->lock)
+{
+       struct aq_ring_s *ring = NULL;
+       unsigned int frags = 0U;
+       unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
+       unsigned int tc = 0U;
+       unsigned int trys = AQ_CFG_LOCK_TRYS;
+       int err = 0;
+       bool is_nic_in_bad_state;
+       bool is_busy = false;
+       struct aq_ring_buff_s buffers[AQ_CFG_SKB_FRAGS_MAX];
+
+       frags = skb_shinfo(skb)->nr_frags + 1;
+
+       ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];
+
+       atomic_inc(&self->header.busy_count);
+       is_busy = true;
+
+       if (frags > AQ_CFG_SKB_FRAGS_MAX) {
+               dev_kfree_skb_any(skb);
+               goto err_exit;
+       }
+
+       is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags,
+                                               AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
+                                               (aq_ring_avail_dx(ring) <
+                                               AQ_CFG_SKB_FRAGS_MAX);
+
+       if (is_nic_in_bad_state) {
+               aq_nic_ndev_queue_stop(self, ring->idx);
+               err = NETDEV_TX_BUSY;
+               goto err_exit;
+       }
+
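+       /* Take the ring lock with a bounded number of attempts instead of
+        * blocking; if it stays contended the skb is reported back as
+        * NETDEV_TX_BUSY.
+        */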
+       do {
+               if (spin_trylock(&ring->header.lock)) {
+                       frags = aq_nic_map_skb(self, skb, &buffers[0]);
+
+                       aq_ring_tx_append_buffs(ring, &buffers[0], frags);
+
+                       err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
+                                                             ring, frags);
+                       if (err >= 0) {
+                               if (aq_ring_avail_dx(ring) <
+                                   AQ_CFG_SKB_FRAGS_MAX + 1)
+                                       aq_nic_ndev_queue_stop(self, ring->idx);
+                       }
+                       spin_unlock(&ring->header.lock);
+
+                       if (err >= 0) {
+                               ++ring->stats.tx.packets;
+                               ring->stats.tx.bytes += skb->len;
+                       }
+                       break;
+               }
+       } while (--trys);
+
+       if (!trys) {
+               err = NETDEV_TX_BUSY;
+               goto err_exit;
+       }
+
+err_exit:
+       if (is_busy)
+               atomic_dec(&self->header.busy_count);
+       return err;
+}
+
+int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
+{
+       int err = 0;
+
+       err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags);
+       if (err < 0)
+               goto err_exit;
+
+       self->packet_filter = flags;
+
+err_exit:
+       return err;
+}
+
+int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
+{
+       struct netdev_hw_addr *ha = NULL;
+       unsigned int i = 0U;
+
+       self->mc_list.count = 0U;
+
+       netdev_for_each_mc_addr(ha, ndev) {
+               if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
+                       break;
+               ether_addr_copy(self->mc_list.ar[i++], ha->addr);
+               ++self->mc_list.count;
+       }
+
+       return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+                                                   self->mc_list.ar,
+                                                   self->mc_list.count);
+}
+
+int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
+{
+       int err = 0;
+
+       if (new_mtu > self->aq_hw_caps.mtu) {
+               err = -EINVAL;
+               goto err_exit;
+       }
+       self->aq_nic_cfg.mtu = new_mtu;
+
+err_exit:
+       return err;
+}
+
+int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
+{
+       return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr);
+}
+
+unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
+{
+       return self->link_status.mbps;
+}
+
+int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
+{
+       u32 *regs_buff = p;
+       int err = 0;
+
+       regs->version = 1;
+
+       err = self->aq_hw_ops.hw_get_regs(self->aq_hw,
+                                         &self->aq_hw_caps, regs_buff);
+       if (err < 0)
+               goto err_exit;
+
+err_exit:
+       return err;
+}
+
+int aq_nic_get_regs_count(struct aq_nic_s *self)
+{
+       return self->aq_hw_caps.mac_regs_count;
+}
+
+void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
+{
+       struct aq_vec_s *aq_vec = NULL;
+       unsigned int i = 0U;
+       unsigned int count = 0U;
+       int err = 0;
+
+       err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count);
+       if (err < 0)
+               goto err_exit;
+
+       data += count;
+       count = 0U;
+
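+       /* Vector stats are appended after the hardware counters; each call
+        * to aq_vec_get_sw_stats() reports how many u64 words it wrote so
+        * the next vector continues where the previous one stopped.
+        */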
+       for (i = 0U, aq_vec = self->aq_vec[0];
+               self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+               data += count;
+               aq_vec_get_sw_stats(aq_vec, data, &count);
+       }
+
+err_exit:;
+       (void)err;
+}
+
+void aq_nic_get_link_ksettings(struct aq_nic_s *self,
+                              struct ethtool_link_ksettings *cmd)
+{
+       cmd->base.port = PORT_TP;
+       /* This driver supports only 10G-capable adapters, hence DUPLEX_FULL */
+       cmd->base.duplex = DUPLEX_FULL;
+       cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;
+
+       ethtool_link_ksettings_zero_link_mode(cmd, supported);
+
+       if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G)
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    10000baseT_Full);
+
+       if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_5G)
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    5000baseT_Full);
+
+       if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_2GS)
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    2500baseT_Full);
+
+       if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G)
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    1000baseT_Full);
+
+       if (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M)
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    100baseT_Full);
+
+       if (self->aq_hw_caps.flow_control)
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    Pause);
+
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+
+       ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+
+       if (self->aq_nic_cfg.is_autoneg)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+
+       if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    10000baseT_Full);
+
+       if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    5000baseT_Full);
+
+       if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    2500baseT_Full);
+
+       if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    1000baseT_Full);
+
+       if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    100baseT_Full);
+
+       if (self->aq_nic_cfg.flow_control)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    Pause);
+
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+}
+
+int aq_nic_set_link_ksettings(struct aq_nic_s *self,
+                             const struct ethtool_link_ksettings *cmd)
+{
+       u32 speed = 0U;
+       u32 rate = 0U;
+       int err = 0;
+
+       if (cmd->base.autoneg == AUTONEG_ENABLE) {
+               rate = self->aq_hw_caps.link_speed_msk;
+               self->aq_nic_cfg.is_autoneg = true;
+       } else {
+               speed = cmd->base.speed;
+
+               switch (speed) {
+               case SPEED_100:
+                       rate = AQ_NIC_RATE_100M;
+                       break;
+
+               case SPEED_1000:
+                       rate = AQ_NIC_RATE_1G;
+                       break;
+
+               case SPEED_2500:
+                       rate = AQ_NIC_RATE_2GS;
+                       break;
+
+               case SPEED_5000:
+                       rate = AQ_NIC_RATE_5G;
+                       break;
+
+               case SPEED_10000:
+                       rate = AQ_NIC_RATE_10G;
+                       break;
+
+               default:
+                       err = -EINVAL;
+                       goto err_exit;
+               }
+               if (!(self->aq_hw_caps.link_speed_msk & rate)) {
+                       err = -EINVAL;
+                       goto err_exit;
+               }
+
+               self->aq_nic_cfg.is_autoneg = false;
+       }
+
+       err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate);
+       if (err < 0)
+               goto err_exit;
+
+       self->aq_nic_cfg.link_speed_msk = rate;
+
+err_exit:
+       return err;
+}
+
+struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
+{
+       return &self->aq_nic_cfg;
+}
+
+u32 aq_nic_get_fw_version(struct aq_nic_s *self)
+{
+       u32 fw_version = 0U;
+
+       self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version);
+
+       return fw_version;
+}
+
+int aq_nic_stop(struct aq_nic_s *self)
+{
+       struct aq_vec_s *aq_vec = NULL;
+       unsigned int i = 0U;
+
+       for (i = 0U, aq_vec = self->aq_vec[0];
+               self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+               aq_nic_ndev_queue_stop(self, i);
+
+       del_timer_sync(&self->service_timer);
+
+       self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
+
+       if (self->aq_nic_cfg.is_polling)
+               del_timer_sync(&self->polling_timer);
+       else
+               aq_pci_func_free_irqs(self->aq_pci_func);
+
+       for (i = 0U, aq_vec = self->aq_vec[0];
+               self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+               aq_vec_stop(aq_vec);
+
+       return self->aq_hw_ops.hw_stop(self->aq_hw);
+}
+
+void aq_nic_deinit(struct aq_nic_s *self)
+{
+       struct aq_vec_s *aq_vec = NULL;
+       unsigned int i = 0U;
+
+       if (!self)
+               goto err_exit;
+
+       for (i = 0U, aq_vec = self->aq_vec[0];
+               self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+               aq_vec_deinit(aq_vec);
+
+       if (self->power_state == AQ_HW_POWER_STATE_D0) {
+               (void)self->aq_hw_ops.hw_deinit(self->aq_hw);
+       } else {
+               (void)self->aq_hw_ops.hw_set_power(self->aq_hw,
+                                                  self->power_state);
+       }
+
+err_exit:;
+}
+
+void aq_nic_free_hot_resources(struct aq_nic_s *self)
+{
+       unsigned int i = 0U;
+
+       if (!self)
+               goto err_exit;
+
+       for (i = AQ_DIMOF(self->aq_vec); i--;) {
+               if (self->aq_vec[i])
+                       aq_vec_free(self->aq_vec[i]);
+       }
+
+err_exit:;
+}
+
+int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
+{
+       int err = 0;
+
+       if (!netif_running(self->ndev)) {
+               err = 0;
+               goto err_exit;
+       }
+       rtnl_lock();
+       if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
+               self->power_state = AQ_HW_POWER_STATE_D3;
+               netif_device_detach(self->ndev);
+               netif_tx_stop_all_queues(self->ndev);
+
+               err = aq_nic_stop(self);
+               if (err < 0)
+                       goto err_unlock;
+
+               aq_nic_deinit(self);
+       } else {
+               err = aq_nic_init(self);
+               if (err < 0)
+                       goto err_unlock;
+
+               err = aq_nic_start(self);
+               if (err < 0)
+                       goto err_unlock;
+
+               netif_device_attach(self->ndev);
+               netif_tx_start_all_queues(self->ndev);
+       }
+
+err_unlock:
+       /* Drop the rtnl lock on both the success and the error paths */
+       rtnl_unlock();
+
+err_exit:
+       return err;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
new file mode 100644 (file)
index 0000000..7fc2a5e
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_nic.h: Declaration of common code for NIC. */
+
+#ifndef AQ_NIC_H
+#define AQ_NIC_H
+
+#include "aq_common.h"
+#include "aq_rss.h"
+
+struct aq_ring_s;
+struct aq_pci_func_s;
+struct aq_hw_ops;
+
+#define AQ_NIC_FC_OFF    0U
+#define AQ_NIC_FC_TX     1U
+#define AQ_NIC_FC_RX     2U
+#define AQ_NIC_FC_FULL   3U
+#define AQ_NIC_FC_AUTO   4U
+
+#define AQ_NIC_RATE_10G        BIT(0)
+#define AQ_NIC_RATE_5G         BIT(1)
+#define AQ_NIC_RATE_5GSR       BIT(2)
+#define AQ_NIC_RATE_2GS        BIT(3)
+#define AQ_NIC_RATE_1G         BIT(4)
+#define AQ_NIC_RATE_100M       BIT(5)
+
+struct aq_nic_cfg_s {
+       struct aq_hw_caps_s *aq_hw_caps;
+       u64 hw_features;
+       u32 rxds;               /* rx ring size, descriptors # */
+       u32 txds;               /* tx ring size, descriptors # */
+       u32 vecs;               /* vecs==allocated irqs */
+       u32 irq_type;
+       u32 itr;
+       u32 num_rss_queues;
+       u32 mtu;
+       u32 ucp_0x364;
+       u32 flow_control;
+       u32 link_speed_msk;
+       u32 vlan_id;
+       u16 is_mc_list_enabled;
+       u16 mc_list_count;
+       bool is_autoneg;
+       bool is_interrupt_moderation;
+       bool is_polling;
+       bool is_rss;
+       bool is_lro;
+       u8  tcs;
+       struct aq_rss_parameters aq_rss;
+};
+
+#define AQ_NIC_FLAG_STARTED     0x00000004U
+#define AQ_NIC_FLAG_STOPPING    0x00000008U
+#define AQ_NIC_FLAG_RESETTING   0x00000010U
+#define AQ_NIC_FLAG_CLOSING     0x00000020U
+#define AQ_NIC_LINK_DOWN        0x04000000U
+#define AQ_NIC_FLAG_ERR_UNPLUG  0x40000000U
+#define AQ_NIC_FLAG_ERR_HW      0x80000000U
+
+#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
+       ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
+
+struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
+                                  const struct ethtool_ops *et_ops,
+                                  struct device *dev,
+                                  struct aq_pci_func_s *aq_pci_func,
+                                  unsigned int port,
+                                  const struct aq_hw_ops *aq_hw_ops);
+int aq_nic_ndev_init(struct aq_nic_s *self);
+struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev);
+void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
+                       struct aq_ring_s *ring);
+struct device *aq_nic_get_dev(struct aq_nic_s *self);
+struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
+int aq_nic_init(struct aq_nic_s *self);
+int aq_nic_cfg_start(struct aq_nic_s *self);
+int aq_nic_ndev_register(struct aq_nic_s *self);
+void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx);
+void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx);
+void aq_nic_ndev_free(struct aq_nic_s *self);
+int aq_nic_start(struct aq_nic_s *self);
+int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
+int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
+int aq_nic_get_regs_count(struct aq_nic_s *self);
+void aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
+int aq_nic_stop(struct aq_nic_s *self);
+void aq_nic_deinit(struct aq_nic_s *self);
+void aq_nic_free_hot_resources(struct aq_nic_s *self);
+int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
+int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev);
+int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags);
+int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev);
+unsigned int aq_nic_get_link_speed(struct aq_nic_s *self);
+void aq_nic_get_link_ksettings(struct aq_nic_s *self,
+                              struct ethtool_link_ksettings *cmd);
+int aq_nic_set_link_ksettings(struct aq_nic_s *self,
+                             const struct ethtool_link_ksettings *cmd);
+struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
+u32 aq_nic_get_fw_version(struct aq_nic_s *self);
+int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
+
+#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
new file mode 100644 (file)
index 0000000..f81738a
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_nic_internal.h: Definition of private object structure. */
+
+#ifndef AQ_NIC_INTERNAL_H
+#define AQ_NIC_INTERNAL_H
+
+struct aq_nic_s {
+       struct aq_obj_s header;
+       struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
+       struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
+       struct aq_hw_s *aq_hw;
+       struct net_device *ndev;
+       struct aq_pci_func_s *aq_pci_func;
+       unsigned int aq_vecs;
+       unsigned int packet_filter;
+       unsigned int power_state;
+       bool is_ndev_registered;
+       u8 port;
+       struct aq_hw_ops aq_hw_ops;
+       struct aq_hw_caps_s aq_hw_caps;
+       struct aq_nic_cfg_s aq_nic_cfg;
+       struct timer_list service_timer;
+       struct timer_list polling_timer;
+       struct aq_hw_link_status_s link_status;
+       struct {
+               u32 count;
+               u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+       } mc_list;
+};
+
+#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
+                       AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
+                       AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
+
+#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
+                                       AQ_NIC_LINK_DOWN)
+
+#endif /* AQ_NIC_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
new file mode 100644 (file)
index 0000000..581de71
--- /dev/null
@@ -0,0 +1,292 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_pci_func.c: Definition of PCI functions. */
+
+#include "aq_pci_func.h"
+#include "aq_nic.h"
+#include "aq_vec.h"
+#include "aq_hw.h"
+#include <linux/interrupt.h>
+
+struct aq_pci_func_s {
+       struct pci_dev *pdev;
+       struct aq_nic_s *port[AQ_CFG_PCI_FUNC_PORTS];
+       void __iomem *mmio;
+       void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS];
+       resource_size_t mmio_pa;
+       unsigned int msix_entry_mask;
+       unsigned int ports;
+       bool is_pci_enabled;
+       bool is_regions;
+       bool is_pci_using_dac;
+       struct aq_hw_caps_s aq_hw_caps;
+};
+
+struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
+                                       struct pci_dev *pdev,
+                                       const struct net_device_ops *ndev_ops,
+                                       const struct ethtool_ops *eth_ops)
+{
+       struct aq_pci_func_s *self = NULL;
+       int err = 0;
+       unsigned int port = 0U;
+
+       if (!aq_hw_ops) {
+               err = -EFAULT;
+               goto err_exit;
+       }
+       self = kzalloc(sizeof(*self), GFP_KERNEL);
+       if (!self) {
+               err = -ENOMEM;
+               goto err_exit;
+       }
+
+       pci_set_drvdata(pdev, self);
+       self->pdev = pdev;
+
+       err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps);
+       if (err < 0)
+               goto err_exit;
+
+       self->ports = self->aq_hw_caps.ports;
+
+       for (port = 0; port < self->ports; ++port) {
+               struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
+                                                           &pdev->dev, self,
+                                                           port, aq_hw_ops);
+
+               if (!aq_nic) {
+                       err = -ENOMEM;
+                       goto err_exit;
+               }
+               self->port[port] = aq_nic;
+       }
+
+err_exit:
+       if (err < 0) {
+               if (self)
+                       aq_pci_func_free(self);
+               self = NULL;
+       }
+
+       (void)err;
+       return self;
+}
+
+int aq_pci_func_init(struct aq_pci_func_s *self)
+{
+       int err = 0;
+       unsigned int bar = 0U;
+       unsigned int port = 0U;
+
+       err = pci_enable_device(self->pdev);
+       if (err < 0)
+               goto err_exit;
+
+       self->is_pci_enabled = true;
+
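+       /* Prefer 64-bit DMA; fall back to a 32-bit mask when the platform
+        * cannot satisfy it.
+        */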
+       err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(64));
+       if (!err) {
+               err = pci_set_consistent_dma_mask(self->pdev, DMA_BIT_MASK(64));
+               self->is_pci_using_dac = 1;
+       }
+       if (err) {
+               err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(32));
+               if (!err)
+                       err = pci_set_consistent_dma_mask(self->pdev,
+                                                         DMA_BIT_MASK(32));
+               self->is_pci_using_dac = 0;
+       }
+       if (err != 0) {
+               err = -ENOSR;
+               goto err_exit;
+       }
+
+       err = pci_request_regions(self->pdev, AQ_CFG_DRV_NAME "_mmio");
+       if (err < 0)
+               goto err_exit;
+
+       self->is_regions = true;
+
+       pci_set_master(self->pdev);
+
+       for (bar = 0; bar < 4; ++bar) {
+               if (IORESOURCE_MEM & pci_resource_flags(self->pdev, bar)) {
+                       resource_size_t reg_sz;
+
+                       self->mmio_pa = pci_resource_start(self->pdev, bar);
+                       if (self->mmio_pa == 0U) {
+                               err = -EIO;
+                               goto err_exit;
+                       }
+
+                       reg_sz = pci_resource_len(self->pdev, bar);
+                       if (reg_sz <= 24 /*ATL_REGS_SIZE*/) {
+                               err = -EIO;
+                               goto err_exit;
+                       }
+
+                       self->mmio = ioremap_nocache(self->mmio_pa, reg_sz);
+                       if (!self->mmio) {
+                               err = -EIO;
+                               goto err_exit;
+                       }
+                       break;
+               }
+       }
+
+       /* Enable interrupts: try a full MSI-X allocation first and fall
+        * back to a single MSI or legacy vector if that fails.
+        */
+#if !AQ_CFG_FORCE_LEGACY_INT
+       err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs,
+                             self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX);
+
+       if (err < 0) {
+               err = pci_alloc_irq_vectors(self->pdev, 1, 1,
+                               PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+               if (err < 0)
+                       goto err_exit;
+       }
+#endif
+
+       /* net device init */
+       for (port = 0; port < self->ports; ++port) {
+               if (!self->port[port])
+                       continue;
+
+               err = aq_nic_cfg_start(self->port[port]);
+               if (err < 0)
+                       goto err_exit;
+
+               err = aq_nic_ndev_init(self->port[port]);
+               if (err < 0)
+                       goto err_exit;
+
+               err = aq_nic_ndev_register(self->port[port]);
+               if (err < 0)
+                       goto err_exit;
+       }
+
+err_exit:
+       if (err < 0)
+               aq_pci_func_deinit(self);
+       return err;
+}
+
+int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
+                         char *name, void *aq_vec, cpumask_t *affinity_mask)
+{
+       struct pci_dev *pdev = self->pdev;
+       int err = 0;
+
+       if (pdev->msix_enabled || pdev->msi_enabled)
+               err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
+                                 name, aq_vec);
+       else
+               err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
+                                 IRQF_SHARED, name, aq_vec);
+
+       if (err >= 0) {
+               self->msix_entry_mask |= (1 << i);
+               self->aq_vec[i] = aq_vec;
+
+               if (pdev->msix_enabled)
+                       irq_set_affinity_hint(pci_irq_vector(pdev, i),
+                                             affinity_mask);
+       }
+
+       return err;
+}
+
+void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
+{
+       struct pci_dev *pdev = self->pdev;
+       unsigned int i = 0U;
+
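+       /* Walk the MSI-X entry mask and release only the vectors that were
+        * actually requested in aq_pci_func_alloc_irq().
+        */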
+       for (i = 32U; i--;) {
+               if (!((1U << i) & self->msix_entry_mask))
+                       continue;
+
+               free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
+               if (pdev->msix_enabled)
+                       irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
+               self->msix_entry_mask &= ~(1U << i);
+       }
+}
+
+void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self)
+{
+       return self->mmio;
+}
+
+unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self)
+{
+       if (self->pdev->msix_enabled)
+               return AQ_HW_IRQ_MSIX;
+       if (self->pdev->msi_enabled)
+               return AQ_HW_IRQ_MSI;
+       return AQ_HW_IRQ_LEGACY;
+}
+
+void aq_pci_func_deinit(struct aq_pci_func_s *self)
+{
+       if (!self)
+               goto err_exit;
+
+       aq_pci_func_free_irqs(self);
+       pci_free_irq_vectors(self->pdev);
+
+       if (self->is_regions)
+               pci_release_regions(self->pdev);
+
+       if (self->is_pci_enabled)
+               pci_disable_device(self->pdev);
+
+err_exit:;
+}
+
+void aq_pci_func_free(struct aq_pci_func_s *self)
+{
+       unsigned int port = 0U;
+
+       if (!self)
+               goto err_exit;
+
+       for (port = 0; port < self->ports; ++port) {
+               if (!self->port[port])
+                       continue;
+
+               aq_nic_ndev_free(self->port[port]);
+       }
+
+       kfree(self);
+
+err_exit:;
+}
+
+int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
+                               pm_message_t *pm_msg)
+{
+       int err = 0;
+       unsigned int port = 0U;
+
+       if (!self) {
+               err = -EFAULT;
+               goto err_exit;
+       }
+       for (port = 0; port < self->ports; ++port) {
+               if (!self->port[port])
+                       continue;
+
+               (void)aq_nic_change_pm_state(self->port[port], pm_msg);
+       }
+
+err_exit:
+       return err;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
new file mode 100644 (file)
index 0000000..ecb0337
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_pci_func.h: Declaration of PCI functions. */
+
+#ifndef AQ_PCI_FUNC_H
+#define AQ_PCI_FUNC_H
+
+#include "aq_common.h"
+
+struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *hw_ops,
+                                       struct pci_dev *pdev,
+                                       const struct net_device_ops *ndev_ops,
+                                       const struct ethtool_ops *eth_ops);
+int aq_pci_func_init(struct aq_pci_func_s *self);
+int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
+                         char *name, void *aq_vec,
+                         cpumask_t *affinity_mask);
+void aq_pci_func_free_irqs(struct aq_pci_func_s *self);
+int aq_pci_func_start(struct aq_pci_func_s *self);
+void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self);
+unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self);
+void aq_pci_func_deinit(struct aq_pci_func_s *self);
+void aq_pci_func_free(struct aq_pci_func_s *self);
+int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
+                               pm_message_t *pm_msg);
+
+#endif /* AQ_PCI_FUNC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
new file mode 100644 (file)
index 0000000..dea9e9b
--- /dev/null
@@ -0,0 +1,375 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
+
+#include "aq_ring.h"
+#include "aq_nic.h"
+#include "aq_hw.h"
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
+                                      struct aq_nic_s *aq_nic)
+{
+       int err = 0;
+
+       self->buff_ring =
+               kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);
+
+       if (!self->buff_ring) {
+               err = -ENOMEM;
+               goto err_exit;
+       }
+       self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
+                                               self->size * self->dx_size,
+                                               &self->dx_ring_pa, GFP_KERNEL);
+       if (!self->dx_ring) {
+               err = -ENOMEM;
+               goto err_exit;
+       }
+
+err_exit:
+       if (err < 0) {
+               aq_ring_free(self);
+               self = NULL;
+       }
+       return self;
+}
+
+struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
+                                  struct aq_nic_s *aq_nic,
+                                  unsigned int idx,
+                                  struct aq_nic_cfg_s *aq_nic_cfg)
+{
+       int err = 0;
+
+       self->aq_nic = aq_nic;
+       self->idx = idx;
+       self->size = aq_nic_cfg->txds;
+       self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;
+
+       self = aq_ring_alloc(self, aq_nic);
+       if (!self) {
+               err = -ENOMEM;
+               goto err_exit;
+       }
+
+err_exit:
+       if (err < 0) {
+               aq_ring_free(self);
+               self = NULL;
+       }
+       return self;
+}
+
+struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
+                                  struct aq_nic_s *aq_nic,
+                                  unsigned int idx,
+                                  struct aq_nic_cfg_s *aq_nic_cfg)
+{
+       int err = 0;
+
+       self->aq_nic = aq_nic;
+       self->idx = idx;
+       self->size = aq_nic_cfg->rxds;
+       self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
+
+       self = aq_ring_alloc(self, aq_nic);
+       if (!self) {
+               err = -ENOMEM;
+               goto err_exit;
+       }
+
+err_exit:
+       if (err < 0) {
+               aq_ring_free(self);
+               self = NULL;
+       }
+       return self;
+}
+
+int aq_ring_init(struct aq_ring_s *self)
+{
+       self->hw_head = 0;
+       self->sw_head = 0;
+       self->sw_tail = 0;
+       return 0;
+}
+
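+/* Copy @buffers descriptors from @buffer into the software ring at
+ * sw_tail, splitting the memcpy() in two when the write would wrap
+ * past the end of the ring.
+ */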
+void aq_ring_tx_append_buffs(struct aq_ring_s *self,
+                            struct aq_ring_buff_s *buffer,
+                            unsigned int buffers)
+{
+       if (likely(self->sw_tail + buffers < self->size)) {
+               memcpy(&self->buff_ring[self->sw_tail], buffer,
+                      sizeof(buffer[0]) * buffers);
+       } else {
+               unsigned int first_part = self->size - self->sw_tail;
+               unsigned int second_part = buffers - first_part;
+
+               memcpy(&self->buff_ring[self->sw_tail], buffer,
+                      sizeof(buffer[0]) * first_part);
+
+               memcpy(&self->buff_ring[0], &buffer[first_part],
+                      sizeof(buffer[0]) * second_part);
+       }
+}
+
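+/* Release completed Tx buffers: a packet head (is_sop) was mapped with
+ * dma_map_single(), fragments with dma_map_page(); the skb itself is
+ * freed once its last (is_eop) buffer completes.
+ */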
+int aq_ring_tx_clean(struct aq_ring_s *self)
+{
+       struct device *dev = aq_nic_get_dev(self->aq_nic);
+
+       for (; self->sw_head != self->hw_head;
+               self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
+               struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+
+               if (likely(buff->is_mapped)) {
+                       if (unlikely(buff->is_sop))
+                               dma_unmap_single(dev, buff->pa, buff->len,
+                                                DMA_TO_DEVICE);
+                       else
+                               dma_unmap_page(dev, buff->pa, buff->len,
+                                              DMA_TO_DEVICE);
+               }
+
+               if (unlikely(buff->is_eop))
+                       dev_kfree_skb_any(buff->skb);
+       }
+
+       if (aq_ring_avail_dx(self) > AQ_CFG_SKB_FRAGS_MAX)
+               aq_nic_ndev_queue_start(self->aq_nic, self->idx);
+
+       return 0;
+}
+
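+/* Return true when ring index @i lies strictly between head @h and
+ * tail @t, taking wrap-around of the circular ring into account.
+ */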
+static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i,
+                                              unsigned int t)
+{
+       return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
+}
+
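+/* Tail room that build_skb() needs for the struct skb_shared_info */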
+#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget)
+{
+       struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
+       int err = 0;
+       bool is_rsc_completed = true;
+
+       for (; (self->sw_head != self->hw_head) && budget;
+               self->sw_head = aq_ring_next_dx(self, self->sw_head),
+               --budget, ++(*work_done)) {
+               struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+               struct sk_buff *skb = NULL;
+               unsigned int next_ = 0U;
+               unsigned int i = 0U;
+               struct aq_ring_buff_s *buff_ = NULL;
+
+               if (buff->is_error) {
+                       __free_pages(buff->page, 0);
+                       continue;
+               }
+
+               if (buff->is_cleaned)
+                       continue;
+
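+               /* A multi-descriptor (RSC) packet can only be passed up once
+                * hardware has completed every descriptor in its chain; walk
+                * the chain and bail out if the EOP buffer is still pending.
+                */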
+               if (!buff->is_eop) {
+                       for (next_ = buff->next,
+                            buff_ = &self->buff_ring[next_]; true;
+                            next_ = buff_->next,
+                            buff_ = &self->buff_ring[next_]) {
+                               is_rsc_completed =
+                                       aq_ring_dx_in_range(self->sw_head,
+                                                           next_,
+                                                           self->hw_head);
+
+                               if (unlikely(!is_rsc_completed)) {
+                                       is_rsc_completed = false;
+                                       break;
+                               }
+
+                               if (buff_->is_eop)
+                                       break;
+                       }
+
+                       if (!is_rsc_completed) {
+                               err = 0;
+                               goto err_exit;
+                       }
+               }
+
+               /* for single fragment packets use build_skb() */
+               if (buff->is_eop) {
+                       skb = build_skb(page_address(buff->page),
+                                       buff->len + AQ_SKB_ALIGN);
+                       if (unlikely(!skb)) {
+                               err = -ENOMEM;
+                               goto err_exit;
+                       }
+
+                       skb->dev = ndev;
+                       skb_put(skb, buff->len);
+               } else {
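+                       /* Multi-fragment packet: copy only the Ethernet
+                        * header into the skb head, then attach each page
+                        * in the chain as a paged fragment.
+                        */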
+                       skb = netdev_alloc_skb(ndev, ETH_HLEN);
+                       if (unlikely(!skb)) {
+                               err = -ENOMEM;
+                               goto err_exit;
+                       }
+                       skb_put(skb, ETH_HLEN);
+                       memcpy(skb->data, page_address(buff->page), ETH_HLEN);
+
+                       skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN,
+                                       buff->len - ETH_HLEN,
+                                       SKB_TRUESIZE(buff->len - ETH_HLEN));
+
+                       for (i = 1U, next_ = buff->next,
+                            buff_ = &self->buff_ring[next_]; true;
+                            next_ = buff_->next,
+                            buff_ = &self->buff_ring[next_], ++i) {
+                               skb_add_rx_frag(skb, i, buff_->page, 0,
+                                               buff_->len,
+                                               SKB_TRUESIZE(buff_->len));
+                               buff_->is_cleaned = 1;
+
+                               if (buff_->is_eop)
+                                       break;
+                       }
+               }
+
+               skb->protocol = eth_type_trans(skb, ndev);
+               if (unlikely(buff->is_cso_err)) {
+                       ++self->stats.rx.errors;
+                       __skb_mark_checksum_bad(skb);
+               } else {
+                       if (buff->is_ip_cso) {
+                               __skb_incr_checksum_unnecessary(skb);
+                               if (buff->is_udp_cso || buff->is_tcp_cso)
+                                       __skb_incr_checksum_unnecessary(skb);
+                       } else {
+                               skb->ip_summed = CHECKSUM_NONE;
+                       }
+               }
+
+               skb_set_hash(skb, buff->rss_hash,
+                            buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
+                            PKT_HASH_TYPE_NONE);
+
+               skb_record_rx_queue(skb, self->idx);
+
+               netif_receive_skb(skb);
+
+               ++self->stats.rx.packets;
+               self->stats.rx.bytes += skb->len;
+       }
+
+err_exit:
+       return err;
+}
+
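+/* Refill every free Rx descriptor with a freshly allocated, DMA-mapped
+ * page, advancing sw_tail one slot at a time.
+ */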
+int aq_ring_rx_fill(struct aq_ring_s *self)
+{
+       struct aq_ring_buff_s *buff = NULL;
+       int err = 0;
+       int i = 0;
+
+       for (i = aq_ring_avail_dx(self); i--;
+               self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
+               buff = &self->buff_ring[self->sw_tail];
+
+               buff->flags = 0U;
+               buff->len = AQ_CFG_RX_FRAME_MAX;
+
+               buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD |
+                                        __GFP_COMP, 0);
+               if (!buff->page) {
+                       err = -ENOMEM;
+                       goto err_exit;
+               }
+
+               buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic),
+                                       buff->page, 0,
+                                       AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
+
+               if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) {
+                       err = -ENOMEM;
+                       goto err_exit;
+               }
+
+               buff = NULL;
+       }
+
+err_exit:
+       if (err < 0) {
+               if (buff && buff->page)
+                       __free_pages(buff->page, 0);
+       }
+
+       return err;
+}
+
+void aq_ring_rx_deinit(struct aq_ring_s *self)
+{
+       if (!self)
+               goto err_exit;
+
+       for (; self->sw_head != self->sw_tail;
+               self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
+               struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+
+               dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa,
+                              AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
+
+               __free_pages(buff->page, 0);
+       }
+
+err_exit:;
+}
+
+void aq_ring_tx_deinit(struct aq_ring_s *self)
+{
+       if (!self)
+               goto err_exit;
+
+       for (; self->sw_head != self->sw_tail;
+               self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
+               struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+               struct device *ndev = aq_nic_get_dev(self->aq_nic);
+
+               if (likely(buff->is_mapped)) {
+                       if (unlikely(buff->is_sop)) {
+                               dma_unmap_single(ndev, buff->pa, buff->len,
+                                                DMA_TO_DEVICE);
+                       } else {
+                               dma_unmap_page(ndev, buff->pa, buff->len,
+                                              DMA_TO_DEVICE);
+                       }
+               }
+
+               if (unlikely(buff->is_eop))
+                       dev_kfree_skb_any(buff->skb);
+       }
+err_exit:;
+}
+
+void aq_ring_free(struct aq_ring_s *self)
+{
+       if (!self)
+               goto err_exit;
+
+       kfree(self->buff_ring);
+
+       if (self->dx_ring)
+               dma_free_coherent(aq_nic_get_dev(self->aq_nic),
+                                 self->size * self->dx_size, self->dx_ring,
+                                 self->dx_ring_pa);
+
+err_exit:;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
new file mode 100644 (file)
index 0000000..0ac3f9e
--- /dev/null
@@ -0,0 +1,157 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_ring.h: Declaration of functions for Rx/Tx rings. */
+
+#ifndef AQ_RING_H
+#define AQ_RING_H
+
+#include "aq_common.h"
+
+struct page;
+
+/*           TxC       SOP        DX         EOP
+ *         +----------+----------+----------+-----------
+ *   8bytes|len l3,l4 | pa       | pa       | pa
+ *         +----------+----------+----------+-----------
+ * 4/8bytes|len pkt   |len pkt   |          | skb
+ *         +----------+----------+----------+-----------
+ * 4/8bytes|is_txc    |len,flags |len       |len,is_eop
+ *         +----------+----------+----------+-----------
+ *
+ *  This aq_ring_buff_s doesn't have an endianness dependency.
+ *  It is __packed for cache-line optimization.
+ */
+struct __packed aq_ring_buff_s {
+       union {
+               /* RX */
+               struct {
+                       u32 rss_hash;
+                       u16 next;
+                       u8 is_hash_l4;
+                       u8 rsvd1;
+                       struct page *page;
+               };
+               /* EOP */
+               struct {
+                       dma_addr_t pa_eop;
+                       struct sk_buff *skb;
+               };
+               /* DX */
+               struct {
+                       dma_addr_t pa;
+               };
+               /* SOP */
+               struct {
+                       dma_addr_t pa_sop;
+                       u32 len_pkt_sop;
+               };
+               /* TxC */
+               struct {
+                       u32 mss;
+                       u8 len_l2;
+                       u8 len_l3;
+                       u8 len_l4;
+                       u8 rsvd2;
+                       u32 len_pkt;
+               };
+       };
+       union {
+               struct {
+                       u32 len:16;
+                       u32 is_ip_cso:1;
+                       u32 is_udp_cso:1;
+                       u32 is_tcp_cso:1;
+                       u32 is_cso_err:1;
+                       u32 is_sop:1;
+                       u32 is_eop:1;
+                       u32 is_txc:1;
+                       u32 is_mapped:1;
+                       u32 is_cleaned:1;
+                       u32 is_error:1;
+                       u32 rsvd3:6;
+               };
+               u32 flags;
+       };
+};
+
+struct aq_ring_stats_rx_s {
+       u64 errors;
+       u64 packets;
+       u64 bytes;
+       u64 lro_packets;
+       u64 jumbo_packets;
+};
+
+struct aq_ring_stats_tx_s {
+       u64 errors;
+       u64 packets;
+       u64 bytes;
+};
+
+union aq_ring_stats_s {
+       struct aq_ring_stats_rx_s rx;
+       struct aq_ring_stats_tx_s tx;
+};
+
+struct aq_ring_s {
+       struct aq_obj_s header;
+       struct aq_ring_buff_s *buff_ring;
+       u8 *dx_ring;            /* descriptor ring, DMA shared memory */
+       struct aq_nic_s *aq_nic;
+       unsigned int idx;       /* for HW-layer register operations */
+       unsigned int hw_head;
+       unsigned int sw_head;
+       unsigned int sw_tail;
+       unsigned int size;      /* number of descriptors */
+       unsigned int dx_size;   /* TX or RX descriptor size, */
+                               /* stored here for faster math */
+       union aq_ring_stats_s stats;
+       dma_addr_t dx_ring_pa;
+};
+
+struct aq_ring_param_s {
+       unsigned int vec_idx;
+       unsigned int cpu;
+       cpumask_t affinity_mask;
+};
+
+static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self,
+                                          unsigned int dx)
+{
+       return (++dx >= self->size) ? 0U : dx;
+}
+
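+/* Number of free descriptors: one slot is always kept empty so that
+ * sw_tail == sw_head unambiguously means an empty ring.
+ */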
+static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self)
+{
+       return (((self->sw_tail >= self->sw_head)) ?
+               (self->size - 1) - self->sw_tail + self->sw_head :
+               self->sw_head - self->sw_tail - 1);
+}
+
+struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
+                                  struct aq_nic_s *aq_nic,
+                                  unsigned int idx,
+                                  struct aq_nic_cfg_s *aq_nic_cfg);
+struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
+                                  struct aq_nic_s *aq_nic,
+                                  unsigned int idx,
+                                  struct aq_nic_cfg_s *aq_nic_cfg);
+int aq_ring_init(struct aq_ring_s *self);
+void aq_ring_tx_deinit(struct aq_ring_s *self);
+void aq_ring_rx_deinit(struct aq_ring_s *self);
+void aq_ring_free(struct aq_ring_s *self);
+void aq_ring_tx_append_buffs(struct aq_ring_s *ring,
+                            struct aq_ring_buff_s *buffer,
+                            unsigned int buffers);
+int aq_ring_tx_clean(struct aq_ring_s *self);
+int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget);
+int aq_ring_rx_fill(struct aq_ring_s *self);
+
+#endif /* AQ_RING_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_rss.h b/drivers/net/ethernet/aquantia/atlantic/aq_rss.h
new file mode 100644 (file)
index 0000000..1db6eb2
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_rss.h: Receive Side Scaling definitions. */
+
+#ifndef AQ_RSS_H
+#define AQ_RSS_H
+
+#include "aq_common.h"
+#include "aq_cfg.h"
+
+struct aq_rss_parameters {
+       u16 base_cpu_number;
+       u16 indirection_table_size;
+       u16 hash_secret_key_size;
+       u32 hash_secret_key[AQ_CFG_RSS_HASHKEY_SIZE / sizeof(u32)];
+       u8 indirection_table[AQ_CFG_RSS_INDIRECTION_TABLE_MAX];
+};
+
+#endif /* AQ_RSS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
new file mode 100644 (file)
index 0000000..4446bd9
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_utils.h: Useful macros and structures used in all layers of the driver. */
+
+#ifndef AQ_UTILS_H
+#define AQ_UTILS_H
+
+#include "aq_common.h"
+
+#define AQ_DIMOF(_ARY_)  ARRAY_SIZE(_ARY_)
+
+struct aq_obj_s {
+       spinlock_t lock; /* spinlock for nic/rings processing */
+       atomic_t flags;
+       atomic_t busy_count;
+};
+
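+/* Lockless flag updates: retry the atomic compare-and-swap until the
+ * read-modify-write completes without a concurrent writer interfering.
+ */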
+static inline void aq_utils_obj_set(atomic_t *flags, u32 mask)
+{
+       unsigned long flags_old, flags_new;
+
+       do {
+               flags_old = atomic_read(flags);
+               flags_new = flags_old | (mask);
+       } while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old);
+}
+
+static inline void aq_utils_obj_clear(atomic_t *flags, u32 mask)
+{
+       unsigned long flags_old, flags_new;
+
+       do {
+               flags_old = atomic_read(flags);
+               flags_new = flags_old & ~(mask);
+       } while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old);
+}
+
+static inline bool aq_utils_obj_test(atomic_t *flags, u32 mask)
+{
+       return atomic_read(flags) & mask;
+}
+
+#endif /* AQ_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
new file mode 100644 (file)
index 0000000..cb30a63
--- /dev/null
@@ -0,0 +1,392 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_vec.c: Definition of the common structure for a vector of Rx and
+ * Tx rings, and of the functions that operate on it. Helper module for
+ * aq_nic.
+ */
+
+#include "aq_vec.h"
+#include "aq_nic.h"
+#include "aq_ring.h"
+#include "aq_hw.h"
+
+#include <linux/netdevice.h>
+
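+/* One interrupt vector: a NAPI context plus a Tx/Rx ring pair for each
+ * traffic class (ring[tc][AQ_VEC_TX_ID] / ring[tc][AQ_VEC_RX_ID]).
+ */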
+struct aq_vec_s {
+       struct aq_obj_s header;
+       struct aq_hw_ops *aq_hw_ops;
+       struct aq_hw_s *aq_hw;
+       struct aq_nic_s *aq_nic;
+       unsigned int tx_rings;
+       unsigned int rx_rings;
+       struct aq_ring_param_s aq_ring_param;
+       struct napi_struct napi;
+       struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
+};
+
+#define AQ_VEC_TX_ID 0
+#define AQ_VEC_RX_ID 1
+
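+/* NAPI poll: for every ring pair owned by this vector, reap completed
+ * Tx descriptors, then receive, clean and refill the Rx ring; re-enable
+ * the vector's interrupt when the budget was not exhausted.
+ */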
+static int aq_vec_poll(struct napi_struct *napi, int budget)
+__releases(&self->lock)
+__acquires(&self->lock)
+{
+       struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
+       struct aq_ring_s *ring = NULL;
+       int work_done = 0;
+       int err = 0;
+       unsigned int i = 0U;
+       unsigned int sw_tail_old = 0U;
+       bool was_tx_cleaned = false;
+
+       if (!self) {
+               err = -EINVAL;
+       } else if (spin_trylock(&self->header.lock)) {
+               for (i = 0U, ring = self->ring[0];
+                       self->tx_rings > i; ++i, ring = self->ring[i]) {
+                       if (self->aq_hw_ops->hw_ring_tx_head_update) {
+                               err = self->aq_hw_ops->hw_ring_tx_head_update(
+                                                       self->aq_hw,
+                                                       &ring[AQ_VEC_TX_ID]);
+                               if (err < 0)
+                                       goto err_exit;
+                       }
+
+                       if (ring[AQ_VEC_TX_ID].sw_head !=
+                               ring[AQ_VEC_TX_ID].hw_head) {
+                               err = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
+                               if (err < 0)
+                                       goto err_exit;
+                               was_tx_cleaned = true;
+                       }
+
+                       err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
+                                           &ring[AQ_VEC_RX_ID]);
+                       if (err < 0)
+                               goto err_exit;
+
+                       if (ring[AQ_VEC_RX_ID].sw_head !=
+                               ring[AQ_VEC_RX_ID].hw_head) {
+                               err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
+                                                      &work_done,
+                                                      budget - work_done);
+                               if (err < 0)
+                                       goto err_exit;
+
+                               sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;
+
+                               err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
+                               if (err < 0)
+                                       goto err_exit;
+
+                               err = self->aq_hw_ops->hw_ring_rx_fill(
+                                       self->aq_hw,
+                                       &ring[AQ_VEC_RX_ID], sw_tail_old);
+                               if (err < 0)
+                                       goto err_exit;
+                       }
+               }
+
+               if (was_tx_cleaned)
+                       work_done = budget;
+
+               if (work_done < budget) {
+                       napi_complete_done(napi, work_done);
+                       self->aq_hw_ops->hw_irq_enable(self->aq_hw,
+                                       1U << self->aq_ring_param.vec_idx);
+               }
+
+err_exit:
+               spin_unlock(&self->header.lock);
+       }
+
+       return work_done;
+}
+
+struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
+                             struct aq_nic_cfg_s *aq_nic_cfg)
+{
+       struct aq_vec_s *self = NULL;
+       struct aq_ring_s *ring = NULL;
+       unsigned int i = 0U;
+       int err = 0;
+
+       self = kzalloc(sizeof(*self), GFP_KERNEL);
+       if (!self) {
+               err = -ENOMEM;
+               goto err_exit;
+       }
+
+       self->aq_nic = aq_nic;
+       self->aq_ring_param.vec_idx = idx;
+       self->aq_ring_param.cpu =
+               idx + aq_nic_cfg->aq_rss.base_cpu_number;
+
+       cpumask_set_cpu(self->aq_ring_param.cpu,
+                       &self->aq_ring_param.affinity_mask);
+
+       self->tx_rings = 0;
+       self->rx_rings = 0;
+
+       netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
+                      aq_vec_poll, AQ_CFG_NAPI_WEIGHT);
+
+       for (i = 0; i < aq_nic_cfg->tcs; ++i) {
+               unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->nic,
+                                               self->tx_rings,
+                                               self->aq_ring_param.vec_idx);
+
+               ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
+                                       idx_ring, aq_nic_cfg);
+               if (!ring) {
+                       err = -ENOMEM;
+                       goto err_exit;
+               }
+
+               ++self->tx_rings;
+
+               aq_nic_set_tx_ring(aq_nic, idx_ring, ring);
+
+               ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
+                                       idx_ring, aq_nic_cfg);
+               if (!ring) {
+                       err = -ENOMEM;
+                       goto err_exit;
+               }
+
+               ++self->rx_rings;
+       }
+
+err_exit:
+       if (err < 0) {
+               aq_vec_free(self);
+               self = NULL;
+       }
+       return self;
+}
+
+int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+               struct aq_hw_s *aq_hw)
+{
+       struct aq_ring_s *ring = NULL;
+       unsigned int i = 0U;
+       int err = 0;
+
+       self->aq_hw_ops = aq_hw_ops;
+       self->aq_hw = aq_hw;
+
+       spin_lock_init(&self->header.lock);
+
+       for (i = 0U, ring = self->ring[0];
+               self->tx_rings > i; ++i, ring = self->ring[i]) {
+               err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
+               if (err < 0)
+                       goto err_exit;
+
+               err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
+                                                      &ring[AQ_VEC_TX_ID],
+                                                      &self->aq_ring_param);
+               if (err < 0)
+                       goto err_exit;
+
+               err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
+               if (err < 0)
+                       goto err_exit;
+
+               err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
+                                                      &ring[AQ_VEC_RX_ID],
+                                                      &self->aq_ring_param);
+               if (err < 0)
+                       goto err_exit;
+
+               err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
+               if (err < 0)
+                       goto err_exit;
+
+               err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
+                                                      &ring[AQ_VEC_RX_ID], 0U);
+               if (err < 0)
+                       goto err_exit;
+       }
+
+err_exit:
+       return err;
+}
+
+int aq_vec_start(struct aq_vec_s *self)
+{
+       struct aq_ring_s *ring = NULL;
+       unsigned int i = 0U;
+       int err = 0;
+
+       for (i = 0U, ring = self->ring[0];
+               self->tx_rings > i; ++i, ring = self->ring[i]) {
+               err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
+                                                       &ring[AQ_VEC_TX_ID]);
+               if (err < 0)
+                       goto err_exit;
+
+               err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
+                                                       &ring[AQ_VEC_RX_ID]);
+               if (err < 0)
+                       goto err_exit;
+       }
+
+       napi_enable(&self->napi);
+
+err_exit:
+       return err;
+}
+
+void aq_vec_stop(struct aq_vec_s *self)
+{
+       struct aq_ring_s *ring = NULL;
+       unsigned int i = 0U;
+
+       for (i = 0U, ring = self->ring[0];
+               self->tx_rings > i; ++i, ring = self->ring[i]) {
+               self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
+                                                &ring[AQ_VEC_TX_ID]);
+
+               self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
+                                                &ring[AQ_VEC_RX_ID]);
+       }
+
+       napi_disable(&self->napi);
+}
+
+void aq_vec_deinit(struct aq_vec_s *self)
+{
+       struct aq_ring_s *ring = NULL;
+       unsigned int i = 0U;
+
+       if (!self)
+               goto err_exit;
+
+       for (i = 0U, ring = self->ring[0];
+               self->tx_rings > i; ++i, ring = self->ring[i]) {
+               aq_ring_tx_deinit(&ring[AQ_VEC_TX_ID]);
+               aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
+       }
+err_exit:;
+}
+
+void aq_vec_free(struct aq_vec_s *self)
+{
+       struct aq_ring_s *ring = NULL;
+       unsigned int i = 0U;
+
+       if (!self)
+               goto err_exit;
+
+       for (i = 0U, ring = self->ring[0];
+               self->tx_rings > i; ++i, ring = self->ring[i]) {
+               aq_ring_free(&ring[AQ_VEC_TX_ID]);
+               aq_ring_free(&ring[AQ_VEC_RX_ID]);
+       }
+
+       netif_napi_del(&self->napi);
+
+       kfree(self);
+
+err_exit:;
+}
+
+irqreturn_t aq_vec_isr(int irq, void *private)
+{
+       struct aq_vec_s *self = private;
+       int err = 0;
+
+       if (!self) {
+               err = -EINVAL;
+               goto err_exit;
+       }
+       napi_schedule(&self->napi);
+
+err_exit:
+       return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
+}
+
+irqreturn_t aq_vec_isr_legacy(int irq, void *private)
+{
+       struct aq_vec_s *self = private;
+       u64 irq_mask = 0U;
+       irqreturn_t err = 0;
+
+       if (!self) {
+               err = -EINVAL;
+               goto err_exit;
+       }
+       err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
+       if (err < 0)
+               goto err_exit;
+
+       if (irq_mask) {
+               self->aq_hw_ops->hw_irq_disable(self->aq_hw,
+                             1U << self->aq_ring_param.vec_idx);
+               napi_schedule(&self->napi);
+       } else {
+               self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
+               err = IRQ_NONE;
+       }
+
+err_exit:
+       return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
+}
+
+cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
+{
+       return &self->aq_ring_param.affinity_mask;
+}
+
+void aq_vec_add_stats(struct aq_vec_s *self,
+                     struct aq_ring_stats_rx_s *stats_rx,
+                     struct aq_ring_stats_tx_s *stats_tx)
+{
+       struct aq_ring_s *ring = NULL;
+       unsigned int r = 0U;
+
+       for (r = 0U, ring = self->ring[0];
+               self->tx_rings > r; ++r, ring = self->ring[r]) {
+               struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
+               struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;
+
+               stats_rx->packets += rx->packets;
+               stats_rx->bytes += rx->bytes;
+               stats_rx->errors += rx->errors;
+               stats_rx->jumbo_packets += rx->jumbo_packets;
+               stats_rx->lro_packets += rx->lro_packets;
+
+               stats_tx->packets += tx->packets;
+               stats_tx->bytes += tx->bytes;
+               stats_tx->errors += tx->errors;
+       }
+}
+
+int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
+{
+       unsigned int count = 0U;
+       struct aq_ring_stats_rx_s stats_rx;
+       struct aq_ring_stats_tx_s stats_tx;
+
+       memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
+       memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
+       aq_vec_add_stats(self, &stats_rx, &stats_tx);
+
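+       /* data[] is accumulated rather than overwritten; the caller is
+        * expected to pass a zeroed (or running-total) buffer.
+        */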
+       data[count] += stats_rx.packets;
+       data[++count] += stats_tx.packets;
+       data[++count] += stats_rx.jumbo_packets;
+       data[++count] += stats_rx.lro_packets;
+       data[++count] += stats_rx.errors;
+
+       if (p_count)
+               *p_count = ++count;
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
new file mode 100644 (file)
index 0000000..6c68b18
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_vec.h: Definition of the common structures for a vector of Rx and
+ * Tx rings. Declaration of the functions that operate on them.
+ */
+
+#ifndef AQ_VEC_H
+#define AQ_VEC_H
+
+#include "aq_common.h"
+#include <linux/irqreturn.h>
+
+struct aq_hw_s;
+struct aq_hw_ops;
+struct aq_ring_stats_rx_s;
+struct aq_ring_stats_tx_s;
+
+irqreturn_t aq_vec_isr(int irq, void *private);
+irqreturn_t aq_vec_isr_legacy(int irq, void *private);
+struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
+                             struct aq_nic_cfg_s *aq_nic_cfg);
+int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+               struct aq_hw_s *aq_hw);
+void aq_vec_deinit(struct aq_vec_s *self);
+void aq_vec_free(struct aq_vec_s *self);
+int aq_vec_start(struct aq_vec_s *self);
+void aq_vec_stop(struct aq_vec_s *self);
+cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self);
+int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data,
+                       unsigned int *p_count);
+void aq_vec_add_stats(struct aq_vec_s *self,
+                     struct aq_ring_stats_rx_s *stats_rx,
+                     struct aq_ring_stats_tx_s *stats_tx);
+
+#endif /* AQ_VEC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
new file mode 100644 (file)
index 0000000..1f38805
--- /dev/null
@@ -0,0 +1,905 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */
+
+#include "../aq_hw.h"
+#include "../aq_hw_utils.h"
+#include "../aq_ring.h"
+#include "hw_atl_a0.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+#include "hw_atl_a0_internal.h"
+
+static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
+                                struct aq_hw_caps_s *aq_hw_caps)
+{
+       memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
+       return 0;
+}
+
+static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func,
+                                       unsigned int port,
+                                       struct aq_hw_ops *ops)
+{
+       struct hw_atl_s *self = NULL;
+
+       self = kzalloc(sizeof(*self), GFP_KERNEL);
+       if (!self)
+               goto err_exit;
+
+       self->base.aq_pci_func = aq_pci_func;
+
+       self->base.not_ff_addr = 0x10U;
+
+err_exit:
+       return (struct aq_hw_s *)self;
+}
+
+static void hw_atl_a0_destroy(struct aq_hw_s *self)
+{
+       kfree(self);
+}
+
+static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
+{
+       int err = 0;
+
+       glb_glb_reg_res_dis_set(self, 1U);
+       pci_pci_reg_res_dis_set(self, 0U);
+       rx_rx_reg_res_dis_set(self, 0U);
+       tx_tx_reg_res_dis_set(self, 0U);
+
+       HW_ATL_FLUSH();
+       glb_soft_res_set(self, 1);
+
+       /* poll every 1 ms, up to 10 times */
+       AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
+       if (err < 0)
+               goto err_exit;
+
+       itr_irq_reg_res_dis_set(self, 0U);
+       itr_res_irq_set(self, 1U);
+
+       /* poll every 1 ms, up to 10 times */
+       AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
+       if (err < 0)
+               goto err_exit;
+
+       hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);
+
+       err = aq_hw_err_from_flags(self);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
+{
+       u32 tc = 0U;
+       u32 buff_size = 0U;
+       unsigned int i_priority = 0U;
+       bool is_rx_flow_control = false;
+
+       /* TPS Descriptor rate init */
+       tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+       tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+
+       /* TPS VM init */
+       tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+
+       /* TPS TC credits init */
+       tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+       tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+
+       tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+       tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+       tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+       tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+
+       /* Tx buf size */
+       buff_size = HW_ATL_A0_TXBUF_MAX;
+
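+       /* High/low watermarks at 66% / 50% of the packet buffer size,
+        * expressed in 32-byte units.
+        */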
+       tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+       tpb_tx_buff_hi_threshold_per_tc_set(self,
+                                           (buff_size * (1024 / 32U) * 66U) /
+                                           100U, tc);
+       tpb_tx_buff_lo_threshold_per_tc_set(self,
+                                           (buff_size * (1024 / 32U) * 50U) /
+                                           100U, tc);
+
+       /* QoS Rx buf size per TC */
+       tc = 0;
+       is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
+       buff_size = HW_ATL_A0_RXBUF_MAX;
+
+       rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+       rpb_rx_buff_hi_threshold_per_tc_set(self,
+                                           (buff_size *
+                                           (1024U / 32U) * 66U) /
+                                           100U, tc);
+       rpb_rx_buff_lo_threshold_per_tc_set(self,
+                                           (buff_size *
+                                           (1024U / 32U) * 50U) /
+                                           100U, tc);
+       rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+
+       /* QoS 802.1p priority -> TC mapping */
+       for (i_priority = 8U; i_priority--;)
+               rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
+                                    struct aq_rss_parameters *rss_params)
+{
+       struct aq_nic_cfg_s *cfg = NULL;
+       int err = 0;
+       unsigned int i = 0U;
+       unsigned int addr = 0U;
+
+       cfg = self->aq_nic_cfg;
+
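+       /* Write the RSS secret key one 32-bit word at a time through the
+        * indirect key-register interface, polling for each write to
+        * complete.
+        */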
+       for (i = 10, addr = 0U; i--; ++addr) {
+               u32 key_data = cfg->is_rss ?
+                       __swab32(rss_params->hash_secret_key[i]) : 0U;
+               rpf_rss_key_wr_data_set(self, key_data);
+               rpf_rss_key_addr_set(self, addr);
+               rpf_rss_key_wr_en_set(self, 1U);
+               AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+               if (err < 0)
+                       goto err_exit;
+       }
+
+       err = aq_hw_err_from_flags(self);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
+                               struct aq_rss_parameters *rss_params)
+{
+       u8 *indirection_table = rss_params->indirection_table;
+       u32 i = 0U;
+       u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
+       int err = 0;
+       u16 bitary[(HW_ATL_A0_RSS_REDIRECTION_MAX *
+                                       HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];
+
+       memset(bitary, 0, sizeof(bitary));
+
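+       /* Pack the 3-bit queue index of each redirection entry into the
+        * array of 16-bit words the hardware expects.
+        */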
+       for (i = HW_ATL_A0_RSS_REDIRECTION_MAX; i--; ) {
+               (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
+                       ((indirection_table[i] % num_rss_queues) <<
+                       ((i * 3U) & 0xFU));
+       }
+
+       for (i = AQ_DIMOF(bitary); i--;) {
+               rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+               rpf_rss_redir_tbl_addr_set(self, i);
+               rpf_rss_redir_wr_en_set(self, 1U);
+               AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+               if (err < 0)
+                       goto err_exit;
+       }
+
+       err = aq_hw_err_from_flags(self);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
+                                   struct aq_nic_cfg_s *aq_nic_cfg)
+{
+       int err = 0;
+
+       /* TX checksum offloads */
+       tpo_ipv4header_crc_offload_en_set(self, 1);
+       tpo_tcp_udp_crc_offload_en_set(self, 1);
+       if (err < 0)
+               goto err_exit;
+
+       /* RX checksum offloads */
+       rpo_ipv4header_crc_offload_en_set(self, 1);
+       rpo_tcp_udp_crc_offload_en_set(self, 1);
+       if (err < 0)
+               goto err_exit;
+
+       /* LSO offloads */
+       tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+       if (err < 0)
+               goto err_exit;
+
+       err = aq_hw_err_from_flags(self);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
+{
+       thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+       thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+       thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+
+       /* Tx interrupts */
+       tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+
+       /* misc */
+       aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
+                       0x00010000U : 0x00000000U);
+       tdm_tx_dca_en_set(self, 0U);
+       tdm_tx_dca_mode_set(self, 0U);
+
+       tpb_tx_path_scp_ins_en_set(self, 1U);
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
+{
+       struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+       int i;
+
+       /* Rx TC/RSS number config */
+       rpb_rpf_rx_traf_class_mode_set(self, 1U);
+
+       /* Rx flow control */
+       rpb_rx_flow_ctl_mode_set(self, 1U);
+
+       /* RSS Ring selection */
+       reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+                                       0xB3333333U : 0x00000000U);
+
+       /* Multicast filters */
+       for (i = HW_ATL_A0_MAC_MAX; i--;) {
+               rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+               rpfl2unicast_flr_act_set(self, 1U, i);
+       }
+
+       reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+       reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+
+       /* Vlan filters */
+       rpf_vlan_outer_etht_set(self, 0x88A8U);
+       rpf_vlan_inner_etht_set(self, 0x8100U);
+       rpf_vlan_prom_mode_en_set(self, 1);
+
+       /* Rx Interrupts */
+       rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+
+       /* misc */
+       rpfl2broadcast_flr_act_set(self, 1U);
+       rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+
+       rdm_rx_dca_en_set(self, 0U);
+       rdm_rx_dca_mode_set(self, 0U);
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+{
+       int err = 0;
+       unsigned int h = 0U;
+       unsigned int l = 0U;
+
+       if (!mac_addr) {
+               err = -EINVAL;
+               goto err_exit;
+       }
+       h = (mac_addr[0] << 8) | (mac_addr[1]);
+       l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+               (mac_addr[4] << 8) | mac_addr[5];
+
+       rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
+       rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
+       rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
+       rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
+
+       err = aq_hw_err_from_flags(self);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_a0_hw_init(struct aq_hw_s *self,
+                            struct aq_nic_cfg_s *aq_nic_cfg,
+                            u8 *mac_addr)
+{
+       static u32 aq_hw_atl_igcr_table_[4][2] = {
+               { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
+               { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
+               { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
+               { 0x20000022U, 0x20000026U }  /* AQ_IRQ_MSIX */
+       };
+
+       int err = 0;
+
+       self->aq_nic_cfg = aq_nic_cfg;
+
+       hw_atl_utils_hw_chip_features_init(self,
+                                          &PHAL_ATLANTIC_A0->chip_features);
+
+       hw_atl_a0_hw_init_tx_path(self);
+       hw_atl_a0_hw_init_rx_path(self);
+
+       hw_atl_a0_hw_mac_addr_set(self, mac_addr);
+
+       hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
+
+       reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
+       reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
+
+       hw_atl_a0_hw_qos_set(self);
+       hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
+       hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+
+       err = aq_hw_err_from_flags(self);
+       if (err < 0)
+               goto err_exit;
+
+       /* Interrupts */
+       reg_irq_glb_ctl_set(self,
+                           aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+                                                [(aq_nic_cfg->vecs > 1U) ?
+                                                1 : 0]);
+
+       itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+
+       /* Interrupts */
+       reg_gen_irq_map_set(self,
+                           ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
+                           ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
+                           ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
+                           ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);
+
+       hw_atl_a0_hw_offload_set(self, aq_nic_cfg);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
+                                     struct aq_ring_s *ring)
+{
+       tdm_tx_desc_en_set(self, 1, ring->idx);
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
+                                     struct aq_ring_s *ring)
+{
+       rdm_rx_desc_en_set(self, 1, ring->idx);
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_start(struct aq_hw_s *self)
+{
+       tpb_tx_buff_en_set(self, 1);
+       rpb_rx_buff_en_set(self, 1);
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
+                                           struct aq_ring_s *ring)
+{
+       reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+       return 0;
+}
+
+static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
+                                    struct aq_ring_s *ring,
+                                    unsigned int frags)
+{
+       struct aq_ring_buff_s *buff = NULL;
+       struct hw_atl_txd_s *txd = NULL;
+       unsigned int buff_pa_len = 0U;
+       unsigned int pkt_len = 0U;
+       unsigned int frag_count = 0U;
+       bool is_gso = false;
+
+       buff = &ring->buff_ring[ring->sw_tail];
+       pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;
+
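+       /* Walk the fragments: a TxC context descriptor programs the LSO
+        * parameters, while plain TXD descriptors carry buffer addresses,
+        * checksum-offload flags and the EOP/writeback marks.
+        */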
+       for (frag_count = 0; frag_count < frags; frag_count++) {
+               txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
+                                               HW_ATL_A0_TXD_SIZE];
+               txd->ctl = 0;
+               txd->ctl2 = 0;
+               txd->buf_addr = 0;
+
+               buff = &ring->buff_ring[ring->sw_tail];
+
+               if (buff->is_txc) {
+                       txd->ctl |= (buff->len_l3 << 31) |
+                               (buff->len_l2 << 24) |
+                               HW_ATL_A0_TXD_CTL_CMD_TCP |
+                               HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC;
+                       txd->ctl2 |= (buff->mss << 16) |
+                               (buff->len_l4 << 8) |
+                               (buff->len_l3 >> 1);
+
+                       pkt_len -= (buff->len_l4 +
+                                   buff->len_l3 +
+                                   buff->len_l2);
+                       is_gso = true;
+               } else {
+                       buff_pa_len = buff->len;
+
+                       txd->buf_addr = buff->pa;
+                       txd->ctl |= (HW_ATL_A0_TXD_CTL_BLEN &
+                                               ((u32)buff_pa_len << 4));
+                       txd->ctl |= HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD;
+                       /* PAY_LEN */
+                       txd->ctl2 |= HW_ATL_A0_TXD_CTL2_LEN & (pkt_len << 14);
+
+                       if (is_gso) {
+                               txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_LSO;
+                               txd->ctl2 |= HW_ATL_A0_TXD_CTL2_CTX_EN;
+                       }
+
+                       /* Tx checksum offloads */
+                       if (buff->is_ip_cso)
+                               txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPCSO;
+
+                       if (buff->is_udp_cso || buff->is_tcp_cso)
+                               txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_TUCSO;
+
+                       if (unlikely(buff->is_eop)) {
+                               txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
+                               txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
+                       }
+               }
+
+               ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
+       }
+
+       hw_atl_a0_hw_tx_ring_tail_update(self, ring);
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
+                                    struct aq_ring_s *aq_ring,
+                                    struct aq_ring_param_s *aq_ring_param)
+{
+       u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
+       u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+
+       rdm_rx_desc_en_set(self, false, aq_ring->idx);
+
+       rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+
+       reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+                                          aq_ring->idx);
+
+       reg_rx_dma_desc_base_addressmswset(self,
+                                          dma_desc_addr_msw, aq_ring->idx);
+
+       rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+
+       rdm_rx_desc_data_buff_size_set(self,
+                                      AQ_CFG_RX_FRAME_MAX / 1024U,
+                                      aq_ring->idx);
+
+       rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+       rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+       rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+
+       /* Rx ring set mode */
+
+       /* Mapping interrupt vector */
+       itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+       itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+
+       rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+       rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+       rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+       rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
+                                    struct aq_ring_s *aq_ring,
+                                    struct aq_ring_param_s *aq_ring_param)
+{
+       u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
+       u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+
+       reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+                                          aq_ring->idx);
+
+       reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+                                          aq_ring->idx);
+
+       tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+
+       hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);
+
+       /* Set Tx threshold */
+       tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+
+       /* Mapping interrupt vector */
+       itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+       itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+
+       tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+       tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+
+       return aq_hw_err_from_flags(self);
+}
+
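+/* Publish newly filled software buffers to hardware: write each buffer's
+ * DMA address into its Rx descriptor, then advance the ring tail pointer.
+ */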
+static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
+                                    struct aq_ring_s *ring,
+                                    unsigned int sw_tail_old)
+{
+       for (; sw_tail_old != ring->sw_tail;
+               sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
+               struct hw_atl_rxd_s *rxd =
+                       (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
+                                                       HW_ATL_A0_RXD_SIZE];
+
+               struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];
+
+               rxd->buf_addr = buff->pa;
+               rxd->hdr_addr = 0U;
+       }
+
+       reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
+                                           struct aq_ring_s *ring)
+{
+       int err = 0;
+       unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+
+       if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+               err = -ENXIO;
+               goto err_exit;
+       }
+       ring->hw_head = hw_head_;
+       err = aq_hw_err_from_flags(self);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
+                                       struct aq_ring_s *ring)
+{
+       struct device *ndev = aq_nic_get_dev(ring->aq_nic);
+
+       for (; ring->hw_head != ring->sw_tail;
+               ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
+               struct aq_ring_buff_s *buff = NULL;
+               struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
+                       &ring->dx_ring[ring->hw_head * HW_ATL_A0_RXD_SIZE];
+
+               unsigned int is_err = 1U;
+               unsigned int is_rx_check_sum_enabled = 0U;
+               unsigned int pkt_type = 0U;
+
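+               /* Descriptor not written back yet: if the DMA engine reports
+                * an error (bit 4 of the ring status), reset the Rx
+                * descriptor queue before deciding whether to keep waiting.
+                */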
+               if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
+                       if ((1U << 4) &
+                           reg_rx_dma_desc_status_get(self, ring->idx)) {
+                               rdm_rx_desc_en_set(self, false, ring->idx);
+                               rdm_rx_desc_res_set(self, true, ring->idx);
+                               rdm_rx_desc_res_set(self, false, ring->idx);
+                               rdm_rx_desc_en_set(self, true, ring->idx);
+                       }
+
+                       if (ring->hw_head ||
+                           (rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) {
+                               break;
+                       } else if (!(rxd_wb->status & 0x1U)) {
+                               struct hw_atl_rxd_wb_s *rxd_wb1 =
+                                       (struct hw_atl_rxd_wb_s *)
+                                       (&ring->dx_ring[(1U) *
+                                               HW_ATL_A0_RXD_SIZE]);
+
+                               if ((rxd_wb1->status & 0x1U)) {
+                                       rxd_wb->pkt_len = 1514U;
+                                       rxd_wb->status = 3U;
+                               } else {
+                                       break;
+                               }
+                       }
+               }
+
+               buff = &ring->buff_ring[ring->hw_head];
+
+               if (0x3U != (rxd_wb->status & 0x3U))
+                       rxd_wb->status |= 4;
+
+               is_err = (0x0000001CU & rxd_wb->status);
+               is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
+               pkt_type = 0xFFU & (rxd_wb->type >> 4);
+
+               if (is_rx_check_sum_enabled) {
+                       if (0x0U == (pkt_type & 0x3U))
+                               buff->is_ip_cso = (is_err & 0x08U) ? 0 : 1;
+
+                       if (0x4U == (pkt_type & 0x1CU))
+                               buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
+                       else if (0x0U == (pkt_type & 0x1CU))
+                               buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;
+               }
+
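+               /* Bits 0x18 were folded into the cso flags above and 0x04
+                * is cleared as well, so on A0 only the DMA error bit in
+                * 'type' (0x1000) can mark the buffer as errored below.
+                */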
+               is_err &= ~0x18U;
+               is_err &= ~0x04U;
+
+               dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
+
+               if (is_err || rxd_wb->type & 0x1000U) {
+                       /* status error or DMA error */
+                       buff->is_error = 1U;
+               } else {
+                       if (self->aq_nic_cfg->is_rss) {
+                               /* last 4 bytes */
+                               u16 rss_type = rxd_wb->type & 0xFU;
+
+                               if (rss_type && rss_type < 0x8U) {
+                                       buff->is_hash_l4 = (rss_type == 0x4 ||
+                                                       rss_type == 0x5);
+                                       buff->rss_hash = rxd_wb->rss_hash;
+                               }
+                       }
+
+                       if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
+                               buff->len = (rxd_wb->pkt_len &
+                                               (AQ_CFG_RX_FRAME_MAX - 1U));
+                               buff->len = buff->len ?
+                                       buff->len : AQ_CFG_RX_FRAME_MAX;
+                               buff->next = 0U;
+                               buff->is_eop = 1U;
+                       } else {
+                               /* jumbo */
+                               buff->next = aq_ring_next_dx(ring,
+                                                            ring->hw_head);
+                               ++ring->stats.rx.jumbo_packets;
+                       }
+               }
+       }
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
+{
+       itr_irq_msk_setlsw_set(self, LODWORD(mask) |
+                              (1U << HW_ATL_A0_ERR_INT));
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
+{
+       itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+       itr_irq_status_clearlsw_set(self, LODWORD(mask));
+
+       if ((1U << 16) & reg_gen_irq_status_get(self))
+               atomic_inc(&PHAL_ATLANTIC_A0->dpc);
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
+{
+       *mask = itr_irq_statuslsw_get(self);
+       return aq_hw_err_from_flags(self);
+}
+
+#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
+
+static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
+                                         unsigned int packet_filter)
+{
+       unsigned int i = 0U;
+
+       rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
+       rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+       rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+
+       self->aq_nic_cfg->is_mc_list_enabled =
+                       IS_FILTER_ENABLED(IFF_MULTICAST);
+
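+       /* Unicast filter slots above HW_ATL_A0_MAC double as the multicast
+        * list; keep only the slots that actually hold entries enabled.
+        */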
+       for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
+               rpfl2_uc_flr_en_set(self,
+                                   (self->aq_nic_cfg->is_mc_list_enabled &&
+                                   (i <= self->aq_nic_cfg->mc_list_count)) ?
+                                   1U : 0U, i);
+
+       return aq_hw_err_from_flags(self);
+}
+
+#undef IS_FILTER_ENABLED
+
+static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
+                                          u8 ar_mac
+                                          [AQ_CFG_MULTICAST_ADDRESS_MAX]
+                                          [ETH_ALEN],
+                                          u32 count)
+{
+       int err = 0;
+
+       if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
+               err = -EBADRQC;
+               goto err_exit;
+       }
+       for (self->aq_nic_cfg->mc_list_count = 0U;
+                       self->aq_nic_cfg->mc_list_count < count;
+                       ++self->aq_nic_cfg->mc_list_count) {
+               u32 i = self->aq_nic_cfg->mc_list_count;
+               u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
+               u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
+                                       (ar_mac[i][4] << 8) | ar_mac[i][5];
+
+               rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
+
+               rpfl2unicast_dest_addresslsw_set(self,
+                                                l, HW_ATL_A0_MAC_MIN + i);
+
+               rpfl2unicast_dest_addressmsw_set(self,
+                                                h, HW_ATL_A0_MAC_MIN + i);
+
+               rpfl2_uc_flr_en_set(self,
+                                   (self->aq_nic_cfg->is_mc_list_enabled),
+                                   HW_ATL_A0_MAC_MIN + i);
+       }
+
+       err = aq_hw_err_from_flags(self);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
+                                                bool itr_enabled)
+{
+       unsigned int i = 0U;
+
+       if (itr_enabled && self->aq_nic_cfg->itr) {
+               if (self->aq_nic_cfg->itr != 0xFFFFU) {
+                       u32 itr_ = (self->aq_nic_cfg->itr >> 1);
+
+                       itr_ = min(AQ_CFG_IRQ_MASK, itr_);
+
+                       PHAL_ATLANTIC_A0->itr_rx = 0x80000000U |
+                                       (itr_ << 0x10);
+               } else {
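+                       /* itr == 0xFFFF selects "auto" moderation: the low
+                        * word of register 0x2A00 is compared against the
+                        * link speed in Mbit/s (semantics assumed) and a
+                        * per-speed timer is taken from hw_timers_tbl_.
+                        */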
+                       u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);
+
+                       if (n < self->aq_link_status.mbps) {
+                               PHAL_ATLANTIC_A0->itr_rx = 0U;
+                       } else {
+                               static unsigned int hw_timers_tbl_[] = {
+                                       0x01CU, /* 10Gbit */
+                                       0x039U, /* 5Gbit */
+                                       0x039U, /* 5Gbit 5GS */
+                                       0x073U, /* 2.5Gbit */
+                                       0x120U, /* 1Gbit */
+                                       0x1FFU, /* 100Mbit */
+                               };
+
+                               unsigned int speed_index =
+                                       hw_atl_utils_mbps_2_speed_index(
+                                               self->aq_link_status.mbps);
+
+                               PHAL_ATLANTIC_A0->itr_rx =
+                                       0x80000000U |
+                                       (hw_timers_tbl_[speed_index] << 0x10U);
+                       }
+
+                       aq_hw_write_reg(self, 0x00002A00U, 0x40000000U);
+                       aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
+               }
+       } else {
+               PHAL_ATLANTIC_A0->itr_rx = 0U;
+       }
+
+       for (i = HW_ATL_A0_RINGS_MAX; i--;)
+               reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i);
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
+{
+       hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK);
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
+                                    struct aq_ring_s *ring)
+{
+       tdm_tx_desc_en_set(self, 0U, ring->idx);
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
+                                    struct aq_ring_s *ring)
+{
+       rdm_rx_desc_en_set(self, 0U, ring->idx);
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed)
+{
+       return hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
+}
+
+static struct aq_hw_ops hw_atl_ops_ = {
+       .create               = hw_atl_a0_create,
+       .destroy              = hw_atl_a0_destroy,
+       .get_hw_caps          = hw_atl_a0_get_hw_caps,
+
+       .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
+       .hw_set_mac_address   = hw_atl_a0_hw_mac_addr_set,
+       .hw_get_link_status   = hw_atl_utils_mpi_get_link_status,
+       .hw_set_link_speed    = hw_atl_a0_hw_set_speed,
+       .hw_init              = hw_atl_a0_hw_init,
+       .hw_deinit            = hw_atl_utils_hw_deinit,
+       .hw_set_power         = hw_atl_utils_hw_set_power,
+       .hw_reset             = hw_atl_a0_hw_reset,
+       .hw_start             = hw_atl_a0_hw_start,
+       .hw_ring_tx_start     = hw_atl_a0_hw_ring_tx_start,
+       .hw_ring_tx_stop      = hw_atl_a0_hw_ring_tx_stop,
+       .hw_ring_rx_start     = hw_atl_a0_hw_ring_rx_start,
+       .hw_ring_rx_stop      = hw_atl_a0_hw_ring_rx_stop,
+       .hw_stop              = hw_atl_a0_hw_stop,
+
+       .hw_ring_tx_xmit         = hw_atl_a0_hw_ring_tx_xmit,
+       .hw_ring_tx_head_update  = hw_atl_a0_hw_ring_tx_head_update,
+
+       .hw_ring_rx_receive      = hw_atl_a0_hw_ring_rx_receive,
+       .hw_ring_rx_fill         = hw_atl_a0_hw_ring_rx_fill,
+
+       .hw_irq_enable           = hw_atl_a0_hw_irq_enable,
+       .hw_irq_disable          = hw_atl_a0_hw_irq_disable,
+       .hw_irq_read             = hw_atl_a0_hw_irq_read,
+
+       .hw_ring_rx_init             = hw_atl_a0_hw_ring_rx_init,
+       .hw_ring_tx_init             = hw_atl_a0_hw_ring_tx_init,
+       .hw_packet_filter_set        = hw_atl_a0_hw_packet_filter_set,
+       .hw_multicast_list_set       = hw_atl_a0_hw_multicast_list_set,
+       .hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set,
+       .hw_rss_set                  = hw_atl_a0_hw_rss_set,
+       .hw_rss_hash_set             = hw_atl_a0_hw_rss_hash_set,
+       .hw_get_regs                 = hw_atl_utils_hw_get_regs,
+       .hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
+       .hw_get_fw_version           = hw_atl_utils_get_fw_version,
+};
+
+struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev)
+{
+       bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
+       bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
+                       (pdev->device == HW_ATL_DEVICE_ID_D100) ||
+                       (pdev->device == HW_ATL_DEVICE_ID_D107) ||
+                       (pdev->device == HW_ATL_DEVICE_ID_D108) ||
+                       (pdev->device == HW_ATL_DEVICE_ID_D109));
+
+       bool is_rev_ok = (pdev->revision == 1U);
+
+       return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
new file mode 100644 (file)
index 0000000..6e1d527
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_a0.h: Declaration of abstract interface for Atlantic hardware
+ * specific functions.
+ */
+
+#ifndef HW_ATL_A0_H
+#define HW_ATL_A0_H
+
+#include "../aq_common.h"
+
+#ifndef PCI_VENDOR_ID_AQUANTIA
+
+#define PCI_VENDOR_ID_AQUANTIA  0x1D6A
+#define HW_ATL_DEVICE_ID_0001   0x0001
+#define HW_ATL_DEVICE_ID_D100   0xD100
+#define HW_ATL_DEVICE_ID_D107   0xD107
+#define HW_ATL_DEVICE_ID_D108   0xD108
+#define HW_ATL_DEVICE_ID_D109   0xD109
+
+#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter"
+
+#endif
+
+struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev);
+
+#endif /* HW_ATL_A0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
new file mode 100644 (file)
index 0000000..1093ea1
--- /dev/null
@@ -0,0 +1,155 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_a0_internal.h: Definition of Atlantic A0 chip specific
+ * constants.
+ */
+
+#ifndef HW_ATL_A0_INTERNAL_H
+#define HW_ATL_A0_INTERNAL_H
+
+#include "../aq_common.h"
+
+#define HW_ATL_A0_MTU_JUMBO 9014U
+
+#define HW_ATL_A0_TX_RINGS 4U
+#define HW_ATL_A0_RX_RINGS 4U
+
+#define HW_ATL_A0_RINGS_MAX 32U
+#define HW_ATL_A0_TXD_SIZE  16U
+#define HW_ATL_A0_RXD_SIZE  16U
+
+#define HW_ATL_A0_MAC      0U
+#define HW_ATL_A0_MAC_MIN  1U
+#define HW_ATL_A0_MAC_MAX  33U
+
+/* interrupts */
+#define HW_ATL_A0_ERR_INT 8U
+#define HW_ATL_A0_INT_MASK  0xFFFFFFFFU
+
+#define HW_ATL_A0_TXD_CTL2_LEN        0xFFFFC000U
+#define HW_ATL_A0_TXD_CTL2_CTX_EN     0x00002000U
+#define HW_ATL_A0_TXD_CTL2_CTX_IDX    0x00001000U
+
+#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD   0x00000001U
+#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC   0x00000002U
+#define HW_ATL_A0_TXD_CTL_BLEN        0x000FFFF0U
+#define HW_ATL_A0_TXD_CTL_DD          0x00100000U
+#define HW_ATL_A0_TXD_CTL_EOP         0x00200000U
+
+#define HW_ATL_A0_TXD_CTL_CMD_X       0x3FC00000U
+
+#define HW_ATL_A0_TXD_CTL_CMD_VLAN    BIT(22)
+#define HW_ATL_A0_TXD_CTL_CMD_FCS     BIT(23)
+#define HW_ATL_A0_TXD_CTL_CMD_IPCSO   BIT(24)
+#define HW_ATL_A0_TXD_CTL_CMD_TUCSO   BIT(25)
+#define HW_ATL_A0_TXD_CTL_CMD_LSO     BIT(26)
+#define HW_ATL_A0_TXD_CTL_CMD_WB      BIT(27)
+#define HW_ATL_A0_TXD_CTL_CMD_VXLAN   BIT(28)
+
+#define HW_ATL_A0_TXD_CTL_CMD_IPV6    BIT(21)
+#define HW_ATL_A0_TXD_CTL_CMD_TCP     BIT(22)
+
+#define HW_ATL_A0_MPI_CONTROL_ADR     0x0368U
+#define HW_ATL_A0_MPI_STATE_ADR       0x036CU
+
+#define HW_ATL_A0_MPI_SPEED_MSK       0xFFFFU
+#define HW_ATL_A0_MPI_SPEED_SHIFT     16U
+
+#define HW_ATL_A0_RATE_10G            BIT(0)
+#define HW_ATL_A0_RATE_5G             BIT(1)
+#define HW_ATL_A0_RATE_2G5            BIT(3)
+#define HW_ATL_A0_RATE_1G             BIT(4)
+#define HW_ATL_A0_RATE_100M           BIT(5)
+
+#define HW_ATL_A0_TXBUF_MAX 160U
+#define HW_ATL_A0_RXBUF_MAX 320U
+
+#define HW_ATL_A0_RSS_REDIRECTION_MAX 64U
+#define HW_ATL_A0_RSS_REDIRECTION_BITS 3U
+
+#define HW_ATL_A0_TC_MAX 1U
+#define HW_ATL_A0_RSS_MAX 8U
+
+#define HW_ATL_A0_FW_SEMA_RAM           0x2U
+
+#define HW_ATL_A0_RXD_DD    0x1U
+#define HW_ATL_A0_RXD_NCEA0 0x1U
+
+#define HW_ATL_A0_RXD_WB_STAT2_EOP     0x0002U
+
+#define HW_ATL_A0_UCP_0X370_REG  0x370U
+
+#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
+
+/* Hardware tx descriptor */
+struct __packed hw_atl_txd_s {
+       u64 buf_addr;
+       u32 ctl;
+       u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
+};
+
+/* Hardware tx context descriptor */
+struct __packed hw_atl_txc_s {
+       u32 rsvd;
+       u32 len;
+       u32 ctl;
+       u32 len2;
+};
+
+/* Hardware rx descriptor */
+struct __packed hw_atl_rxd_s {
+       u64 buf_addr;
+       u64 hdr_addr;
+};
+
+/* Hardware rx descriptor writeback */
+struct __packed hw_atl_rxd_wb_s {
+       u32 type;
+       u32 rss_hash;
+       u16 status;
+       u16 pkt_len;
+       u16 next_desc_ptr;
+       u16 vlan;
+};
+
+/* HW layer capabilities */
+static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
+       .ports = 1U,
+       .is_64_dma = true,
+       .msix_irqs = 4U,
+       .irq_mask = ~0U,
+       .vecs = HW_ATL_A0_RSS_MAX,
+       .tcs = HW_ATL_A0_TC_MAX,
+       .rxd_alignment = 1U,
+       .rxd_size = HW_ATL_A0_RXD_SIZE,
+       .rxds = 248U,
+       .txd_alignment = 1U,
+       .txd_size = HW_ATL_A0_TXD_SIZE,
+       .txds = 8U * 1024U,
+       .txhwb_alignment = 4096U,
+       .tx_rings = HW_ATL_A0_TX_RINGS,
+       .rx_rings = HW_ATL_A0_RX_RINGS,
+       .hw_features = NETIF_F_HW_CSUM |
+                       NETIF_F_RXHASH |
+                       NETIF_F_SG |
+                       NETIF_F_TSO,
+       .hw_priv_flags = IFF_UNICAST_FLT,
+       .link_speed_msk = (HW_ATL_A0_RATE_10G |
+                       HW_ATL_A0_RATE_5G |
+                       HW_ATL_A0_RATE_2G5 |
+                       HW_ATL_A0_RATE_1G |
+                       HW_ATL_A0_RATE_100M),
+       .flow_control = true,
+       .mtu = HW_ATL_A0_MTU_JUMBO,
+       .mac_regs_count = 88,
+       .fw_ver_expected = HW_ATL_A0_FW_VER_EXPECTED,
+};
+
+#endif /* HW_ATL_A0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
new file mode 100644 (file)
index 0000000..e7e694f
--- /dev/null
@@ -0,0 +1,958 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_b0.c: Definition of Atlantic hardware specific functions. */
+
+#include "../aq_hw.h"
+#include "../aq_hw_utils.h"
+#include "../aq_ring.h"
+#include "hw_atl_b0.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+#include "hw_atl_b0_internal.h"
+
+static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
+                                struct aq_hw_caps_s *aq_hw_caps)
+{
+       memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps));
+       return 0;
+}
+
+static struct aq_hw_s *hw_atl_b0_create(struct aq_pci_func_s *aq_pci_func,
+                                       unsigned int port,
+                                       struct aq_hw_ops *ops)
+{
+       struct hw_atl_s *self = NULL;
+
+       self = kzalloc(sizeof(*self), GFP_KERNEL);
+       if (!self)
+               goto err_exit;
+
+       self->base.aq_pci_func = aq_pci_func;
+
+       self->base.not_ff_addr = 0x10U;
+
+err_exit:
+       return (struct aq_hw_s *)self;
+}
+
+static void hw_atl_b0_destroy(struct aq_hw_s *self)
+{
+       kfree(self);
+}
+
+static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
+{
+       int err = 0;
+
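+       /* Reset sequence: release the register-reset bypasses, pulse the
+        * global soft reset and the ITR block reset (each polled 10 times
+        * at 1 ms; AQ_HW_WAIT_FOR is assumed to update err on timeout),
+        * then notify firmware via MPI_RESET.
+        */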
+       glb_glb_reg_res_dis_set(self, 1U);
+       pci_pci_reg_res_dis_set(self, 0U);
+       rx_rx_reg_res_dis_set(self, 0U);
+       tx_tx_reg_res_dis_set(self, 0U);
+
+       HW_ATL_FLUSH();
+       glb_soft_res_set(self, 1);
+
+       /* check 10 times by 1ms */
+       AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
+       if (err < 0)
+               goto err_exit;
+
+       itr_irq_reg_res_dis_set(self, 0U);
+       itr_res_irq_set(self, 1U);
+
+       /* check 10 times by 1ms */
+       AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
+       if (err < 0)
+               goto err_exit;
+
+       hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);
+
+       err = aq_hw_err_from_flags(self);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
+{
+       u32 tc = 0U;
+       u32 buff_size = 0U;
+       unsigned int i_priority = 0U;
+       bool is_rx_flow_control = false;
+
+       /* TPS Descriptor rate init */
+       tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+       tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+
+       /* TPS VM init */
+       tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+
+       /* TPS TC credits init */
+       tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+       tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+
+       tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+       tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+       tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+       tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+
+       /* Tx buf size */
+       buff_size = HW_ATL_B0_TXBUF_MAX;
+
+       tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
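+       /* buff_size is in KB and (1024 / 32U) converts it to 32-byte
+        * hardware units; the high/low watermarks sit at 66% and 50% of
+        * the buffer (percentages inferred from the arithmetic).
+        */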
+       tpb_tx_buff_hi_threshold_per_tc_set(self,
+                                           (buff_size * (1024 / 32U) * 66U) /
+                                           100U, tc);
+       tpb_tx_buff_lo_threshold_per_tc_set(self,
+                                           (buff_size * (1024 / 32U) * 50U) /
+                                           100U, tc);
+
+       /* QoS Rx buf size per TC */
+       tc = 0;
+       is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
+       buff_size = HW_ATL_B0_RXBUF_MAX;
+
+       rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+       rpb_rx_buff_hi_threshold_per_tc_set(self,
+                                           (buff_size *
+                                           (1024U / 32U) * 66U) /
+                                           100U, tc);
+       rpb_rx_buff_lo_threshold_per_tc_set(self,
+                                           (buff_size *
+                                           (1024U / 32U) * 50U) /
+                                           100U, tc);
+       rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+
+       /* QoS 802.1p priority -> TC mapping */
+       for (i_priority = 8U; i_priority--;)
+               rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
+                                    struct aq_rss_parameters *rss_params)
+{
+       struct aq_nic_cfg_s *cfg = NULL;
+       int err = 0;
+       unsigned int i = 0U;
+       unsigned int addr = 0U;
+
+       cfg = self->aq_nic_cfg;
+
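+       /* The 320-bit RSS secret key is programmed as ten byte-swapped
+        * 32-bit words through the indirect key-write registers.
+        */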
+       for (i = 10, addr = 0U; i--; ++addr) {
+               u32 key_data = cfg->is_rss ?
+                       __swab32(rss_params->hash_secret_key[i]) : 0U;
+               rpf_rss_key_wr_data_set(self, key_data);
+               rpf_rss_key_addr_set(self, addr);
+               rpf_rss_key_wr_en_set(self, 1U);
+               AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+               if (err < 0)
+                       goto err_exit;
+       }
+
+       err = aq_hw_err_from_flags(self);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
+                               struct aq_rss_parameters *rss_params)
+{
+       u8 *indirection_table = rss_params->indirection_table;
+       u32 i = 0U;
+       u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
+       int err = 0;
+       u16 bitary[(HW_ATL_B0_RSS_REDIRECTION_MAX *
+                                       HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
+
+       memset(bitary, 0, sizeof(bitary));
+
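+       /* Pack the 64 three-bit redirection entries into bitary[]; the
+        * u32 store below may straddle a 16-bit word, which is why the
+        * table is flushed to hardware one 16-bit word at a time.
+        */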
+       for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
+               (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
+                       ((indirection_table[i] % num_rss_queues) <<
+                       ((i * 3U) & 0xFU));
+       }
+
+       for (i = AQ_DIMOF(bitary); i--;) {
+               rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+               rpf_rss_redir_tbl_addr_set(self, i);
+               rpf_rss_redir_wr_en_set(self, 1U);
+               AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+               if (err < 0)
+                       goto err_exit;
+       }
+
+       err = aq_hw_err_from_flags(self);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
+                                   struct aq_nic_cfg_s *aq_nic_cfg)
+{
+       int err = 0;
+       unsigned int i;
+
+       /* TX checksums offloads*/
+       tpo_ipv4header_crc_offload_en_set(self, 1);
+       tpo_tcp_udp_crc_offload_en_set(self, 1);
+       if (err < 0)
+               goto err_exit;
+
+       /* RX checksums offloads*/
+       rpo_ipv4header_crc_offload_en_set(self, 1);
+       rpo_tcp_udp_crc_offload_en_set(self, 1);
+       if (err < 0)
+               goto err_exit;
+
+       /* LSO offloads*/
+       tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+       if (err < 0)
+               goto err_exit;
+
+       /* LRO offloads */
+       {
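+               /* val appears to be a log2-coded per-session descriptor
+                * limit, selected from HW_ATL_B0_LRO_RXD_MAX by the
+                * threshold chain (0x0 for <= 2 up to 0x3 for > 8).
+                */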
+               unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
+                       ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
+                       ((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0));
+
+               for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
+                       rpo_lro_max_num_of_descriptors_set(self, val, i);
+
+               rpo_lro_time_base_divider_set(self, 0x61AU);
+               rpo_lro_inactive_interval_set(self, 0);
+               rpo_lro_max_coalescing_interval_set(self, 2);
+
+               rpo_lro_qsessions_lim_set(self, 1U);
+
+               rpo_lro_total_desc_lim_set(self, 2U);
+
+               rpo_lro_patch_optimization_en_set(self, 0U);
+
+               rpo_lro_min_pay_of_first_pkt_set(self, 10U);
+
+               rpo_lro_pkt_lim_set(self, 1U);
+
+               rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+       }
+       err = aq_hw_err_from_flags(self);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
+{
+       thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+       thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+       thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+
+       /* Tx interrupts */
+       tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+
+       /* misc */
+       aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
+                       0x00010000U : 0x00000000U);
+       tdm_tx_dca_en_set(self, 0U);
+       tdm_tx_dca_mode_set(self, 0U);
+
+       tpb_tx_path_scp_ins_en_set(self, 1U);
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
+{
+       struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+       int i;
+
+       /* Rx TC/RSS number config */
+       rpb_rpf_rx_traf_class_mode_set(self, 1U);
+
+       /* Rx flow control */
+       rpb_rx_flow_ctl_mode_set(self, 1U);
+
+       /* RSS Ring selection */
+       reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+                                       0xB3333333U : 0x00000000U);
+
+       /* Multicast filters */
+       for (i = HW_ATL_B0_MAC_MAX; i--;) {
+               rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+               rpfl2unicast_flr_act_set(self, 1U, i);
+       }
+
+       reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+       reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+
+       /* Vlan filters */
+       rpf_vlan_outer_etht_set(self, 0x88A8U);
+       rpf_vlan_inner_etht_set(self, 0x8100U);
+
+       if (cfg->vlan_id) {
+               rpf_vlan_flr_act_set(self, 1U, 0U);
+               rpf_vlan_id_flr_set(self, 0U, 0U);
+               rpf_vlan_flr_en_set(self, 0U, 0U);
+
+               rpf_vlan_accept_untagged_packets_set(self, 1U);
+               rpf_vlan_untagged_act_set(self, 1U);
+
+               rpf_vlan_flr_act_set(self, 1U, 1U);
+               rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
+               rpf_vlan_flr_en_set(self, 1U, 1U);
+       } else {
+               rpf_vlan_prom_mode_en_set(self, 1);
+       }
+
+       /* Rx Interrupts */
+       rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+
+       /* misc */
+       aq_hw_write_reg(self, 0x00005040U,
+                       IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);
+
+       rpfl2broadcast_flr_act_set(self, 1U);
+       rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+
+       rdm_rx_dca_en_set(self, 0U);
+       rdm_rx_dca_mode_set(self, 0U);
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+{
+       int err = 0;
+       unsigned int h = 0U;
+       unsigned int l = 0U;
+
+       if (!mac_addr) {
+               err = -EINVAL;
+               goto err_exit;
+       }
+       h = (mac_addr[0] << 8) | (mac_addr[1]);
+       l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+               (mac_addr[4] << 8) | mac_addr[5];
+
+       rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
+       rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
+       rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
+       rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
+
+       err = aq_hw_err_from_flags(self);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_b0_hw_init(struct aq_hw_s *self,
+                            struct aq_nic_cfg_s *aq_nic_cfg,
+                            u8 *mac_addr)
+{
+       static u32 aq_hw_atl_igcr_table_[4][2] = {
+               { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
+               { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
+               { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
+               { 0x20000022U, 0x20000026U }  /* AQ_IRQ_MSIX */
+       };
+
+       int err = 0;
+
+       self->aq_nic_cfg = aq_nic_cfg;
+
+       hw_atl_utils_hw_chip_features_init(self,
+                                          &PHAL_ATLANTIC_B0->chip_features);
+
+       hw_atl_b0_hw_init_tx_path(self);
+       hw_atl_b0_hw_init_rx_path(self);
+
+       hw_atl_b0_hw_mac_addr_set(self, mac_addr);
+
+       hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
+
+       hw_atl_b0_hw_qos_set(self);
+       hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
+       hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+
+       err = aq_hw_err_from_flags(self);
+       if (err < 0)
+               goto err_exit;
+
+       /* Interrupts */
+       reg_irq_glb_ctl_set(self,
+                           aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+                                                [(aq_nic_cfg->vecs > 1U) ?
+                                                1 : 0]);
+
+       itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+
+       /* Interrupts */
+       reg_gen_irq_map_set(self,
+                           ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
+                           ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
+
+       hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
+                                     struct aq_ring_s *ring)
+{
+       tdm_tx_desc_en_set(self, 1, ring->idx);
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
+                                     struct aq_ring_s *ring)
+{
+       rdm_rx_desc_en_set(self, 1, ring->idx);
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_start(struct aq_hw_s *self)
+{
+       tpb_tx_buff_en_set(self, 1);
+       rpb_rx_buff_en_set(self, 1);
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
+                                           struct aq_ring_s *ring)
+{
+       reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+       return 0;
+}
+
+static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
+                                    struct aq_ring_s *ring,
+                                    unsigned int frags)
+{
+       struct aq_ring_buff_s *buff = NULL;
+       struct hw_atl_txd_s *txd = NULL;
+       unsigned int buff_pa_len = 0U;
+       unsigned int pkt_len = 0U;
+       unsigned int frag_count = 0U;
+       bool is_gso = false;
+
+       buff = &ring->buff_ring[ring->sw_tail];
+       pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;
+
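+       /* A GSO send is described by a leading context descriptor (TXC)
+        * carrying MSS and L2/L3/L4 header lengths, followed by data
+        * descriptors (TXD); pkt_len for the TXDs excludes those headers.
+        */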
+       for (frag_count = 0; frag_count < frags; frag_count++) {
+               txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
+                                               HW_ATL_B0_TXD_SIZE];
+               txd->ctl = 0;
+               txd->ctl2 = 0;
+               txd->buf_addr = 0;
+
+               buff = &ring->buff_ring[ring->sw_tail];
+
+               if (buff->is_txc) {
+                       txd->ctl |= (buff->len_l3 << 31) |
+                               (buff->len_l2 << 24) |
+                               HW_ATL_B0_TXD_CTL_CMD_TCP |
+                               HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
+                       txd->ctl2 |= (buff->mss << 16) |
+                               (buff->len_l4 << 8) |
+                               (buff->len_l3 >> 1);
+
+                       pkt_len -= (buff->len_l4 +
+                                   buff->len_l3 +
+                                   buff->len_l2);
+                       is_gso = true;
+               } else {
+                       buff_pa_len = buff->len;
+
+                       txd->buf_addr = buff->pa;
+                       txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
+                                               ((u32)buff_pa_len << 4));
+                       txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;
+                       /* PAY_LEN */
+                       txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);
+
+                       if (is_gso) {
+                               txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;
+                               txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
+                       }
+
+                       /* Tx checksum offloads */
+                       if (buff->is_ip_cso)
+                               txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;
+
+                       if (buff->is_udp_cso || buff->is_tcp_cso)
+                               txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;
+
+                       if (unlikely(buff->is_eop)) {
+                               txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
+                               txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
+                       }
+               }
+
+               ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
+       }
+
+       hw_atl_b0_hw_tx_ring_tail_update(self, ring);
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
+                                    struct aq_ring_s *aq_ring,
+                                    struct aq_ring_param_s *aq_ring_param)
+{
+       u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
+       u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+
+       rdm_rx_desc_en_set(self, false, aq_ring->idx);
+
+       rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+
+       reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+                                          aq_ring->idx);
+
+       reg_rx_dma_desc_base_addressmswset(self,
+                                          dma_desc_addr_msw, aq_ring->idx);
+
+       rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+
+       rdm_rx_desc_data_buff_size_set(self,
+                                      AQ_CFG_RX_FRAME_MAX / 1024U,
+                                      aq_ring->idx);
+
+       rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+       rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+       rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+
+       /* Rx ring set mode */
+
+       /* Mapping interrupt vector */
+       itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+       itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+
+       rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+       rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+       rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+       rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
+                                    struct aq_ring_s *aq_ring,
+                                    struct aq_ring_param_s *aq_ring_param)
+{
+       u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
+       u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+
+       reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+                                          aq_ring->idx);
+
+       reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+                                          aq_ring->idx);
+
+       tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+
+       hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);
+
+       /* Set Tx threshold */
+       tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+
+       /* Mapping interrupt vector */
+       itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+       itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+
+       tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+       tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
+                                    struct aq_ring_s *ring,
+                                    unsigned int sw_tail_old)
+{
+       for (; sw_tail_old != ring->sw_tail;
+               sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
+               struct hw_atl_rxd_s *rxd =
+                       (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
+                                                       HW_ATL_B0_RXD_SIZE];
+
+               struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];
+
+               rxd->buf_addr = buff->pa;
+               rxd->hdr_addr = 0U;
+       }
+
+       reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
+                                           struct aq_ring_s *ring)
+{
+       int err = 0;
+       unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+
+       if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+               err = -ENXIO;
+               goto err_exit;
+       }
+       ring->hw_head = hw_head_;
+       err = aq_hw_err_from_flags(self);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
+                                       struct aq_ring_s *ring)
+{
+       struct device *ndev = aq_nic_get_dev(ring->aq_nic);
+
+       for (; ring->hw_head != ring->sw_tail;
+               ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
+               struct aq_ring_buff_s *buff = NULL;
+               struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
+                       &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
+
+               unsigned int is_err = 1U;
+               unsigned int is_rx_check_sum_enabled = 0U;
+               unsigned int pkt_type = 0U;
+
+               if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
+                       break;
+               }
+
+               buff = &ring->buff_ring[ring->hw_head];
+
+               is_err = (0x0000003CU & rxd_wb->status);
+
+               is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
+               is_err &= ~0x20U; /* exclude validity bit */
+
+               pkt_type = 0xFFU & (rxd_wb->type >> 4);
+
+               if (is_rx_check_sum_enabled) {
+                       if (0x0U == (pkt_type & 0x3U))
+                               buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;
+
+                       if (0x4U == (pkt_type & 0x1CU))
+                               buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
+                       else if (0x0U == (pkt_type & 0x1CU))
+                               buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
+               }
+
+               is_err &= ~0x18U;
+
+               dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
+
+               if (is_err || rxd_wb->type & 0x1000U) {
+                       /* status error or DMA error */
+                       buff->is_error = 1U;
+               } else {
+                       if (self->aq_nic_cfg->is_rss) {
+                               /* last 4 bytes */
+                               u16 rss_type = rxd_wb->type & 0xFU;
+
+                               if (rss_type && rss_type < 0x8U) {
+                                       buff->is_hash_l4 = (rss_type == 0x4 ||
+                                                           rss_type == 0x5);
+                                       buff->rss_hash = rxd_wb->rss_hash;
+                               }
+                       }
+
+                       if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
+                               buff->len = (rxd_wb->pkt_len &
+                                               (AQ_CFG_RX_FRAME_MAX - 1U));
+                               buff->len = buff->len ?
+                                       buff->len : AQ_CFG_RX_FRAME_MAX;
+                               buff->next = 0U;
+                               buff->is_eop = 1U;
+                       } else {
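+                               /* Non-EOP: chain to the next descriptor.
+                                * RSCCNT set means hardware LRO
+                                * aggregation; otherwise this is a plain
+                                * multi-descriptor jumbo frame.
+                                */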
+                               if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
+                                       rxd_wb->status) {
+                                       /* LRO */
+                                       buff->next = rxd_wb->next_desc_ptr;
+                                       ++ring->stats.rx.lro_packets;
+                               } else {
+                                       /* jumbo */
+                                       buff->next =
+                                               aq_ring_next_dx(ring,
+                                                               ring->hw_head);
+                                       ++ring->stats.rx.jumbo_packets;
+                               }
+                       }
+               }
+       }
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
+{
+       itr_irq_msk_setlsw_set(self, LODWORD(mask));
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
+{
+       itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+       itr_irq_status_clearlsw_set(self, LODWORD(mask));
+
+       atomic_inc(&PHAL_ATLANTIC_B0->dpc);
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
+{
+       *mask = itr_irq_statuslsw_get(self);
+       return aq_hw_err_from_flags(self);
+}
+
+#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
+
+static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
+                                         unsigned int packet_filter)
+{
+       unsigned int i = 0U;
+
+       rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
+       rpfl2multicast_flr_en_set(self,
+                                 IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+
+       rpfl2_accept_all_mc_packets_set(self,
+                                       IS_FILTER_ENABLED(IFF_ALLMULTI));
+
+       rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+
+       self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
+
+       for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
+               rpfl2_uc_flr_en_set(self,
+                                   (self->aq_nic_cfg->is_mc_list_enabled &&
+                                   (i <= self->aq_nic_cfg->mc_list_count)) ?
+                                   1U : 0U, i);
+
+       return aq_hw_err_from_flags(self);
+}
+
+#undef IS_FILTER_ENABLED
+
+static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
+                                          u8 ar_mac
+                                          [AQ_CFG_MULTICAST_ADDRESS_MAX]
+                                          [ETH_ALEN],
+                                          u32 count)
+{
+       int err = 0;
+
+       if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
+               err = -EBADRQC;
+               goto err_exit;
+       }
+       for (self->aq_nic_cfg->mc_list_count = 0U;
+                       self->aq_nic_cfg->mc_list_count < count;
+                       ++self->aq_nic_cfg->mc_list_count) {
+               u32 i = self->aq_nic_cfg->mc_list_count;
+               u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
+               u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
+                                       (ar_mac[i][4] << 8) | ar_mac[i][5];
+
+               rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
+
+               rpfl2unicast_dest_addresslsw_set(self,
+                                                l, HW_ATL_B0_MAC_MIN + i);
+
+               rpfl2unicast_dest_addressmsw_set(self,
+                                                h, HW_ATL_B0_MAC_MIN + i);
+
+               rpfl2_uc_flr_en_set(self,
+                                   (self->aq_nic_cfg->is_mc_list_enabled),
+                                   HW_ATL_B0_MAC_MIN + i);
+       }
+
+       err = aq_hw_err_from_flags(self);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
+                                                bool itr_enabled)
+{
+       unsigned int i = 0U;
+
+       if (itr_enabled && self->aq_nic_cfg->itr) {
+               tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
+               tdm_tdm_intr_moder_en_set(self, 1U);
+               rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
+               rdm_rdm_intr_moder_en_set(self, 1U);
+
+               PHAL_ATLANTIC_B0->itr_tx = 2U;
+               PHAL_ATLANTIC_B0->itr_rx = 2U;
+
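+               /* Fixed mode derives min/max timers from the configured
+                * itr (itr / 32 and itr / 2, clamped below); itr == 0xFFFF
+                * instead selects per-link-speed defaults from the tables
+                * further down.
+                */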
+               if (self->aq_nic_cfg->itr != 0xFFFFU) {
+                       unsigned int max_timer = self->aq_nic_cfg->itr / 2U;
+                       unsigned int min_timer = self->aq_nic_cfg->itr / 32U;
+
+                       max_timer = min(0x1FFU, max_timer);
+                       min_timer = min(0xFFU, min_timer);
+
+                       PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U;
+                       PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U;
+                       PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U;
+                       PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U;
+               } else {
+                       static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
+                               {0xffU, 0xffU}, /* 10Gbit */
+                               {0xffU, 0x1ffU}, /* 5Gbit */
+                               {0xffU, 0x1ffU}, /* 5Gbit 5GS */
+                               {0xffU, 0x1ffU}, /* 2.5Gbit */
+                               {0xffU, 0x1ffU}, /* 1Gbit */
+                               {0xffU, 0x1ffU}, /* 100Mbit */
+                       };
+
+                       static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
+                               {0x6U, 0x38U},  /* 10Gbit */
+                               {0xCU, 0x70U},  /* 5Gbit */
+                               {0xCU, 0x70U},  /* 5Gbit 5GS */
+                               {0x18U, 0xE0U}, /* 2.5Gbit */
+                               {0x30U, 0x80U}, /* 1Gbit */
+                               {0x4U, 0x50U},  /* 100Mbit */
+                       };
+
+                       unsigned int speed_index =
+                                       hw_atl_utils_mbps_2_speed_index(
+                                               self->aq_link_status.mbps);
+
+                       PHAL_ATLANTIC_B0->itr_tx |=
+                               hw_atl_b0_timers_table_tx_[speed_index]
+                               [0] << 0x8U; /* set min timer value */
+                       PHAL_ATLANTIC_B0->itr_tx |=
+                               hw_atl_b0_timers_table_tx_[speed_index]
+                               [1] << 0x10U; /* set max timer value */
+
+                       PHAL_ATLANTIC_B0->itr_rx |=
+                               hw_atl_b0_timers_table_rx_[speed_index]
+                               [0] << 0x8U; /* set min timer value */
+                       PHAL_ATLANTIC_B0->itr_rx |=
+                               hw_atl_b0_timers_table_rx_[speed_index]
+                               [1] << 0x10U; /* set max timer value */
+               }
+       } else {
+               tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+               tdm_tdm_intr_moder_en_set(self, 0U);
+               rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+               rdm_rdm_intr_moder_en_set(self, 0U);
+               PHAL_ATLANTIC_B0->itr_tx = 0U;
+               PHAL_ATLANTIC_B0->itr_rx = 0U;
+       }
+
+       for (i = HW_ATL_B0_RINGS_MAX; i--;) {
+               reg_tx_intr_moder_ctrl_set(self,
+                                          PHAL_ATLANTIC_B0->itr_tx, i);
+               reg_rx_intr_moder_ctrl_set(self,
+                                          PHAL_ATLANTIC_B0->itr_rx, i);
+       }
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
+{
+       hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
+                                    struct aq_ring_s *ring)
+{
+       tdm_tx_desc_en_set(self, 0U, ring->idx);
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
+                                    struct aq_ring_s *ring)
+{
+       rdm_rx_desc_en_set(self, 0U, ring->idx);
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_set_speed(struct aq_hw_s *self, u32 speed)
+{
+       return hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
+}
+
+static struct aq_hw_ops hw_atl_ops_ = {
+       .create               = hw_atl_b0_create,
+       .destroy              = hw_atl_b0_destroy,
+       .get_hw_caps          = hw_atl_b0_get_hw_caps,
+
+       .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
+       .hw_set_mac_address   = hw_atl_b0_hw_mac_addr_set,
+       .hw_get_link_status   = hw_atl_utils_mpi_get_link_status,
+       .hw_set_link_speed    = hw_atl_b0_hw_set_speed,
+       .hw_init              = hw_atl_b0_hw_init,
+       .hw_deinit            = hw_atl_utils_hw_deinit,
+       .hw_set_power         = hw_atl_utils_hw_set_power,
+       .hw_reset             = hw_atl_b0_hw_reset,
+       .hw_start             = hw_atl_b0_hw_start,
+       .hw_ring_tx_start     = hw_atl_b0_hw_ring_tx_start,
+       .hw_ring_tx_stop      = hw_atl_b0_hw_ring_tx_stop,
+       .hw_ring_rx_start     = hw_atl_b0_hw_ring_rx_start,
+       .hw_ring_rx_stop      = hw_atl_b0_hw_ring_rx_stop,
+       .hw_stop              = hw_atl_b0_hw_stop,
+
+       .hw_ring_tx_xmit         = hw_atl_b0_hw_ring_tx_xmit,
+       .hw_ring_tx_head_update  = hw_atl_b0_hw_ring_tx_head_update,
+
+       .hw_ring_rx_receive      = hw_atl_b0_hw_ring_rx_receive,
+       .hw_ring_rx_fill         = hw_atl_b0_hw_ring_rx_fill,
+
+       .hw_irq_enable           = hw_atl_b0_hw_irq_enable,
+       .hw_irq_disable          = hw_atl_b0_hw_irq_disable,
+       .hw_irq_read             = hw_atl_b0_hw_irq_read,
+
+       .hw_ring_rx_init             = hw_atl_b0_hw_ring_rx_init,
+       .hw_ring_tx_init             = hw_atl_b0_hw_ring_tx_init,
+       .hw_packet_filter_set        = hw_atl_b0_hw_packet_filter_set,
+       .hw_multicast_list_set       = hw_atl_b0_hw_multicast_list_set,
+       .hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
+       .hw_rss_set                  = hw_atl_b0_hw_rss_set,
+       .hw_rss_hash_set             = hw_atl_b0_hw_rss_hash_set,
+       .hw_get_regs                 = hw_atl_utils_hw_get_regs,
+       .hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
+       .hw_get_fw_version           = hw_atl_utils_get_fw_version,
+};
+
+struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev)
+{
+       bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
+       bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
+                       (pdev->device == HW_ATL_DEVICE_ID_D100) ||
+                       (pdev->device == HW_ATL_DEVICE_ID_D107) ||
+                       (pdev->device == HW_ATL_DEVICE_ID_D108) ||
+                       (pdev->device == HW_ATL_DEVICE_ID_D109));
+
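+       /* PCI revision 2 identifies B0 silicon; revision 1 parts are
+        * claimed by the A0 ops in hw_atl_a0.c.
+        */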
+       bool is_rev_ok = (pdev->revision == 2U);
+
+       return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
new file mode 100644 (file)
index 0000000..a1e1bce
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_b0.h: Declaration of abstract interface for Atlantic hardware
+ * specific functions.
+ */
+
+#ifndef HW_ATL_B0_H
+#define HW_ATL_B0_H
+
+#include "../aq_common.h"
+
+#ifndef PCI_VENDOR_ID_AQUANTIA
+
+#define PCI_VENDOR_ID_AQUANTIA  0x1D6A
+#define HW_ATL_DEVICE_ID_0001   0x0001
+#define HW_ATL_DEVICE_ID_D100   0xD100
+#define HW_ATL_DEVICE_ID_D107   0xD107
+#define HW_ATL_DEVICE_ID_D108   0xD108
+#define HW_ATL_DEVICE_ID_D109   0xD109
+
+#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter"
+
+#endif
+
+struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev);
+
+#endif /* HW_ATL_B0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
new file mode 100644 (file)
index 0000000..8bdee3d
--- /dev/null
@@ -0,0 +1,207 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_b0_internal.h: Definition of Atlantic B0 chip specific
+ * constants.
+ */
+
+#ifndef HW_ATL_B0_INTERNAL_H
+#define HW_ATL_B0_INTERNAL_H
+
+#include "../aq_common.h"
+
+#define HW_ATL_B0_MTU_JUMBO (16000U)
+#define HW_ATL_B0_MTU        1514U
+
+#define HW_ATL_B0_TX_RINGS 4U
+#define HW_ATL_B0_RX_RINGS 4U
+
+#define HW_ATL_B0_RINGS_MAX 32U
+#define HW_ATL_B0_TXD_SIZE       (16U)
+#define HW_ATL_B0_RXD_SIZE       (16U)
+
+#define HW_ATL_B0_MAC      0U
+#define HW_ATL_B0_MAC_MIN  1U
+#define HW_ATL_B0_MAC_MAX  33U
+
+/* UCAST/MCAST filters */
+#define HW_ATL_B0_UCAST_FILTERS_MAX 38
+#define HW_ATL_B0_MCAST_FILTERS_MAX 8
+
+/* interrupts */
+#define HW_ATL_B0_ERR_INT 8U
+#define HW_ATL_B0_INT_MASK  (0xFFFFFFFFU)
+
+#define HW_ATL_B0_TXD_CTL2_LEN        (0xFFFFC000)
+#define HW_ATL_B0_TXD_CTL2_CTX_EN     (0x00002000)
+#define HW_ATL_B0_TXD_CTL2_CTX_IDX    (0x00001000)
+
+#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD   (0x00000001)
+#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC   (0x00000002)
+#define HW_ATL_B0_TXD_CTL_BLEN        (0x000FFFF0)
+#define HW_ATL_B0_TXD_CTL_DD          (0x00100000)
+#define HW_ATL_B0_TXD_CTL_EOP         (0x00200000)
+
+#define HW_ATL_B0_TXD_CTL_CMD_X       (0x3FC00000)
+
+#define HW_ATL_B0_TXD_CTL_CMD_VLAN    BIT(22)
+#define HW_ATL_B0_TXD_CTL_CMD_FCS     BIT(23)
+#define HW_ATL_B0_TXD_CTL_CMD_IPCSO   BIT(24)
+#define HW_ATL_B0_TXD_CTL_CMD_TUCSO   BIT(25)
+#define HW_ATL_B0_TXD_CTL_CMD_LSO     BIT(26)
+#define HW_ATL_B0_TXD_CTL_CMD_WB      BIT(27)
+#define HW_ATL_B0_TXD_CTL_CMD_VXLAN   BIT(28)
+
+#define HW_ATL_B0_TXD_CTL_CMD_IPV6    BIT(21)
+#define HW_ATL_B0_TXD_CTL_CMD_TCP     BIT(22)
+
+#define HW_ATL_B0_MPI_CONTROL_ADR       0x0368U
+#define HW_ATL_B0_MPI_STATE_ADR         0x036CU
+
+#define HW_ATL_B0_MPI_SPEED_MSK         0xFFFFU
+#define HW_ATL_B0_MPI_SPEED_SHIFT       16U
+
+#define HW_ATL_B0_RATE_10G              BIT(0)
+#define HW_ATL_B0_RATE_5G               BIT(1)
+#define HW_ATL_B0_RATE_2G5              BIT(3)
+#define HW_ATL_B0_RATE_1G               BIT(4)
+#define HW_ATL_B0_RATE_100M             BIT(5)
+
+#define HW_ATL_B0_TXBUF_MAX  160U
+#define HW_ATL_B0_RXBUF_MAX  320U
+
+#define HW_ATL_B0_RSS_REDIRECTION_MAX 64U
+#define HW_ATL_B0_RSS_REDIRECTION_BITS 3U
+#define HW_ATL_B0_RSS_HASHKEY_BITS 320U
+
+#define HW_ATL_B0_TCRSS_4_8  1
+#define HW_ATL_B0_TC_MAX 1U
+#define HW_ATL_B0_RSS_MAX 8U
+
+#define HW_ATL_B0_LRO_RXD_MAX 2U
+#define HW_ATL_B0_RS_SLIP_ENABLED  0U
+
+/* 256 KiB - 1 (max pay_len) - 54 (header) = 262089 */
+#define HAL_ATL_B0_LSO_MAX_SEGMENT_SIZE 262089U
+
+/* 256 KiB - 1 (max pay_len) - 74 (header) = 262069 */
+#define HAL_ATL_B0_LSO_IPV6_MAX_SEGMENT_SIZE 262069U
+
+#define HW_ATL_B0_CHIP_REVISION_B0      0xA0U
+#define HW_ATL_B0_CHIP_REVISION_UNKNOWN 0xFFU
+
+#define HW_ATL_B0_FW_SEMA_RAM           0x2U
+
+#define HW_ATL_B0_TXC_LEN_TUNLEN    (0x0000FF00)
+#define HW_ATL_B0_TXC_LEN_OUTLEN    (0xFFFF0000)
+
+#define HW_ATL_B0_TXC_CTL_DESC_TYPE (0x00000007)
+#define HW_ATL_B0_TXC_CTL_CTX_ID    (0x00000008)
+#define HW_ATL_B0_TXC_CTL_VLAN      (0x000FFFF0)
+#define HW_ATL_B0_TXC_CTL_CMD       (0x00F00000)
+#define HW_ATL_B0_TXC_CTL_L2LEN     (0x7F000000)
+
+#define HW_ATL_B0_TXC_CTL_L3LEN     (0x80000000)       /* L3LEN lsb */
+#define HW_ATL_B0_TXC_LEN2_L3LEN    (0x000000FF)       /* L3LEN upper bits */
+#define HW_ATL_B0_TXC_LEN2_L4LEN    (0x0000FF00)
+#define HW_ATL_B0_TXC_LEN2_MSSLEN   (0xFFFF0000)
+
+#define HW_ATL_B0_RXD_DD    (0x1)
+#define HW_ATL_B0_RXD_NCEA0 (0x1)
+
+#define HW_ATL_B0_RXD_WB_STAT_RSSTYPE (0x0000000F)
+#define HW_ATL_B0_RXD_WB_STAT_PKTTYPE (0x00000FF0)
+#define HW_ATL_B0_RXD_WB_STAT_RXCTRL  (0x00180000)
+#define HW_ATL_B0_RXD_WB_STAT_SPLHDR  (0x00200000)
+#define HW_ATL_B0_RXD_WB_STAT_HDRLEN  (0xFFC00000)
+
+#define HW_ATL_B0_RXD_WB_STAT2_DD      (0x0001)
+#define HW_ATL_B0_RXD_WB_STAT2_EOP     (0x0002)
+#define HW_ATL_B0_RXD_WB_STAT2_RXSTAT  (0x003C)
+#define HW_ATL_B0_RXD_WB_STAT2_MACERR  (0x0004)
+#define HW_ATL_B0_RXD_WB_STAT2_IP4ERR  (0x0008)
+#define HW_ATL_B0_RXD_WB_STAT2_TCPUPDERR  (0x0010)
+#define HW_ATL_B0_RXD_WB_STAT2_RXESTAT (0x0FC0)
+#define HW_ATL_B0_RXD_WB_STAT2_RSCCNT  (0xF000)
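+
+/* RXSTAT (0x003C) is the 4-bit status field that contains the MACERR,
+ * IP4ERR and TCPUPDERR bits defined above, plus one further status bit
+ * (0x0020) left unnamed here.
+ */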
+
+#define L2_FILTER_ACTION_DISCARD (0x0)
+#define L2_FILTER_ACTION_HOST    (0x1)
+
+#define HW_ATL_B0_UCP_0X370_REG  (0x370)
+
+#define HW_ATL_B0_FLUSH() AQ_HW_READ_REG(self, 0x10)
+
+#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U
+
+/* Hardware tx descriptor */
+struct __packed hw_atl_txd_s {
+       u64 buf_addr;
+       u32 ctl;
+       u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
+};
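+
+/* The bit numbers in the ctl2 comment count within the descriptor's second
+ * 64-bit word (ctl | ctl2 << 32): qword bits 63..46 are ctl2 bits 31..14
+ * (HW_ATL_B0_TXD_CTL2_LEN, 0xFFFFC000), bit 45 is ctl2 bit 13 (CTX_EN) and
+ * bit 44 is ctl2 bit 12 (CTX_IDX).
+ */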
+
+/* Hardware tx context descriptor */
+struct __packed hw_atl_txc_s {
+       u32 rsvd;
+       u32 len;
+       u32 ctl;
+       u32 len2;
+};
+
+/* Hardware rx descriptor */
+struct __packed hw_atl_rxd_s {
+       u64 buf_addr;
+       u64 hdr_addr;
+};
+
+/* Hardware rx descriptor writeback */
+struct __packed hw_atl_rxd_wb_s {
+       u32 type;
+       u32 rss_hash;
+       u16 status;
+       u16 pkt_len;
+       u16 next_desc_ptr;
+       u16 vlan;
+};
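+
+/* Field widths match the mask groups above: the 32-bit HW_ATL_B0_RXD_WB_STAT
+ * masks decode the 'type' dword, while the 16-bit HW_ATL_B0_RXD_WB_STAT2
+ * masks decode the u16 'status' word.
+ */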
+
+/* HW layer capabilities */
+static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
+       .ports = 1U,
+       .is_64_dma = true,
+       .msix_irqs = 4U,
+       .irq_mask = ~0U,
+       .vecs = HW_ATL_B0_RSS_MAX,
+       .tcs = HW_ATL_B0_TC_MAX,
+       .rxd_alignment = 1U,
+       .rxd_size = HW_ATL_B0_RXD_SIZE,
+       .rxds = 8U * 1024U,
+       .txd_alignment = 1U,
+       .txd_size = HW_ATL_B0_TXD_SIZE,
+       .txds = 8U * 1024U,
+       .txhwb_alignment = 4096U,
+       .tx_rings = HW_ATL_B0_TX_RINGS,
+       .rx_rings = HW_ATL_B0_RX_RINGS,
+       .hw_features = NETIF_F_HW_CSUM |
+                       NETIF_F_RXHASH |
+                       NETIF_F_SG |
+                       NETIF_F_TSO |
+                       NETIF_F_LRO,
+       .hw_priv_flags = IFF_UNICAST_FLT,
+       .link_speed_msk = (HW_ATL_B0_RATE_10G |
+                       HW_ATL_B0_RATE_5G |
+                       HW_ATL_B0_RATE_2G5 |
+                       HW_ATL_B0_RATE_1G |
+                       HW_ATL_B0_RATE_100M),
+       .flow_control = true,
+       .mtu = HW_ATL_B0_MTU_JUMBO,
+       .mac_regs_count = 88,
+       .fw_ver_expected = HW_ATL_B0_FW_VER_EXPECTED,
+};
+
+#endif /* HW_ATL_B0_INTERNAL_H */
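Every accessor in hw_atl_llh.c below reduces to the MMIO primitives
aq_hw_read_reg()/aq_hw_write_reg() plus the bitfield helpers declared in
aq_hw_utils.h. Those helpers are defined outside this excerpt; the following
is only a minimal sketch, consistent with how the (address, mask, shift)
triples are used throughout the file, not necessarily the driver's exact
implementation:

	/* sketch: return a right-aligned bitfield from a 32-bit register */
	u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
			       u32 shift)
	{
		return (aq_hw_read_reg(aq_hw, addr) & msk) >> shift;
	}

	/* sketch: read-modify-write one bitfield of a 32-bit register */
	void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
				 u32 shift, u32 val)
	{
		u32 reg = aq_hw_read_reg(aq_hw, addr);

		reg = (reg & ~msk) | ((val << shift) & msk);
		aq_hw_write_reg(aq_hw, addr, reg);
	}
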
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
new file mode 100644 (file)
index 0000000..3de651a
--- /dev/null
@@ -0,0 +1,1394 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_llh.c: Definitions of bitfield and register access functions for
+ * Atlantic registers.
+ */
+
+#include "hw_atl_llh.h"
+#include "hw_atl_llh_internal.h"
+#include "../aq_hw_utils.h"
+
+/* global */
+void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore)
+{
+       aq_hw_write_reg(aq_hw, glb_cpu_sem_adr(semaphore), glb_cpu_sem);
+}
+
+u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
+{
+       return aq_hw_read_reg(aq_hw, glb_cpu_sem_adr(semaphore));
+}
+
+void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
+{
+       aq_hw_write_reg_bit(aq_hw, glb_reg_res_dis_adr,
+                           glb_reg_res_dis_msk,
+                           glb_reg_res_dis_shift,
+                           glb_reg_res_dis);
+}
+
+void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
+{
+       aq_hw_write_reg_bit(aq_hw, glb_soft_res_adr, glb_soft_res_msk,
+                           glb_soft_res_shift, soft_res);
+}
+
+u32 glb_soft_res_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg_bit(aq_hw, glb_soft_res_adr,
+                                 glb_soft_res_msk,
+                                 glb_soft_res_shift);
+}
+
+u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg(aq_hw, rx_dma_stat_counter7_adr);
+}
+
+u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg(aq_hw, glb_mif_id_adr);
+}
+
+/* stats */
+u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr);
+}
+
+u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_counterlsw__adr);
+}
+
+u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr);
+}
+
+u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_counterlsw__adr);
+}
+
+u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr);
+}
+
+u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_countermsw__adr);
+}
+
+u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_countermsw__adr);
+}
+
+u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_countermsw__adr);
+}
+
+u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_countermsw__adr);
+}
+
+/* interrupt */
+void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw)
+{
+       aq_hw_write_reg(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw);
+}
+
+void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
+{
+/* register address for bitfield imr_rx{r}_en */
+       static u32 itr_imr_rxren_adr[32] = {
+                       0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+                       0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+                       0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+                       0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+                       0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+                       0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+                       0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+                       0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+               };
+
+/* bitmask for bitfield imr_rx{r}_en */
+       static u32 itr_imr_rxren_msk[32] = {
+                       0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+                       0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+                       0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+                       0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+                       0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+                       0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+                       0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+                       0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U
+               };
+
+/* lower bit position of bitfield imr_rx{r}_en */
+       static u32 itr_imr_rxren_shift[32] = {
+                       15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+                       15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+                       15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+                       15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U
+               };
+
+       aq_hw_write_reg_bit(aq_hw, itr_imr_rxren_adr[rx],
+                           itr_imr_rxren_msk[rx],
+                           itr_imr_rxren_shift[rx],
+                           irq_map_en_rx);
+}
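+
+/* The three tables above pack two rings per 32-bit register: rings 2n and
+ * 2n + 1 share the register at 0x2100 + 4 * n, with the even ring's enable
+ * bit at position 15 and the odd ring's at position 7. Equivalently:
+ * adr = 0x2100U + 4U * (rx / 2), shift = (rx & 1) ? 7U : 15U. The tx enable
+ * and the rx/tx vector-map tables below follow the same even/odd pairing.
+ */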
+
+void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
+{
+/* register address for bitfield imr_tx{t}_en */
+       static u32 itr_imr_txten_adr[32] = {
+                       0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+                       0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+                       0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+                       0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+                       0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+                       0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+                       0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+                       0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+               };
+
+/* bitmask for bitfield imr_tx{t}_en */
+       static u32 itr_imr_txten_msk[32] = {
+                       0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+                       0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+                       0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+                       0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+                       0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+                       0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+                       0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+                       0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U
+               };
+
+/* lower bit position of bitfield imr_tx{t}_en */
+       static u32 itr_imr_txten_shift[32] = {
+                       31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+                       31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+                       31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+                       31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U
+               };
+
+       aq_hw_write_reg_bit(aq_hw, itr_imr_txten_adr[tx],
+                           itr_imr_txten_msk[tx],
+                           itr_imr_txten_shift[tx],
+                           irq_map_en_tx);
+}
+
+void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
+{
+/* register address for bitfield imr_rx{r}[4:0] */
+       static u32 itr_imr_rxr_adr[32] = {
+                       0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+                       0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+                       0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+                       0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+                       0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+                       0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+                       0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+                       0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+               };
+
+/* bitmask for bitfield imr_rx{r}[4:0] */
+       static u32 itr_imr_rxr_msk[32] = {
+                       0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+                       0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+                       0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+                       0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+                       0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+                       0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+                       0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+                       0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU
+               };
+
+/* lower bit position of bitfield imr_rx{r}[4:0] */
+       static u32 itr_imr_rxr_shift[32] = {
+                       8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+                       8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+                       8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+                       8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U
+               };
+
+       aq_hw_write_reg_bit(aq_hw, itr_imr_rxr_adr[rx],
+                           itr_imr_rxr_msk[rx],
+                           itr_imr_rxr_shift[rx],
+                           irq_map_rx);
+}
+
+void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
+{
+/* register address for bitfield imr_tx{t}[4:0] */
+       static u32 itr_imr_txt_adr[32] = {
+                       0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+                       0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+                       0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+                       0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+                       0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+                       0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+                       0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+                       0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+               };
+
+/* bitmask for bitfield imr_tx{t}[4:0] */
+       static u32 itr_imr_txt_msk[32] = {
+                       0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+                       0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+                       0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+                       0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+                       0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+                       0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+                       0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+                       0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U
+               };
+
+/* lower bit position of bitfield imr_tx{t}[4:0] */
+       static u32 itr_imr_txt_shift[32] = {
+                       24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+                       24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+                       24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+                       24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U
+               };
+
+       aq_hw_write_reg_bit(aq_hw, itr_imr_txt_adr[tx],
+                           itr_imr_txt_msk[tx],
+                           itr_imr_txt_shift[tx],
+                           irq_map_tx);
+}
+
+void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw)
+{
+       aq_hw_write_reg(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw);
+}
+
+void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
+{
+       aq_hw_write_reg(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw);
+}
+
+void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
+{
+       aq_hw_write_reg_bit(aq_hw, itr_reg_res_dsbl_adr,
+                           itr_reg_res_dsbl_msk,
+                           itr_reg_res_dsbl_shift, irq_reg_res_dis);
+}
+
+void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+                                u32 irq_status_clearlsw)
+{
+       aq_hw_write_reg(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw);
+}
+
+u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg(aq_hw, itr_isrlsw_adr);
+}
+
+u32 itr_res_irq_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
+                                 itr_res_shift);
+}
+
+void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
+{
+       aq_hw_write_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
+                           itr_res_shift, res_irq);
+}
+
+/* rdm */
+void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+{
+       aq_hw_write_reg_bit(aq_hw, rdm_dcadcpuid_adr(dca),
+                           rdm_dcadcpuid_msk,
+                           rdm_dcadcpuid_shift, cpuid);
+}
+
+void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
+{
+       aq_hw_write_reg_bit(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk,
+                           rdm_dca_en_shift, rx_dca_en);
+}
+
+void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
+{
+       aq_hw_write_reg_bit(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk,
+                           rdm_dca_mode_shift, rx_dca_mode);
+}
+
+void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+                                   u32 rx_desc_data_buff_size, u32 descriptor)
+{
+       aq_hw_write_reg_bit(aq_hw, rdm_descddata_size_adr(descriptor),
+                           rdm_descddata_size_msk,
+                           rdm_descddata_size_shift,
+                           rx_desc_data_buff_size);
+}
+
+void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, u32 dca)
+{
+       aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca),
+                           rdm_dcaddesc_en_msk,
+                           rdm_dcaddesc_en_shift,
+                           rx_desc_dca_en);
+}
+
+void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, u32 descriptor)
+{
+       aq_hw_write_reg_bit(aq_hw, rdm_descden_adr(descriptor),
+                           rdm_descden_msk,
+                           rdm_descden_shift,
+                           rx_desc_en);
+}
+
+void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+                                   u32 rx_desc_head_buff_size, u32 descriptor)
+{
+       aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_size_adr(descriptor),
+                           rdm_descdhdr_size_msk,
+                           rdm_descdhdr_size_shift,
+                           rx_desc_head_buff_size);
+}
+
+void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+                                   u32 rx_desc_head_splitting, u32 descriptor)
+{
+       aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_split_adr(descriptor),
+                           rdm_descdhdr_split_msk,
+                           rdm_descdhdr_split_shift,
+                           rx_desc_head_splitting);
+}
+
+u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+{
+       return aq_hw_read_reg_bit(aq_hw, rdm_descdhd_adr(descriptor),
+                                 rdm_descdhd_msk, rdm_descdhd_shift);
+}
+
+void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, u32 descriptor)
+{
+       aq_hw_write_reg_bit(aq_hw, rdm_descdlen_adr(descriptor),
+                           rdm_descdlen_msk, rdm_descdlen_shift,
+                           rx_desc_len);
+}
+
+void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, u32 descriptor)
+{
+       aq_hw_write_reg_bit(aq_hw, rdm_descdreset_adr(descriptor),
+                           rdm_descdreset_msk, rdm_descdreset_shift,
+                           rx_desc_res);
+}
+
+void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+                                 u32 rx_desc_wr_wb_irq_en)
+{
+       aq_hw_write_reg_bit(aq_hw, rdm_int_desc_wrb_en_adr,
+                           rdm_int_desc_wrb_en_msk,
+                           rdm_int_desc_wrb_en_shift,
+                           rx_desc_wr_wb_irq_en);
+}
+
+void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, u32 dca)
+{
+       aq_hw_write_reg_bit(aq_hw, rdm_dcadhdr_en_adr(dca),
+                           rdm_dcadhdr_en_msk,
+                           rdm_dcadhdr_en_shift,
+                           rx_head_dca_en);
+}
+
+void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca)
+{
+       aq_hw_write_reg_bit(aq_hw, rdm_dcadpay_en_adr(dca),
+                           rdm_dcadpay_en_msk, rdm_dcadpay_en_shift,
+                           rx_pld_dca_en);
+}
+
+void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en)
+{
+       aq_hw_write_reg_bit(aq_hw, rdm_int_rim_en_adr,
+                           rdm_int_rim_en_msk,
+                           rdm_int_rim_en_shift,
+                           rdm_intr_moder_en);
+}
+
+/* reg */
+void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx)
+{
+       aq_hw_write_reg(aq_hw, gen_intr_map_adr(regidx), gen_intr_map);
+}
+
+u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg(aq_hw, gen_intr_stat_adr);
+}
+
+void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
+{
+       aq_hw_write_reg(aq_hw, intr_glb_ctl_adr, intr_glb_ctl);
+}
+
+void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
+{
+       aq_hw_write_reg(aq_hw, intr_thr_adr(throttle), intr_thr);
+}
+
+void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+                                       u32 rx_dma_desc_base_addrlsw,
+                                       u32 descriptor)
+{
+       aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor),
+                       rx_dma_desc_base_addrlsw);
+}
+
+void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+                                       u32 rx_dma_desc_base_addrmsw,
+                                       u32 descriptor)
+{
+       aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor),
+                       rx_dma_desc_base_addrmsw);
+}
+
+u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
+{
+       return aq_hw_read_reg(aq_hw, rx_dma_desc_stat_adr(descriptor));
+}
+
+void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+                                 u32 rx_dma_desc_tail_ptr, u32 descriptor)
+{
+       aq_hw_write_reg(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor),
+                       rx_dma_desc_tail_ptr);
+}
+
+void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr_msk)
+{
+       aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk);
+}
+
+void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+                            u32 filter)
+{
+       aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr);
+}
+
+void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, u32 rx_flr_rss_control1)
+{
+       aq_hw_write_reg(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1);
+}
+
+void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_filter_control2)
+{
+       aq_hw_write_reg(aq_hw, rx_flr_control2_adr, rx_filter_control2);
+}
+
+void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+                               u32 rx_intr_moderation_ctl,
+                               u32 queue)
+{
+       aq_hw_write_reg(aq_hw, rx_intr_moderation_ctl_adr(queue),
+                       rx_intr_moderation_ctl);
+}
+
+void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl)
+{
+       aq_hw_write_reg(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl);
+}
+
+void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+                                       u32 tx_dma_desc_base_addrlsw,
+                                       u32 descriptor)
+{
+       aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor),
+                       tx_dma_desc_base_addrlsw);
+}
+
+void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+                                       u32 tx_dma_desc_base_addrmsw,
+                                       u32 descriptor)
+{
+       aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor),
+                       tx_dma_desc_base_addrmsw);
+}
+
+void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+                                 u32 tx_dma_desc_tail_ptr, u32 descriptor)
+{
+       aq_hw_write_reg(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor),
+                       tx_dma_desc_tail_ptr);
+}
+
+void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+                               u32 tx_intr_moderation_ctl,
+                               u32 queue)
+{
+       aq_hw_write_reg(aq_hw, tx_intr_moderation_ctl_adr(queue),
+                       tx_intr_moderation_ctl);
+}
+
+/* RPB: rx packet buffer */
+void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
+{
+       aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr,
+                           rpb_dma_sys_lbk_msk,
+                           rpb_dma_sys_lbk_shift, dma_sys_lbk);
+}
+
+void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+                                   u32 rx_traf_class_mode)
+{
+       aq_hw_write_reg_bit(aq_hw, rpb_rpf_rx_tc_mode_adr,
+                           rpb_rpf_rx_tc_mode_msk,
+                           rpb_rpf_rx_tc_mode_shift,
+                           rx_traf_class_mode);
+}
+
+void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
+{
+       aq_hw_write_reg_bit(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk,
+                           rpb_rx_buf_en_shift, rx_buff_en);
+}
+
+void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+                                        u32 rx_buff_hi_threshold_per_tc,
+                                        u32 buffer)
+{
+       aq_hw_write_reg_bit(aq_hw, rpb_rxbhi_thresh_adr(buffer),
+                           rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift,
+                           rx_buff_hi_threshold_per_tc);
+}
+
+void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+                                        u32 rx_buff_lo_threshold_per_tc,
+                                        u32 buffer)
+{
+       aq_hw_write_reg_bit(aq_hw, rpb_rxblo_thresh_adr(buffer),
+                           rpb_rxblo_thresh_msk,
+                           rpb_rxblo_thresh_shift,
+                           rx_buff_lo_threshold_per_tc);
+}
+
+void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
+{
+       aq_hw_write_reg_bit(aq_hw, rpb_rx_fc_mode_adr,
+                           rpb_rx_fc_mode_msk,
+                           rpb_rx_fc_mode_shift, rx_flow_ctl_mode);
+}
+
+void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+                                    u32 rx_pkt_buff_size_per_tc, u32 buffer)
+{
+       aq_hw_write_reg_bit(aq_hw, rpb_rxbbuf_size_adr(buffer),
+                           rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift,
+                           rx_pkt_buff_size_per_tc);
+}
+
+void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+                              u32 buffer)
+{
+       aq_hw_write_reg_bit(aq_hw, rpb_rxbxoff_en_adr(buffer),
+                           rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift,
+                           rx_xoff_en_per_tc);
+}
+
+/* rpf */
+
+void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+                                       u32 l2broadcast_count_threshold)
+{
+       aq_hw_write_reg_bit(aq_hw, rpfl2bc_thresh_adr,
+                           rpfl2bc_thresh_msk,
+                           rpfl2bc_thresh_shift,
+                           l2broadcast_count_threshold);
+}
+
+void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
+{
+       aq_hw_write_reg_bit(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk,
+                           rpfl2bc_en_shift, l2broadcast_en);
+}
+
+void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2broadcast_flr_act)
+{
+       aq_hw_write_reg_bit(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk,
+                           rpfl2bc_act_shift, l2broadcast_flr_act);
+}
+
+void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
+                              u32 filter)
+{
+       aq_hw_write_reg_bit(aq_hw, rpfl2mc_enf_adr(filter),
+                           rpfl2mc_enf_msk,
+                           rpfl2mc_enf_shift, l2multicast_flr_en);
+}
+
+void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+                                 u32 l2promiscuous_mode_en)
+{
+       aq_hw_write_reg_bit(aq_hw, rpfl2promis_mode_adr,
+                           rpfl2promis_mode_msk,
+                           rpfl2promis_mode_shift,
+                           l2promiscuous_mode_en);
+}
+
+void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
+                             u32 filter)
+{
+       aq_hw_write_reg_bit(aq_hw, rpfl2uc_actf_adr(filter),
+                           rpfl2uc_actf_msk, rpfl2uc_actf_shift,
+                           l2unicast_flr_act);
+}
+
+void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+                        u32 filter)
+{
+       aq_hw_write_reg_bit(aq_hw, rpfl2uc_enf_adr(filter),
+                           rpfl2uc_enf_msk,
+                           rpfl2uc_enf_shift, l2unicast_flr_en);
+}
+
+void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+                                     u32 l2unicast_dest_addresslsw,
+                                     u32 filter)
+{
+       aq_hw_write_reg(aq_hw, rpfl2uc_daflsw_adr(filter),
+                       l2unicast_dest_addresslsw);
+}
+
+void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+                                     u32 l2unicast_dest_addressmsw,
+                                     u32 filter)
+{
+       aq_hw_write_reg_bit(aq_hw, rpfl2uc_dafmsw_adr(filter),
+                           rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift,
+                           l2unicast_dest_addressmsw);
+}
+
+void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+                                    u32 l2_accept_all_mc_packets)
+{
+       aq_hw_write_reg_bit(aq_hw, rpfl2mc_accept_all_adr,
+                           rpfl2mc_accept_all_msk,
+                           rpfl2mc_accept_all_shift,
+                           l2_accept_all_mc_packets);
+}
+
+void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+                                     u32 user_priority_tc_map, u32 tc)
+{
+/* register address for bitfield rx_tc_up{t}[2:0] */
+       static u32 rpf_rpb_rx_tc_upt_adr[8] = {
+                       0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U,
+                       0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U
+               };
+
+/* bitmask for bitfield rx_tc_up{t}[2:0] */
+       static u32 rpf_rpb_rx_tc_upt_msk[8] = {
+                       0x00000007U, 0x00000070U, 0x00000700U, 0x00007000U,
+                       0x00070000U, 0x00700000U, 0x07000000U, 0x70000000U
+               };
+
+/* lower bit position of bitfield rx_tc_up{t}[2:0] */
+       static u32 rpf_rpb_rx_tc_upt_shft[8] = {
+                       0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
+               };
+
+       aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc],
+                           rpf_rpb_rx_tc_upt_msk[tc],
+                           rpf_rpb_rx_tc_upt_shft[tc],
+                           user_priority_tc_map);
+}
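+
+/* All eight user priorities live in the single register 0x54c4, one 3-bit
+ * TC field per nibble: msk = 0x7U << (4U * tc), shift = 4U * tc.
+ */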
+
+void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr,
+                           rpf_rss_key_addr_msk,
+                           rpf_rss_key_addr_shift,
+                           rss_key_addr);
+}
+
+void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
+{
+       aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr,
+                       rss_key_wr_data);
+}
+
+u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
+                                 rpf_rss_key_wr_eni_msk,
+                                 rpf_rss_key_wr_eni_shift);
+}
+
+void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
+                           rpf_rss_key_wr_eni_msk,
+                           rpf_rss_key_wr_eni_shift,
+                           rss_key_wr_en);
+}
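+
+/* The rss_key accessors above form one indirect write into the 320-bit
+ * hash key RAM (HW_ATL_B0_RSS_HASHKEY_BITS). Assumed sequencing — the
+ * hardware appears to clear the wr_en strobe once the word is committed:
+ *
+ *	rpf_rss_key_wr_data_set(aq_hw, key_word);
+ *	rpf_rss_key_addr_set(aq_hw, word_index);
+ *	rpf_rss_key_wr_en_set(aq_hw, 1U);
+ *	then poll rpf_rss_key_wr_en_get(aq_hw) until it returns 0.
+ */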
+
+void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, u32 rss_redir_tbl_addr)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_addr_adr,
+                           rpf_rss_redir_addr_msk,
+                           rpf_rss_redir_addr_shift, rss_redir_tbl_addr);
+}
+
+void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+                                  u32 rss_redir_tbl_wr_data)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_data_adr,
+                           rpf_rss_redir_wr_data_msk,
+                           rpf_rss_redir_wr_data_shift,
+                           rss_redir_tbl_wr_data);
+}
+
+u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
+                                 rpf_rss_redir_wr_eni_msk,
+                                 rpf_rss_redir_wr_eni_shift);
+}
+
+void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
+                           rpf_rss_redir_wr_eni_msk,
+                           rpf_rss_redir_wr_eni_shift, rss_redir_wr_en);
+}
+
+void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, u32 tpo_to_rpf_sys_lbk)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_tpo_rpf_sys_lbk_adr,
+                           rpf_tpo_rpf_sys_lbk_msk,
+                           rpf_tpo_rpf_sys_lbk_shift,
+                           tpo_to_rpf_sys_lbk);
+}
+
+void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_vl_inner_tpid_adr,
+                           rpf_vl_inner_tpid_msk,
+                           rpf_vl_inner_tpid_shift,
+                           vlan_inner_etht);
+}
+
+void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_vl_outer_tpid_adr,
+                           rpf_vl_outer_tpid_msk,
+                           rpf_vl_outer_tpid_shift,
+                           vlan_outer_etht);
+}
+
+void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_vl_promis_mode_adr,
+                           rpf_vl_promis_mode_msk,
+                           rpf_vl_promis_mode_shift,
+                           vlan_prom_mode_en);
+}
+
+void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+                                         u32 vlan_accept_untagged_packets)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_vl_accept_untagged_mode_adr,
+                           rpf_vl_accept_untagged_mode_msk,
+                           rpf_vl_accept_untagged_mode_shift,
+                           vlan_accept_untagged_packets);
+}
+
+void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_vl_untagged_act_adr,
+                           rpf_vl_untagged_act_msk,
+                           rpf_vl_untagged_act_shift,
+                           vlan_untagged_act);
+}
+
+void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_vl_en_f_adr(filter),
+                           rpf_vl_en_f_msk,
+                           rpf_vl_en_f_shift,
+                           vlan_flr_en);
+}
+
+void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, u32 filter)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_vl_act_f_adr(filter),
+                           rpf_vl_act_f_msk,
+                           rpf_vl_act_f_shift,
+                           vlan_flr_act);
+}
+
+void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_vl_id_f_adr(filter),
+                           rpf_vl_id_f_msk,
+                           rpf_vl_id_f_shift,
+                           vlan_id_flr);
+}
+
+void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_et_enf_adr(filter),
+                           rpf_et_enf_msk,
+                           rpf_et_enf_shift, etht_flr_en);
+}
+
+void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+                                  u32 etht_user_priority_en, u32 filter)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_et_upfen_adr(filter),
+                           rpf_et_upfen_msk, rpf_et_upfen_shift,
+                           etht_user_priority_en);
+}
+
+void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
+                             u32 filter)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_et_rxqfen_adr(filter),
+                           rpf_et_rxqfen_msk, rpf_et_rxqfen_shift,
+                           etht_rx_queue_en);
+}
+
+void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
+                               u32 filter)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_et_upf_adr(filter),
+                           rpf_et_upf_msk,
+                           rpf_et_upf_shift, etht_user_priority);
+}
+
+void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+                          u32 filter)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_et_rxqf_adr(filter),
+                           rpf_et_rxqf_msk,
+                           rpf_et_rxqf_shift, etht_rx_queue);
+}
+
+void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+                           u32 filter)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_et_mng_rxqf_adr(filter),
+                           rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift,
+                           etht_mgt_queue);
+}
+
+void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, u32 filter)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_et_actf_adr(filter),
+                           rpf_et_actf_msk,
+                           rpf_et_actf_shift, etht_flr_act);
+}
+
+void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
+{
+       aq_hw_write_reg_bit(aq_hw, rpf_et_valf_adr(filter),
+                           rpf_et_valf_msk,
+                           rpf_et_valf_shift, etht_flr);
+}
+
+/* RPO: rx packet offload */
+void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+                                      u32 ipv4header_crc_offload_en)
+{
+       aq_hw_write_reg_bit(aq_hw, rpo_ipv4chk_en_adr,
+                           rpo_ipv4chk_en_msk,
+                           rpo_ipv4chk_en_shift,
+                           ipv4header_crc_offload_en);
+}
+
+void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+                                   u32 rx_desc_vlan_stripping, u32 descriptor)
+{
+       aq_hw_write_reg_bit(aq_hw, rpo_descdvl_strip_adr(descriptor),
+                           rpo_descdvl_strip_msk,
+                           rpo_descdvl_strip_shift,
+                           rx_desc_vlan_stripping);
+}
+
+void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+                                   u32 tcp_udp_crc_offload_en)
+{
+       aq_hw_write_reg_bit(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk,
+                           rpol4chk_en_shift, tcp_udp_crc_offload_en);
+}
+
+void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
+{
+       aq_hw_write_reg(aq_hw, rpo_lro_en_adr, lro_en);
+}
+
+void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+                                      u32 lro_patch_optimization_en)
+{
+       aq_hw_write_reg_bit(aq_hw, rpo_lro_ptopt_en_adr,
+                           rpo_lro_ptopt_en_msk,
+                           rpo_lro_ptopt_en_shift,
+                           lro_patch_optimization_en);
+}
+
+void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+                              u32 lro_qsessions_lim)
+{
+       aq_hw_write_reg_bit(aq_hw, rpo_lro_qses_lmt_adr,
+                           rpo_lro_qses_lmt_msk,
+                           rpo_lro_qses_lmt_shift,
+                           lro_qsessions_lim);
+}
+
+void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim)
+{
+       aq_hw_write_reg_bit(aq_hw, rpo_lro_tot_dsc_lmt_adr,
+                           rpo_lro_tot_dsc_lmt_msk,
+                           rpo_lro_tot_dsc_lmt_shift,
+                           lro_total_desc_lim);
+}
+
+void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+                                     u32 lro_min_pld_of_first_pkt)
+{
+       aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr,
+                           rpo_lro_pkt_min_msk,
+                           rpo_lro_pkt_min_shift,
+                           lro_min_pld_of_first_pkt);
+}
+
+void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
+{
+       aq_hw_write_reg(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim);
+}
+
+void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+                                       u32 lro_max_number_of_descriptors,
+                                       u32 lro)
+{
+/* register address for bitfield lro{L}_des_max[1:0] */
+       static u32 rpo_lro_ldes_max_adr[32] = {
+                       0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U,
+                       0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U,
+                       0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U,
+                       0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U,
+                       0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U,
+                       0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U,
+                       0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU,
+                       0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU
+               };
+
+/* bitmask for bitfield lro{L}_des_max[1:0] */
+       static u32 rpo_lro_ldes_max_msk[32] = {
+                       0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+                       0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+                       0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+                       0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+                       0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+                       0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+                       0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+                       0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U
+               };
+
+/* lower bit position of bitfield lro{L}_des_max[1:0] */
+       static u32 rpo_lro_ldes_max_shift[32] = {
+                       0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+                       0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+                       0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+                       0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
+               };
+
+       aq_hw_write_reg_bit(aq_hw, rpo_lro_ldes_max_adr[lro],
+                           rpo_lro_ldes_max_msk[lro],
+                           rpo_lro_ldes_max_shift[lro],
+                           lro_max_number_of_descriptors);
+}
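+
+/* Eight 2-bit des_max fields per register: session L maps to the register
+ * at 0x55A0 + 4 * (lro / 8) with msk = 0x3U << (4U * (lro % 8)). The fields
+ * sit a nibble apart even though only two bits of each are used.
+ */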
+
+void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+                                  u32 lro_time_base_divider)
+{
+       aq_hw_write_reg_bit(aq_hw, rpo_lro_tb_div_adr,
+                           rpo_lro_tb_div_msk,
+                           rpo_lro_tb_div_shift,
+                           lro_time_base_divider);
+}
+
+void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+                                  u32 lro_inactive_interval)
+{
+       aq_hw_write_reg_bit(aq_hw, rpo_lro_ina_ival_adr,
+                           rpo_lro_ina_ival_msk,
+                           rpo_lro_ina_ival_shift,
+                           lro_inactive_interval);
+}
+
+void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+                                        u32 lro_max_coalescing_interval)
+{
+       aq_hw_write_reg_bit(aq_hw, rpo_lro_max_ival_adr,
+                           rpo_lro_max_ival_msk,
+                           rpo_lro_max_ival_shift,
+                           lro_max_coalescing_interval);
+}
+
+/* rx */
+void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
+{
+       aq_hw_write_reg_bit(aq_hw, rx_reg_res_dsbl_adr,
+                           rx_reg_res_dsbl_msk,
+                           rx_reg_res_dsbl_shift,
+                           rx_reg_res_dis);
+}
+
+/* tdm */
+void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+{
+       aq_hw_write_reg_bit(aq_hw, tdm_dcadcpuid_adr(dca),
+                           tdm_dcadcpuid_msk,
+                           tdm_dcadcpuid_shift, cpuid);
+}
+
+void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+                                  u32 large_send_offload_en)
+{
+       aq_hw_write_reg(aq_hw, tdm_lso_en_adr, large_send_offload_en);
+}
+
+void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
+{
+       aq_hw_write_reg_bit(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk,
+                           tdm_dca_en_shift, tx_dca_en);
+}
+
+void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
+{
+       aq_hw_write_reg_bit(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk,
+                           tdm_dca_mode_shift, tx_dca_mode);
+}
+
+void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca)
+{
+       aq_hw_write_reg_bit(aq_hw, tdm_dcaddesc_en_adr(dca),
+                           tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift,
+                           tx_desc_dca_en);
+}
+
+void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor)
+{
+       aq_hw_write_reg_bit(aq_hw, tdm_descden_adr(descriptor),
+                           tdm_descden_msk,
+                           tdm_descden_shift,
+                           tx_desc_en);
+}
+
+u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+{
+       return aq_hw_read_reg_bit(aq_hw, tdm_descdhd_adr(descriptor),
+                                 tdm_descdhd_msk, tdm_descdhd_shift);
+}
+
+void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+                        u32 descriptor)
+{
+       aq_hw_write_reg_bit(aq_hw, tdm_descdlen_adr(descriptor),
+                           tdm_descdlen_msk,
+                           tdm_descdlen_shift,
+                           tx_desc_len);
+}
+
+void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+                                 u32 tx_desc_wr_wb_irq_en)
+{
+       aq_hw_write_reg_bit(aq_hw, tdm_int_desc_wrb_en_adr,
+                           tdm_int_desc_wrb_en_msk,
+                           tdm_int_desc_wrb_en_shift,
+                           tx_desc_wr_wb_irq_en);
+}
+
+void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+                                    u32 tx_desc_wr_wb_threshold,
+                                    u32 descriptor)
+{
+       aq_hw_write_reg_bit(aq_hw, tdm_descdwrb_thresh_adr(descriptor),
+                           tdm_descdwrb_thresh_msk,
+                           tdm_descdwrb_thresh_shift,
+                           tx_desc_wr_wb_threshold);
+}
+
+void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+                              u32 tdm_irq_moderation_en)
+{
+       aq_hw_write_reg_bit(aq_hw, tdm_int_mod_en_adr,
+                           tdm_int_mod_en_msk,
+                           tdm_int_mod_en_shift,
+                           tdm_irq_moderation_en);
+}
+
+/* thm */
+void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+                                      u32 lso_tcp_flag_of_first_pkt)
+{
+       aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_first_adr,
+                           thm_lso_tcp_flag_first_msk,
+                           thm_lso_tcp_flag_first_shift,
+                           lso_tcp_flag_of_first_pkt);
+}
+
+void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+                                     u32 lso_tcp_flag_of_last_pkt)
+{
+       aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_last_adr,
+                           thm_lso_tcp_flag_last_msk,
+                           thm_lso_tcp_flag_last_shift,
+                           lso_tcp_flag_of_last_pkt);
+}
+
+void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+                                       u32 lso_tcp_flag_of_middle_pkt)
+{
+       aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_mid_adr,
+                           thm_lso_tcp_flag_mid_msk,
+                           thm_lso_tcp_flag_mid_shift,
+                           lso_tcp_flag_of_middle_pkt);
+}
+
+/* TPB: tx packet buffer */
+void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
+{
+       aq_hw_write_reg_bit(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk,
+                           tpb_tx_buf_en_shift, tx_buff_en);
+}
+
+void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+                                        u32 tx_buff_hi_threshold_per_tc,
+                                        u32 buffer)
+{
+       aq_hw_write_reg_bit(aq_hw, tpb_txbhi_thresh_adr(buffer),
+                           tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift,
+                           tx_buff_hi_threshold_per_tc);
+}
+
+void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+                                        u32 tx_buff_lo_threshold_per_tc,
+                                        u32 buffer)
+{
+       aq_hw_write_reg_bit(aq_hw, tpb_txblo_thresh_adr(buffer),
+                           tpb_txblo_thresh_msk, tpb_txblo_thresh_shift,
+                           tx_buff_lo_threshold_per_tc);
+}
+
+void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
+{
+       aq_hw_write_reg_bit(aq_hw, tpb_dma_sys_lbk_adr,
+                           tpb_dma_sys_lbk_msk,
+                           tpb_dma_sys_lbk_shift,
+                           tx_dma_sys_lbk_en);
+}
+
+void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+                                    u32 tx_pkt_buff_size_per_tc, u32 buffer)
+{
+       aq_hw_write_reg_bit(aq_hw, tpb_txbbuf_size_adr(buffer),
+                           tpb_txbbuf_size_msk,
+                           tpb_txbbuf_size_shift,
+                           tx_pkt_buff_size_per_tc);
+}
+
+void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en)
+{
+       aq_hw_write_reg_bit(aq_hw, tpb_tx_scp_ins_en_adr,
+                           tpb_tx_scp_ins_en_msk,
+                           tpb_tx_scp_ins_en_shift,
+                           tx_path_scp_ins_en);
+}
+
+/* TPO: tx packet offload */
+void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+                                      u32 ipv4header_crc_offload_en)
+{
+       aq_hw_write_reg_bit(aq_hw, tpo_ipv4chk_en_adr,
+                           tpo_ipv4chk_en_msk,
+                           tpo_ipv4chk_en_shift,
+                           ipv4header_crc_offload_en);
+}
+
+void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+                                   u32 tcp_udp_crc_offload_en)
+{
+       aq_hw_write_reg_bit(aq_hw, tpol4chk_en_adr,
+                           tpol4chk_en_msk,
+                           tpol4chk_en_shift,
+                           tcp_udp_crc_offload_en);
+}
+
+void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en)
+{
+       aq_hw_write_reg_bit(aq_hw, tpo_pkt_sys_lbk_adr,
+                           tpo_pkt_sys_lbk_msk,
+                           tpo_pkt_sys_lbk_shift,
+                           tx_pkt_sys_lbk_en);
+}
+
+/* TPS: tx packet scheduler */
+void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+                                      u32 tx_pkt_shed_data_arb_mode)
+{
+       aq_hw_write_reg_bit(aq_hw, tps_data_tc_arb_mode_adr,
+                           tps_data_tc_arb_mode_msk,
+                           tps_data_tc_arb_mode_shift,
+                           tx_pkt_shed_data_arb_mode);
+}
+
+void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+                                                u32 curr_time_res)
+{
+       aq_hw_write_reg_bit(aq_hw, tps_desc_rate_ta_rst_adr,
+                           tps_desc_rate_ta_rst_msk,
+                           tps_desc_rate_ta_rst_shift,
+                           curr_time_res);
+}
+
+void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+                                      u32 tx_pkt_shed_desc_rate_lim)
+{
+       aq_hw_write_reg_bit(aq_hw, tps_desc_rate_lim_adr,
+                           tps_desc_rate_lim_msk,
+                           tps_desc_rate_lim_shift,
+                           tx_pkt_shed_desc_rate_lim);
+}
+
+void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+                                         u32 tx_pkt_shed_desc_tc_arb_mode)
+{
+       aq_hw_write_reg_bit(aq_hw, tps_desc_tc_arb_mode_adr,
+                           tps_desc_tc_arb_mode_msk,
+                           tps_desc_tc_arb_mode_shift,
+                           tx_pkt_shed_desc_tc_arb_mode);
+}
+
+void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+                                           u32 tx_pkt_shed_desc_tc_max_credit,
+                                           u32 tc)
+{
+       aq_hw_write_reg_bit(aq_hw, tps_desc_tctcredit_max_adr(tc),
+                           tps_desc_tctcredit_max_msk,
+                           tps_desc_tctcredit_max_shift,
+                           tx_pkt_shed_desc_tc_max_credit);
+}
+
+void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+                                       u32 tx_pkt_shed_desc_tc_weight, u32 tc)
+{
+       aq_hw_write_reg_bit(aq_hw, tps_desc_tctweight_adr(tc),
+                           tps_desc_tctweight_msk,
+                           tps_desc_tctweight_shift,
+                           tx_pkt_shed_desc_tc_weight);
+}
+
+void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+                                         u32 tx_pkt_shed_desc_vm_arb_mode)
+{
+       aq_hw_write_reg_bit(aq_hw, tps_desc_vm_arb_mode_adr,
+                           tps_desc_vm_arb_mode_msk,
+                           tps_desc_vm_arb_mode_shift,
+                           tx_pkt_shed_desc_vm_arb_mode);
+}
+
+void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+                                           u32 tx_pkt_shed_tc_data_max_credit,
+                                           u32 tc)
+{
+       aq_hw_write_reg_bit(aq_hw, tps_data_tctcredit_max_adr(tc),
+                           tps_data_tctcredit_max_msk,
+                           tps_data_tctcredit_max_shift,
+                           tx_pkt_shed_tc_data_max_credit);
+}
+
+void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+                                       u32 tx_pkt_shed_tc_data_weight, u32 tc)
+{
+       aq_hw_write_reg_bit(aq_hw, tps_data_tctweight_adr(tc),
+                           tps_data_tctweight_msk,
+                           tps_data_tctweight_shift,
+                           tx_pkt_shed_tc_data_weight);
+}
+
+/* tx */
+void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
+{
+       aq_hw_write_reg_bit(aq_hw, tx_reg_res_dsbl_adr,
+                           tx_reg_res_dsbl_msk,
+                           tx_reg_res_dsbl_shift, tx_reg_res_dis);
+}
+
+/* msm */
+u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg_bit(aq_hw, msm_reg_access_busy_adr,
+                                 msm_reg_access_busy_msk,
+                                 msm_reg_access_busy_shift);
+}
+
+void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+                                       u32 reg_addr_for_indirect_addr)
+{
+       aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr,
+                           msm_reg_addr_msk,
+                           msm_reg_addr_shift,
+                           reg_addr_for_indirect_addr);
+}
+
+void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
+{
+       aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr,
+                           msm_reg_rd_strobe_msk,
+                           msm_reg_rd_strobe_shift,
+                           reg_rd_strobe);
+}
+
+u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr);
+}
+
+void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
+{
+       aq_hw_write_reg(aq_hw, msm_reg_wr_data_adr, reg_wr_data);
+}
+
+void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
+{
+       aq_hw_write_reg_bit(aq_hw, msm_reg_wr_strobe_adr,
+                           msm_reg_wr_strobe_msk,
+                           msm_reg_wr_strobe_shift,
+                           reg_wr_strobe);
+}
+
+/* pci */
+void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
+{
+       aq_hw_write_reg_bit(aq_hw, pci_reg_res_dsbl_adr,
+                           pci_reg_res_dsbl_msk,
+                           pci_reg_res_dsbl_shift,
+                           pci_reg_res_dis);
+}
+
+void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, u32 glb_cpu_scratch_scp,
+                                u32 scratch_scp)
+{
+       aq_hw_write_reg(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp),
+                       glb_cpu_scratch_scp);
+}
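
The msm_* helpers above implement a strobe-based indirect access protocol: software loads the target register address, pulses a read or write strobe, and polls the busy flag before collecting the data word. Below is a minimal sketch of a blocking indirect read composed from these primitives; the helper name, retry budget, and delay are illustrative assumptions (as are the usual <linux/delay.h>/<linux/errno.h> includes), not part of this patch.

static int msm_indirect_read_sketch(struct aq_hw_s *aq_hw, u32 addr, u32 *val)
{
        int budget = 100; /* retry budget is an assumption */

        msm_reg_addr_for_indirect_addr_set(aq_hw, addr);
        msm_reg_rd_strobe_set(aq_hw, 1U);

        /* msm_reg_access_status_get() returns the busy bit; wait for it to clear */
        while (msm_reg_access_status_get(aq_hw)) {
                if (--budget <= 0)
                        return -ETIME;
                udelay(10);
        }

        *val = msm_reg_rd_data_get(aq_hw);
        return 0;
}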
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
new file mode 100644 (file)
index 0000000..ed1085b
--- /dev/null
@@ -0,0 +1,677 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_llh.h: Declarations of bitfield and register access functions for
+ * Atlantic registers.
+ */
+
+#ifndef HW_ATL_LLH_H
+#define HW_ATL_LLH_H
+
+#include <linux/types.h>
+
+struct aq_hw_s;
+
+/* global */
+
+/* set global microprocessor semaphore */
+void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+                        u32 semaphore);
+
+/* get global microprocessor semaphore */
+u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
+
+/* set global register reset disable */
+void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
+
+/* set soft reset */
+void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
+
+/* get soft reset */
+u32 glb_soft_res_get(struct aq_hw_s *aq_hw);
+
+/* stats */
+
+u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good octet counter lsw */
+u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good packet counter lsw */
+u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good octet counter lsw */
+u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good packet counter lsw */
+u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good octet counter msw */
+u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good packet counter msw */
+u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good octet counter msw */
+u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good packet counter msw */
+u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx errors counter register */
+u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx unicast frames counter register */
+u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx multicast frames counter register */
+u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx broadcast frames counter register */
+u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx broadcast octets counter register 1 */
+u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+
+/* get msm rx unicast octets counter register 0 */
+u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+
+/* get rx dma statistics counter 7 */
+u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
+
+/* get msm tx errors counter register */
+u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx unicast frames counter register */
+u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx multicast frames counter register */
+u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx broadcast frames counter register */
+u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx multicast octets counter register 1 */
+u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
+
+/* get msm tx broadcast octets counter register 1 */
+u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+
+/* get msm tx unicast octets counter register 0 */
+u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+
+/* get global mif identification */
+u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
+
+/* interrupt */
+
+/* set interrupt auto mask lsw */
+void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw);
+
+/* set interrupt mapping enable rx */
+void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx);
+
+/* set interrupt mapping enable tx */
+void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx);
+
+/* set interrupt mapping rx */
+void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
+
+/* set interrupt mapping tx */
+void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
+
+/* set interrupt mask clear lsw */
+void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw);
+
+/* set interrupt mask set lsw */
+void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
+
+/* set interrupt register reset disable */
+void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
+
+/* set interrupt status clear lsw */
+void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+                                u32 irq_status_clearlsw);
+
+/* get interrupt status lsw */
+u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
+
+/* get reset interrupt */
+u32 itr_res_irq_get(struct aq_hw_s *aq_hw);
+
+/* set reset interrupt */
+void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
+
+/* rdm */
+
+/* set cpu id */
+void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+
+/* set rx dca enable */
+void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
+
+/* set rx dca mode */
+void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
+
+/* set rx descriptor data buffer size */
+void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+                                   u32 rx_desc_data_buff_size,
+                                   u32 descriptor);
+
+/* set rx descriptor dca enable */
+void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+                           u32 dca);
+
+/* set rx descriptor enable */
+void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+                       u32 descriptor);
+
+/* set rx descriptor header splitting */
+void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+                                   u32 rx_desc_head_splitting,
+                                   u32 descriptor);
+
+/* get rx descriptor head pointer */
+u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+
+/* set rx descriptor length */
+void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+                        u32 descriptor);
+
+/* set rx descriptor write-back interrupt enable */
+void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+                                 u32 rx_desc_wr_wb_irq_en);
+
+/* set rx header dca enable */
+void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+                           u32 dca);
+
+/* set rx payload dca enable */
+void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca);
+
+/* set rx descriptor header buffer size */
+void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+                                   u32 rx_desc_head_buff_size,
+                                   u32 descriptor);
+
+/* set rx descriptor reset */
+void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+                        u32 descriptor);
+
+/* Set RDM Interrupt Moderation Enable */
+void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en);
+
+/* reg */
+
+/* set general interrupt mapping register */
+void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx);
+
+/* get general interrupt status register */
+u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
+
+/* set interrupt global control register */
+void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
+
+/* set interrupt throttle register */
+void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
+
+/* set rx dma descriptor base address lsw */
+void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+                                       u32 rx_dma_desc_base_addrlsw,
+                                       u32 descriptor);
+
+/* set rx dma descriptor base address msw */
+void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+                                       u32 rx_dma_desc_base_addrmsw,
+                                       u32 descriptor);
+
+/* get rx dma descriptor status register */
+u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
+
+/* set rx dma descriptor tail pointer register */
+void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+                                 u32 rx_dma_desc_tail_ptr,
+                                 u32 descriptor);
+
+/* set rx filter multicast filter mask register */
+void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+                                u32 rx_flr_mcst_flr_msk);
+
+/* set rx filter multicast filter register */
+void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+                            u32 filter);
+
+/* set rx filter rss control register 1 */
+void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+                               u32 rx_flr_rss_control1);
+
+/* Set RX Filter Control Register 2 */
+void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
+
+/* Set RX Interrupt Moderation Control Register */
+void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+                               u32 rx_intr_moderation_ctl,
+                               u32 queue);
+
+/* set tx dma debug control */
+void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl);
+
+/* set tx dma descriptor base address lsw */
+void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+                                       u32 tx_dma_desc_base_addrlsw,
+                                       u32 descriptor);
+
+/* set tx dma descriptor base address msw */
+void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+                                       u32 tx_dma_desc_base_addrmsw,
+                                       u32 descriptor);
+
+/* set tx dma descriptor tail pointer register */
+void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+                                 u32 tx_dma_desc_tail_ptr,
+                                 u32 descriptor);
+
+/* Set TX Interrupt Moderation Control Register */
+void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+                               u32 tx_intr_moderation_ctl,
+                               u32 queue);
+
+/* set global microprocessor scratch pad */
+void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+                                u32 glb_cpu_scratch_scp, u32 scratch_scp);
+
+/* rpb */
+
+/* set dma system loopback */
+void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
+
+/* set rx traffic class mode */
+void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+                                   u32 rx_traf_class_mode);
+
+/* set rx buffer enable */
+void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
+
+/* set rx buffer high threshold (per tc) */
+void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+                                        u32 rx_buff_hi_threshold_per_tc,
+                                        u32 buffer);
+
+/* set rx buffer low threshold (per tc) */
+void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+                                        u32 rx_buff_lo_threshold_per_tc,
+                                        u32 buffer);
+
+/* set rx flow control mode */
+void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
+
+/* set rx packet buffer size (per tc) */
+void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+                                    u32 rx_pkt_buff_size_per_tc,
+                                    u32 buffer);
+
+/* set rx xoff enable (per tc) */
+void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+                              u32 buffer);
+
+/* rpf */
+
+/* set l2 broadcast count threshold */
+void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+                                       u32 l2broadcast_count_threshold);
+
+/* set l2 broadcast enable */
+void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
+
+/* set l2 broadcast filter action */
+void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+                               u32 l2broadcast_flr_act);
+
+/* set l2 multicast filter enable */
+void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
+                              u32 filter);
+
+/* set l2 promiscuous mode enable */
+void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+                                 u32 l2promiscuous_mode_en);
+
+/* set l2 unicast filter action */
+void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
+                             u32 filter);
+
+/* set l2 unicast filter enable */
+void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+                        u32 filter);
+
+/* set l2 unicast destination address lsw */
+void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+                                     u32 l2unicast_dest_addresslsw,
+                                     u32 filter);
+
+/* set l2 unicast destination address msw */
+void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+                                     u32 l2unicast_dest_addressmsw,
+                                     u32 filter);
+
+/* Set L2 Accept all Multicast packets */
+void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+                                    u32 l2_accept_all_mc_packets);
+
+/* set user-priority tc mapping */
+void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+                                     u32 user_priority_tc_map, u32 tc);
+
+/* set rss key address */
+void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
+
+/* set rss key write data */
+void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
+
+/* get rss key write enable */
+u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
+
+/* set rss key write enable */
+void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
+
+/* set rss redirection table address */
+void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+                               u32 rss_redir_tbl_addr);
+
+/* set rss redirection table write data */
+void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+                                  u32 rss_redir_tbl_wr_data);
+
+/* get rss redirection write enable */
+u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
+
+/* set rss redirection write enable */
+void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
+
+/* set tpo to rpf system loopback */
+void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+                               u32 tpo_to_rpf_sys_lbk);
+
+/* set vlan inner ethertype */
+void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
+
+/* set vlan outer ethertype */
+void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
+
+/* set vlan promiscuous mode enable */
+void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en);
+
+/* Set VLAN untagged action */
+void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act);
+
+/* Set VLAN accept untagged packets */
+void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+                                         u32 vlan_accept_untagged_packets);
+
+/* Set VLAN filter enable */
+void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter);
+
+/* Set VLAN Filter Action */
+void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
+                         u32 filter);
+
+/* Set VLAN ID Filter */
+void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter);
+
+/* set ethertype filter enable */
+void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter);
+
+/* set ethertype user-priority enable */
+void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+                                  u32 etht_user_priority_en, u32 filter);
+
+/* set ethertype rx queue enable */
+void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
+                             u32 filter);
+
+/* set ethertype rx queue */
+void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+                          u32 filter);
+
+/* set ethertype user-priority */
+void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
+                               u32 filter);
+
+/* set ethertype management queue */
+void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+                           u32 filter);
+
+/* set ethertype filter action */
+void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+                         u32 filter);
+
+/* set ethertype filter */
+void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
+
+/* rpo */
+
+/* set ipv4 header checksum offload enable */
+void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+                                      u32 ipv4header_crc_offload_en);
+
+/* set rx descriptor vlan stripping */
+void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+                                   u32 rx_desc_vlan_stripping,
+                                   u32 descriptor);
+
+/* set tcp/udp checksum offload enable */
+void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+                                   u32 tcp_udp_crc_offload_en);
+
+/* Set LRO Patch Optimization Enable */
+void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+                                      u32 lro_patch_optimization_en);
+
+/* Set Large Receive Offload Enable */
+void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
+
+/* Set LRO Q Sessions Limit */
+void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim);
+
+/* Set LRO Total Descriptor Limit */
+void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim);
+
+/* Set LRO Min Payload of First Packet */
+void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+                                     u32 lro_min_pld_of_first_pkt);
+
+/* Set LRO Packet Limit */
+void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_packet_lim);
+
+/* Set LRO Max Number of Descriptors */
+void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+                                       u32 lro_max_desc_num, u32 lro);
+
+/* Set LRO Time Base Divider */
+void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+                                  u32 lro_time_base_divider);
+
+/* Set LRO Inactive Interval */
+void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+                                  u32 lro_inactive_interval);
+
+/* Set LRO Max Coalescing Interval */
+void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+                                        u32 lro_max_coalescing_interval);
+
+/* rx */
+
+/* set rx register reset disable */
+void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
+
+/* tdm */
+
+/* set cpu id */
+void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+
+/* set large send offload enable */
+void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+                                  u32 large_send_offload_en);
+
+/* set tx descriptor enable */
+void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor);
+
+/* set tx dca enable */
+void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
+
+/* set tx dca mode */
+void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
+
+/* set tx descriptor dca enable */
+void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca);
+
+/* get tx descriptor head pointer */
+u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+
+/* set tx descriptor length */
+void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+                        u32 descriptor);
+
+/* set tx descriptor write-back interrupt enable */
+void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+                                 u32 tx_desc_wr_wb_irq_en);
+
+/* set tx descriptor write-back threshold */
+void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+                                    u32 tx_desc_wr_wb_threshold,
+                                    u32 descriptor);
+
+/* Set TDM Interrupt Moderation Enable */
+void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+                              u32 tdm_irq_moderation_en);
+
+/* thm */
+
+/* set lso tcp flag of first packet */
+void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+                                      u32 lso_tcp_flag_of_first_pkt);
+
+/* set lso tcp flag of last packet */
+void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+                                     u32 lso_tcp_flag_of_last_pkt);
+
+/* set lso tcp flag of middle packet */
+void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+                                       u32 lso_tcp_flag_of_middle_pkt);
+
+/* tpb */
+
+/* set tx buffer enable */
+void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
+
+/* set tx buffer high threshold (per tc) */
+void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+                                        u32 tx_buff_hi_threshold_per_tc,
+                                        u32 buffer);
+
+/* set tx buffer low threshold (per tc) */
+void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+                                        u32 tx_buff_lo_threshold_per_tc,
+                                        u32 buffer);
+
+/* set tx dma system loopback enable */
+void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
+
+/* set tx packet buffer size (per tc) */
+void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+                                    u32 tx_pkt_buff_size_per_tc, u32 buffer);
+
+/* set tx path pad insert enable */
+void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
+
+/* tpo */
+
+/* set ipv4 header checksum offload enable */
+void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+                                      u32 ipv4header_crc_offload_en);
+
+/* set tcp/udp checksum offload enable */
+void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+                                   u32 tcp_udp_crc_offload_en);
+
+/* set tx pkt system loopback enable */
+void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en);
+
+/* tps */
+
+/* set tx packet scheduler data arbitration mode */
+void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+                                      u32 tx_pkt_shed_data_arb_mode);
+
+/* set tx packet scheduler descriptor rate current time reset */
+void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+                                                u32 curr_time_res);
+
+/* set tx packet scheduler descriptor rate limit */
+void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+                                      u32 tx_pkt_shed_desc_rate_lim);
+
+/* set tx packet scheduler descriptor tc arbitration mode */
+void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+                                         u32 tx_pkt_shed_desc_tc_arb_mode);
+
+/* set tx packet scheduler descriptor tc max credit */
+void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+                                           u32 tx_pkt_shed_desc_tc_max_credit,
+                                           u32 tc);
+
+/* set tx packet scheduler descriptor tc weight */
+void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+                                       u32 tx_pkt_shed_desc_tc_weight,
+                                       u32 tc);
+
+/* set tx packet scheduler descriptor vm arbitration mode */
+void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+                                         u32 tx_pkt_shed_desc_vm_arb_mode);
+
+/* set tx packet scheduler tc data max credit */
+void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+                                           u32 tx_pkt_shed_tc_data_max_credit,
+                                           u32 tc);
+
+/* set tx packet scheduler tc data weight */
+void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+                                       u32 tx_pkt_shed_tc_data_weight,
+                                       u32 tc);
+
+/* tx */
+
+/* set tx register reset disable */
+void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
+
+/* msm */
+
+/* get register access status */
+u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw);
+
+/* set register address for indirect address */
+void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+                                       u32 reg_addr_for_indirect_addr);
+
+/* set register read strobe */
+void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
+
+/* get register read data */
+u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
+
+/* set register write data */
+void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
+
+/* set register write strobe */
+void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
+
+/* pci */
+
+/* set pci register reset disable */
+void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
+
+#endif /* HW_ATL_LLH_H */
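
Taken together, the rdm_* and reg_rx_dma_* setters declared above are enough to bring up one receive descriptor ring. The sequence below is a minimal, hypothetical sketch: the parameter names, the /8 length granularity, and the 2 KiB buffer size with 1 KiB units are assumptions for illustration only.

static void rx_ring_start_sketch(struct aq_hw_s *aq_hw, u32 ring,
                                 dma_addr_t dma_addr, u32 ring_size)
{
        /* program the descriptor area and ring geometry, then enable */
        reg_rx_dma_desc_base_addresslswset(aq_hw, lower_32_bits(dma_addr), ring);
        reg_rx_dma_desc_base_addressmswset(aq_hw, upper_32_bits(dma_addr), ring);
        rdm_rx_desc_len_set(aq_hw, ring_size / 8U, ring); /* granularity assumed */
        rdm_rx_desc_data_buff_size_set(aq_hw, 2048U / 1024U, ring); /* units assumed */
        rdm_rx_desc_en_set(aq_hw, 1U, ring);
        reg_rx_dma_desc_tail_ptr_set(aq_hw, 0U, ring);
}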
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
new file mode 100644 (file)
index 0000000..5527fc0
--- /dev/null
@@ -0,0 +1,2375 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_llh_internal.h: Preprocessor definitions
+ * for Atlantic registers.
+ */
+
+#ifndef HW_ATL_LLH_INTERNAL_H
+#define HW_ATL_LLH_INTERNAL_H
+
+/* global microprocessor semaphore definitions
+ * base address: 0x000003a0
+ * parameter: semaphore {s} | stride size 0x4 | range [0, 15]
+ */
+#define glb_cpu_sem_adr(semaphore)  (0x000003a0u + (semaphore) * 0x4)
+/* register address for bitfield rx dma good octet counter lsw [1f:0] */
+#define stats_rx_dma_good_octet_counterlsw__adr 0x00006808
+/* register address for bitfield rx dma good packet counter lsw [1f:0] */
+#define stats_rx_dma_good_pkt_counterlsw__adr 0x00006800
+/* register address for bitfield tx dma good octet counter lsw [1f:0] */
+#define stats_tx_dma_good_octet_counterlsw__adr 0x00008808
+/* register address for bitfield tx dma good packet counter lsw [1f:0] */
+#define stats_tx_dma_good_pkt_counterlsw__adr 0x00008800
+
+/* register address for bitfield rx dma good octet counter msw [3f:20] */
+#define stats_rx_dma_good_octet_countermsw__adr 0x0000680c
+/* register address for bitfield rx dma good packet counter msw [3f:20] */
+#define stats_rx_dma_good_pkt_countermsw__adr 0x00006804
+/* register address for bitfield tx dma good octet counter msw [3f:20] */
+#define stats_tx_dma_good_octet_countermsw__adr 0x0000880c
+/* register address for bitfield tx dma good packet counter msw [3f:20] */
+#define stats_tx_dma_good_pkt_countermsw__adr 0x00008804
+
+/* preprocessor definitions for msm rx errors counter register */
+#define mac_msm_rx_errs_cnt_adr 0x00000120u
+
+/* preprocessor definitions for msm rx unicast frames counter register */
+#define mac_msm_rx_ucst_frm_cnt_adr 0x000000e0u
+
+/* preprocessor definitions for msm rx multicast frames counter register */
+#define mac_msm_rx_mcst_frm_cnt_adr 0x000000e8u
+
+/* preprocessor definitions for msm rx broadcast frames counter register */
+#define mac_msm_rx_bcst_frm_cnt_adr 0x000000f0u
+
+/* preprocessor definitions for msm rx broadcast octets counter register 1 */
+#define mac_msm_rx_bcst_octets_counter1_adr 0x000001b0u
+
+/* preprocessor definitions for msm rx broadcast octets counter register 2 */
+#define mac_msm_rx_bcst_octets_counter2_adr 0x000001b4u
+
+/* preprocessor definitions for msm rx unicast octets counter register 0 */
+#define mac_msm_rx_ucst_octets_counter0_adr 0x000001b8u
+
+/* preprocessor definitions for rx dma statistics counter 7 */
+#define rx_dma_stat_counter7_adr 0x00006818u
+
+/* preprocessor definitions for msm tx unicast frames counter register */
+#define mac_msm_tx_ucst_frm_cnt_adr 0x00000108u
+
+/* preprocessor definitions for msm tx multicast frames counter register */
+#define mac_msm_tx_mcst_frm_cnt_adr 0x00000110u
+
+/* preprocessor definitions for global mif identification */
+#define glb_mif_id_adr 0x0000001cu
+
+/* register address for bitfield iamr_lsw[1f:0] */
+#define itr_iamrlsw_adr 0x00002090
+/* register address for bitfield rx dma drop packet counter [1f:0] */
+#define rpb_rx_dma_drop_pkt_cnt_adr 0x00006818
+
+/* register address for bitfield imcr_lsw[1f:0] */
+#define itr_imcrlsw_adr 0x00002070
+/* register address for bitfield imsr_lsw[1f:0] */
+#define itr_imsrlsw_adr 0x00002060
+/* register address for bitfield itr_reg_res_dsbl */
+#define itr_reg_res_dsbl_adr 0x00002300
+/* bitmask for bitfield itr_reg_res_dsbl */
+#define itr_reg_res_dsbl_msk 0x20000000
+/* lower bit position of bitfield itr_reg_res_dsbl */
+#define itr_reg_res_dsbl_shift 29
+/* register address for bitfield iscr_lsw[1f:0] */
+#define itr_iscrlsw_adr 0x00002050
+/* register address for bitfield isr_lsw[1f:0] */
+#define itr_isrlsw_adr 0x00002000
+/* register address for bitfield itr_reset */
+#define itr_res_adr 0x00002300
+/* bitmask for bitfield itr_reset */
+#define itr_res_msk 0x80000000
+/* lower bit position of bitfield itr_reset */
+#define itr_res_shift 31
+/* register address for bitfield dca{d}_cpuid[7:0] */
+#define rdm_dcadcpuid_adr(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_cpuid[7:0] */
+#define rdm_dcadcpuid_msk 0x000000ff
+/* lower bit position of bitfield dca{d}_cpuid[7:0] */
+#define rdm_dcadcpuid_shift 0
+
+/* rx dca_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca_en".
+ * port="pif_rdm_dca_en_i"
+ */
+
+/* register address for bitfield dca_en */
+#define rdm_dca_en_adr 0x00006180
+/* bitmask for bitfield dca_en */
+#define rdm_dca_en_msk 0x80000000
+/* inverted bitmask for bitfield dca_en */
+#define rdm_dca_en_mskn 0x7fffffff
+/* lower bit position of bitfield dca_en */
+#define rdm_dca_en_shift 31
+/* width of bitfield dca_en */
+#define rdm_dca_en_width 1
+/* default value of bitfield dca_en */
+#define rdm_dca_en_default 0x1
+
+/* rx dca_mode[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "dca_mode[3:0]".
+ * port="pif_rdm_dca_mode_i[3:0]"
+ */
+
+/* register address for bitfield dca_mode[3:0] */
+#define rdm_dca_mode_adr 0x00006180
+/* bitmask for bitfield dca_mode[3:0] */
+#define rdm_dca_mode_msk 0x0000000f
+/* inverted bitmask for bitfield dca_mode[3:0] */
+#define rdm_dca_mode_mskn 0xfffffff0
+/* lower bit position of bitfield dca_mode[3:0] */
+#define rdm_dca_mode_shift 0
+/* width of bitfield dca_mode[3:0] */
+#define rdm_dca_mode_width 4
+/* default value of bitfield dca_mode[3:0] */
+#define rdm_dca_mode_default 0x0
+
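Each bitfield above is described by the same six constants (adr, msk, mskn, shift, width, default), which is exactly the tuple a generic read-modify-write accessor needs. A sketch of that idea follows, assuming the aq_hw_read_reg()/aq_hw_write_reg() word accessors used throughout hw_atl_llh.c; the driver's own aq_hw_write_reg_bit() plays this role, and this is not its actual body.

static void write_reg_bit_sketch(struct aq_hw_s *aq_hw, u32 adr, u32 msk,
                                 u32 mskn, u32 shift, u32 val)
{
        u32 reg = aq_hw_read_reg(aq_hw, adr);

        reg &= mskn;                 /* clear the field via the inverted mask */
        reg |= (val << shift) & msk; /* merge the new value into position */
        aq_hw_write_reg(aq_hw, adr, reg);
}

Enabling RX DCA, for instance, would be write_reg_bit_sketch(hw, rdm_dca_en_adr, rdm_dca_en_msk, rdm_dca_en_mskn, rdm_dca_en_shift, 1U).
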
+/* rx desc{d}_data_size[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_data_size[4:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc0_data_size_i[4:0]"
+ */
+
+/* register address for bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_msk 0x0000001f
+/* inverted bitmask for bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_mskn 0xffffffe0
+/* lower bit position of bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_shift 0
+/* width of bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_width 5
+/* default value of bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_default 0x0
+
+/* rx dca{d}_desc_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_desc_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_rdm_dca_desc_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_adr(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_msk 0x80000000
+/* inverted bitmask for bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_mskn 0x7fffffff
+/* lower bit position of bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_shift 31
+/* width of bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_width 1
+/* default value of bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_default 0x0
+
+/* rx desc{d}_en bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_en".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc_en_i[0]"
+ */
+
+/* register address for bitfield desc{d}_en */
+#define rdm_descden_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_en */
+#define rdm_descden_msk 0x80000000
+/* inverted bitmask for bitfield desc{d}_en */
+#define rdm_descden_mskn 0x7fffffff
+/* lower bit position of bitfield desc{d}_en */
+#define rdm_descden_shift 31
+/* width of bitfield desc{d}_en */
+#define rdm_descden_width 1
+/* default value of bitfield desc{d}_en */
+#define rdm_descden_default 0x0
+
+/* rx desc{d}_hdr_size[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc0_hdr_size_i[4:0]"
+ */
+
+/* register address for bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_msk 0x00001f00
+/* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_mskn 0xffffe0ff
+/* lower bit position of bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_shift 8
+/* width of bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_width 5
+/* default value of bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_default 0x0
+
+/* rx desc{d}_hdr_split bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hdr_split".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc_hdr_split_i[0]"
+ */
+
+/* register address for bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_msk 0x10000000
+/* inverted bitmask for bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_mskn 0xefffffff
+/* lower bit position of bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_shift 28
+/* width of bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_width 1
+/* default value of bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_default 0x0
+
+/* rx desc{d}_hd[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="rdm_pif_desc0_hd_o[12:0]"
+ */
+
+/* register address for bitfield desc{d}_hd[c:0] */
+#define rdm_descdhd_adr(descriptor) (0x00005b0c + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_hd[c:0] */
+#define rdm_descdhd_msk 0x00001fff
+/* inverted bitmask for bitfield desc{d}_hd[c:0] */
+#define rdm_descdhd_mskn 0xffffe000
+/* lower bit position of bitfield desc{d}_hd[c:0] */
+#define rdm_descdhd_shift 0
+/* width of bitfield desc{d}_hd[c:0] */
+#define rdm_descdhd_width 13
+
+/* rx desc{d}_len[9:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_len[9:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc0_len_i[9:0]"
+ */
+
+/* register address for bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_msk 0x00001ff8
+/* inverted bitmask for bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_mskn 0xffffe007
+/* lower bit position of bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_shift 3
+/* width of bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_width 10
+/* default value of bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_default 0x0
+
+/* rx desc{d}_reset bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_reset".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_q_pf_res_i[0]"
+ */
+
+/* register address for bitfield desc{d}_reset */
+#define rdm_descdreset_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_reset */
+#define rdm_descdreset_msk 0x02000000
+/* inverted bitmask for bitfield desc{d}_reset */
+#define rdm_descdreset_mskn 0xfdffffff
+/* lower bit position of bitfield desc{d}_reset */
+#define rdm_descdreset_shift 25
+/* width of bitfield desc{d}_reset */
+#define rdm_descdreset_width 1
+/* default value of bitfield desc{d}_reset */
+#define rdm_descdreset_default 0x0
+
+/* rx int_desc_wrb_en bitfield definitions
+ * preprocessor definitions for the bitfield "int_desc_wrb_en".
+ * port="pif_rdm_int_desc_wrb_en_i"
+ */
+
+/* register address for bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_adr 0x00005a30
+/* bitmask for bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_msk 0x00000004
+/* inverted bitmask for bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_mskn 0xfffffffb
+/* lower bit position of bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_shift 2
+/* width of bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_width 1
+/* default value of bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_default 0x0
+
+/* rx dca{d}_hdr_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_hdr_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_rdm_dca_hdr_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_adr(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_msk 0x40000000
+/* inverted bitmask for bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_mskn 0xbfffffff
+/* lower bit position of bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_shift 30
+/* width of bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_width 1
+/* default value of bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_default 0x0
+
+/* rx dca{d}_pay_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_pay_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_rdm_dca_pay_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_adr(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_msk 0x20000000
+/* inverted bitmask for bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_mskn 0xdfffffff
+/* lower bit position of bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_shift 29
+/* width of bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_width 1
+/* default value of bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_default 0x0
+
+/* RX rdm_int_rim_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "rdm_int_rim_en".
+ * PORT="pif_rdm_int_rim_en_i"
+ */
+
+/* Register address for bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_adr 0x00005A30
+/* Bitmask for bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_msk 0x00000008
+/* Inverted bitmask for bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_mskn 0xFFFFFFF7
+/* Lower bit position of bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_shift 3
+/* Width of bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_width 1
+/* Default value of bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_default 0x0
+
+/* general interrupt mapping register definitions
+ * preprocessor definitions for general interrupt mapping register
+ * base address: 0x00002180
+ * parameter: regidx {f} | stride size 0x4 | range [0, 3]
+ */
+#define gen_intr_map_adr(regidx) (0x00002180u + (regidx) * 0x4)
+
+/* general interrupt status register definitions
+ * preprocessor definitions for general interrupt status register
+ * address: 0x000021A4
+ */
+
+#define gen_intr_stat_adr 0x000021A4U
+
+/* interrupt global control register definitions
+ * preprocessor definitions for interrupt global control register
+ * address: 0x00002300
+ */
+#define intr_glb_ctl_adr 0x00002300u
+
+/* interrupt throttle register definitions
+ * preprocessor definitions for interrupt throttle register
+ * base address: 0x00002800
+ * parameter: throttle {t} | stride size 0x4 | range [0, 31]
+ */
+#define intr_thr_adr(throttle) (0x00002800u + (throttle) * 0x4)
+
+/* rx dma descriptor base address lsw definitions
+ * preprocessor definitions for rx dma descriptor base address lsw
+ * base address: 0x00005b00
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define rx_dma_desc_base_addrlsw_adr(descriptor) \
+       (0x00005b00u + (descriptor) * 0x20)
+
+/* rx dma descriptor base address msw definitions
+ * preprocessor definitions for rx dma descriptor base address msw
+ * base address: 0x00005b04
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define rx_dma_desc_base_addrmsw_adr(descriptor) \
+       (0x00005b04u + (descriptor) * 0x20)
+
+/* rx dma descriptor status register definitions
+ * preprocessor definitions for rx dma descriptor status register
+ * base address: 0x00005b14
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define rx_dma_desc_stat_adr(descriptor) (0x00005b14u + (descriptor) * 0x20)
+
+/* rx dma descriptor tail pointer register definitions
+ * preprocessor definitions for rx dma descriptor tail pointer register
+ * base address: 0x00005b10
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define rx_dma_desc_tail_ptr_adr(descriptor) (0x00005b10u + (descriptor) * 0x20)
+
+/* rx interrupt moderation control register definitions
+ * Preprocessor definitions for RX Interrupt Moderation Control Register
+ * Base Address: 0x00005A40
+ * Parameter: RIM {R} | stride size 0x4 | range [0, 31]
+ */
+#define rx_intr_moderation_ctl_adr(rim) (0x00005A40u + (rim) * 0x4)
+
+/* rx filter multicast filter mask register definitions
+ * preprocessor definitions for rx filter multicast filter mask register
+ * address: 0x00005270
+ */
+#define rx_flr_mcst_flr_msk_adr 0x00005270u
+
+/* rx filter multicast filter register definitions
+ * preprocessor definitions for rx filter multicast filter register
+ * base address: 0x00005250
+ * parameter: filter {f} | stride size 0x4 | range [0, 7]
+ */
+#define rx_flr_mcst_flr_adr(filter) (0x00005250u + (filter) * 0x4)
+
+/* RX Filter RSS Control Register 1 Definitions
+ * Preprocessor definitions for RX Filter RSS Control Register 1
+ * Address: 0x000054C0
+ */
+#define rx_flr_rss_control1_adr 0x000054C0u
+
+/* RX Filter Control Register 2 Definitions
+ * Preprocessor definitions for RX Filter Control Register 2
+ * Address: 0x00005104
+ */
+#define rx_flr_control2_adr 0x00005104u
+
+/* tx tx dma debug control [1f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx dma debug control [1f:0]".
+ * port="pif_tdm_debug_cntl_i[31:0]"
+ */
+
+/* register address for bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_adr 0x00008920
+/* bitmask for bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_msk 0xffffffff
+/* inverted bitmask for bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_mskn 0x00000000
+/* lower bit position of bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_shift 0
+/* width of bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_width 32
+/* default value of bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_default 0x0
+
+/* tx dma descriptor base address lsw definitions
+ * preprocessor definitions for tx dma descriptor base address lsw
+ * base address: 0x00007c00
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ */
+#define tx_dma_desc_base_addrlsw_adr(descriptor) \
+       (0x00007c00u + (descriptor) * 0x40)
+
+/* tx dma descriptor tail pointer register definitions
+ * preprocessor definitions for tx dma descriptor tail pointer register
+ * base address: 0x00007c10
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ */
+#define tx_dma_desc_tail_ptr_adr(descriptor) (0x00007c10u + (descriptor) * 0x40)
+
+/* rx dma_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_sys_loopback".
+ * port="pif_rpb_dma_sys_lbk_i"
+ */
+
+/* register address for bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_adr 0x00005000
+/* bitmask for bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_msk 0x00000040
+/* inverted bitmask for bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_mskn 0xffffffbf
+/* lower bit position of bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_shift 6
+/* width of bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_width 1
+/* default value of bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_default 0x0
+
+/* rx rx_tc_mode bitfield definitions
+ * preprocessor definitions for the bitfield "rx_tc_mode".
+ * port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i"
+ */
+
+/* register address for bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_adr 0x00005700
+/* bitmask for bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_msk 0x00000100
+/* inverted bitmask for bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_mskn 0xfffffeff
+/* lower bit position of bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_shift 8
+/* width of bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_width 1
+/* default value of bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_default 0x0
+
+/* rx rx_buf_en bitfield definitions
+ * preprocessor definitions for the bitfield "rx_buf_en".
+ * port="pif_rpb_rx_buf_en_i"
+ */
+
+/* register address for bitfield rx_buf_en */
+#define rpb_rx_buf_en_adr 0x00005700
+/* bitmask for bitfield rx_buf_en */
+#define rpb_rx_buf_en_msk 0x00000001
+/* inverted bitmask for bitfield rx_buf_en */
+#define rpb_rx_buf_en_mskn 0xfffffffe
+/* lower bit position of bitfield rx_buf_en */
+#define rpb_rx_buf_en_shift 0
+/* width of bitfield rx_buf_en */
+#define rpb_rx_buf_en_width 1
+/* default value of bitfield rx_buf_en */
+#define rpb_rx_buf_en_default 0x0
+
+/* rx rx{b}_hi_thresh[d:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx0_hi_thresh_i[13:0]"
+ */
+
+/* register address for bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_msk 0x3fff0000
+/* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_mskn 0xc000ffff
+/* lower bit position of bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_shift 16
+/* width of bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_width 14
+/* default value of bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_default 0x0
+
+/* rx rx{b}_lo_thresh[d:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx0_lo_thresh_i[13:0]"
+ */
+
+/* register address for bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_msk 0x00003fff
+/* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_mskn 0xffffc000
+/* lower bit position of bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_shift 0
+/* width of bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_width 14
+/* default value of bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_default 0x0
+
+/* rx rx_fc_mode[1:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx_fc_mode[1:0]".
+ * port="pif_rpb_rx_fc_mode_i[1:0]"
+ */
+
+/* register address for bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_adr 0x00005700
+/* bitmask for bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_msk 0x00000030
+/* inverted bitmask for bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_mskn 0xffffffcf
+/* lower bit position of bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_shift 4
+/* width of bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_width 2
+/* default value of bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_default 0x0
+
+/* rx rx{b}_buf_size[8:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx0_buf_size_i[8:0]"
+ */
+
+/* register address for bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_adr(buffer) (0x00005710 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_msk 0x000001ff
+/* inverted bitmask for bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_mskn 0xfffffe00
+/* lower bit position of bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_shift 0
+/* width of bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_width 9
+/* default value of bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_default 0x0
+
+/* rx rx{b}_xoff_en bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_xoff_en".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx_xoff_en_i[0]"
+ */
+
+/* register address for bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_adr(buffer) (0x00005714 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_msk 0x80000000
+/* inverted bitmask for bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_mskn 0x7fffffff
+/* lower bit position of bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_shift 31
+/* width of bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_width 1
+/* default value of bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_default 0x0
+
+/* rx l2_bc_thresh[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_bc_thresh[f:0]".
+ * port="pif_rpf_l2_bc_thresh_i[15:0]"
+ */
+
+/* register address for bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_adr 0x00005100
+/* bitmask for bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_msk 0xffff0000
+/* inverted bitmask for bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_mskn 0x0000ffff
+/* lower bit position of bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_shift 16
+/* width of bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_width 16
+/* default value of bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_default 0x0
+
+/* rx l2_bc_en bitfield definitions
+ * preprocessor definitions for the bitfield "l2_bc_en".
+ * port="pif_rpf_l2_bc_en_i"
+ */
+
+/* register address for bitfield l2_bc_en */
+#define rpfl2bc_en_adr 0x00005100
+/* bitmask for bitfield l2_bc_en */
+#define rpfl2bc_en_msk 0x00000001
+/* inverted bitmask for bitfield l2_bc_en */
+#define rpfl2bc_en_mskn 0xfffffffe
+/* lower bit position of bitfield l2_bc_en */
+#define rpfl2bc_en_shift 0
+/* width of bitfield l2_bc_en */
+#define rpfl2bc_en_width 1
+/* default value of bitfield l2_bc_en */
+#define rpfl2bc_en_default 0x0
+
+/* rx l2_bc_act[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_bc_act[2:0]".
+ * port="pif_rpf_l2_bc_act_i[2:0]"
+ */
+
+/* register address for bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_adr 0x00005100
+/* bitmask for bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_msk 0x00007000
+/* inverted bitmask for bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_mskn 0xffff8fff
+/* lower bit position of bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_shift 12
+/* width of bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_width 3
+/* default value of bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_default 0x0
+
+/* rx l2_mc_en{f} bitfield definitions
+ * preprocessor definitions for the bitfield "l2_mc_en{f}".
+ * parameter: filter {f} | stride size 0x4 | range [0, 7]
+ * port="pif_rpf_l2_mc_en_i[0]"
+ */
+
+/* register address for bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_adr(filter) (0x00005250 + (filter) * 0x4)
+/* bitmask for bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_msk 0x80000000
+/* inverted bitmask for bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_mskn 0x7fffffff
+/* lower bit position of bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_shift 31
+/* width of bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_width 1
+/* default value of bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_default 0x0
+
+/* rx l2_promis_mode bitfield definitions
+ * preprocessor definitions for the bitfield "l2_promis_mode".
+ * port="pif_rpf_l2_promis_mode_i"
+ */
+
+/* register address for bitfield l2_promis_mode */
+#define rpfl2promis_mode_adr 0x00005100
+/* bitmask for bitfield l2_promis_mode */
+#define rpfl2promis_mode_msk 0x00000008
+/* inverted bitmask for bitfield l2_promis_mode */
+#define rpfl2promis_mode_mskn 0xfffffff7
+/* lower bit position of bitfield l2_promis_mode */
+#define rpfl2promis_mode_shift 3
+/* width of bitfield l2_promis_mode */
+#define rpfl2promis_mode_width 1
+/* default value of bitfield l2_promis_mode */
+#define rpfl2promis_mode_default 0x0
+
+/* rx l2_uc_act{f}[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]".
+ * parameter: filter {f} | stride size 0x8 | range [0, 37]
+ * port="pif_rpf_l2_uc_act0_i[2:0]"
+ */
+
+/* register address for bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_adr(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_msk 0x00070000
+/* inverted bitmask for bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_mskn 0xfff8ffff
+/* lower bit position of bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_shift 16
+/* width of bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_width 3
+/* default value of bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_default 0x0
+
+/* rx l2_uc_en{f} bitfield definitions
+ * preprocessor definitions for the bitfield "l2_uc_en{f}".
+ * parameter: filter {f} | stride size 0x8 | range [0, 37]
+ * port="pif_rpf_l2_uc_en_i[0]"
+ */
+
+/* register address for bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_adr(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_msk 0x80000000
+/* inverted bitmask for bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_mskn 0x7fffffff
+/* lower bit position of bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_shift 31
+/* width of bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_width 1
+/* default value of bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_default 0x0
+
+/* register address for bitfield l2_uc_da{f}_lsw[1f:0] */
+#define rpfl2uc_daflsw_adr(filter) (0x00005110 + (filter) * 0x8)
+/* register address for bitfield l2_uc_da{f}_msw[f:0] */
+#define rpfl2uc_dafmsw_adr(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_da{f}_msw[f:0] */
+#define rpfl2uc_dafmsw_msk 0x0000ffff
+/* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */
+#define rpfl2uc_dafmsw_shift 0
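+
+/* usage sketch (illustrative only): a 48-bit unicast da filter splits
+ * across two registers: the low 32 bits at *_daflsw_adr(f) and the high
+ * 16 bits in the low half of *_dafmsw_adr(f), which also carries the
+ * filter's enable bit (rpfl2uc_enf_msk above). programming filter f
+ * from a hypothetical u8 mac[6] (most significant byte first) might be:
+ *
+ *	u32 lsw = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
+ *	u32 msw = (mac[0] << 8) | mac[1];
+ *
+ *	aq_hw_write_reg(hw, rpfl2uc_daflsw_adr(f), lsw);
+ *	aq_hw_write_reg(hw, rpfl2uc_dafmsw_adr(f),
+ *			(msw & rpfl2uc_dafmsw_msk) | rpfl2uc_enf_msk);
+ */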
+
+/* rx l2_mc_accept_all bitfield definitions
+ * Preprocessor definitions for the bitfield "l2_mc_accept_all".
+ * PORT="pif_rpf_l2_mc_all_accept_i"
+ */
+
+/* Register address for bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_adr 0x00005270
+/* Bitmask for bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_msk 0x00004000
+/* Inverted bitmask for bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_mskn 0xFFFFBFFF
+/* Lower bit position of bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_shift 14
+/* Width of bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_width 1
+/* Default value of bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_default 0x0
+
+/* width of bitfield rx_tc_up{t}[2:0] */
+#define rpf_rpb_rx_tc_upt_width 3
+/* default value of bitfield rx_tc_up{t}[2:0] */
+#define rpf_rpb_rx_tc_upt_default 0x0
+
+/* rx rss_key_addr[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_key_addr[4:0]".
+ * port="pif_rpf_rss_key_addr_i[4:0]"
+ */
+
+/* register address for bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_adr 0x000054d0
+/* bitmask for bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_msk 0x0000001f
+/* inverted bitmask for bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_mskn 0xffffffe0
+/* lower bit position of bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_shift 0
+/* width of bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_width 5
+/* default value of bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_default 0x0
+
+/* rx rss_key_wr_data[1f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]".
+ * port="pif_rpf_rss_key_wr_data_i[31:0]"
+ */
+
+/* register address for bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_adr 0x000054d4
+/* bitmask for bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_msk 0xffffffff
+/* inverted bitmask for bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_mskn 0x00000000
+/* lower bit position of bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_shift 0
+/* width of bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_width 32
+/* default value of bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_default 0x0
+
+/* rx rss_key_wr_en_i bitfield definitions
+ * preprocessor definitions for the bitfield "rss_key_wr_en_i".
+ * port="pif_rpf_rss_key_wr_en_i"
+ */
+
+/* register address for bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_adr 0x000054d0
+/* bitmask for bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_msk 0x00000020
+/* inverted bitmask for bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_mskn 0xffffffdf
+/* lower bit position of bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_shift 5
+/* width of bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_width 1
+/* default value of bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_default 0x0
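+
+/* usage sketch (illustrative only): the rss key is loaded through an
+ * indirect interface: put one 32-bit word in rss_key_wr_data, select
+ * the destination word via rss_key_addr[4:0] and raise the write
+ * strobe in the same register. completion is assumed to be signalled
+ * by the strobe reading back as 0:
+ *
+ *	aq_hw_write_reg(hw, rpf_rss_key_wr_data_adr, key_word);
+ *	aq_hw_write_reg(hw, rpf_rss_key_addr_adr,
+ *			(idx & rpf_rss_key_addr_msk) |
+ *			rpf_rss_key_wr_eni_msk);
+ *	while (aq_hw_read_reg(hw, rpf_rss_key_addr_adr) &
+ *	       rpf_rss_key_wr_eni_msk)
+ *		;	// wait for the write strobe to clear
+ *
+ * the rss redirection table below (0x54e0/0x54e4) follows the same
+ * addr/data/strobe sequence.
+ */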
+
+/* rx rss_redir_addr[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_redir_addr[3:0]".
+ * port="pif_rpf_rss_redir_addr_i[3:0]"
+ */
+
+/* register address for bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_adr 0x000054e0
+/* bitmask for bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_msk 0x0000000f
+/* inverted bitmask for bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_mskn 0xfffffff0
+/* lower bit position of bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_shift 0
+/* width of bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_width 4
+/* default value of bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_default 0x0
+
+/* rx rss_redir_wr_data[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]".
+ * port="pif_rpf_rss_redir_wr_data_i[15:0]"
+ */
+
+/* register address for bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_adr 0x000054e4
+/* bitmask for bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_msk 0x0000ffff
+/* inverted bitmask for bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_mskn 0xffff0000
+/* lower bit position of bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_shift 0
+/* width of bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_width 16
+/* default value of bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_default 0x0
+
+/* rx rss_redir_wr_en_i bitfield definitions
+ * preprocessor definitions for the bitfield "rss_redir_wr_en_i".
+ * port="pif_rpf_rss_redir_wr_en_i"
+ */
+
+/* register address for bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_adr 0x000054e0
+/* bitmask for bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_msk 0x00000010
+/* inverted bitmask for bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_mskn 0xffffffef
+/* lower bit position of bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_shift 4
+/* width of bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_width 1
+/* default value of bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_default 0x0
+
+/* rx tpo_rpf_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "tpo_rpf_sys_loopback".
+ * port="pif_rpf_tpo_pkt_sys_lbk_i"
+ */
+
+/* register address for bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_adr 0x00005000
+/* bitmask for bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_msk 0x00000100
+/* inverted bitmask for bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_mskn 0xfffffeff
+/* lower bit position of bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_shift 8
+/* width of bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_width 1
+/* default value of bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_default 0x0
+
+/* rx vl_inner_tpid[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "vl_inner_tpid[f:0]".
+ * port="pif_rpf_vl_inner_tpid_i[15:0]"
+ */
+
+/* register address for bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_adr 0x00005284
+/* bitmask for bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_msk 0x0000ffff
+/* inverted bitmask for bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_mskn 0xffff0000
+/* lower bit position of bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_shift 0
+/* width of bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_width 16
+/* default value of bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_default 0x8100
+
+/* rx vl_outer_tpid[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "vl_outer_tpid[f:0]".
+ * port="pif_rpf_vl_outer_tpid_i[15:0]"
+ */
+
+/* register address for bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_adr 0x00005284
+/* bitmask for bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_msk 0xffff0000
+/* inverted bitmask for bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_mskn 0x0000ffff
+/* lower bit position of bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_shift 16
+/* width of bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_width 16
+/* default value of bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_default 0x88a8
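+
+/* usage sketch (illustrative only): inner and outer vlan tpids share
+ * register 0x5284, inner in bits 15:0 and outer in bits 31:16, so both
+ * fit in a single write (defaults 0x8100/0x88a8, i.e. 802.1q/802.1ad):
+ *
+ *	aq_hw_write_reg(hw, rpf_vl_inner_tpid_adr,
+ *			(outer << rpf_vl_outer_tpid_shift) |
+ *			(inner & rpf_vl_inner_tpid_msk));
+ */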
+
+/* rx vl_promis_mode bitfield definitions
+ * preprocessor definitions for the bitfield "vl_promis_mode".
+ * port="pif_rpf_vl_promis_mode_i"
+ */
+
+/* register address for bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_adr 0x00005280
+/* bitmask for bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_msk 0x00000002
+/* inverted bitmask for bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_mskn 0xfffffffd
+/* lower bit position of bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_shift 1
+/* width of bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_width 1
+/* default value of bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_default 0x0
+
+/* RX vl_accept_untagged_mode Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_accept_untagged_mode".
+ * PORT="pif_rpf_vl_accept_untagged_i"
+ */
+
+/* Register address for bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_adr 0x00005280
+/* Bitmask for bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_msk 0x00000004
+/* Inverted bitmask for bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_mskn 0xFFFFFFFB
+/* Lower bit position of bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_shift 2
+/* Width of bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_width 1
+/* Default value of bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_default 0x0
+
+/* RX vl_untagged_act[2:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_untagged_act[2:0]".
+ * PORT="pif_rpf_vl_untagged_act_i[2:0]"
+ */
+
+/* Register address for bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_adr 0x00005280
+/* Bitmask for bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_msk 0x00000038
+/* Inverted bitmask for bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_mskn 0xFFFFFFC7
+/* Lower bit position of bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_shift 3
+/* Width of bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_width 3
+/* Default value of bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_default 0x0
+
+/* RX vl_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_en{F}".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_en_i[0]"
+ */
+
+/* Register address for bitfield vl_en{F} */
+#define rpf_vl_en_f_adr(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_en{F} */
+#define rpf_vl_en_f_msk 0x80000000
+/* Inverted bitmask for bitfield vl_en{F} */
+#define rpf_vl_en_f_mskn 0x7FFFFFFF
+/* Lower bit position of bitfield vl_en{F} */
+#define rpf_vl_en_f_shift 31
+/* Width of bitfield vl_en{F} */
+#define rpf_vl_en_f_width 1
+/* Default value of bitfield vl_en{F} */
+#define rpf_vl_en_f_default 0x0
+
+/* RX vl_act{F}[2:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_act{F}[2:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_act0_i[2:0]"
+ */
+
+/* Register address for bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_adr(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_msk 0x00070000
+/* Inverted bitmask for bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_mskn 0xFFF8FFFF
+/* Lower bit position of bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_shift 16
+/* Width of bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_width 3
+/* Default value of bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_default 0x0
+
+/* RX vl_id{F}[B:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_id{F}[B:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_id0_i[11:0]"
+ */
+
+/* Register address for bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_adr(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_msk 0x00000FFF
+/* Inverted bitmask for bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_mskn 0xFFFFF000
+/* Lower bit position of bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_shift 0
+/* Width of bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_width 12
+/* Default value of bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_default 0x0
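+
+/* Usage sketch (illustrative only): one VLAN filter entry lives
+ * entirely in register 0x5290 + 4 * F: enable in bit 31, action in
+ * bits 18:16 and the 12-bit VLAN ID in bits 11:0, so a whole entry is
+ * a single write:
+ *
+ *	aq_hw_write_reg(hw, rpf_vl_en_f_adr(f),
+ *			rpf_vl_en_f_msk |
+ *			((act << rpf_vl_act_f_shift) & rpf_vl_act_f_msk) |
+ *			(vid & rpf_vl_id_f_msk));
+ */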
+
+/* RX et_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "et_en{F}".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_et_en_i[0]"
+ */
+
+/* Register address for bitfield et_en{F} */
+#define rpf_et_en_f_adr(filter) (0x00005300 + (filter) * 0x4)
+/* Bitmask for bitfield et_en{F} */
+#define rpf_et_en_f_msk 0x80000000
+/* Inverted bitmask for bitfield et_en{F} */
+#define rpf_et_en_f_mskn 0x7FFFFFFF
+/* Lower bit position of bitfield et_en{F} */
+#define rpf_et_en_f_shift 31
+/* Width of bitfield et_en{F} */
+#define rpf_et_en_f_width 1
+/* Default value of bitfield et_en{F} */
+#define rpf_et_en_f_default 0x0
+
+/* rx et_en{f} bitfield definitions
+ * preprocessor definitions for the bitfield "et_en{f}".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_en_i[0]"
+ */
+
+/* register address for bitfield et_en{f} */
+#define rpf_et_enf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_en{f} */
+#define rpf_et_enf_msk 0x80000000
+/* inverted bitmask for bitfield et_en{f} */
+#define rpf_et_enf_mskn 0x7fffffff
+/* lower bit position of bitfield et_en{f} */
+#define rpf_et_enf_shift 31
+/* width of bitfield et_en{f} */
+#define rpf_et_enf_width 1
+/* default value of bitfield et_en{f} */
+#define rpf_et_enf_default 0x0
+
+/* rx et_up{f}_en bitfield definitions
+ * preprocessor definitions for the bitfield "et_up{f}_en".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_up_en_i[0]"
+ */
+
+/* register address for bitfield et_up{f}_en */
+#define rpf_et_upfen_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_up{f}_en */
+#define rpf_et_upfen_msk 0x40000000
+/* inverted bitmask for bitfield et_up{f}_en */
+#define rpf_et_upfen_mskn 0xbfffffff
+/* lower bit position of bitfield et_up{f}_en */
+#define rpf_et_upfen_shift 30
+/* width of bitfield et_up{f}_en */
+#define rpf_et_upfen_width 1
+/* default value of bitfield et_up{f}_en */
+#define rpf_et_upfen_default 0x0
+
+/* rx et_rxq{f}_en bitfield definitions
+ * preprocessor definitions for the bitfield "et_rxq{f}_en".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_rxq_en_i[0]"
+ */
+
+/* register address for bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_msk 0x20000000
+/* inverted bitmask for bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_mskn 0xdfffffff
+/* lower bit position of bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_shift 29
+/* width of bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_width 1
+/* default value of bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_default 0x0
+
+/* rx et_up{f}[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_up{f}[2:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_up0_i[2:0]"
+ */
+
+/* register address for bitfield et_up{f}[2:0] */
+#define rpf_et_upf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_up{f}[2:0] */
+#define rpf_et_upf_msk 0x1c000000
+/* inverted bitmask for bitfield et_up{f}[2:0] */
+#define rpf_et_upf_mskn 0xe3ffffff
+/* lower bit position of bitfield et_up{f}[2:0] */
+#define rpf_et_upf_shift 26
+/* width of bitfield et_up{f}[2:0] */
+#define rpf_et_upf_width 3
+/* default value of bitfield et_up{f}[2:0] */
+#define rpf_et_upf_default 0x0
+
+/* rx et_rxq{f}[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_rxq{f}[4:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_rxq0_i[4:0]"
+ */
+
+/* register address for bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_msk 0x01f00000
+/* inverted bitmask for bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_mskn 0xfe0fffff
+/* lower bit position of bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_shift 20
+/* width of bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_width 5
+/* default value of bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_default 0x0
+
+/* rx et_mng_rxq{f} bitfield definitions
+ * preprocessor definitions for the bitfield "et_mng_rxq{f}".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_mng_rxq_i[0]"
+ */
+
+/* register address for bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_msk 0x00080000
+/* inverted bitmask for bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_mskn 0xfff7ffff
+/* lower bit position of bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_shift 19
+/* width of bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_width 1
+/* default value of bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_default 0x0
+
+/* rx et_act{f}[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_act{f}[2:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_act0_i[2:0]"
+ */
+
+/* register address for bitfield et_act{f}[2:0] */
+#define rpf_et_actf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_act{f}[2:0] */
+#define rpf_et_actf_msk 0x00070000
+/* inverted bitmask for bitfield et_act{f}[2:0] */
+#define rpf_et_actf_mskn 0xfff8ffff
+/* lower bit position of bitfield et_act{f}[2:0] */
+#define rpf_et_actf_shift 16
+/* width of bitfield et_act{f}[2:0] */
+#define rpf_et_actf_width 3
+/* default value of bitfield et_act{f}[2:0] */
+#define rpf_et_actf_default 0x0
+
+/* rx et_val{f}[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_val{f}[f:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_val0_i[15:0]"
+ */
+
+/* register address for bitfield et_val{f}[f:0] */
+#define rpf_et_valf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_val{f}[f:0] */
+#define rpf_et_valf_msk 0x0000ffff
+/* inverted bitmask for bitfield et_val{f}[f:0] */
+#define rpf_et_valf_mskn 0xffff0000
+/* lower bit position of bitfield et_val{f}[f:0] */
+#define rpf_et_valf_shift 0
+/* width of bitfield et_val{f}[f:0] */
+#define rpf_et_valf_width 16
+/* default value of bitfield et_val{f}[f:0] */
+#define rpf_et_valf_default 0x0
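+
+/* usage sketch (illustrative only): an ethertype filter entry likewise
+ * packs into one register at 0x5300 + 4 * f: enable (bit 31), up
+ * enable (30), rxq enable (29), user priority (28:26), rx queue
+ * (24:20), mng queue (19), action (18:16) and the 16-bit ethertype
+ * value (15:0). a minimal "match ethertype, steer to queue q" entry:
+ *
+ *	aq_hw_write_reg(hw, rpf_et_enf_adr(f),
+ *			rpf_et_enf_msk | rpf_et_rxqfen_msk |
+ *			((q << rpf_et_rxqf_shift) & rpf_et_rxqf_msk) |
+ *			((act << rpf_et_actf_shift) & rpf_et_actf_msk) |
+ *			(ethertype & rpf_et_valf_msk));
+ */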
+
+/* rx ipv4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "ipv4_chk_en".
+ * port="pif_rpo_ipv4_chk_en_i"
+ */
+
+/* register address for bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_adr 0x00005580
+/* bitmask for bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_msk 0x00000002
+/* inverted bitmask for bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_mskn 0xfffffffd
+/* lower bit position of bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_shift 1
+/* width of bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_width 1
+/* default value of bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_default 0x0
+
+/* rx desc{d}_vl_strip bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_vl_strip".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rpo_desc_vl_strip_i[0]"
+ */
+
+/* register address for bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_msk 0x20000000
+/* inverted bitmask for bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_mskn 0xdfffffff
+/* lower bit position of bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_shift 29
+/* width of bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_width 1
+/* default value of bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_default 0x0
+
+/* rx l4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "l4_chk_en".
+ * port="pif_rpo_l4_chk_en_i"
+ */
+
+/* register address for bitfield l4_chk_en */
+#define rpol4chk_en_adr 0x00005580
+/* bitmask for bitfield l4_chk_en */
+#define rpol4chk_en_msk 0x00000001
+/* inverted bitmask for bitfield l4_chk_en */
+#define rpol4chk_en_mskn 0xfffffffe
+/* lower bit position of bitfield l4_chk_en */
+#define rpol4chk_en_shift 0
+/* width of bitfield l4_chk_en */
+#define rpol4chk_en_width 1
+/* default value of bitfield l4_chk_en */
+#define rpol4chk_en_default 0x0
+
+/* rx reg_res_dsbl bitfield definitions
+ * preprocessor definitions for the bitfield "reg_res_dsbl".
+ * port="pif_rx_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_adr 0x00005000
+/* bitmask for bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_msk 0x20000000
+/* inverted bitmask for bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_mskn 0xdfffffff
+/* lower bit position of bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_shift 29
+/* width of bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_width 1
+/* default value of bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_default 0x1
+
+/* tx dca{d}_cpuid[7:0] bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_tdm_dca0_cpuid_i[7:0]"
+ */
+
+/* register address for bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_msk 0x000000ff
+/* inverted bitmask for bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_mskn 0xffffff00
+/* lower bit position of bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_shift 0
+/* width of bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_width 8
+/* default value of bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_default 0x0
+
+/* tx lso_en[1f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "lso_en[1f:0]".
+ * port="pif_tdm_lso_en_i[31:0]"
+ */
+
+/* register address for bitfield lso_en[1f:0] */
+#define tdm_lso_en_adr 0x00007810
+/* bitmask for bitfield lso_en[1f:0] */
+#define tdm_lso_en_msk 0xffffffff
+/* inverted bitmask for bitfield lso_en[1f:0] */
+#define tdm_lso_en_mskn 0x00000000
+/* lower bit position of bitfield lso_en[1f:0] */
+#define tdm_lso_en_shift 0
+/* width of bitfield lso_en[1f:0] */
+#define tdm_lso_en_width 32
+/* default value of bitfield lso_en[1f:0] */
+#define tdm_lso_en_default 0x0
+
+/* tx dca_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca_en".
+ * port="pif_tdm_dca_en_i"
+ */
+
+/* register address for bitfield dca_en */
+#define tdm_dca_en_adr 0x00008480
+/* bitmask for bitfield dca_en */
+#define tdm_dca_en_msk 0x80000000
+/* inverted bitmask for bitfield dca_en */
+#define tdm_dca_en_mskn 0x7fffffff
+/* lower bit position of bitfield dca_en */
+#define tdm_dca_en_shift 31
+/* width of bitfield dca_en */
+#define tdm_dca_en_width 1
+/* default value of bitfield dca_en */
+#define tdm_dca_en_default 0x1
+
+/* tx dca_mode[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "dca_mode[3:0]".
+ * port="pif_tdm_dca_mode_i[3:0]"
+ */
+
+/* register address for bitfield dca_mode[3:0] */
+#define tdm_dca_mode_adr 0x00008480
+/* bitmask for bitfield dca_mode[3:0] */
+#define tdm_dca_mode_msk 0x0000000f
+/* inverted bitmask for bitfield dca_mode[3:0] */
+#define tdm_dca_mode_mskn 0xfffffff0
+/* lower bit position of bitfield dca_mode[3:0] */
+#define tdm_dca_mode_shift 0
+/* width of bitfield dca_mode[3:0] */
+#define tdm_dca_mode_width 4
+/* default value of bitfield dca_mode[3:0] */
+#define tdm_dca_mode_default 0x0
+
+/* tx dca{d}_desc_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_desc_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_tdm_dca_desc_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_msk 0x80000000
+/* inverted bitmask for bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_mskn 0x7fffffff
+/* lower bit position of bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_shift 31
+/* width of bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_width 1
+/* default value of bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_default 0x0
+
+/* tx desc{d}_en bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_en".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="pif_tdm_desc_en_i[0]"
+ */
+
+/* register address for bitfield desc{d}_en */
+#define tdm_descden_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_en */
+#define tdm_descden_msk 0x80000000
+/* inverted bitmask for bitfield desc{d}_en */
+#define tdm_descden_mskn 0x7fffffff
+/* lower bit position of bitfield desc{d}_en */
+#define tdm_descden_shift 31
+/* width of bitfield desc{d}_en */
+#define tdm_descden_width 1
+/* default value of bitfield desc{d}_en */
+#define tdm_descden_default 0x0
+
+/* tx desc{d}_hd[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="tdm_pif_desc0_hd_o[12:0]"
+ */
+
+/* register address for bitfield desc{d}_hd[c:0] */
+#define tdm_descdhd_adr(descriptor) (0x00007c0c + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_hd[c:0] */
+#define tdm_descdhd_msk 0x00001fff
+/* inverted bitmask for bitfield desc{d}_hd[c:0] */
+#define tdm_descdhd_mskn 0xffffe000
+/* lower bit position of bitfield desc{d}_hd[c:0] */
+#define tdm_descdhd_shift 0
+/* width of bitfield desc{d}_hd[c:0] */
+#define tdm_descdhd_width 13
+
+/* tx desc{d}_len[9:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_len[9:0]".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="pif_tdm_desc0_len_i[9:0]"
+ */
+
+/* register address for bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_msk 0x00001ff8
+/* inverted bitmask for bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_mskn 0xffffe007
+/* lower bit position of bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_shift 3
+/* width of bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_width 10
+/* default value of bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_default 0x0
+
+/* tx int_desc_wrb_en bitfield definitions
+ * preprocessor definitions for the bitfield "int_desc_wrb_en".
+ * port="pif_tdm_int_desc_wrb_en_i"
+ */
+
+/* register address for bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_adr 0x00007b40
+/* bitmask for bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_msk 0x00000002
+/* inverted bitmask for bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_mskn 0xfffffffd
+/* lower bit position of bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_shift 1
+/* width of bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_width 1
+/* default value of bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_default 0x0
+
+/* tx desc{d}_wrb_thresh[6:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="pif_tdm_desc0_wrb_thresh_i[6:0]"
+ */
+
+/* register address for bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_adr(descriptor) (0x00007c18 + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_msk 0x00007f00
+/* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_mskn 0xffff80ff
+/* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_shift 8
+/* width of bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_width 7
+/* default value of bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_default 0x0
+
+/* tx lso_tcp_flag_first[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]".
+ * port="pif_thm_lso_tcp_flag_first_i[11:0]"
+ */
+
+/* register address for bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_adr 0x00007820
+/* bitmask for bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_msk 0x00000fff
+/* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_mskn 0xfffff000
+/* lower bit position of bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_shift 0
+/* width of bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_width 12
+/* default value of bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_default 0x0
+
+/* tx lso_tcp_flag_last[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]".
+ * port="pif_thm_lso_tcp_flag_last_i[11:0]"
+ */
+
+/* register address for bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_adr 0x00007824
+/* bitmask for bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_msk 0x00000fff
+/* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_mskn 0xfffff000
+/* lower bit position of bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_shift 0
+/* width of bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_width 12
+/* default value of bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_default 0x0
+
+/* RX lro_rsc_max[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_rsc_max[1F:0]".
+ */
+
+/* Register address for bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_adr 0x00005598
+/* Bitmask for bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_msk 0xFFFFFFFF
+/* Inverted bitmask for bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_mskn 0x00000000
+/* Lower bit position of bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_shift 0
+/* Width of bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_width 32
+/* Default value of bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_default 0x0
+
+/* RX lro_en[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_en[1F:0]".
+ * PORT="pif_rpo_lro_en_i[31:0]"
+ */
+
+/* Register address for bitfield lro_en[1F:0] */
+#define rpo_lro_en_adr 0x00005590
+/* Bitmask for bitfield lro_en[1F:0] */
+#define rpo_lro_en_msk 0xFFFFFFFF
+/* Inverted bitmask for bitfield lro_en[1F:0] */
+#define rpo_lro_en_mskn 0x00000000
+/* Lower bit position of bitfield lro_en[1F:0] */
+#define rpo_lro_en_shift 0
+/* Width of bitfield lro_en[1F:0] */
+#define rpo_lro_en_width 32
+/* Default value of bitfield lro_en[1F:0] */
+#define rpo_lro_en_default 0x0
+
+/* RX lro_ptopt_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_ptopt_en".
+ * PORT="pif_rpo_lro_ptopt_en_i"
+ */
+
+/* Register address for bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_adr 0x00005594
+/* Bitmask for bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_msk 0x00008000
+/* Inverted bitmask for bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_mskn 0xFFFF7FFF
+/* Lower bit position of bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_shift 15
+/* Width of bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_width 1
+/* Default value of bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_default 0x1
+
+/* RX lro_q_ses_lmt Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_q_ses_lmt".
+ * PORT="pif_rpo_lro_q_ses_lmt_i[1:0]"
+ */
+
+/* Register address for bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_adr 0x00005594
+/* Bitmask for bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_msk 0x00003000
+/* Inverted bitmask for bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_mskn 0xFFFFCFFF
+/* Lower bit position of bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_shift 12
+/* Width of bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_width 2
+/* Default value of bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_default 0x1
+
+/* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]".
+ * PORT="pif_rpo_lro_tot_dsc_lmt_i[1:0]"
+ */
+
+/* Register address for bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_adr 0x00005594
+/* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_msk 0x00000060
+/* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_mskn 0xFFFFFF9F
+/* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_shift 5
+/* Width of bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_width 2
+/* Default value of bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_default 0x1
+
+/* RX lro_pkt_min[4:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_pkt_min[4:0]".
+ * PORT="pif_rpo_lro_pkt_min_i[4:0]"
+ */
+
+/* Register address for bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_adr 0x00005594
+/* Bitmask for bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_msk 0x0000001F
+/* Inverted bitmask for bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_mskn 0xFFFFFFE0
+/* Lower bit position of bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_shift 0
+/* Width of bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_width 5
+/* Default value of bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_default 0x8
+
+/* Width of bitfield lro{L}_des_max[1:0] */
+#define rpo_lro_ldes_max_width 2
+/* Default value of bitfield lro{L}_des_max[1:0] */
+#define rpo_lro_ldes_max_default 0x0
+
+/* RX lro_tb_div[11:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_tb_div[11:0]".
+ * PORT="pif_rpo_lro_tb_div_i[11:0]"
+ */
+
+/* Register address for bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_adr 0x00005620
+/* Bitmask for bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_msk 0xFFF00000
+/* Inverted bitmask for bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_mskn 0x000FFFFF
+/* Lower bit position of bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_shift 20
+/* Width of bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_width 12
+/* Default value of bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_default 0xC35
+
+/* RX lro_ina_ival[9:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_ina_ival[9:0]".
+ * PORT="pif_rpo_lro_ina_ival_i[9:0]"
+ */
+
+/* Register address for bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_adr 0x00005620
+/* Bitmask for bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_msk 0x000FFC00
+/* Inverted bitmask for bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_mskn 0xFFF003FF
+/* Lower bit position of bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_shift 10
+/* Width of bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_width 10
+/* Default value of bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_default 0xA
+
+/* RX lro_max_ival[9:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_max_ival[9:0]".
+ * PORT="pif_rpo_lro_max_ival_i[9:0]"
+ */
+
+/* Register address for bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_adr 0x00005620
+/* Bitmask for bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_msk 0x000003FF
+/* Inverted bitmask for bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_mskn 0xFFFFFC00
+/* Lower bit position of bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_shift 0
+/* Width of bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_width 10
+/* Default value of bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_default 0x19
+
+/* TX dca{D}_cpuid[7:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]".
+ * Parameter: DCA {D} | stride size 0x4 | range [0, 31]
+ * PORT="pif_tdm_dca0_cpuid_i[7:0]"
+ */
+
+/* Register address for bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+/* Bitmask for bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_msk 0x000000FF
+/* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_mskn 0xFFFFFF00
+/* Lower bit position of bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_shift 0
+/* Width of bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_width 8
+/* Default value of bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_default 0x0
+
+/* TX dca{D}_desc_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "dca{D}_desc_en".
+ * Parameter: DCA {D} | stride size 0x4 | range [0, 31]
+ * PORT="pif_tdm_dca_desc_en_i[0]"
+ */
+
+/* Register address for bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+/* Bitmask for bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_msk 0x80000000
+/* Inverted bitmask for bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_mskn 0x7FFFFFFF
+/* Lower bit position of bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_shift 31
+/* Width of bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_width 1
+/* Default value of bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_default 0x0
+
+/* TX desc{D}_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_en".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="pif_tdm_desc_en_i[0]"
+ */
+
+/* Register address for bitfield desc{D}_en */
+#define tdm_desc_den_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_en */
+#define tdm_desc_den_msk 0x80000000
+/* Inverted bitmask for bitfield desc{D}_en */
+#define tdm_desc_den_mskn 0x7FFFFFFF
+/* Lower bit position of bitfield desc{D}_en */
+#define tdm_desc_den_shift 31
+/* Width of bitfield desc{D}_en */
+#define tdm_desc_den_width 1
+/* Default value of bitfield desc{D}_en */
+#define tdm_desc_den_default 0x0
+
+/* TX desc{D}_hd[C:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_hd[C:0]".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="tdm_pif_desc0_hd_o[12:0]"
+ */
+
+/* Register address for bitfield desc{D}_hd[C:0] */
+#define tdm_desc_dhd_adr(descriptor) (0x00007C0C + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_hd[C:0] */
+#define tdm_desc_dhd_msk 0x00001FFF
+/* Inverted bitmask for bitfield desc{D}_hd[C:0] */
+#define tdm_desc_dhd_mskn 0xFFFFE000
+/* Lower bit position of bitfield desc{D}_hd[C:0] */
+#define tdm_desc_dhd_shift 0
+/* Width of bitfield desc{D}_hd[C:0] */
+#define tdm_desc_dhd_width 13
+
+/* TX desc{D}_len[9:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_len[9:0]".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="pif_tdm_desc0_len_i[9:0]"
+ */
+
+/* Register address for bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_msk 0x00001FF8
+/* Inverted bitmask for bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_mskn 0xFFFFE007
+/* Lower bit position of bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_shift 3
+/* Width of bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_width 10
+/* Default value of bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_default 0x0
+
+/* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="pif_tdm_desc0_wrb_thresh_i[6:0]"
+ */
+
+/* Register address for bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_adr(descriptor) \
+       (0x00007C18 + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_msk 0x00007F00
+/* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_mskn 0xFFFF80FF
+/* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_shift 8
+/* Width of bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_width 7
+/* Default value of bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_default 0x0
+
+/* TX tdm_int_mod_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "tdm_int_mod_en".
+ * PORT="pif_tdm_int_mod_en_i"
+ */
+
+/* Register address for bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_adr 0x00007B40
+/* Bitmask for bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_msk 0x00000010
+/* Inverted bitmask for bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_mskn 0xFFFFFFEF
+/* Lower bit position of bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_shift 4
+/* Width of bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_width 1
+/* Default value of bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_default 0x0
+
+/* TX lso_tcp_flag_mid[B:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]".
+ * PORT="pif_thm_lso_tcp_flag_mid_i[11:0]"
+ */
+/* register address for bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_adr 0x00007820
+/* bitmask for bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_msk 0x0fff0000
+/* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_mskn 0xf000ffff
+/* lower bit position of bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_shift 16
+/* width of bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_width 12
+/* default value of bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_default 0x0
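+
+/* usage sketch (illustrative only): the "first" and "mid" tcp flag
+ * masks share register 0x7820 (first in bits 11:0, mid in bits 27:16)
+ * while "last" sits alone at 0x7824, so a combined setup might be:
+ *
+ *	aq_hw_write_reg(hw, thm_lso_tcp_flag_first_adr,
+ *			(mid << thm_lso_tcp_flag_mid_shift) |
+ *			(first & thm_lso_tcp_flag_first_msk));
+ *	aq_hw_write_reg(hw, thm_lso_tcp_flag_last_adr,
+ *			last & thm_lso_tcp_flag_last_msk);
+ *
+ * the 12-bit values mirror the tcp header flag bits; which flags each
+ * segment class may carry is a policy choice left to the caller.
+ */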
+
+/* tx tx_buf_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_buf_en".
+ * port="pif_tpb_tx_buf_en_i"
+ */
+
+/* register address for bitfield tx_buf_en */
+#define tpb_tx_buf_en_adr 0x00007900
+/* bitmask for bitfield tx_buf_en */
+#define tpb_tx_buf_en_msk 0x00000001
+/* inverted bitmask for bitfield tx_buf_en */
+#define tpb_tx_buf_en_mskn 0xfffffffe
+/* lower bit position of bitfield tx_buf_en */
+#define tpb_tx_buf_en_shift 0
+/* width of bitfield tx_buf_en */
+#define tpb_tx_buf_en_width 1
+/* default value of bitfield tx_buf_en */
+#define tpb_tx_buf_en_default 0x0
+
+/* tx tx{b}_hi_thresh[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_tpb_tx0_hi_thresh_i[12:0]"
+ */
+
+/* register address for bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+/* bitmask for bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_msk 0x1fff0000
+/* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_mskn 0xe000ffff
+/* lower bit position of bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_shift 16
+/* width of bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_width 13
+/* default value of bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_default 0x0
+
+/* tx tx{b}_lo_thresh[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_tpb_tx0_lo_thresh_i[12:0]"
+ */
+
+/* register address for bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+/* bitmask for bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_msk 0x00001fff
+/* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_mskn 0xffffe000
+/* lower bit position of bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_shift 0
+/* width of bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_width 13
+/* default value of bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_default 0x0
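+
+/* usage sketch (illustrative only): each tx packet buffer has high and
+ * low watermarks sharing 0x7914 + 0x10 * b (hi in bits 28:16, lo in
+ * bits 12:0), which are naturally programmed together:
+ *
+ *	aq_hw_write_reg(hw, tpb_txbhi_thresh_adr(b),
+ *			((hi << tpb_txbhi_thresh_shift) &
+ *			 tpb_txbhi_thresh_msk) |
+ *			(lo & tpb_txblo_thresh_msk));
+ */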
+
+/* tx dma_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_sys_loopback".
+ * port="pif_tpb_dma_sys_lbk_i"
+ */
+
+/* register address for bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_adr 0x00007000
+/* bitmask for bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_msk 0x00000040
+/* inverted bitmask for bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_mskn 0xffffffbf
+/* lower bit position of bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_shift 6
+/* width of bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_width 1
+/* default value of bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_default 0x0
+
+/* tx tx{b}_buf_size[7:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_tpb_tx0_buf_size_i[7:0]"
+ */
+
+/* register address for bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_adr(buffer) (0x00007910 + (buffer) * 0x10)
+/* bitmask for bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_msk 0x000000ff
+/* inverted bitmask for bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_mskn 0xffffff00
+/* lower bit position of bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_shift 0
+/* width of bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_width 8
+/* default value of bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_default 0x0
+
+/* tx tx_scp_ins_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_scp_ins_en".
+ * port="pif_tpb_scp_ins_en_i"
+ */
+
+/* register address for bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_adr 0x00007900
+/* bitmask for bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_msk 0x00000004
+/* inverted bitmask for bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_mskn 0xfffffffb
+/* lower bit position of bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_shift 2
+/* width of bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_width 1
+/* default value of bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_default 0x0
+
+/* tx ipv4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "ipv4_chk_en".
+ * port="pif_tpo_ipv4_chk_en_i"
+ */
+
+/* register address for bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_adr 0x00007800
+/* bitmask for bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_msk 0x00000002
+/* inverted bitmask for bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_mskn 0xfffffffd
+/* lower bit position of bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_shift 1
+/* width of bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_width 1
+/* default value of bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_default 0x0
+
+/* tx l4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "l4_chk_en".
+ * port="pif_tpo_l4_chk_en_i"
+ */
+
+/* register address for bitfield l4_chk_en */
+#define tpol4chk_en_adr 0x00007800
+/* bitmask for bitfield l4_chk_en */
+#define tpol4chk_en_msk 0x00000001
+/* inverted bitmask for bitfield l4_chk_en */
+#define tpol4chk_en_mskn 0xfffffffe
+/* lower bit position of bitfield l4_chk_en */
+#define tpol4chk_en_shift 0
+/* width of bitfield l4_chk_en */
+#define tpol4chk_en_width 1
+/* default value of bitfield l4_chk_en */
+#define tpol4chk_en_default 0x0
+
+/* tx pkt_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "pkt_sys_loopback".
+ * port="pif_tpo_pkt_sys_lbk_i"
+ */
+
+/* register address for bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_adr 0x00007000
+/* bitmask for bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_msk 0x00000080
+/* inverted bitmask for bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_mskn 0xffffff7f
+/* lower bit position of bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_shift 7
+/* width of bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_width 1
+/* default value of bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_default 0x0
+
+/* tx data_tc_arb_mode bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc_arb_mode".
+ * port="pif_tps_data_tc_arb_mode_i"
+ */
+
+/* register address for bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_adr 0x00007100
+/* bitmask for bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_msk 0x00000001
+/* inverted bitmask for bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_mskn 0xfffffffe
+/* lower bit position of bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_shift 0
+/* width of bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_width 1
+/* default value of bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_default 0x0
+
+/* tx desc_rate_ta_rst bitfield definitions
+ * preprocessor definitions for the bitfield "desc_rate_ta_rst".
+ * port="pif_tps_desc_rate_ta_rst_i"
+ */
+
+/* register address for bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_adr 0x00007310
+/* bitmask for bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_msk 0x80000000
+/* inverted bitmask for bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_mskn 0x7fffffff
+/* lower bit position of bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_shift 31
+/* width of bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_width 1
+/* default value of bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_default 0x0
+
+/* tx desc_rate_limit[a:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_rate_limit[a:0]".
+ * port="pif_tps_desc_rate_lim_i[10:0]"
+ */
+
+/* register address for bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_adr 0x00007310
+/* bitmask for bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_msk 0x000007ff
+/* inverted bitmask for bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_mskn 0xfffff800
+/* lower bit position of bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_shift 0
+/* width of bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_width 11
+/* default value of bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_default 0x0
+
+/* tx desc_tc_arb_mode[1:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]".
+ * port="pif_tps_desc_tc_arb_mode_i[1:0]"
+ */
+
+/* register address for bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_adr 0x00007200
+/* bitmask for bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_msk 0x00000003
+/* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_mskn 0xfffffffc
+/* lower bit position of bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_shift 0
+/* width of bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_width 2
+/* default value of bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_default 0x0
+
+/* tx desc_tc{t}_credit_max[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_desc_tc0_credit_max_i[11:0]"
+ */
+
+/* register address for bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_adr(tc) (0x00007210 + (tc) * 0x4)
+/* bitmask for bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_msk 0x0fff0000
+/* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_mskn 0xf000ffff
+/* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_shift 16
+/* width of bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_width 12
+/* default value of bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_default 0x0
+
+/* tx desc_tc{t}_weight[8:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_desc_tc0_weight_i[8:0]"
+ */
+
+/* register address for bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_adr(tc) (0x00007210 + (tc) * 0x4)
+/* bitmask for bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_msk 0x000001ff
+/* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_mskn 0xfffffe00
+/* lower bit position of bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_shift 0
+/* width of bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_width 9
+/* default value of bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_default 0x0
+
+/* tx desc_vm_arb_mode bitfield definitions
+ * preprocessor definitions for the bitfield "desc_vm_arb_mode".
+ * port="pif_tps_desc_vm_arb_mode_i"
+ */
+
+/* register address for bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_adr 0x00007300
+/* bitmask for bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_msk 0x00000001
+/* inverted bitmask for bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_mskn 0xfffffffe
+/* lower bit position of bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_shift 0
+/* width of bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_width 1
+/* default value of bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_default 0x0
+
+/* tx data_tc{t}_credit_max[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_credit_max_i[11:0]"
+ */
+
+/* register address for bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_adr(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_msk 0x0fff0000
+/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_mskn 0xf000ffff
+/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_shift 16
+/* width of bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_width 12
+/* default value of bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_default 0x0
+
+/* tx data_tc{t}_weight[8:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_weight_i[8:0]"
+ */
+
+/* register address for bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_adr(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_msk 0x000001ff
+/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_mskn 0xfffffe00
+/* lower bit position of bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_shift 0
+/* width of bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_width 9
+/* default value of bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_default 0x0
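+
+/* usage sketch (illustrative only): the data arbiter's per-tc credit
+ * maximum (bits 27:16) and weight (bits 8:0) share 0x7110 + 4 * t, so
+ * weighted arbitration across all eight tcs is one loop, given
+ * hypothetical credit[]/weight[] tables:
+ *
+ *	unsigned int t;
+ *
+ *	for (t = 0; t < 8; t++)
+ *		aq_hw_write_reg(hw, tps_data_tctweight_adr(t),
+ *				((credit[t] << tps_data_tctcredit_max_shift) &
+ *				 tps_data_tctcredit_max_msk) |
+ *				(weight[t] & tps_data_tctweight_msk));
+ */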
+
+/* tx reg_res_dsbl bitfield definitions
+ * preprocessor definitions for the bitfield "reg_res_dsbl".
+ * port="pif_tx_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_adr 0x00007000
+/* bitmask for bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_msk 0x20000000
+/* inverted bitmask for bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_mskn 0xdfffffff
+/* lower bit position of bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_shift 29
+/* width of bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_width 1
+/* default value of bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_default 0x1
+
+/* mac_phy register access busy bitfield definitions
+ * preprocessor definitions for the bitfield "register access busy".
+ * port="msm_pif_reg_busy_o"
+ */
+
+/* register address for bitfield register access busy */
+#define msm_reg_access_busy_adr 0x00004400
+/* bitmask for bitfield register access busy */
+#define msm_reg_access_busy_msk 0x00001000
+/* inverted bitmask for bitfield register access busy */
+#define msm_reg_access_busy_mskn 0xffffefff
+/* lower bit position of bitfield register access busy */
+#define msm_reg_access_busy_shift 12
+/* width of bitfield register access busy */
+#define msm_reg_access_busy_width 1
+
+/* mac_phy msm register address[7:0] bitfield definitions
+ * preprocessor definitions for the bitfield "msm register address[7:0]".
+ * port="pif_msm_reg_addr_i[7:0]"
+ */
+
+/* register address for bitfield msm register address[7:0] */
+#define msm_reg_addr_adr 0x00004400
+/* bitmask for bitfield msm register address[7:0] */
+#define msm_reg_addr_msk 0x000000ff
+/* inverted bitmask for bitfield msm register address[7:0] */
+#define msm_reg_addr_mskn 0xffffff00
+/* lower bit position of bitfield msm register address[7:0] */
+#define msm_reg_addr_shift 0
+/* width of bitfield msm register address[7:0] */
+#define msm_reg_addr_width 8
+/* default value of bitfield msm register address[7:0] */
+#define msm_reg_addr_default 0x0
+
+/* mac_phy register read strobe bitfield definitions
+ * preprocessor definitions for the bitfield "register read strobe".
+ * port="pif_msm_reg_rden_i"
+ */
+
+/* register address for bitfield register read strobe */
+#define msm_reg_rd_strobe_adr 0x00004400
+/* bitmask for bitfield register read strobe */
+#define msm_reg_rd_strobe_msk 0x00000200
+/* inverted bitmask for bitfield register read strobe */
+#define msm_reg_rd_strobe_mskn 0xfffffdff
+/* lower bit position of bitfield register read strobe */
+#define msm_reg_rd_strobe_shift 9
+/* width of bitfield register read strobe */
+#define msm_reg_rd_strobe_width 1
+/* default value of bitfield register read strobe */
+#define msm_reg_rd_strobe_default 0x0
+
+/* mac_phy msm register read data[31:0] bitfield definitions
+ * preprocessor definitions for the bitfield "msm register read data[31:0]".
+ * port="msm_pif_reg_rd_data_o[31:0]"
+ */
+
+/* register address for bitfield msm register read data[31:0] */
+#define msm_reg_rd_data_adr 0x00004408
+/* bitmask for bitfield msm register read data[31:0] */
+#define msm_reg_rd_data_msk 0xffffffff
+/* inverted bitmask for bitfield msm register read data[31:0] */
+#define msm_reg_rd_data_mskn 0x00000000
+/* lower bit position of bitfield msm register read data[31:0] */
+#define msm_reg_rd_data_shift 0
+/* width of bitfield msm register read data[31:0] */
+#define msm_reg_rd_data_width 32
+
+/* mac_phy msm register write data[31:0] bitfield definitions
+ * preprocessor definitions for the bitfield "msm register write data[31:0]".
+ * port="pif_msm_reg_wr_data_i[31:0]"
+ */
+
+/* register address for bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_adr 0x00004404
+/* bitmask for bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_msk 0xffffffff
+/* inverted bitmask for bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_mskn 0x00000000
+/* lower bit position of bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_shift 0
+/* width of bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_width 32
+/* default value of bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_default 0x0
+
+/* mac_phy register write strobe bitfield definitions
+ * preprocessor definitions for the bitfield "register write strobe".
+ * port="pif_msm_reg_wren_i"
+ */
+
+/* register address for bitfield register write strobe */
+#define msm_reg_wr_strobe_adr 0x00004400
+/* bitmask for bitfield register write strobe */
+#define msm_reg_wr_strobe_msk 0x00000100
+/* inverted bitmask for bitfield register write strobe */
+#define msm_reg_wr_strobe_mskn 0xfffffeff
+/* lower bit position of bitfield register write strobe */
+#define msm_reg_wr_strobe_shift 8
+/* width of bitfield register write strobe */
+#define msm_reg_wr_strobe_width 1
+/* default value of bitfield register write strobe */
+#define msm_reg_wr_strobe_default 0x0
+
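Registers 0x4400/0x4404/0x4408 together form an indirect window into the MSM (mac_phy) register file: the low byte of 0x4400 selects the target address, bits 8 and 9 are the write and read strobes, and bit 12 reads back as busy. The driver's real accessor lives elsewhere in this series; this is only a plausible read sequence built from the macros above, assuming the aq_hw_* helpers from hw_atl_utils.c:

    /* Sketch only: indirect read of one MSM register */
    static u32 msm_reg_read_sketch(struct aq_hw_s *self, u32 msm_addr)
    {
            u32 retries = 1000U;

            aq_hw_write_reg(self, msm_reg_addr_adr,
                            ((msm_addr << msm_reg_addr_shift) &
                             msm_reg_addr_msk) |
                            msm_reg_rd_strobe_msk); /* address + read strobe */

            /* poll the busy flag (bit 12 of 0x4400) with a bounded loop */
            while ((aq_hw_read_reg(self, msm_reg_access_busy_adr) &
                    msm_reg_access_busy_msk) && --retries)
                    ;

            return aq_hw_read_reg(self, msm_reg_rd_data_adr);
    }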
+/* mif soft reset bitfield definitions
+ * preprocessor definitions for the bitfield "soft reset".
+ * port="pif_glb_res_i"
+ */
+
+/* register address for bitfield soft reset */
+#define glb_soft_res_adr 0x00000000
+/* bitmask for bitfield soft reset */
+#define glb_soft_res_msk 0x00008000
+/* inverted bitmask for bitfield soft reset */
+#define glb_soft_res_mskn 0xffff7fff
+/* lower bit position of bitfield soft reset */
+#define glb_soft_res_shift 15
+/* width of bitfield soft reset */
+#define glb_soft_res_width 1
+/* default value of bitfield soft reset */
+#define glb_soft_res_default 0x0
+
+/* mif register reset disable bitfield definitions
+ * preprocessor definitions for the bitfield "register reset disable".
+ * port="pif_glb_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield register reset disable */
+#define glb_reg_res_dis_adr 0x00000000
+/* bitmask for bitfield register reset disable */
+#define glb_reg_res_dis_msk 0x00004000
+/* inverted bitmask for bitfield register reset disable */
+#define glb_reg_res_dis_mskn 0xffffbfff
+/* lower bit position of bitfield register reset disable */
+#define glb_reg_res_dis_shift 14
+/* width of bitfield register reset disable */
+#define glb_reg_res_dis_width 1
+/* default value of bitfield register reset disable */
+#define glb_reg_res_dis_default 0x1
+
+/* tx dma debug control definitions */
+#define tx_dma_debug_ctl_adr 0x00008920u
+
+/* tx dma descriptor base address msw definitions */
+#define tx_dma_desc_base_addrmsw_adr(descriptor) \
+                       (0x00007c04u + (descriptor) * 0x40)
+
+/* tx interrupt moderation control register definitions
+ * preprocessor definitions for the tx interrupt moderation control register.
+ * base address: 0x00008980
+ * parameter: queue {q} | stride size 0x4 | range [0, 31]
+ */
+
+#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4)
+
+/* pcie reg_res_dsbl bitfield definitions
+ * preprocessor definitions for the bitfield "reg_res_dsbl".
+ * port="pif_pci_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_adr 0x00001000
+/* bitmask for bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_msk 0x20000000
+/* inverted bitmask for bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_mskn 0xdfffffff
+/* lower bit position of bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_shift 29
+/* width of bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_width 1
+/* default value of bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_default 0x1
+
+/* global microprocessor scratch pad definitions */
+#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
+
+#endif /* HW_ATL_LLH_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
new file mode 100644 (file)
index 0000000..8d6d8f5
--- /dev/null
@@ -0,0 +1,570 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
+ * abstraction layer.
+ */
+
+#include "../aq_hw.h"
+#include "../aq_hw_utils.h"
+#include "../aq_pci_func.h"
+#include "../aq_ring.h"
+#include "../aq_vec.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+
+#include <linux/random.h>
+
+#define HW_ATL_UCP_0X370_REG    0x0370U
+
+#define HW_ATL_FW_SM_RAM        0x2U
+#define HW_ATL_MPI_CONTROL_ADR  0x0368U
+#define HW_ATL_MPI_STATE_ADR    0x036CU
+
+#define HW_ATL_MPI_STATE_MSK    0x00FFU
+#define HW_ATL_MPI_STATE_SHIFT  0U
+#define HW_ATL_MPI_SPEED_MSK    0xFFFFU
+#define HW_ATL_MPI_SPEED_SHIFT  16U
+
+static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
+                                        u32 *p, u32 cnt)
+{
+       int err = 0;
+
+       AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self,
+                                          HW_ATL_FW_SM_RAM) == 1U,
+                                          1U, 10000U);
+
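+       /* AQ_HW_WAIT_FOR() is assumed to set the local 'err' on timeout,
+        * which is why err is tested below without an explicit assignment
+        */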
+       if (err < 0) {
+               bool is_locked;
+
+               reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+               is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+               if (!is_locked) {
+                       err = -ETIME;
+                       goto err_exit;
+               }
+       }
+
+       aq_hw_write_reg(self, 0x00000208U, a);
+
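+       /* Indirect FW-RAM read window (as used here): 0x208 latched the
+        * dword address above, writing 0x8000 to 0x200 kicks one read,
+        * bit 8 of 0x200 polls as busy, and 0x20C returns the dword; the
+        * window is assumed to auto-advance after each read.  The
+        * ++cnt/--cnt idiom simply runs the loop body cnt times.
+        */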
+       for (++cnt; --cnt;) {
+               u32 i = 0U;
+
+               aq_hw_write_reg(self, 0x00000200U, 0x00008000U);
+
+               for (i = 1024U;
+                       (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) {
+               }
+
+               *(p++) = aq_hw_read_reg(self, 0x0000020CU);
+       }
+
+       reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
+                                        u32 cnt)
+{
+       int err = 0;
+       bool is_locked;
+
+       is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+       if (!is_locked) {
+               err = -ETIME;
+               goto err_exit;
+       }
+
+       aq_hw_write_reg(self, 0x00000208U, a);
+
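+       /* Mirror of the download path: 0x20C carries each dword out, and
+        * writing 0xC000 to 0x200 is assumed to trigger one write cycle
+        */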
+       for (++cnt; --cnt;) {
+               u32 i = 0U;
+
+               aq_hw_write_reg(self, 0x0000020CU, *(p++));
+               aq_hw_write_reg(self, 0x00000200U, 0xC000U);
+
+               for (i = 1024U;
+                       (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) {
+               }
+       }
+
+       reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
+{
+       int err = 0;
+       const u32 dw_major_mask = 0xff000000U;
+       const u32 dw_minor_mask = 0x00ffffffU;
+
+       err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0;
+       if (err < 0)
+               goto err_exit;
+       err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ?
+               -EOPNOTSUPP : 0;
+err_exit:
+       return err;
+}
+
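The check accepts firmware whose major version (top byte) matches exactly and whose minor/build (low three bytes) is at least the expected one. With hypothetical 0xMMmmmmmm version words:

    hw_atl_utils_ver_match(0x01050000U, 0x01060000U); /* 0: newer minor, ok   */
    hw_atl_utils_ver_match(0x01050000U, 0x01040000U); /* -EOPNOTSUPP: too old */
    hw_atl_utils_ver_match(0x01050000U, 0x02050000U); /* -EOPNOTSUPP: major   */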
+static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
+                                struct aq_hw_caps_s *aq_hw_caps)
+{
+       int err = 0;
+
+       if (!aq_hw_read_reg(self, 0x370U)) {
+               unsigned int rnd = 0U;
+               unsigned int ucp_0x370 = 0U;
+
+               get_random_bytes(&rnd, sizeof(unsigned int));
+
+               ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd);
+               aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
+       }
+
+       reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
+
+       /* poll up to 10 times at 1 ms intervals */
+       AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr =
+                       aq_hw_read_reg(self, 0x360U)), 1000U, 10U);
+
+       err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
+                                    aq_hw_read_reg(self, 0x18U));
+       return err;
+}
+
+#define HW_ATL_RPC_CONTROL_ADR 0x0338U
+#define HW_ATL_RPC_STATE_ADR   0x033CU
+
+struct aq_hw_atl_utils_fw_rpc_tid_s {
+       union {
+               u32 val;
+               struct {
+                       u16 tid;
+                       u16 len;
+               };
+       };
+};
+
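The anonymous union gives a single 32-bit view (val) of the RPC control word next to its two halves. On the little-endian hosts this driver targets, tid lands in the low 16 bits and len in the high 16 — note that this layout is an endianness assumption. For example:

    struct aq_hw_atl_utils_fw_rpc_tid_s sw = { .val = 0U };

    sw.tid = 0x0001U;   /* low 16 bits on a little-endian host */
    sw.len = 0x0040U;   /* high 16 bits */
    /* sw.val == 0x00400001U, the word written to HW_ATL_RPC_CONTROL_ADR */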
+#define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL)
+
+static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
+{
+       int err = 0;
+       struct aq_hw_atl_utils_fw_rpc_tid_s sw;
+
+       if (!IS_CHIP_FEATURE(MIPS)) {
+               err = -1;
+               goto err_exit;
+       }
+       err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr,
+                                           (u32 *)(void *)&PHAL_ATLANTIC->rpc,
+                                           (rpc_size + sizeof(u32) -
+                                           sizeof(u8)) / sizeof(u32));
+       if (err < 0)
+               goto err_exit;
+
+       sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid);
+       sw.len = (u16)rpc_size;
+       aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
+                                   struct hw_aq_atl_utils_fw_rpc **rpc)
+{
+       int err = 0;
+       struct aq_hw_atl_utils_fw_rpc_tid_s sw;
+       struct aq_hw_atl_utils_fw_rpc_tid_s fw;
+
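+       /* Handshake: hw_atl_utils_fw_rpc_call() writes {tid, len} to
+        * RPC_CONTROL, the firmware echoes the tid into RPC_STATE once a
+        * reply is ready, and a reply length of 0xFFFF requests a resend
+        */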
+       do {
+               sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
+
+               PHAL_ATLANTIC->rpc_tid = sw.tid;
+
+               AQ_HW_WAIT_FOR(sw.tid ==
+                               (fw.val =
+                               aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR),
+                               fw.tid), 1000U, 100U);
+               if (err < 0)
+                       goto err_exit;
+
+               if (fw.len == 0xFFFFU) {
+                       err = hw_atl_utils_fw_rpc_call(self, sw.len);
+                       if (err < 0)
+                               goto err_exit;
+               }
+       } while (sw.tid != fw.tid || 0xFFFFU == fw.len);
+       if (err < 0)
+               goto err_exit;
+
+       if (rpc) {
+               if (fw.len) {
+                       err =
+                       hw_atl_utils_fw_downld_dwords(self,
+                                                     PHAL_ATLANTIC->rpc_addr,
+                                                     (u32 *)(void *)
+                                                     &PHAL_ATLANTIC->rpc,
+                                                     (fw.len + sizeof(u32) -
+                                                     sizeof(u8)) /
+                                                     sizeof(u32));
+                       if (err < 0)
+                               goto err_exit;
+               }
+
+               *rpc = &PHAL_ATLANTIC->rpc;
+       }
+
+err_exit:
+       return err;
+}
+
+static int hw_atl_utils_mpi_create(struct aq_hw_s *self,
+                                  struct aq_hw_caps_s *aq_hw_caps)
+{
+       int err = 0;
+
+       err = hw_atl_utils_init_ucp(self, aq_hw_caps);
+       if (err < 0)
+               goto err_exit;
+
+       err = hw_atl_utils_fw_rpc_init(self);
+       if (err < 0)
+               goto err_exit;
+
+err_exit:
+       return err;
+}
+
+void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
+                                struct hw_aq_atl_utils_mbox *pmbox)
+{
+       int err = 0;
+
+       err = hw_atl_utils_fw_downld_dwords(self,
+                                           PHAL_ATLANTIC->mbox_addr,
+                                           (u32 *)(void *)pmbox,
+                                           sizeof(*pmbox) / sizeof(u32));
+       if (err < 0)
+               goto err_exit;
+
+       if (pmbox != &PHAL_ATLANTIC->mbox)
+               memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox));
+
+       if (IS_CHIP_FEATURE(REVISION_A0)) {
+               unsigned int mtu = self->aq_nic_cfg ?
+                                       self->aq_nic_cfg->mtu : 1514U;
+               pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
+               pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
+               pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc);
+       } else {
+               pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self);
+       }
+
+err_exit:;
+}
+
+int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
+                              enum hal_atl_utils_fw_state_e state)
+{
+       u32 ucp_0x368 = 0;
+
+       ucp_0x368 = (speed << HW_ATL_MPI_SPEED_SHIFT) | state;
+       aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, ucp_0x368);
+
+       return 0;
+}
+
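The control word at 0x368 packs the requested link-rate bitmask into the upper half and the MPI state into the low byte (HW_ATL_MPI_SPEED_SHIFT is 16). Using the rate bits and states declared in hw_atl_utils.h further down, a 10G INIT request would look like:

    u32 ctl = (HAL_ATLANTIC_RATE_10G << HW_ATL_MPI_SPEED_SHIFT) | MPI_INIT;

    /* HAL_ATLANTIC_RATE_10G is BIT(0), so ctl == 0x00010002U */
    aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, ctl);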
+void hw_atl_utils_mpi_set(struct aq_hw_s *self,
+                         enum hal_atl_utils_fw_state_e state, u32 speed)
+{
+       int err = 0;
+       u32 transaction_id = 0;
+
+       if (state == MPI_RESET) {
+               hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
+
+               transaction_id = PHAL_ATLANTIC->mbox.transaction_id;
+
+               AQ_HW_WAIT_FOR(transaction_id !=
+                               (hw_atl_utils_mpi_read_stats
+                                       (self, &PHAL_ATLANTIC->mbox),
+                                       PHAL_ATLANTIC->mbox.transaction_id),
+                                       1000U, 100U);
+               if (err < 0)
+                       goto err_exit;
+       }
+
+       err = hw_atl_utils_mpi_set_speed(self, speed, state);
+
+err_exit:;
+}
+
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
+                                    struct aq_hw_link_status_s *link_status)
+{
+       u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
+       u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
+
+       if (!link_speed_mask) {
+               link_status->mbps = 0U;
+       } else {
+               switch (link_speed_mask) {
+               case HAL_ATLANTIC_RATE_10G:
+                       link_status->mbps = 10000U;
+                       break;
+
+               case HAL_ATLANTIC_RATE_5G:
+               case HAL_ATLANTIC_RATE_5GSR:
+                       link_status->mbps = 5000U;
+                       break;
+
+               case HAL_ATLANTIC_RATE_2GS:
+                       link_status->mbps = 2500U;
+                       break;
+
+               case HAL_ATLANTIC_RATE_1G:
+                       link_status->mbps = 1000U;
+                       break;
+
+               case HAL_ATLANTIC_RATE_100M:
+                       link_status->mbps = 100U;
+                       break;
+
+               default:
+                       link_status->mbps = 0U;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
+                                  struct aq_hw_caps_s *aq_hw_caps,
+                                  u8 *mac)
+{
+       int err = 0;
+       u32 h = 0U;
+       u32 l = 0U;
+       u32 mac_addr[2];
+
+       self->mmio = aq_pci_func_get_mmio(self->aq_pci_func);
+
+       hw_atl_utils_hw_chip_features_init(self,
+                                          &PHAL_ATLANTIC_A0->chip_features);
+
+       err = hw_atl_utils_mpi_create(self, aq_hw_caps);
+       if (err < 0)
+               goto err_exit;
+
+       if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
+               unsigned int rnd = 0;
+               unsigned int ucp_0x370 = 0;
+
+               get_random_bytes(&rnd, sizeof(unsigned int));
+
+               ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
+               aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
+       }
+
+       err = hw_atl_utils_fw_downld_dwords(self,
+                                           aq_hw_read_reg(self, 0x00000374U) +
+                                           (40U * 4U),
+                                           mac_addr,
+                                           AQ_DIMOF(mac_addr));
+       if (err < 0) {
+               mac_addr[0] = 0U;
+               mac_addr[1] = 0U;
+               err = 0;
+       } else {
+               mac_addr[0] = __swab32(mac_addr[0]);
+               mac_addr[1] = __swab32(mac_addr[1]);
+       }
+
+       ether_addr_copy(mac, (u8 *)mac_addr);
+
+       if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
+               /* fetched MAC is multicast or empty: synthesize a
+                * fallback address instead
+                */
+               l = 0xE3000000U
+                       | (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG))
+                       | (0x00 << 16);
+               h = 0x8001300EU;
+
+               mac[5] = (u8)(0xFFU & l);
+               l >>= 8;
+               mac[4] = (u8)(0xFFU & l);
+               l >>= 8;
+               mac[3] = (u8)(0xFFU & l);
+               l >>= 8;
+               mac[2] = (u8)(0xFFU & l);
+               mac[1] = (u8)(0xFFU & h);
+               h >>= 8;
+               mac[0] = (u8)(0xFFU & h);
+       }
+
+err_exit:
+       return err;
+}
+
+unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
+{
+       unsigned int ret = 0U;
+
+       switch (mbps) {
+       case 100U:
+               ret = 5U;
+               break;
+
+       case 1000U:
+               ret = 4U;
+               break;
+
+       case 2500U:
+               ret = 3U;
+               break;
+
+       case 5000U:
+               ret = 1U;
+               break;
+
+       case 10000U:
+               ret = 0U;
+               break;
+
+       default:
+               break;
+       }
+       return ret;
+}
+
+void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
+{
+       u32 chip_features = 0U;
+       u32 val = reg_glb_mif_id_get(self);
+       u32 mif_rev = val & 0xFFU;
+
+       if ((3U & mif_rev) == 1U) {
+               chip_features |=
+                       HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
+                       HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
+                       HAL_ATLANTIC_UTILS_CHIP_MIPS;
+       } else if ((3U & mif_rev) == 2U) {
+               chip_features |=
+                       HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
+                       HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
+                       HAL_ATLANTIC_UTILS_CHIP_MIPS |
+                       HAL_ATLANTIC_UTILS_CHIP_TPO2 |
+                       HAL_ATLANTIC_UTILS_CHIP_RPF2;
+       }
+
+       *p = chip_features;
+}
+
+int hw_atl_utils_hw_deinit(struct aq_hw_s *self)
+{
+       hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U);
+       return 0;
+}
+
+int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
+                             unsigned int power_state)
+{
+       hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U);
+       return 0;
+}
+
+int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
+                             u64 *data, unsigned int *p_count)
+{
+       struct hw_atl_stats_s *stats = NULL;
+       int i = 0;
+
+       hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
+
+       stats = &PHAL_ATLANTIC->mbox.stats;
+
+       data[i] = stats->uprc + stats->mprc + stats->bprc;
+       data[++i] = stats->uprc;
+       data[++i] = stats->mprc;
+       data[++i] = stats->bprc;
+       data[++i] = stats->erpt;
+       data[++i] = stats->uptc + stats->mptc + stats->bptc;
+       data[++i] = stats->uptc;
+       data[++i] = stats->mptc;
+       data[++i] = stats->bptc;
+       data[++i] = stats->ubrc;
+       data[++i] = stats->ubtc;
+       data[++i] = stats->mbrc;
+       data[++i] = stats->mbtc;
+       data[++i] = stats->bbrc;
+       data[++i] = stats->bbtc;
+       data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+       data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+       data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self);
+       data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self);
+       data[++i] = stats_rx_dma_good_octet_counterlsw_get(self);
+       data[++i] = stats_tx_dma_good_octet_counterlsw_get(self);
+       data[++i] = stats->dpc;
+
+       if (p_count)
+               *p_count = ++i;
+
+       return 0;
+}
+
+static const u32 hw_atl_utils_hw_mac_regs[] = {
+       0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U,
+       0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U,
+       0x00005B0CU, 0x00005B10U, 0x00005B14U, 0x00005B18U,
+       0x00005B1CU, 0x00005B20U, 0x00005B24U, 0x00005B28U,
+       0x00005B2CU, 0x00005B30U, 0x00005B34U, 0x00005B38U,
+       0x00005B3CU, 0x00005B40U, 0x00005B44U, 0x00005B48U,
+       0x00005B4CU, 0x00005B50U, 0x00005B54U, 0x00005B58U,
+       0x00005B5CU, 0x00005B60U, 0x00005B64U, 0x00005B68U,
+       0x00005B6CU, 0x00005B70U, 0x00005B74U, 0x00005B78U,
+       0x00005B7CU, 0x00007C00U, 0x00007C04U, 0x00007C08U,
+       0x00007C0CU, 0x00007C10U, 0x00007C14U, 0x00007C18U,
+       0x00007C1CU, 0x00007C20U, 0x00007C40U, 0x00007C44U,
+       0x00007C48U, 0x00007C4CU, 0x00007C50U, 0x00007C54U,
+       0x00007C58U, 0x00007C5CU, 0x00007C60U, 0x00007C80U,
+       0x00007C84U, 0x00007C88U, 0x00007C8CU, 0x00007C90U,
+       0x00007C94U, 0x00007C98U, 0x00007C9CU, 0x00007CA0U,
+       0x00007CC0U, 0x00007CC4U, 0x00007CC8U, 0x00007CCCU,
+       0x00007CD0U, 0x00007CD4U, 0x00007CD8U, 0x00007CDCU,
+       0x00007CE0U, 0x00000300U, 0x00000304U, 0x00000308U,
+       0x0000030cU, 0x00000310U, 0x00000314U, 0x00000318U,
+       0x0000031cU, 0x00000360U, 0x00000364U, 0x00000368U,
+       0x0000036cU, 0x00000370U, 0x00000374U, 0x00006900U,
+};
+
+int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
+                            struct aq_hw_caps_s *aq_hw_caps,
+                            u32 *regs_buff)
+{
+       unsigned int i = 0U;
+
+       for (i = 0; i < aq_hw_caps->mac_regs_count; i++)
+               regs_buff[i] = aq_hw_read_reg(self,
+                       hw_atl_utils_hw_mac_regs[i]);
+       return 0;
+}
+
+int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
+{
+       *fw_version = aq_hw_read_reg(self, 0x18U);
+       return 0;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
new file mode 100644 (file)
index 0000000..b8e3d88
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware
+ * abstraction layer.
+ */
+
+#ifndef HW_ATL_UTILS_H
+#define HW_ATL_UTILS_H
+
+#include "../aq_common.h"
+
+#define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); }
+
+struct __packed hw_atl_stats_s {
+       u32 uprc;
+       u32 mprc;
+       u32 bprc;
+       u32 erpt;
+       u32 uptc;
+       u32 mptc;
+       u32 bptc;
+       u32 erpr;
+       u32 mbtc;
+       u32 bbtc;
+       u32 mbrc;
+       u32 bbrc;
+       u32 ubrc;
+       u32 ubtc;
+       u32 dpc;
+};
+
+union __packed ip_addr {
+       struct {
+               u8 addr[16];
+       } v6;
+       struct {
+               u8 padding[12];
+               u8 addr[4];
+       } v4;
+};
+
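union ip_addr right-aligns an IPv4 address within the same 16 bytes used for IPv6, so firmware messages carry one fixed-size slot either way. For instance (192.0.2.1 is a documentation address):

    union ip_addr a;

    memset(&a, 0, sizeof(a));   /* zero the 12 padding bytes */
    a.v4.addr[0] = 192;         /* the v4 address occupies bytes 12..15 */
    a.v4.addr[1] = 0;
    a.v4.addr[2] = 2;
    a.v4.addr[3] = 1;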
+struct __packed hw_aq_atl_utils_fw_rpc {
+       u32 msg_id;
+
+       union {
+               struct {
+                       u32 pong;
+               } msg_ping;
+
+               struct {
+                       u8 mac_addr[6];
+                       u32 ip_addr_cnt;
+
+                       struct {
+                               union ip_addr addr;
+                               union ip_addr mask;
+                       } ip[1];
+               } msg_arp;
+
+               struct {
+                       u32 len;
+                       u8 packet[1514U];
+               } msg_inject;
+
+               struct {
+                       u32 priority;
+                       u32 wol_packet_type;
+                       u16 friendly_name_len;
+                       u16 friendly_name[65];
+                       u32 pattern_id;
+                       u32 next_wol_pattern_offset;
+
+                       union {
+                               struct {
+                                       u32 flags;
+                                       u8 ipv4_source_address[4];
+                                       u8 ipv4_dest_address[4];
+                                       u16 tcp_source_port_number;
+                                       u16 tcp_dest_port_number;
+                               } ipv4_tcp_syn_parameters;
+
+                               struct {
+                                       u32 flags;
+                                       u8 ipv6_source_address[16];
+                                       u8 ipv6_dest_address[16];
+                                       u16 tcp_source_port_number;
+                                       u16 tcp_dest_port_number;
+                               } ipv6_tcp_syn_parameters;
+
+                               struct {
+                                       u32 flags;
+                               } eapol_request_id_message_parameters;
+
+                               struct {
+                                       u32 flags;
+                                       u32 mask_offset;
+                                       u32 mask_size;
+                                       u32 pattern_offset;
+                                       u32 pattern_size;
+                               } wol_bit_map_pattern;
+                       } wol_pattern;
+               } msg_wol;
+
+               struct {
+                       u32 is_wake_on_link_down;
+                       u32 is_wake_on_link_up;
+               } msg_wolink;
+       };
+};
+
+struct __packed hw_aq_atl_utils_mbox {
+       u32 version;
+       u32 transaction_id;
+       int error;
+       struct hw_atl_stats_s stats;
+};
+
+struct __packed hw_atl_s {
+       struct aq_hw_s base;
+       struct hw_aq_atl_utils_mbox mbox;
+       u64 speed;
+       u32 itr_tx;
+       u32 itr_rx;
+       unsigned int chip_features;
+       u32 fw_ver_actual;
+       atomic_t dpc;
+       u32 mbox_addr;
+       u32 rpc_addr;
+       u32 rpc_tid;
+       struct hw_aq_atl_utils_fw_rpc rpc;
+};
+
+#define SELF ((struct hw_atl_s *)self)
+
+#define PHAL_ATLANTIC ((struct hw_atl_s *)((void *)(self)))
+#define PHAL_ATLANTIC_A0 ((struct hw_atl_s *)((void *)(self)))
+#define PHAL_ATLANTIC_B0 ((struct hw_atl_s *)((void *)(self)))
+
+#define HAL_ATLANTIC_UTILS_CHIP_MIPS         0x00000001U
+#define HAL_ATLANTIC_UTILS_CHIP_TPO2         0x00000002U
+#define HAL_ATLANTIC_UTILS_CHIP_RPF2         0x00000004U
+#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ       0x00000010U
+#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0  0x01000000U
+#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0  0x02000000U
+
+#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
+                               PHAL_ATLANTIC->chip_features)
+
+enum hal_atl_utils_fw_state_e {
+       MPI_DEINIT = 0,
+       MPI_RESET = 1,
+       MPI_INIT = 2,
+       MPI_POWER = 4,
+};
+
+#define HAL_ATLANTIC_RATE_10G        BIT(0)
+#define HAL_ATLANTIC_RATE_5G         BIT(1)
+#define HAL_ATLANTIC_RATE_5GSR       BIT(2)
+#define HAL_ATLANTIC_RATE_2GS        BIT(3)
+#define HAL_ATLANTIC_RATE_1G         BIT(4)
+#define HAL_ATLANTIC_RATE_100M       BIT(5)
+#define HAL_ATLANTIC_RATE_INVALID    BIT(6)
+
+void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
+
+void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
+                                struct hw_aq_atl_utils_mbox *pmbox);
+
+void hw_atl_utils_mpi_set(struct aq_hw_s *self,
+                         enum hal_atl_utils_fw_state_e state,
+                         u32 speed);
+
+int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
+                              enum hal_atl_utils_fw_state_e state);
+
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
+                                    struct aq_hw_link_status_s *link_status);
+
+int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
+                                  struct aq_hw_caps_s *aq_hw_caps,
+                                  u8 *mac);
+
+unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps);
+
+int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
+                            struct aq_hw_caps_s *aq_hw_caps,
+                            u32 *regs_buff);
+
+int hw_atl_utils_hw_get_settings(struct aq_hw_s *self,
+                                struct ethtool_cmd *cmd);
+
+int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
+                             unsigned int power_state);
+
+int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
+
+int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
+
+int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
+                             u64 *data,
+                             unsigned int *p_count);
+
+#endif /* HW_ATL_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
new file mode 100644 (file)
index 0000000..0de858d
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef VER_H
+#define VER_H
+
+#define NIC_MAJOR_DRIVER_VERSION           1
+#define NIC_MINOR_DRIVER_VERSION           5
+#define NIC_BUILD_DRIVER_VERSION           345
+#define NIC_REVISION_DRIVER_VERSION        0
+
+#endif /* VER_H */
index abc9f2a590546e0be4c5a5dabcf6ff3d219f1936..23873395f100b06763e03e12b6a91685aba71bbf 100644 (file)
@@ -275,7 +275,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
 
        work_done = arc_emac_rx(ndev, budget);
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
        }
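This hunk, like many below, converts a poll handler from napi_complete() to napi_complete_done(), which reports how much work was done so the core can drive adaptive interrupt moderation and busy polling. The converted handlers all share the same shape; a minimal sketch with hypothetical helpers:

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int work_done = example_rx(napi, budget); /* hypothetical RX loop */

            if (work_done < budget) {
                    /* report the work count instead of a bare completion */
                    napi_complete_done(napi, work_done);
                    example_enable_irq(napi);         /* hypothetical */
            }
            return work_done;
    }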
 
index 7dcc907a449d60adcbeb27cf58434edb0442792b..6a27c266267587685956b8720cd0a6bc9b3fcb69 100644 (file)
@@ -311,7 +311,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
        if (!tx_complete || work == budget)
                return budget;
 
-       napi_complete(&np->napi);
+       napi_complete_done(&np->napi, work);
 
        /* enable interrupt */
        if (alx->flags & ALX_FLAG_USING_MSIX) {
@@ -1648,8 +1648,8 @@ static void alx_poll_controller(struct net_device *netdev)
 }
 #endif
 
-static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
-                                       struct rtnl_link_stats64 *net_stats)
+static void alx_get_stats64(struct net_device *dev,
+                           struct rtnl_link_stats64 *net_stats)
 {
        struct alx_priv *alx = netdev_priv(dev);
        struct alx_hw_stats *hw_stats = &alx->hw.stats;
@@ -1693,8 +1693,6 @@ static struct rtnl_link_stats64 *alx_get_stats64(struct net_device *dev,
        net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
 
        spin_unlock(&alx->stats_lock);
-
-       return net_stats;
 }
 
 static const struct net_device_ops alx_netdev_ops = {
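This hunk (and the matching b44 change below) follows the .ndo_get_stats64 signature change visible in this merge: the callback now fills the caller-provided structure and returns void. A minimal sketch of the new form, with a hypothetical private struct:

    static void example_get_stats64(struct net_device *dev,
                                    struct rtnl_link_stats64 *stats)
    {
            struct example_priv *priv = netdev_priv(dev);

            stats->rx_packets = priv->rx_packets;
            stats->tx_packets = priv->tx_packets;
            /* no return value: the core consumes the structure it passed in */
    }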
@@ -1823,6 +1821,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        netdev->hw_features = NETIF_F_SG |
                              NETIF_F_HW_CSUM |
+                             NETIF_F_RXCSUM |
                              NETIF_F_TSO |
                              NETIF_F_TSO6;
 
index 773d3b7d8dd5efeee95b14eb1df6b483b9d80311..7e913d8331c3082d4720e8a4077a85abd4be17c7 100644 (file)
@@ -1892,7 +1892,7 @@ static int atl1c_clean(struct napi_struct *napi, int budget)
 
        if (work_done < budget) {
 quit_polling:
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                adapter->hw.intr_mask |= ISR_RX_PKT;
                AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
        }
index e96091b652a7ec1baf42d24fc4bf674aabf308d1..4f7e195af0bc6dff79687547b9979375b35a17d6 100644 (file)
@@ -1472,7 +1472,7 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
                                           prrs->vtag);
                                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                        }
-                       netif_receive_skb(skb);
+                       napi_gro_receive(&adapter->napi, skb);
 
 skip_pkt:
        /* skip current packet whether it's ok or not. */
@@ -1526,7 +1526,7 @@ static int atl1e_clean(struct napi_struct *napi, int budget)
        /* If no Tx and not enough Rx work done, exit the polling mode */
        if (work_done < budget) {
 quit_polling:
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
                AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
                /* test debug */
index 7dad8e4b9d2a8aabdf6a5b8f12d3046e51313a07..022772e1e24988deec39b46c69a45349d7dba055 100644 (file)
@@ -2457,7 +2457,7 @@ static int atl1_rings_clean(struct napi_struct *napi, int budget)
        if (work_done >= budget)
                return work_done;
 
-       napi_complete(napi);
+       napi_complete_done(napi, work_done);
        /* re-enable Interrupt */
        if (likely(adapter->int_enabled))
                atlx_imr_set(adapter, IMR_NORMAL_MASK);
index 48707ed76ffcb44f037992887476bb2cf556dd6f..5b95bb48ce97dd9c0d02968badb05949f7eb4214 100644 (file)
@@ -902,7 +902,7 @@ static int b44_poll(struct napi_struct *napi, int budget)
        }
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                b44_enable_ints(bp);
        }
 
@@ -1674,8 +1674,8 @@ static int b44_close(struct net_device *dev)
        return 0;
 }
 
-static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
-                                       struct rtnl_link_stats64 *nstat)
+static void b44_get_stats64(struct net_device *dev,
+                           struct rtnl_link_stats64 *nstat)
 {
        struct b44 *bp = netdev_priv(dev);
        struct b44_hw_stats *hwstat = &bp->hw_stats;
@@ -1718,7 +1718,6 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
 #endif
        } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
 
-       return nstat;
 }
 
 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
index c483618b57bd7ef93f8522a91814a5dd9d9b0eed..0ee6e208aa07eca9d7111666d89e05e042e94fa4 100644 (file)
@@ -511,7 +511,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
 
        /* no more packet in rx/tx queue, remove device from poll
         * queue */
-       napi_complete(napi);
+       napi_complete_done(napi, rx_work_done);
 
        /* restore rx/tx interrupt */
        enet_dmac_writel(priv, priv->dma_chan_int_mask,
index 744ed6ddaf373964a2b3526a2b73613932c73c87..a68d4889f5db74d895f1bfb9e74c46bd2b892dbc 100644 (file)
@@ -43,14 +43,43 @@ static inline void name##_writel(struct bcm_sysport_priv *priv,             \
 BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
 BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
 BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
+BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
 BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
-BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
 BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
 BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
 BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
 BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
 BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
 
+/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
+ * same layout, except it has been moved up by 4 bytes, *sigh*
+ */
+static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
+{
+       if (priv->is_lite && off >= RDMA_STATUS)
+               off += 4;
+       return __raw_readl(priv->base + SYS_PORT_RDMA_OFFSET + off);
+}
+
+static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
+{
+       if (priv->is_lite && off >= RDMA_STATUS)
+               off += 4;
+       __raw_writel(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
+}
+
+static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
+{
+       if (!priv->is_lite) {
+               return BIT(bit);
+       } else {
+               if (bit >= ACB_ALGO)
+                       return BIT(bit + 1);
+               else
+                       return BIT(bit);
+       }
+}
+
 /* L2-interrupt masking/unmasking helpers; they automatically save the
  * applied mask in a software copy to avoid CPU_MASK_STATUS reads in
  * hot paths.
  */
@@ -143,9 +172,9 @@ static int bcm_sysport_set_tx_csum(struct net_device *dev,
        priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
        reg = tdma_readl(priv, TDMA_CONTROL);
        if (priv->tsb_en)
-               reg |= TSB_EN;
+               reg |= tdma_control_bit(priv, TSB_EN);
        else
-               reg &= ~TSB_EN;
+               reg &= ~tdma_control_bit(priv, TSB_EN);
        tdma_writel(priv, reg, TDMA_CONTROL);
 
        return 0;
@@ -281,11 +310,35 @@ static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
        priv->msg_enable = enable;
 }
 
+static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
+{
+       switch (type) {
+       case BCM_SYSPORT_STAT_NETDEV:
+       case BCM_SYSPORT_STAT_RXCHK:
+       case BCM_SYSPORT_STAT_RBUF:
+       case BCM_SYSPORT_STAT_SOFT:
+               return true;
+       default:
+               return false;
+       }
+}
+
 static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
 {
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       const struct bcm_sysport_stats *s;
+       unsigned int i, j;
+
        switch (string_set) {
        case ETH_SS_STATS:
-               return BCM_SYSPORT_STATS_LEN;
+               for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+                       s = &bcm_sysport_gstrings_stats[i];
+                       if (priv->is_lite &&
+                           !bcm_sysport_lite_stat_valid(s->type))
+                               continue;
+                       j++;
+               }
+               return j;
        default:
                return -EOPNOTSUPP;
        }
@@ -294,14 +347,21 @@ static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
 static void bcm_sysport_get_strings(struct net_device *dev,
                                    u32 stringset, u8 *data)
 {
-       int i;
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       const struct bcm_sysport_stats *s;
+       int i, j;
 
        switch (stringset) {
        case ETH_SS_STATS:
-               for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
-                       memcpy(data + i * ETH_GSTRING_LEN,
-                              bcm_sysport_gstrings_stats[i].stat_string,
+               for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+                       s = &bcm_sysport_gstrings_stats[i];
+                       if (priv->is_lite &&
+                           !bcm_sysport_lite_stat_valid(s->type))
+                               continue;
+
+                       memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
                               ETH_GSTRING_LEN);
+                       j++;
                }
                break;
        default:
@@ -327,6 +387,9 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
                case BCM_SYSPORT_STAT_MIB_RX:
                case BCM_SYSPORT_STAT_MIB_TX:
                case BCM_SYSPORT_STAT_RUNT:
+                       if (priv->is_lite)
+                               continue;
+
                        if (s->type != BCM_SYSPORT_STAT_MIB_RX)
                                offset = UMAC_MIB_STAT_OFFSET;
                        val = umac_readl(priv, UMAC_MIB_START + j + offset);
@@ -355,12 +418,12 @@ static void bcm_sysport_get_stats(struct net_device *dev,
                                  struct ethtool_stats *stats, u64 *data)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
-       int i;
+       int i, j;
 
        if (netif_running(dev))
                bcm_sysport_update_mib_counters(priv);
 
-       for (i =  0; i < BCM_SYSPORT_STATS_LEN; i++) {
+       for (i =  0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
                const struct bcm_sysport_stats *s;
                char *p;
 
@@ -370,7 +433,8 @@ static void bcm_sysport_get_stats(struct net_device *dev,
                else
                        p = (char *)priv;
                p += s->stat_offset;
-               data[i] = *(unsigned long *)p;
+               data[j] = *(unsigned long *)p;
+               j++;
        }
 }
 
@@ -573,8 +637,14 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
        u16 len, status;
        struct bcm_rsb *rsb;
 
-       /* Determine how much we should process since last call */
-       p_index = rdma_readl(priv, RDMA_PROD_INDEX);
+       /* Determine how much we should process since the last call; SYSTEMPORT
+        * Lite groups the producer and consumer indexes into the same 32-bit
+        * register, which we access using RDMA_CONS_INDEX
+        */
+       if (!priv->is_lite)
+               p_index = rdma_readl(priv, RDMA_PROD_INDEX);
+       else
+               p_index = rdma_readl(priv, RDMA_CONS_INDEX);
        p_index &= RDMA_PROD_INDEX_MASK;
 
        if (p_index < priv->rx_c_index)
@@ -791,7 +861,11 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
        if (work_done == 0) {
                napi_complete(napi);
                /* re-enable TX interrupt */
-               intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+               if (!ring->priv->is_lite)
+                       intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+               else
+                       intrl2_0_mask_clear(ring->priv, BIT(ring->index +
+                                           INTRL2_0_TDMA_MBDONE_SHIFT));
 
                return 0;
        }
@@ -817,7 +891,15 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
 
        priv->rx_c_index += work_done;
        priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
-       rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
+
+       /* SYSTEMPORT Lite groups the producer/consumer indexes into one
+        * register; the producer half is maintained by HW and writes to it
+        * are ignored while RDMA is active, so the consumer index goes into
+        * the upper half
+        */
+       if (!priv->is_lite)
+               rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
+       else
+               rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);
 
        if (work_done < budget) {
                napi_complete_done(napi, work_done);
@@ -848,6 +930,8 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
 {
        struct net_device *dev = dev_id;
        struct bcm_sysport_priv *priv = netdev_priv(dev);
+       struct bcm_sysport_tx_ring *txr;
+       unsigned int ring, ring_bit;
 
        priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
                          ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -877,6 +961,22 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
                bcm_sysport_resume_from_wol(priv);
        }
 
+       if (!priv->is_lite)
+               goto out;
+
+       for (ring = 0; ring < dev->num_tx_queues; ring++) {
+               ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
+               if (!(priv->irq0_stat & ring_bit))
+                       continue;
+
+               txr = &priv->tx_rings[ring];
+
+               if (likely(napi_schedule_prep(&txr->napi))) {
+                       intrl2_0_mask_set(priv, ring_bit);
+                       __napi_schedule(&txr->napi);
+               }
+       }
+out:
        return IRQ_HANDLED;
 }
 
@@ -930,9 +1030,11 @@ static void bcm_sysport_poll_controller(struct net_device *dev)
        bcm_sysport_rx_isr(priv->irq0, priv);
        enable_irq(priv->irq0);
 
-       disable_irq(priv->irq1);
-       bcm_sysport_tx_isr(priv->irq1, priv);
-       enable_irq(priv->irq1);
+       if (!priv->is_lite) {
+               disable_irq(priv->irq1);
+               bcm_sysport_tx_isr(priv->irq1, priv);
+               enable_irq(priv->irq1);
+       }
 }
 #endif
 
@@ -1129,6 +1231,9 @@ static void bcm_sysport_adj_link(struct net_device *dev)
                priv->old_duplex = phydev->duplex;
        }
 
+       if (priv->is_lite)
+               goto out;
+
        switch (phydev->speed) {
        case SPEED_2500:
                cmd_bits = CMD_SPEED_2500;
@@ -1169,8 +1274,9 @@ static void bcm_sysport_adj_link(struct net_device *dev)
                reg |= cmd_bits;
                umac_writel(priv, reg, UMAC_CMD);
        }
-
-       phy_print_status(phydev);
+out:
+       if (changed)
+               phy_print_status(phydev);
 }
 
 static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
@@ -1315,9 +1421,9 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
 
        reg = tdma_readl(priv, TDMA_CONTROL);
        if (enable)
-               reg |= TDMA_EN;
+               reg |= tdma_control_bit(priv, TDMA_EN);
        else
-               reg &= ~TDMA_EN;
+               reg &= ~tdma_control_bit(priv, TDMA_EN);
        tdma_writel(priv, reg, TDMA_CONTROL);
 
        /* Poll for TDMA disabling completion */
@@ -1342,7 +1448,7 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
        int i;
 
        /* Initialize SW view of the RX ring */
-       priv->num_rx_bds = NUM_RX_DESC;
+       priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
        priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
        priv->rx_c_index = 0;
        priv->rx_read_ptr = 0;
@@ -1379,7 +1485,7 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
        rdma_writel(priv, 0, RDMA_START_ADDR_HI);
        rdma_writel(priv, 0, RDMA_START_ADDR_LO);
        rdma_writel(priv, 0, RDMA_END_ADDR_HI);
-       rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);
+       rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
 
        rdma_writel(priv, 1, RDMA_MBDONE_INTR);
 
@@ -1421,6 +1527,9 @@ static void bcm_sysport_set_rx_mode(struct net_device *dev)
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        u32 reg;
 
+       if (priv->is_lite)
+               return;
+
        reg = umac_readl(priv, UMAC_CMD);
        if (dev->flags & IFF_PROMISC)
                reg |= CMD_PROMISC;
@@ -1438,12 +1547,21 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv,
 {
        u32 reg;
 
-       reg = umac_readl(priv, UMAC_CMD);
-       if (enable)
-               reg |= mask;
-       else
-               reg &= ~mask;
-       umac_writel(priv, reg, UMAC_CMD);
+       if (!priv->is_lite) {
+               reg = umac_readl(priv, UMAC_CMD);
+               if (enable)
+                       reg |= mask;
+               else
+                       reg &= ~mask;
+               umac_writel(priv, reg, UMAC_CMD);
+       } else {
+               reg = gib_readl(priv, GIB_CONTROL);
+               if (enable)
+                       reg |= mask;
+               else
+                       reg &= ~mask;
+               gib_writel(priv, reg, GIB_CONTROL);
+       }
 
        /* UniMAC stops on a packet boundary; wait for a full-sized packet
         * to be processed (1 msec).
@@ -1456,6 +1574,9 @@ static inline void umac_reset(struct bcm_sysport_priv *priv)
 {
        u32 reg;
 
+       if (priv->is_lite)
+               return;
+
        reg = umac_readl(priv, UMAC_CMD);
        reg |= CMD_SW_RESET;
        umac_writel(priv, reg, UMAC_CMD);
@@ -1468,9 +1589,17 @@ static inline void umac_reset(struct bcm_sysport_priv *priv)
 static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
                             unsigned char *addr)
 {
-       umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
-                       (addr[2] << 8) | addr[3], UMAC_MAC0);
-       umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+       u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
+                   addr[3];
+       u32 mac1 = (addr[4] << 8) | addr[5];
+
+       if (!priv->is_lite) {
+               umac_writel(priv, mac0, UMAC_MAC0);
+               umac_writel(priv, mac1, UMAC_MAC1);
+       } else {
+               gib_writel(priv, mac0, GIB_MAC0);
+               gib_writel(priv, mac1, GIB_MAC1);
+       }
 }
 
 static void topctrl_flush(struct bcm_sysport_priv *priv)
@@ -1515,8 +1644,11 @@ static void bcm_sysport_netif_start(struct net_device *dev)
 
        phy_start(dev->phydev);
 
-       /* Enable TX interrupts for the 32 TXQs */
-       intrl2_1_mask_clear(priv, 0xffffffff);
+       /* Enable TX interrupts for the TXQs */
+       if (!priv->is_lite)
+               intrl2_1_mask_clear(priv, 0xffffffff);
+       else
+               intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
 
        /* Last call before we start the real business */
        netif_tx_start_all_queues(dev);
@@ -1528,9 +1660,37 @@ static void rbuf_init(struct bcm_sysport_priv *priv)
 
        reg = rbuf_readl(priv, RBUF_CONTROL);
        reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
+       /* Set a correct RSB format on SYSTEMPORT Lite */
+       if (priv->is_lite) {
+               reg &= ~RBUF_RSB_SWAP1;
+               reg |= RBUF_RSB_SWAP0;
+       }
        rbuf_writel(priv, reg, RBUF_CONTROL);
 }
 
+static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
+{
+       intrl2_0_mask_set(priv, 0xffffffff);
+       intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+       if (!priv->is_lite) {
+               intrl2_1_mask_set(priv, 0xffffffff);
+               intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+       }
+}
+
+static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
+{
+       u32 __maybe_unused reg;
+
+       /* Include Broadcom tag in pad extension */
+       if (netdev_uses_dsa(priv->netdev)) {
+               reg = gib_readl(priv, GIB_CONTROL);
+               reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
+               reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
+               gib_writel(priv, reg, GIB_CONTROL);
+       }
+}
+
 static int bcm_sysport_open(struct net_device *dev)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -1551,13 +1711,20 @@ static int bcm_sysport_open(struct net_device *dev)
        rbuf_init(priv);
 
        /* Set maximum frame length */
-       umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+       if (!priv->is_lite)
+               umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+       else
+               gib_set_pad_extension(priv);
 
        /* Set MAC address */
        umac_set_hw_addr(priv, dev->dev_addr);
 
        /* Read CRC forward */
-       priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+       if (!priv->is_lite)
+               priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+       else
+               priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
+                                  GIB_FCS_STRIP);
 
        phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
                                0, priv->phy_interface);
@@ -1572,12 +1739,7 @@ static int bcm_sysport_open(struct net_device *dev)
        priv->old_pause = -1;
 
        /* mask all interrupts and request them */
-       intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
-       intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
-       intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
-       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
-       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
-       intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+       bcm_sysport_mask_all_intrs(priv);
 
        ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
        if (ret) {
@@ -1585,10 +1747,13 @@ static int bcm_sysport_open(struct net_device *dev)
                goto out_phy_disconnect;
        }
 
-       ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
-       if (ret) {
-               netdev_err(dev, "failed to request TX interrupt\n");
-               goto out_free_irq0;
+       if (!priv->is_lite) {
+               ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
+                                 dev->name, dev);
+               if (ret) {
+                       netdev_err(dev, "failed to request TX interrupt\n");
+                       goto out_free_irq0;
+               }
        }
 
        /* Initialize both hardware and software ring */
@@ -1635,7 +1800,8 @@ out_free_rx_ring:
 out_free_tx_ring:
        for (i = 0; i < dev->num_tx_queues; i++)
                bcm_sysport_fini_tx_ring(priv, i);
-       free_irq(priv->irq1, dev);
+       if (!priv->is_lite)
+               free_irq(priv->irq1, dev);
 out_free_irq0:
        free_irq(priv->irq0, dev);
 out_phy_disconnect:
@@ -1653,10 +1819,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
        phy_stop(dev->phydev);
 
        /* mask all interrupts */
-       intrl2_0_mask_set(priv, 0xffffffff);
-       intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
-       intrl2_1_mask_set(priv, 0xffffffff);
-       intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+       bcm_sysport_mask_all_intrs(priv);
 }
 
 static int bcm_sysport_stop(struct net_device *dev)
@@ -1694,7 +1857,8 @@ static int bcm_sysport_stop(struct net_device *dev)
        bcm_sysport_fini_rx_ring(priv);
 
        free_irq(priv->irq0, dev);
-       free_irq(priv->irq1, dev);
+       if (!priv->is_lite)
+               free_irq(priv->irq1, dev);
 
        /* Disconnect from PHY */
        phy_disconnect(dev->phydev);
@@ -1733,8 +1897,32 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
 
 #define REV_FMT        "v%2x.%02x"
 
+static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
+       [SYSTEMPORT] = {
+               .is_lite = false,
+               .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
+       },
+       [SYSTEMPORT_LITE] = {
+               .is_lite = true,
+               .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
+       },
+};
+
+static const struct of_device_id bcm_sysport_of_match[] = {
+       { .compatible = "brcm,systemportlite-v1.00",
+         .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
+       { .compatible = "brcm,systemport-v1.00",
+         .data = &bcm_sysport_params[SYSTEMPORT] },
+       { .compatible = "brcm,systemport",
+         .data = &bcm_sysport_params[SYSTEMPORT] },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
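[Editor's note: the match table now carries per-variant parameters in .data, which probe below fetches via of_match_node(). An equivalent fetch, assuming the of_device_get_match_data() helper is available in this kernel, would be:

	const struct bcm_sysport_hw_params *params;

	params = of_device_get_match_data(&pdev->dev);
	if (!params)
		return -EINVAL;
]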
+
 static int bcm_sysport_probe(struct platform_device *pdev)
 {
+       const struct bcm_sysport_hw_params *params;
+       const struct of_device_id *of_id = NULL;
        struct bcm_sysport_priv *priv;
        struct device_node *dn;
        struct net_device *dev;
@@ -1745,6 +1933,12 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 
        dn = pdev->dev.of_node;
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       of_id = of_match_node(bcm_sysport_of_match, dn);
+       if (!of_id || !of_id->data)
+               return -EINVAL;
+
+       /* We need to know the adapter type early, before sizing resources */
+       params = of_id->data;
 
        /* Read the Transmit/Receive Queue properties */
        if (of_property_read_u32(dn, "systemport,num-txq", &txq))
@@ -1752,6 +1946,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
                rxq = 1;
 
+       /* Sanity check the number of transmit queues */
+       if (!txq || txq > TDMA_NUM_RINGS)
+               return -EINVAL;
+
        dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
        if (!dev)
                return -ENOMEM;
@@ -1759,10 +1957,21 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        /* Initialize private members */
        priv = netdev_priv(dev);
 
+       /* Allocate number of TX rings */
+       priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
+                                     sizeof(struct bcm_sysport_tx_ring),
+                                     GFP_KERNEL);
+       if (!priv->tx_rings)
+               return -ENOMEM;
+
+       priv->is_lite = params->is_lite;
+       priv->num_rx_desc_words = params->num_rx_desc_words;
+
        priv->irq0 = platform_get_irq(pdev, 0);
-       priv->irq1 = platform_get_irq(pdev, 1);
+       if (!priv->is_lite)
+               priv->irq1 = platform_get_irq(pdev, 1);
        priv->wol_irq = platform_get_irq(pdev, 2);
-       if (priv->irq0 <= 0 || priv->irq1 <= 0) {
+       if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
                dev_err(&pdev->dev, "invalid interrupts\n");
                ret = -EINVAL;
                goto err_free_netdev;
@@ -1836,8 +2045,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 
        priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
        dev_info(&pdev->dev,
-                "Broadcom SYSTEMPORT" REV_FMT
+                "Broadcom SYSTEMPORT%s" REV_FMT
                 " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+                priv->is_lite ? " Lite" : "",
                 (priv->rev >> 8) & 0xff, priv->rev & 0xff,
                 priv->base, priv->irq0, priv->irq1, txq, rxq);
 
@@ -2033,7 +2243,10 @@ static int bcm_sysport_resume(struct device *d)
        rbuf_init(priv);
 
        /* Set maximum frame length */
-       umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+       if (!priv->is_lite)
+               umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+       else
+               gib_set_pad_extension(priv);
 
        /* Set MAC address */
        umac_set_hw_addr(priv, dev->dev_addr);
@@ -2069,13 +2282,6 @@ out_free_tx_rings:
 static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
                bcm_sysport_suspend, bcm_sysport_resume);
 
-static const struct of_device_id bcm_sysport_of_match[] = {
-       { .compatible = "brcm,systemport-v1.00" },
-       { .compatible = "brcm,systemport" },
-       { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
-
 static struct platform_driver bcm_sysport_driver = {
        .probe  = bcm_sysport_probe,
        .remove = bcm_sysport_remove,
index 1c82e3da69a7bf6134389d982e15bc17697327a6..863ddd7870b77d2ce963685098fff71211f395a8 100644 (file)
@@ -127,6 +127,10 @@ struct bcm_rsb {
 #define INTRL2_0_DESC_ALLOC_ERR                (1 << 10)
 #define INTRL2_0_UNEXP_PKTSIZE_ACK     (1 << 11)
 
+/* SYSTEMPORT Lite groups the TX queue interrupts on instance 0 */
+#define INTRL2_0_TDMA_MBDONE_SHIFT     12
+#define INTRL2_0_TDMA_MBDONE_MASK      (0xffff << INTRL2_0_TDMA_MBDONE_SHIFT)
+
 /* RXCHK offset and defines */
 #define SYS_PORT_RXCHK_OFFSET          0x300
 
@@ -176,7 +180,9 @@ struct bcm_rsb {
 #define  RBUF_OK_TO_SEND_MASK          0xff
 #define  RBUF_CRC_REPLACE              (1 << 20)
 #define  RBUF_OK_TO_SEND_MODE          (1 << 21)
-#define  RBUF_RSB_SWAP                 (1 << 22)
+/* SYSTEMPORT Lite uses two bits here */
+#define  RBUF_RSB_SWAP0                        (1 << 22)
+#define  RBUF_RSB_SWAP1                        (1 << 23)
 #define  RBUF_ACPI_EN                  (1 << 23)
 
 #define RBUF_PKT_RDY_THRESH            0x04
@@ -247,6 +253,7 @@ struct bcm_rsb {
 #define  MIB_RUNT_CNT_RST              (1 << 1)
 #define  MIB_TX_CNT_RST                        (1 << 2)
 
+/* These offsets are valid for SYSTEMPORT and SYSTEMPORT Lite */
 #define UMAC_MPD_CTRL                  0x620
 #define  MPD_EN                                (1 << 0)
 #define  MSEQ_LEN_SHIFT                        16
@@ -258,6 +265,34 @@ struct bcm_rsb {
 #define UMAC_MDF_CTRL                  0x650
 #define UMAC_MDF_ADDR                  0x654
 
+/* Only valid on SYSTEMPORT Lite */
+#define SYS_PORT_GIB_OFFSET            0x1000
+
+#define GIB_CONTROL                    0x00
+#define  GIB_TX_EN                     (1 << 0)
+#define  GIB_RX_EN                     (1 << 1)
+#define  GIB_TX_FLUSH                  (1 << 2)
+#define  GIB_RX_FLUSH                  (1 << 3)
+#define  GIB_GTX_CLK_SEL_SHIFT         4
+#define  GIB_GTX_CLK_EXT_CLK           (0 << GIB_GTX_CLK_SEL_SHIFT)
+#define  GIB_GTX_CLK_125MHZ            (1 << GIB_GTX_CLK_SEL_SHIFT)
+#define  GIB_GTX_CLK_250MHZ            (2 << GIB_GTX_CLK_SEL_SHIFT)
+#define  GIB_FCS_STRIP                 (1 << 6)
+#define  GIB_LCL_LOOP_EN               (1 << 7)
+#define  GIB_LCL_LOOP_TXEN             (1 << 8)
+#define  GIB_RMT_LOOP_EN               (1 << 9)
+#define  GIB_RMT_LOOP_RXEN             (1 << 10)
+#define  GIB_RX_PAUSE_EN               (1 << 11)
+#define  GIB_PREAMBLE_LEN_SHIFT                12
+#define  GIB_PREAMBLE_LEN_MASK         0xf
+#define  GIB_IPG_LEN_SHIFT             16
+#define  GIB_IPG_LEN_MASK              0x3f
+#define  GIB_PAD_EXTENSION_SHIFT       22
+#define  GIB_PAD_EXTENSION_MASK                0x3f
+
+#define GIB_MAC1                       0x08
+#define GIB_MAC0                       0x0c
+
 /* Receive DMA offset and defines */
 #define SYS_PORT_RDMA_OFFSET           0x2000
 
@@ -409,16 +444,19 @@ struct bcm_rsb {
                                        RING_PCP_DEI_VID)
 
 #define TDMA_CONTROL                   0x600
-#define  TDMA_EN                       (1 << 0)
-#define  TSB_EN                                (1 << 1)
-#define  TSB_SWAP                      (1 << 2)
-#define  ACB_ALGO                      (1 << 3)
+#define  TDMA_EN                       0
+#define  TSB_EN                                1
+/* Uses 2 bits on SYSTEMPORT Lite and shifts everything by 1 bit; we
+ * keep the SYSTEMPORT layout here and adjust with tdma_control_bit()
+ */
+#define  TSB_SWAP                      2
+#define  ACB_ALGO                      3
 #define  BUF_DATA_OFFSET_SHIFT         4
 #define  BUF_DATA_OFFSET_MASK          0x3ff
-#define  VLAN_EN                       (1 << 14)
-#define  SW_BRCM_TAG                   (1 << 15)
-#define  WNC_KPT_SIZE_UPDATE           (1 << 16)
-#define  SYNC_PKT_SIZE                 (1 << 17)
+#define  VLAN_EN                       14
+#define  SW_BRCM_TAG                   15
+#define  WNC_KPT_SIZE_UPDATE           16
+#define  SYNC_PKT_SIZE                 17
 #define  ACH_TXDONE_DELAY_SHIFT                18
 #define  ACH_TXDONE_DELAY_MASK         0xff
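[Editor's note: since TSB_SWAP widens to two bits (RSB_SWAP0/RSB_SWAP1 style) on SYSTEMPORT Lite, every control bit from ACB_ALGO upward moves up by one position. A plausible sketch of the tdma_control_bit() helper the comment above refers to (the helper itself is not part of this hunk):

	static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv,
					   u32 bit)
	{
		if (!priv->is_lite)
			return 1 << bit;
		/* Lite: bits at or above ACB_ALGO are shifted up by one */
		return bit >= ACB_ALGO ? 1 << (bit + 1) : 1 << bit;
	}
]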
 
@@ -475,12 +513,12 @@ struct dma_desc {
 };
 
 /* Number of Receive hardware descriptor words */
-#define NUM_HW_RX_DESC_WORDS           1024
-/* Real number of usable descriptors */
-#define NUM_RX_DESC                    (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC)
+#define SP_NUM_HW_RX_DESC_WORDS                1024
+#define SP_LT_NUM_HW_RX_DESC_WORDS     256
 
-/* Internal linked-list RAM has up to 1536 entries */
-#define NUM_TX_DESC                    1536
+/* Internal linked-list RAM size */
+#define SP_NUM_TX_DESC                 1536
+#define SP_LT_NUM_TX_DESC              256
 
 #define WORDS_PER_DESC                 (sizeof(struct dma_desc) / sizeof(u32))
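[Editor's note: with the descriptor word count now a per-variant parameter, the usable counts are derived at runtime rather than from a single constant, e.g. (sketch):

	/* usable RX descriptors for the probed variant */
	unsigned int num_rx_desc = priv->num_rx_desc_words / WORDS_PER_DESC;
]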
 
@@ -627,6 +665,16 @@ struct bcm_sysport_cb {
        DEFINE_DMA_UNMAP_LEN(dma_len);
 };
 
+enum bcm_sysport_type {
+       SYSTEMPORT = 0,
+       SYSTEMPORT_LITE,
+};
+
+struct bcm_sysport_hw_params {
+       bool            is_lite;
+       unsigned int    num_rx_desc_words;
+};
+
 /* Software view of the TX ring */
 struct bcm_sysport_tx_ring {
        spinlock_t      lock;           /* Ring lock for tx reclaim/xmit */
@@ -651,6 +699,8 @@ struct bcm_sysport_priv {
        u32                     irq0_mask;
        u32                     irq1_stat;
        u32                     irq1_mask;
+       bool                    is_lite;
+       unsigned int            num_rx_desc_words;
        struct napi_struct      napi ____cacheline_aligned;
        struct net_device       *netdev;
        struct platform_device  *pdev;
@@ -659,7 +709,7 @@ struct bcm_sysport_priv {
        int                     wol_irq;
 
        /* Transmit rings */
-       struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
+       struct bcm_sysport_tx_ring *tx_rings;
 
        /* Receive queue */
        void __iomem            *rx_bds;
index 7c19c8e2bf91f6c26601d08ba1e1f51ea7c4d11b..6ce80cbcb48e30c17b7e9c5ecf3f79e8a247132e 100644 (file)
 #include <linux/brcmphy.h>
 #include "bgmac.h"
 
-struct bcma_mdio {
-       struct bcma_device *core;
-       u8 phyaddr;
-};
-
 static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask,
                                 u32 value, int timeout)
 {
@@ -37,7 +32,7 @@ static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask,
  * PHY ops
  **************************************************/
 
-static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg)
+static u16 bcma_mdio_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
 {
        struct bcma_device *core;
        u16 phy_access_addr;
@@ -56,12 +51,12 @@ static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg)
        BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
        BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
 
-       if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
-               core = bcma_mdio->core->bus->drv_gmac_cmn.core;
+       if (bgmac->bcma.core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+               core = bgmac->bcma.core->bus->drv_gmac_cmn.core;
                phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
                phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
        } else {
-               core = bcma_mdio->core;
+               core = bgmac->bcma.core;
                phy_access_addr = BGMAC_PHY_ACCESS;
                phy_ctl_addr = BGMAC_PHY_CNTL;
        }
@@ -87,7 +82,7 @@ static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg)
 }
 
 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
-static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
+static int bcma_mdio_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg,
                               u16 value)
 {
        struct bcma_device *core;
@@ -95,12 +90,12 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
        u16 phy_ctl_addr;
        u32 tmp;
 
-       if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
-               core = bcma_mdio->core->bus->drv_gmac_cmn.core;
+       if (bgmac->bcma.core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+               core = bgmac->bcma.core->bus->drv_gmac_cmn.core;
                phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
                phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
        } else {
-               core = bcma_mdio->core;
+               core = bgmac->bcma.core;
                phy_access_addr = BGMAC_PHY_ACCESS;
                phy_ctl_addr = BGMAC_PHY_CNTL;
        }
@@ -110,8 +105,8 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
        tmp |= phyaddr;
        bcma_write32(core, phy_ctl_addr, tmp);
 
-       bcma_write32(bcma_mdio->core, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
-       if (bcma_read32(bcma_mdio->core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
+       bcma_write32(bgmac->bcma.core, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
+       if (bcma_read32(bgmac->bcma.core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
                dev_warn(&core->dev, "Error setting MDIO int\n");
 
        tmp = BGMAC_PA_START;
@@ -132,57 +127,67 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
 }
 
 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
-static void bcma_mdio_phy_init(struct bcma_mdio *bcma_mdio)
+static void bcma_mdio_phy_init(struct bgmac *bgmac)
 {
-       struct bcma_chipinfo *ci = &bcma_mdio->core->bus->chipinfo;
+       struct bcma_chipinfo *ci = &bgmac->bcma.core->bus->chipinfo;
        u8 i;
 
+       /* For some legacy hardware we do chipset-based PHY initialization here
+        * without even detecting the PHY ID. It's hacky and should be cleaned
+        * up as soon as someone can test it.
+        */
        if (ci->id == BCMA_CHIP_ID_BCM5356) {
                for (i = 0; i < 5; i++) {
-                       bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x008b);
-                       bcma_mdio_phy_write(bcma_mdio, i, 0x15, 0x0100);
-                       bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
-                       bcma_mdio_phy_write(bcma_mdio, i, 0x12, 0x2aaa);
-                       bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
+                       bcma_mdio_phy_write(bgmac, i, 0x1f, 0x008b);
+                       bcma_mdio_phy_write(bgmac, i, 0x15, 0x0100);
+                       bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f);
+                       bcma_mdio_phy_write(bgmac, i, 0x12, 0x2aaa);
+                       bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b);
                }
+               return;
        }
        if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
            (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
            (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
-               struct bcma_drv_cc *cc = &bcma_mdio->core->bus->drv_cc;
+               struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc;
 
                bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
                bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
                for (i = 0; i < 5; i++) {
-                       bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
-                       bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5284);
-                       bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
-                       bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x0010);
-                       bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
-                       bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5296);
-                       bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x1073);
-                       bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9073);
-                       bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x52b6);
-                       bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9273);
-                       bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
+                       bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f);
+                       bcma_mdio_phy_write(bgmac, i, 0x16, 0x5284);
+                       bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b);
+                       bcma_mdio_phy_write(bgmac, i, 0x17, 0x0010);
+                       bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f);
+                       bcma_mdio_phy_write(bgmac, i, 0x16, 0x5296);
+                       bcma_mdio_phy_write(bgmac, i, 0x17, 0x1073);
+                       bcma_mdio_phy_write(bgmac, i, 0x17, 0x9073);
+                       bcma_mdio_phy_write(bgmac, i, 0x16, 0x52b6);
+                       bcma_mdio_phy_write(bgmac, i, 0x17, 0x9273);
+                       bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b);
                }
+               return;
        }
+
+       /* For all other hardware, do initialization through the PHY subsystem. */
+       if (bgmac->net_dev && bgmac->net_dev->phydev)
+               phy_init_hw(bgmac->net_dev->phydev);
 }
 
 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
 static int bcma_mdio_phy_reset(struct mii_bus *bus)
 {
-       struct bcma_mdio *bcma_mdio = bus->priv;
-       u8 phyaddr = bcma_mdio->phyaddr;
+       struct bgmac *bgmac = bus->priv;
+       u8 phyaddr = bgmac->phyaddr;
 
-       if (bcma_mdio->phyaddr == BGMAC_PHY_NOREGS)
+       if (phyaddr == BGMAC_PHY_NOREGS)
                return 0;
 
-       bcma_mdio_phy_write(bcma_mdio, phyaddr, MII_BMCR, BMCR_RESET);
+       bcma_mdio_phy_write(bgmac, phyaddr, MII_BMCR, BMCR_RESET);
        udelay(100);
-       if (bcma_mdio_phy_read(bcma_mdio, phyaddr, MII_BMCR) & BMCR_RESET)
-               dev_err(&bcma_mdio->core->dev, "PHY reset failed\n");
-       bcma_mdio_phy_init(bcma_mdio);
+       if (bcma_mdio_phy_read(bgmac, phyaddr, MII_BMCR) & BMCR_RESET)
+               dev_err(bgmac->dev, "PHY reset failed\n");
+       bcma_mdio_phy_init(bgmac);
 
        return 0;
 }
@@ -202,16 +207,12 @@ static int bcma_mdio_mii_write(struct mii_bus *bus, int mii_id, int regnum,
        return bcma_mdio_phy_write(bus->priv, mii_id, regnum, value);
 }
 
-struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr)
+struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
 {
-       struct bcma_mdio *bcma_mdio;
+       struct bcma_device *core = bgmac->bcma.core;
        struct mii_bus *mii_bus;
        int err;
 
-       bcma_mdio = kzalloc(sizeof(*bcma_mdio), GFP_KERNEL);
-       if (!bcma_mdio)
-               return ERR_PTR(-ENOMEM);
-
        mii_bus = mdiobus_alloc();
        if (!mii_bus) {
                err = -ENOMEM;
@@ -221,15 +222,12 @@ struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr)
        mii_bus->name = "bcma_mdio mii bus";
        sprintf(mii_bus->id, "%s-%d-%d", "bcma_mdio", core->bus->num,
                core->core_unit);
-       mii_bus->priv = bcma_mdio;
+       mii_bus->priv = bgmac;
        mii_bus->read = bcma_mdio_mii_read;
        mii_bus->write = bcma_mdio_mii_write;
        mii_bus->reset = bcma_mdio_phy_reset;
        mii_bus->parent = &core->dev;
-       mii_bus->phy_mask = ~(1 << phyaddr);
-
-       bcma_mdio->core = core;
-       bcma_mdio->phyaddr = phyaddr;
+       mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
 
        err = mdiobus_register(mii_bus);
        if (err) {
@@ -242,23 +240,17 @@ struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr)
 err_free_bus:
        mdiobus_free(mii_bus);
 err:
-       kfree(bcma_mdio);
        return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(bcma_mdio_mii_register);
 
 void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
 {
-       struct bcma_mdio *bcma_mdio;
-
        if (!mii_bus)
                return;
 
-       bcma_mdio = mii_bus->priv;
-
        mdiobus_unregister(mii_bus);
        mdiobus_free(mii_bus);
-       kfree(bcma_mdio);
 }
 EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
 
index 4a4ffc0c4c65d6d1777d8a24ffc5b0e0cdc63aeb..d59cfcc4c4d596d48957ecd06a77cf8a475090b7 100644 (file)
@@ -117,12 +117,11 @@ static int bgmac_probe(struct bcma_device *core)
        u8 *mac;
        int err;
 
-       bgmac = kzalloc(sizeof(*bgmac), GFP_KERNEL);
+       bgmac = bgmac_alloc(&core->dev);
        if (!bgmac)
                return -ENOMEM;
 
        bgmac->bcma.core = core;
-       bgmac->dev = &core->dev;
        bgmac->dma_dev = core->dma_dev;
        bgmac->irq = core->irq;
 
@@ -145,7 +144,7 @@ static int bgmac_probe(struct bcma_device *core)
                goto err;
        }
 
-       ether_addr_copy(bgmac->mac_addr, mac);
+       ether_addr_copy(bgmac->net_dev->dev_addr, mac);
 
        /* On BCM4706 we need common core to access PHY */
        if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
@@ -178,7 +177,7 @@ static int bgmac_probe(struct bcma_device *core)
 
        if (!bgmac_is_bcm4707_family(core) &&
            !(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) {
-               mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr);
+               mii_bus = bcma_mdio_mii_register(bgmac);
                if (IS_ERR(mii_bus)) {
                        err = PTR_ERR(mii_bus);
                        goto err;
@@ -307,7 +306,6 @@ static int bgmac_probe(struct bcma_device *core)
 err1:
        bcma_mdio_mii_unregister(bgmac->mii_bus);
 err:
-       kfree(bgmac);
        bcma_set_drvdata(core, NULL);
 
        return err;
index 6f736c19872fe304f093f47951ecfe5dd5d2e727..7b1af950f312f30b2b6caae941a2a39e47caa3fd 100644 (file)
@@ -151,7 +151,7 @@ static int bgmac_probe(struct platform_device *pdev)
        struct resource *regs;
        const u8 *mac_addr;
 
-       bgmac = devm_kzalloc(&pdev->dev, sizeof(*bgmac), GFP_KERNEL);
+       bgmac = bgmac_alloc(&pdev->dev);
        if (!bgmac)
                return -ENOMEM;
 
@@ -169,7 +169,7 @@ static int bgmac_probe(struct platform_device *pdev)
 
        mac_addr = of_get_mac_address(np);
        if (mac_addr)
-               ether_addr_copy(bgmac->mac_addr, mac_addr);
+               ether_addr_copy(bgmac->net_dev->dev_addr, mac_addr);
        else
                dev_warn(&pdev->dev, "MAC address not present in device tree\n");
 
index 0e066dc6b8cc32436f0a5bbcab0505d7af376c1a..415046750bb449853e794318c700d14f1a1de48d 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/bcma/bcma.h>
 #include <linux/etherdevice.h>
 #include <linux/bcm47xx_nvram.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
 #include "bgmac.h"
 
 static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
@@ -1148,7 +1150,7 @@ static int bgmac_poll(struct napi_struct *napi, int weight)
                return weight;
 
        if (handled < weight) {
-               napi_complete(napi);
+               napi_complete_done(napi, handled);
                bgmac_chip_intrs_on(bgmac);
        }
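[Editor's note: napi_complete_done() differs from napi_complete() only in reporting how much work the poll actually did, which lets the core account for it (e.g. for busy polling). The resulting poll skeleton, with every name other than the NAPI call being illustrative:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int done = example_rx(napi, budget);	/* hypothetical rx handler */

		if (done < budget) {
			napi_complete_done(napi, done);
			example_intrs_on(napi);		/* hypothetical irq re-enable */
		}
		return done;
	}
]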
 
@@ -1446,33 +1448,42 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
 }
 EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct);
 
-int bgmac_enet_probe(struct bgmac *info)
+struct bgmac *bgmac_alloc(struct device *dev)
 {
        struct net_device *net_dev;
        struct bgmac *bgmac;
-       int err;
 
        /* Allocation and references */
-       net_dev = alloc_etherdev(sizeof(*bgmac));
+       net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac));
        if (!net_dev)
-               return -ENOMEM;
+               return NULL;
 
        net_dev->netdev_ops = &bgmac_netdev_ops;
        net_dev->ethtool_ops = &bgmac_ethtool_ops;
+
        bgmac = netdev_priv(net_dev);
-       memcpy(bgmac, info, sizeof(*bgmac));
+       bgmac->dev = dev;
        bgmac->net_dev = net_dev;
+
+       return bgmac;
+}
+EXPORT_SYMBOL_GPL(bgmac_alloc);
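[Editor's note: bus glue now allocates through bgmac_alloc() (device-managed, so no matching free), fills in its bus-specific fields, and only then calls bgmac_enet_probe(). Abbreviated from the bcma probe hunk earlier in this diff:

	bgmac = bgmac_alloc(&core->dev);
	if (!bgmac)
		return -ENOMEM;
	bgmac->bcma.core = core;
	bgmac->dma_dev = core->dma_dev;
	bgmac->irq = core->irq;
	/* read the MAC address, set up PHY, etc. */
	err = bgmac_enet_probe(bgmac);
]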
+
+int bgmac_enet_probe(struct bgmac *bgmac)
+{
+       struct net_device *net_dev = bgmac->net_dev;
+       int err;
+
        net_dev->irq = bgmac->irq;
        SET_NETDEV_DEV(net_dev, bgmac->dev);
 
-       if (!is_valid_ether_addr(bgmac->mac_addr)) {
+       if (!is_valid_ether_addr(net_dev->dev_addr)) {
                dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
-                       bgmac->mac_addr);
-               eth_random_addr(bgmac->mac_addr);
+                       net_dev->dev_addr);
+               eth_hw_addr_random(net_dev);
                dev_warn(bgmac->dev, "Using random MAC: %pM\n",
-                        bgmac->mac_addr);
+                        net_dev->dev_addr);
        }
-       ether_addr_copy(net_dev->dev_addr, bgmac->mac_addr);
 
        /* This (reset &) enable is not present in specs or reference driver but
         * Broadcom does it in arch PCI code when enabling fake PCI device.
@@ -1488,7 +1499,7 @@ int bgmac_enet_probe(struct bgmac *info)
        err = bgmac_dma_alloc(bgmac);
        if (err) {
                dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
-               goto err_netdev_free;
+               goto err_out;
        }
 
        bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
@@ -1521,8 +1532,7 @@ err_phy_disconnect:
        phy_disconnect(net_dev->phydev);
 err_dma_free:
        bgmac_dma_free(bgmac);
-err_netdev_free:
-       free_netdev(net_dev);
+err_out:
 
        return err;
 }
index 71f493f2451f7d333492965d1bc9da94a2e55695..248727dc62f22c2ae2aa6f640f1b2cf91230a133 100644 (file)
@@ -474,7 +474,6 @@ struct bgmac {
 
        struct device *dev;
        struct device *dma_dev;
-       unsigned char mac_addr[ETH_ALEN];
        u32 feature_flags;
 
        struct net_device *net_dev;
@@ -517,12 +516,13 @@ struct bgmac {
        int (*phy_connect)(struct bgmac *bgmac);
 };
 
-int bgmac_enet_probe(struct bgmac *info);
+struct bgmac *bgmac_alloc(struct device *dev);
+int bgmac_enet_probe(struct bgmac *bgmac);
 void bgmac_enet_remove(struct bgmac *bgmac);
 void bgmac_adjust_link(struct net_device *net_dev);
 int bgmac_phy_connect_direct(struct bgmac *bgmac);
 
-struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr);
+struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac);
 void bcma_mdio_mii_unregister(struct mii_bus *mii_bus);
 
 static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset)
index d5d1026be4b70a320c48af7545a1891c74e19a47..e3af1f3cb61f3f75de64866a4ee9e00fc1e566b1 100644 (file)
@@ -3515,7 +3515,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget)
                rmb();
                if (likely(!bnx2_has_fast_work(bnapi))) {
 
-                       napi_complete(napi);
+                       napi_complete_done(napi, work_done);
                        BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                bnapi->last_status_idx);
@@ -3552,7 +3552,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
 
                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
-                       napi_complete(napi);
+                       napi_complete_done(napi, work_done);
                        if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                                BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
@@ -6821,13 +6821,13 @@ bnx2_save_stats(struct bnx2 *bp)
        (unsigned long) (bp->stats_blk->ctr +                   \
                         bp->temp_stats_blk->ctr)
 
-static struct rtnl_link_stats64 *
+static void
 bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
 {
        struct bnx2 *bp = netdev_priv(dev);
 
        if (bp->stats_blk == NULL)
-               return net_stats;
+               return;
 
        net_stats->rx_packets =
                GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
@@ -6891,7 +6891,6 @@ bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
                GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
                GET_32BIT_NET_STATS(stat_FwRxDrop);
 
-       return net_stats;
 }
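[Editor's note: this follows the tree-wide change of ndo_get_stats64 from returning a stats pointer to returning void; drivers now simply fill the caller-provided structure. The converted shape (foo_ is a placeholder name):

	static void foo_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
	{
		/* accumulate counters into *stats; nothing to return */
	}
]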
 
 /* All ethtool functions called with rtnl_lock */
index 3e199d3e461ef5dd237e3176bcb666f878ac9c3a..9e8c06130c092d3f061089448797c1da74e15043 100644 (file)
@@ -549,14 +549,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        struct bnx2x_alloc_pool *pool = &fp->page_pool;
        dma_addr_t mapping;
 
-       if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
-
-               /* put page reference used by the memory pool, since we
-                * won't be using this page as the mempool anymore.
-                */
-               if (pool->page)
-                       put_page(pool->page);
-
+       if (!pool->page) {
                pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
                if (unlikely(!pool->page))
                        return -ENOMEM;
@@ -571,7 +564,6 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                return -ENOMEM;
        }
 
-       get_page(pool->page);
        sw_buf->page = pool->page;
        sw_buf->offset = pool->offset;
 
@@ -581,7 +573,10 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));
 
        pool->offset += SGE_PAGE_SIZE;
-
+       if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
+               get_page(pool->page);
+       else
+               pool->page = NULL;
        return 0;
 }
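[Editor's note: the rework changes the reference discipline: instead of taking an extra reference on every allocation and dropping one when the page is retired, the pool takes a reference only while another SGE-sized chunk still fits, and simply forgets the page once it is exhausted, leaving the last reference with the RX buffer. A standalone model of the flow (helper names are illustrative):

	page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);	/* refcount 1 */
	offset = 0;
	while (page) {
		hand_chunk_to_rx_buf(page, offset);	/* hypothetical consumer */
		offset += SGE_PAGE_SIZE;
		if (PAGE_SIZE - offset >= SGE_PAGE_SIZE)
			get_page(page);		/* pool keeps a reference */
		else
			page = NULL;		/* pool gives up ownership */
	}
]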
 
@@ -3229,7 +3224,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
                 * has been updated when NAPI was scheduled.
                 */
                if (IS_FCOE_FP(fp)) {
-                       napi_complete(napi);
+                       napi_complete_done(napi, rx_work_done);
                } else {
                        bnx2x_update_fpsb_idx(fp);
                        /* bnx2x_has_rx_work() reads the status block,
index 5f19427c7b278b99ee34a20b4d854e1d1293578b..43423744fdfa8151b2312bc84b41267965d6c92c 100644 (file)
@@ -216,165 +216,184 @@ static int bnx2x_get_port_type(struct bnx2x *bp)
        return port_type;
 }
 
-static int bnx2x_get_vf_settings(struct net_device *dev,
-                                struct ethtool_cmd *cmd)
+static int bnx2x_get_vf_link_ksettings(struct net_device *dev,
+                                      struct ethtool_link_ksettings *cmd)
 {
        struct bnx2x *bp = netdev_priv(dev);
+       u32 supported, advertising;
+
+       ethtool_convert_link_mode_to_legacy_u32(&supported,
+                                               cmd->link_modes.supported);
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
 
        if (bp->state == BNX2X_STATE_OPEN) {
                if (test_bit(BNX2X_LINK_REPORT_FD,
                             &bp->vf_link_vars.link_report_flags))
-                       cmd->duplex = DUPLEX_FULL;
+                       cmd->base.duplex = DUPLEX_FULL;
                else
-                       cmd->duplex = DUPLEX_HALF;
+                       cmd->base.duplex = DUPLEX_HALF;
 
-               ethtool_cmd_speed_set(cmd, bp->vf_link_vars.line_speed);
+               cmd->base.speed = bp->vf_link_vars.line_speed;
        } else {
-               cmd->duplex = DUPLEX_UNKNOWN;
-               ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+               cmd->base.duplex = DUPLEX_UNKNOWN;
+               cmd->base.speed = SPEED_UNKNOWN;
        }
 
-       cmd->port               = PORT_OTHER;
-       cmd->phy_address        = 0;
-       cmd->transceiver        = XCVR_INTERNAL;
-       cmd->autoneg            = AUTONEG_DISABLE;
-       cmd->maxtxpkt           = 0;
-       cmd->maxrxpkt           = 0;
+       cmd->base.port          = PORT_OTHER;
+       cmd->base.phy_address   = 0;
+       cmd->base.autoneg       = AUTONEG_DISABLE;
 
        DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
           "  supported 0x%x  advertising 0x%x  speed %u\n"
-          "  duplex %d  port %d  phy_address %d  transceiver %d\n"
-          "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
-          cmd->cmd, cmd->supported, cmd->advertising,
-          ethtool_cmd_speed(cmd),
-          cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
-          cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+          "  duplex %d  port %d  phy_address %d\n"
+          "  autoneg %d\n",
+          cmd->base.cmd, supported, advertising,
+          cmd->base.speed,
+          cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
+          cmd->base.autoneg);
 
        return 0;
 }
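[Editor's note: the ksettings API stores link modes in bitmaps rather than u32 masks; for drivers still tracking SUPPORTED_*/ADVERTISED_* flags, the two ethtool_convert_* helpers seen here translate in either direction. A minimal get-side sketch:

	u32 supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
]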
 
-static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int bnx2x_get_link_ksettings(struct net_device *dev,
+                                   struct ethtool_link_ksettings *cmd)
 {
        struct bnx2x *bp = netdev_priv(dev);
        int cfg_idx = bnx2x_get_link_cfg_idx(bp);
        u32 media_type;
+       u32 supported, advertising, lp_advertising;
+
+       ethtool_convert_link_mode_to_legacy_u32(&lp_advertising,
+                                               cmd->link_modes.lp_advertising);
 
        /* Dual Media boards present all available port types */
-       cmd->supported = bp->port.supported[cfg_idx] |
+       supported = bp->port.supported[cfg_idx] |
                (bp->port.supported[cfg_idx ^ 1] &
                 (SUPPORTED_TP | SUPPORTED_FIBRE));
-       cmd->advertising = bp->port.advertising[cfg_idx];
+       advertising = bp->port.advertising[cfg_idx];
        media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type;
        if (media_type == ETH_PHY_SFP_1G_FIBER) {
-               cmd->supported &= ~(SUPPORTED_10000baseT_Full);
-               cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
+               supported &= ~(SUPPORTED_10000baseT_Full);
+               advertising &= ~(ADVERTISED_10000baseT_Full);
        }
 
        if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
            !(bp->flags & MF_FUNC_DIS)) {
-               cmd->duplex = bp->link_vars.duplex;
+               cmd->base.duplex = bp->link_vars.duplex;
 
                if (IS_MF(bp) && !BP_NOMCP(bp))
-                       ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
+                       cmd->base.speed = bnx2x_get_mf_speed(bp);
                else
-                       ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
+                       cmd->base.speed = bp->link_vars.line_speed;
        } else {
-               cmd->duplex = DUPLEX_UNKNOWN;
-               ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+               cmd->base.duplex = DUPLEX_UNKNOWN;
+               cmd->base.speed = SPEED_UNKNOWN;
        }
 
-       cmd->port = bnx2x_get_port_type(bp);
+       cmd->base.port = bnx2x_get_port_type(bp);
 
-       cmd->phy_address = bp->mdio.prtad;
-       cmd->transceiver = XCVR_INTERNAL;
+       cmd->base.phy_address = bp->mdio.prtad;
 
        if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
-               cmd->autoneg = AUTONEG_ENABLE;
+               cmd->base.autoneg = AUTONEG_ENABLE;
        else
-               cmd->autoneg = AUTONEG_DISABLE;
+               cmd->base.autoneg = AUTONEG_DISABLE;
 
        /* Publish LP advertised speeds and FC */
        if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
                u32 status = bp->link_vars.link_status;
 
-               cmd->lp_advertising |= ADVERTISED_Autoneg;
+               lp_advertising |= ADVERTISED_Autoneg;
                if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE)
-                       cmd->lp_advertising |= ADVERTISED_Pause;
+                       lp_advertising |= ADVERTISED_Pause;
                if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
-                       cmd->lp_advertising |= ADVERTISED_Asym_Pause;
+                       lp_advertising |= ADVERTISED_Asym_Pause;
 
                if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE)
-                       cmd->lp_advertising |= ADVERTISED_10baseT_Half;
+                       lp_advertising |= ADVERTISED_10baseT_Half;
                if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE)
-                       cmd->lp_advertising |= ADVERTISED_10baseT_Full;
+                       lp_advertising |= ADVERTISED_10baseT_Full;
                if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE)
-                       cmd->lp_advertising |= ADVERTISED_100baseT_Half;
+                       lp_advertising |= ADVERTISED_100baseT_Half;
                if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE)
-                       cmd->lp_advertising |= ADVERTISED_100baseT_Full;
+                       lp_advertising |= ADVERTISED_100baseT_Full;
                if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE)
-                       cmd->lp_advertising |= ADVERTISED_1000baseT_Half;
+                       lp_advertising |= ADVERTISED_1000baseT_Half;
                if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) {
                        if (media_type == ETH_PHY_KR) {
-                               cmd->lp_advertising |=
+                               lp_advertising |=
                                        ADVERTISED_1000baseKX_Full;
                        } else {
-                               cmd->lp_advertising |=
+                               lp_advertising |=
                                        ADVERTISED_1000baseT_Full;
                        }
                }
                if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE)
-                       cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
+                       lp_advertising |= ADVERTISED_2500baseX_Full;
                if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) {
                        if (media_type == ETH_PHY_KR) {
-                               cmd->lp_advertising |=
+                               lp_advertising |=
                                        ADVERTISED_10000baseKR_Full;
                        } else {
-                               cmd->lp_advertising |=
+                               lp_advertising |=
                                        ADVERTISED_10000baseT_Full;
                        }
                }
                if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
-                       cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
+                       lp_advertising |= ADVERTISED_20000baseKR2_Full;
        }
 
-       cmd->maxtxpkt = 0;
-       cmd->maxrxpkt = 0;
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+                                               lp_advertising);
 
        DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
           "  supported 0x%x  advertising 0x%x  speed %u\n"
-          "  duplex %d  port %d  phy_address %d  transceiver %d\n"
-          "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
-          cmd->cmd, cmd->supported, cmd->advertising,
-          ethtool_cmd_speed(cmd),
-          cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
-          cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+          "  duplex %d  port %d  phy_address %d\n"
+          "  autoneg %d\n",
+          cmd->base.cmd, supported, advertising,
+          cmd->base.speed,
+          cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
+          cmd->base.autoneg);
 
        return 0;
 }
 
-static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int bnx2x_set_link_ksettings(struct net_device *dev,
+                                   const struct ethtool_link_ksettings *cmd)
 {
        struct bnx2x *bp = netdev_priv(dev);
        u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
        u32 speed, phy_idx;
+       u32 supported;
+       u8 duplex = cmd->base.duplex;
+
+       ethtool_convert_link_mode_to_legacy_u32(&supported,
+                                               cmd->link_modes.supported);
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
 
        if (IS_MF_SD(bp))
                return 0;
 
        DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
           "  supported 0x%x  advertising 0x%x  speed %u\n"
-          "  duplex %d  port %d  phy_address %d  transceiver %d\n"
-          "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
-          cmd->cmd, cmd->supported, cmd->advertising,
-          ethtool_cmd_speed(cmd),
-          cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
-          cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+          "  duplex %d  port %d  phy_address %d\n"
+          "  autoneg %d\n",
+          cmd->base.cmd, supported, advertising,
+          cmd->base.speed,
+          cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
+          cmd->base.autoneg);
 
-       speed = ethtool_cmd_speed(cmd);
+       speed = cmd->base.speed;
 
        /* If we received a request for an unknown duplex, assume full */
-       if (cmd->duplex == DUPLEX_UNKNOWN)
-               cmd->duplex = DUPLEX_FULL;
+       if (duplex == DUPLEX_UNKNOWN)
+               duplex = DUPLEX_FULL;
 
        if (IS_MF_SI(bp)) {
                u32 part;
@@ -410,8 +429,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
        cfg_idx = bnx2x_get_link_cfg_idx(bp);
        old_multi_phy_config = bp->link_params.multi_phy_config;
-       if (cmd->port != bnx2x_get_port_type(bp)) {
-               switch (cmd->port) {
+       if (cmd->base.port != bnx2x_get_port_type(bp)) {
+               switch (cmd->base.port) {
                case PORT_TP:
                        if (!(bp->port.supported[0] & SUPPORTED_TP ||
                              bp->port.supported[1] & SUPPORTED_TP)) {
@@ -461,7 +480,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        bp->link_params.multi_phy_config = old_multi_phy_config;
        DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx);
 
-       if (cmd->autoneg == AUTONEG_ENABLE) {
+       if (cmd->base.autoneg == AUTONEG_ENABLE) {
                u32 an_supported_speed = bp->port.supported[cfg_idx];
                if (bp->link_params.phy[EXT_PHY1].type ==
                    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
@@ -473,51 +492,51 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                }
 
                /* advertise the requested speed and duplex if supported */
-               if (cmd->advertising & ~an_supported_speed) {
+               if (advertising & ~an_supported_speed) {
                        DP(BNX2X_MSG_ETHTOOL,
                           "Advertisement parameters are not supported\n");
                        return -EINVAL;
                }
 
                bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
-               bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+               bp->link_params.req_duplex[cfg_idx] = duplex;
                bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
-                                        cmd->advertising);
-               if (cmd->advertising) {
+                                        advertising);
+               if (advertising) {
 
                        bp->link_params.speed_cap_mask[cfg_idx] = 0;
-                       if (cmd->advertising & ADVERTISED_10baseT_Half) {
+                       if (advertising & ADVERTISED_10baseT_Half) {
                                bp->link_params.speed_cap_mask[cfg_idx] |=
                                PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
                        }
-                       if (cmd->advertising & ADVERTISED_10baseT_Full)
+                       if (advertising & ADVERTISED_10baseT_Full)
                                bp->link_params.speed_cap_mask[cfg_idx] |=
                                PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
 
-                       if (cmd->advertising & ADVERTISED_100baseT_Full)
+                       if (advertising & ADVERTISED_100baseT_Full)
                                bp->link_params.speed_cap_mask[cfg_idx] |=
                                PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
 
-                       if (cmd->advertising & ADVERTISED_100baseT_Half) {
+                       if (advertising & ADVERTISED_100baseT_Half) {
                                bp->link_params.speed_cap_mask[cfg_idx] |=
                                     PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
                        }
-                       if (cmd->advertising & ADVERTISED_1000baseT_Half) {
+                       if (advertising & ADVERTISED_1000baseT_Half) {
                                bp->link_params.speed_cap_mask[cfg_idx] |=
                                        PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
                        }
-                       if (cmd->advertising & (ADVERTISED_1000baseT_Full |
+                       if (advertising & (ADVERTISED_1000baseT_Full |
                                                ADVERTISED_1000baseKX_Full))
                                bp->link_params.speed_cap_mask[cfg_idx] |=
                                        PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
 
-                       if (cmd->advertising & (ADVERTISED_10000baseT_Full |
+                       if (advertising & (ADVERTISED_10000baseT_Full |
                                                ADVERTISED_10000baseKX4_Full |
                                                ADVERTISED_10000baseKR_Full))
                                bp->link_params.speed_cap_mask[cfg_idx] |=
                                        PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
 
-                       if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
+                       if (advertising & ADVERTISED_20000baseKR2_Full)
                                bp->link_params.speed_cap_mask[cfg_idx] |=
                                        PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
                }
@@ -525,7 +544,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                /* advertise the requested speed and duplex if supported */
                switch (speed) {
                case SPEED_10:
-                       if (cmd->duplex == DUPLEX_FULL) {
+                       if (duplex == DUPLEX_FULL) {
                                if (!(bp->port.supported[cfg_idx] &
                                      SUPPORTED_10baseT_Full)) {
                                        DP(BNX2X_MSG_ETHTOOL,
@@ -549,7 +568,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        break;
 
                case SPEED_100:
-                       if (cmd->duplex == DUPLEX_FULL) {
+                       if (duplex == DUPLEX_FULL) {
                                if (!(bp->port.supported[cfg_idx] &
                                                SUPPORTED_100baseT_Full)) {
                                        DP(BNX2X_MSG_ETHTOOL,
@@ -573,7 +592,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        break;
 
                case SPEED_1000:
-                       if (cmd->duplex != DUPLEX_FULL) {
+                       if (duplex != DUPLEX_FULL) {
                                DP(BNX2X_MSG_ETHTOOL,
                                   "1G half not supported\n");
                                return -EINVAL;
@@ -596,7 +615,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        break;
 
                case SPEED_2500:
-                       if (cmd->duplex != DUPLEX_FULL) {
+                       if (duplex != DUPLEX_FULL) {
                                DP(BNX2X_MSG_ETHTOOL,
                                   "2.5G half not supported\n");
                                return -EINVAL;
@@ -614,7 +633,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        break;
 
                case SPEED_10000:
-                       if (cmd->duplex != DUPLEX_FULL) {
+                       if (duplex != DUPLEX_FULL) {
                                DP(BNX2X_MSG_ETHTOOL,
                                   "10G half not supported\n");
                                return -EINVAL;
@@ -644,7 +663,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                }
 
                bp->link_params.req_line_speed[cfg_idx] = speed;
-               bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+               bp->link_params.req_duplex[cfg_idx] = duplex;
                bp->port.advertising[cfg_idx] = advertising;
        }
 
@@ -3605,8 +3624,6 @@ static int bnx2x_get_ts_info(struct net_device *dev,
 }
 
 static const struct ethtool_ops bnx2x_ethtool_ops = {
-       .get_settings           = bnx2x_get_settings,
-       .set_settings           = bnx2x_set_settings,
        .get_drvinfo            = bnx2x_get_drvinfo,
        .get_regs_len           = bnx2x_get_regs_len,
        .get_regs               = bnx2x_get_regs,
@@ -3646,10 +3663,11 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
        .get_eee                = bnx2x_get_eee,
        .set_eee                = bnx2x_set_eee,
        .get_ts_info            = bnx2x_get_ts_info,
+       .get_link_ksettings     = bnx2x_get_link_ksettings,
+       .set_link_ksettings     = bnx2x_set_link_ksettings,
 };
 
 static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
-       .get_settings           = bnx2x_get_vf_settings,
        .get_drvinfo            = bnx2x_get_drvinfo,
        .get_msglevel           = bnx2x_get_msglevel,
        .set_msglevel           = bnx2x_set_msglevel,
@@ -3667,6 +3685,7 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
        .set_rxfh               = bnx2x_set_rxfh,
        .get_channels           = bnx2x_get_channels,
        .set_channels           = bnx2x_set_channels,
+       .get_link_ksettings     = bnx2x_get_vf_link_ksettings,
 };
 
 void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
index 6082ed1b5ea08e7f79b3c84ef24277a688117938..a7ca45b251cb91157c04b6150a4fcf065ace3174 100644 (file)
@@ -1,3 +1,3 @@
 obj-$(CONFIG_BNXT) += bnxt_en.o
 
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o
index 4fcc6a84a087974e5d73042ce1af98268f59bcde..71f9a1894db95652e65a4399af95ea98791221d0 100644 (file)
@@ -1,6 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #include <linux/if.h>
 #include <linux/if_vlan.h>
 #include <linux/rtc.h>
+#include <linux/bpf.h>
 #include <net/ip.h>
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
 #include <net/udp_tunnel.h>
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#include <net/busy_poll.h>
-#endif
 #include <linux/workqueue.h>
 #include <linux/prefetch.h>
 #include <linux/cache.h>
@@ -56,6 +55,7 @@
 #include "bnxt_sriov.h"
 #include "bnxt_ethtool.h"
 #include "bnxt_dcb.h"
+#include "bnxt_xdp.h"
 
 #define BNXT_TX_TIMEOUT                (5 * HZ)
 
@@ -99,6 +99,8 @@ enum board_idx {
        BCM57407_NPAR,
        BCM57414_NPAR,
        BCM57416_NPAR,
+       BCM57452,
+       BCM57454,
        NETXTREME_E_VF,
        NETXTREME_C_VF,
 };
@@ -133,6 +135,8 @@ static const struct {
        { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
+       { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
+       { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        { "Broadcom NetXtreme-E Ethernet Virtual Function" },
        { "Broadcom NetXtreme-C Ethernet Virtual Function" },
 };
@@ -168,6 +172,8 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
+       { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
+       { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
 #ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
@@ -213,16 +219,7 @@ static bool bnxt_vf_pciid(enum board_idx idx)
 #define BNXT_CP_DB_IRQ_DIS(db)                                         \
                writel(DB_CP_IRQ_DIS_FLAGS, db)
 
-static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
-{
-       /* Tell compiler to fetch tx indices from memory. */
-       barrier();
-
-       return bp->tx_ring_size -
-               ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
-}
-
-static const u16 bnxt_lhint_arr[] = {
+const u16 bnxt_lhint_arr[] = {
        TX_BD_FLAGS_LHINT_512_AND_SMALLER,
        TX_BD_FLAGS_LHINT_512_TO_1023,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
@@ -265,8 +262,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
-       txr = &bp->tx_ring[i];
        txq = netdev_get_tx_queue(dev, i);
+       txr = &bp->tx_ring[bp->tx_ring_map[i]];
        prod = txr->tx_prod;
 
        free_size = bnxt_tx_avail(bp, txr);
@@ -512,8 +509,7 @@ tx_dma_error:
 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 {
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
-       int index = txr - &bp->tx_ring[0];
-       struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
+       struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
        u16 cons = txr->tx_cons;
        struct pci_dev *pdev = bp->pdev;
        int i;
@@ -576,6 +572,25 @@ next_tx_int:
        }
 }
 
+static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
+                                        gfp_t gfp)
+{
+       struct device *dev = &bp->pdev->dev;
+       struct page *page;
+
+       page = alloc_page(gfp);
+       if (!page)
+               return NULL;
+
+       *mapping = dma_map_page(dev, page, 0, PAGE_SIZE, bp->rx_dir);
+       if (dma_mapping_error(dev, *mapping)) {
+               __free_page(page);
+               return NULL;
+       }
+       *mapping += bp->rx_dma_offset;
+       return page;
+}
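[Editor's note: __bnxt_alloc_rx_page() folds bp->rx_dma_offset into the reported mapping, so consumers must subtract it again before unmapping, as bnxt_rx_page_skb() does further down. A hedged sketch of the symmetric teardown (this helper is illustrative, not taken from the driver):

	static void __bnxt_free_rx_page(struct bnxt *bp, struct page *page,
					dma_addr_t mapping)
	{
		dma_unmap_page(&bp->pdev->dev, mapping - bp->rx_dma_offset,
			       PAGE_SIZE, bp->rx_dir);
		__free_page(page);
	}
]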
+
 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
                                       gfp_t gfp)
 {
@@ -586,8 +601,8 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
        if (!data)
                return NULL;
 
-       *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
-                                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
+       *mapping = dma_map_single(&pdev->dev, data + bp->rx_dma_offset,
+                                 bp->rx_buf_use_size, bp->rx_dir);
 
        if (dma_mapping_error(&pdev->dev, *mapping)) {
                kfree(data);
@@ -596,29 +611,37 @@ static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
        return data;
 }
 
-static inline int bnxt_alloc_rx_data(struct bnxt *bp,
-                                    struct bnxt_rx_ring_info *rxr,
-                                    u16 prod, gfp_t gfp)
+int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+                      u16 prod, gfp_t gfp)
 {
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
-       u8 *data;
        dma_addr_t mapping;
 
-       data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
-       if (!data)
-               return -ENOMEM;
+       if (BNXT_RX_PAGE_MODE(bp)) {
+               struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
 
-       rx_buf->data = data;
-       dma_unmap_addr_set(rx_buf, mapping, mapping);
+               if (!page)
+                       return -ENOMEM;
 
-       rxbd->rx_bd_haddr = cpu_to_le64(mapping);
+               rx_buf->data = page;
+               rx_buf->data_ptr = page_address(page) + bp->rx_offset;
+       } else {
+               u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
 
+               if (!data)
+                       return -ENOMEM;
+
+               rx_buf->data = data;
+               rx_buf->data_ptr = data + bp->rx_offset;
+       }
+       rx_buf->mapping = mapping;
+
+       rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        return 0;
 }
 
-static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
-                              u8 *data)
+void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
 {
        u16 prod = rxr->rx_prod;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
@@ -628,9 +651,9 @@ static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
        cons_rx_buf = &rxr->rx_buf_ring[cons];
 
        prod_rx_buf->data = data;
+       prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
 
-       dma_unmap_addr_set(prod_rx_buf, mapping,
-                          dma_unmap_addr(cons_rx_buf, mapping));
+       prod_rx_buf->mapping = cons_rx_buf->mapping;
 
        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
@@ -756,13 +779,60 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
        rxr->rx_sw_agg_prod = sw_prod;
 }
 
+static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
+                                       struct bnxt_rx_ring_info *rxr,
+                                       u16 cons, void *data, u8 *data_ptr,
+                                       dma_addr_t dma_addr,
+                                       unsigned int offset_and_len)
+{
+       unsigned int payload = offset_and_len >> 16;
+       unsigned int len = offset_and_len & 0xffff;
+       struct skb_frag_struct *frag;
+       struct page *page = data;
+       u16 prod = rxr->rx_prod;
+       struct sk_buff *skb;
+       int off, err;
+
+       err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+       if (unlikely(err)) {
+               bnxt_reuse_rx_data(rxr, cons, data);
+               return NULL;
+       }
+       dma_addr -= bp->rx_dma_offset;
+       dma_unmap_page(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir);
+
+       if (unlikely(!payload))
+               payload = eth_get_headlen(data_ptr, len);
+
+       skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
+       if (!skb) {
+               __free_page(page);
+               return NULL;
+       }
+
+       off = (void *)data_ptr - page_address(page);
+       skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
+       memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
+              payload + NET_IP_ALIGN);
+
+       frag = &skb_shinfo(skb)->frags[0];
+       skb_frag_size_sub(frag, payload);
+       frag->page_offset += payload;
+       skb->data_len -= payload;
+       skb->tail += payload;
+
+       return skb;
+}
+
 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr, u16 cons,
-                                  u16 prod, u8 *data, dma_addr_t dma_addr,
-                                  unsigned int len)
+                                  void *data, u8 *data_ptr,
+                                  dma_addr_t dma_addr,
+                                  unsigned int offset_and_len)
 {
-       int err;
+       u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
+       int err;
 
        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
@@ -772,14 +842,14 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
 
        skb = build_skb(data, 0);
        dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
-                        PCI_DMA_FROMDEVICE);
+                        bp->rx_dir);
        if (!skb) {
                kfree(data);
                return NULL;
        }
 
-       skb_reserve(skb, BNXT_RX_OFFSET);
-       skb_put(skb, len);
+       skb_reserve(skb, bp->rx_offset);
+       skb_put(skb, offset_and_len & 0xffff);
        return skb;
 }
 
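bnxt_rx_page_skb() above is a header copy-break receive path: the full page is attached as fragment 0, only the protocol headers are copied into the skb's linear area, and the fragment is then trimmed by the amount pulled. A condensed sketch of the same idea, with hypothetical demo_* names and the NET_IP_ALIGN handling elided:

	#include <linux/skbuff.h>
	#include <linux/etherdevice.h>

	static struct sk_buff *demo_page_to_skb(struct napi_struct *napi,
						struct page *page, void *data,
						unsigned int len)
	{
		unsigned int hdr_len = eth_get_headlen(data, len);
		struct sk_buff *skb = napi_alloc_skb(napi, hdr_len);

		if (!skb)
			return NULL;

		/* Whole page becomes frag 0; headers go linear. */
		skb_add_rx_frag(skb, 0, page, data - page_address(page),
				len, PAGE_SIZE);
		memcpy(skb->data, data, hdr_len);

		/* Shrink the frag by what was pulled into the linear area. */
		skb_shinfo(skb)->frags[0].page_offset += hdr_len;
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hdr_len);
		skb->data_len -= hdr_len;
		skb->tail += hdr_len;
		return skb;
	}
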
@@ -815,7 +885,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
                 * a sw_prod index that equals the cons index, so we
                 * need to clear the cons entry now.
                 */
-               mapping = dma_unmap_addr(cons_rx_buf, mapping);
+               mapping = cons_rx_buf->mapping;
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
 
@@ -878,14 +948,14 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
        if (!skb)
                return NULL;
 
-       dma_sync_single_for_cpu(&pdev->dev, mapping,
-                               bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
+       dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
+                               bp->rx_dir);
 
-       memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
+       memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
+              len + NET_IP_ALIGN);
 
-       dma_sync_single_for_device(&pdev->dev, mapping,
-                                  bp->rx_copy_thresh,
-                                  PCI_DMA_FROMDEVICE);
+       dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
+                                  bp->rx_dir);
 
        skb_put(skb, len);
        return skb;
@@ -954,17 +1024,19 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
        }
 
        prod_rx_buf->data = tpa_info->data;
+       prod_rx_buf->data_ptr = tpa_info->data_ptr;
 
        mapping = tpa_info->mapping;
-       dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+       prod_rx_buf->mapping = mapping;
 
        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
 
        prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
 
        tpa_info->data = cons_rx_buf->data;
+       tpa_info->data_ptr = cons_rx_buf->data_ptr;
        cons_rx_buf->data = NULL;
-       tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
+       tpa_info->mapping = cons_rx_buf->mapping;
 
        tpa_info->len =
                le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
@@ -1130,7 +1202,6 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
                dev_kfree_skb_any(skb);
                return NULL;
        }
-       tcp_gro_complete(skb);
 
        if (nw_off) { /* tunnel */
                struct udphdr *uh = NULL;
@@ -1180,6 +1251,8 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
                       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
                      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
        skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
+       if (likely(skb))
+               tcp_gro_complete(skb);
 #endif
        return skb;
 }
@@ -1189,17 +1262,18 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
                                           u32 *raw_cons,
                                           struct rx_tpa_end_cmp *tpa_end,
                                           struct rx_tpa_end_cmp_ext *tpa_end1,
-                                          bool *agg_event)
+                                          u8 *event)
 {
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u8 agg_id = TPA_END_AGG_ID(tpa_end);
-       u8 *data, agg_bufs;
+       u8 *data_ptr, agg_bufs;
        u16 cp_cons = RING_CMP(*raw_cons);
        unsigned int len;
        struct bnxt_tpa_info *tpa_info;
        dma_addr_t mapping;
        struct sk_buff *skb;
+       void *data;
 
        if (unlikely(bnapi->in_reset)) {
                int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
@@ -1211,7 +1285,8 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
 
        tpa_info = &rxr->rx_tpa[agg_id];
        data = tpa_info->data;
-       prefetch(data);
+       data_ptr = tpa_info->data_ptr;
+       prefetch(data_ptr);
        len = tpa_info->len;
        mapping = tpa_info->mapping;
 
@@ -1222,7 +1297,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
                        return ERR_PTR(-EBUSY);
 
-               *agg_event = true;
+               *event |= BNXT_AGG_EVENT;
                cp_cons = NEXT_CMP(cp_cons);
        }
 
@@ -1234,7 +1309,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
        }
 
        if (len <= bp->rx_copy_thresh) {
-               skb = bnxt_copy_skb(bnapi, data, len, mapping);
+               skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
                if (!skb) {
                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                        return NULL;
@@ -1250,18 +1325,19 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
                }
 
                tpa_info->data = new_data;
+               tpa_info->data_ptr = new_data + bp->rx_offset;
                tpa_info->mapping = new_mapping;
 
                skb = build_skb(data, 0);
                dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
-                                PCI_DMA_FROMDEVICE);
+                                bp->rx_dir);
 
                if (!skb) {
                        kfree(data);
                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                        return NULL;
                }
-               skb_reserve(skb, BNXT_RX_OFFSET);
+               skb_reserve(skb, bp->rx_offset);
                skb_put(skb, len);
        }
 
@@ -1307,7 +1383,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
  * -EIO    - packet aborted due to hw error indicated in BD
  */
 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
-                      bool *agg_event)
+                      u8 *event)
 {
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
@@ -1318,10 +1394,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
        u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
        struct bnxt_sw_rx_bd *rx_buf;
        unsigned int len;
-       u8 *data, agg_bufs, cmp_type;
+       u8 *data_ptr, agg_bufs, cmp_type;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
+       void *data;
        int rc = 0;
+       u32 misc;
 
        rxcmp = (struct rx_cmp *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
@@ -1342,13 +1420,13 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
                bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
                               (struct rx_tpa_start_cmp_ext *)rxcmp1);
 
+               *event |= BNXT_RX_EVENT;
                goto next_rx_no_prod;
 
        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
                skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
                                   (struct rx_tpa_end_cmp *)rxcmp,
-                                  (struct rx_tpa_end_cmp_ext *)rxcmp1,
-                                  agg_event);
+                                  (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
 
                if (unlikely(IS_ERR(skb)))
                        return -EBUSY;
@@ -1356,37 +1434,36 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
                rc = -ENOMEM;
                if (likely(skb)) {
                        skb_record_rx_queue(skb, bnapi->index);
-                       skb_mark_napi_id(skb, &bnapi->napi);
-                       if (bnxt_busy_polling(bnapi))
-                               netif_receive_skb(skb);
-                       else
-                               napi_gro_receive(&bnapi->napi, skb);
+                       napi_gro_receive(&bnapi->napi, skb);
                        rc = 1;
                }
+               *event |= BNXT_RX_EVENT;
                goto next_rx_no_prod;
        }
 
        cons = rxcmp->rx_cmp_opaque;
        rx_buf = &rxr->rx_buf_ring[cons];
        data = rx_buf->data;
+       data_ptr = rx_buf->data_ptr;
        if (unlikely(cons != rxr->rx_next_cons)) {
                int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
 
                bnxt_sched_reset(bp, rxr);
                return rc1;
        }
-       prefetch(data);
+       prefetch(data_ptr);
 
-       agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
-                               RX_CMP_AGG_BUFS_SHIFT;
+       misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
+       agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
 
        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
                        return -EBUSY;
 
                cp_cons = NEXT_CMP(cp_cons);
-               *agg_event = true;
+               *event |= BNXT_AGG_EVENT;
        }
+       *event |= BNXT_RX_EVENT;
 
        rx_buf->data = NULL;
        if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
@@ -1399,17 +1476,29 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
        }
 
        len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
-       dma_addr = dma_unmap_addr(rx_buf, mapping);
+       dma_addr = rx_buf->mapping;
+
+       if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
+               rc = 1;
+               goto next_rx;
+       }
 
        if (len <= bp->rx_copy_thresh) {
-               skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
+               skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
                bnxt_reuse_rx_data(rxr, cons, data);
                if (!skb) {
                        rc = -ENOMEM;
                        goto next_rx;
                }
        } else {
-               skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
+               u32 payload;
+
+               if (rx_buf->data_ptr == data_ptr)
+                       payload = misc & RX_CMP_PAYLOAD_OFFSET;
+               else
+                       payload = 0;
+               skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
+                                     payload | len);
                if (!skb) {
                        rc = -ENOMEM;
                        goto next_rx;
@@ -1460,11 +1549,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
        }
 
        skb_record_rx_queue(skb, bnapi->index);
-       skb_mark_napi_id(skb, &bnapi->napi);
-       if (bnxt_busy_polling(bnapi))
-               netif_receive_skb(skb);
-       else
-               napi_gro_receive(&bnapi->napi, skb);
+       napi_gro_receive(&bnapi->napi, skb);
        rc = 1;
 
 next_rx:
@@ -1637,8 +1722,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
        u32 cons;
        int tx_pkts = 0;
        int rx_pkts = 0;
-       bool rx_event = false;
-       bool agg_event = false;
+       u8 event = 0;
        struct tx_cmp *txcmp;
 
        while (1) {
@@ -1660,12 +1744,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                        if (unlikely(tx_pkts > bp->tx_wake_thresh))
                                rx_pkts = budget;
                } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
-                       rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+                       rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
                        if (likely(rc >= 0))
                                rx_pkts += rc;
                        else if (rc == -EBUSY)  /* partial completion */
                                break;
-                       rx_event = true;
                } else if (unlikely((TX_CMP_TYPE(txcmp) ==
                                     CMPL_BASE_TYPE_HWRM_DONE) ||
                                    (TX_CMP_TYPE(txcmp) ==
@@ -1680,6 +1763,18 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                        break;
        }
 
+       if (event & BNXT_TX_EVENT) {
+               struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+               void __iomem *db = txr->tx_doorbell;
+               u16 prod = txr->tx_prod;
+
+               /* Sync BD data before updating doorbell */
+               wmb();
+
+               writel(DB_KEY_TX | prod, db);
+               writel(DB_KEY_TX | prod, db);
+       }
+
        cpr->cp_raw_cons = raw_cons;
        /* ACK completion ring before freeing tx ring and producing new
         * buffers in rx/agg rings to prevent overflowing the completion
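Throughout this patch the separate bool out-parameters (rx_event, agg_event) are folded into a single u8 event bitmask, so one pointer can report any mix of RX, aggregation, and XDP-TX work back to bnxt_poll_work(), which then rings each doorbell at most once per poll. The shape of that pattern in isolation (flag values here are illustrative; the driver defines its own BNXT_*_EVENT constants):

	#include <linux/types.h>

	#define DEMO_RX_EVENT	0x01	/* rx ring produced new buffers */
	#define DEMO_AGG_EVENT	0x02	/* agg ring produced new buffers */
	#define DEMO_TX_EVENT	0x04	/* XDP_TX queued packets */

	static void demo_handle_pkt(u8 *event)
	{
		*event |= DEMO_RX_EVENT;	/* callee ORs in what it saw */
	}

	static unsigned int demo_poll_once(void)
	{
		unsigned int doorbells = 0;
		u8 event = 0;

		demo_handle_pkt(&event);

		/* One doorbell per category, however many packets arrived. */
		if (event & DEMO_RX_EVENT)
			doorbells++;
		if (event & DEMO_AGG_EVENT)
			doorbells++;
		if (event & DEMO_TX_EVENT)
			doorbells++;
		return doorbells;
	}
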
@@ -1688,14 +1783,14 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
        BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
 
        if (tx_pkts)
-               bnxt_tx_int(bp, bnapi, tx_pkts);
+               bnapi->tx_int(bp, bnapi, tx_pkts);
 
-       if (rx_event) {
+       if (event & BNXT_RX_EVENT) {
                struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
 
                writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
                writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
-               if (agg_event) {
+               if (event & BNXT_AGG_EVENT) {
                        writel(DB_KEY_RX | rxr->rx_agg_prod,
                               rxr->rx_agg_doorbell);
                        writel(DB_KEY_RX | rxr->rx_agg_prod,
@@ -1716,7 +1811,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
        u32 cp_cons, tmp_raw_cons;
        u32 raw_cons = cpr->cp_raw_cons;
        u32 rx_pkts = 0;
-       bool agg_event = false;
+       u8 event = 0;
 
        while (1) {
                int rc;
@@ -1740,7 +1835,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
                        rxcmp1->rx_cmp_cfa_code_errors_v2 |=
                                cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
 
-                       rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+                       rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
                        if (likely(rc == -EIO))
                                rx_pkts++;
                        else if (rc == -EBUSY)  /* partial completion */
@@ -1763,13 +1858,13 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
        writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
        writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
 
-       if (agg_event) {
+       if (event & BNXT_AGG_EVENT) {
                writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
                writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
        }
 
        if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rx_pkts);
                BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
        }
        return rx_pkts;
@@ -1782,9 +1877,6 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        int work_done = 0;
 
-       if (!bnxt_lock_napi(bnapi))
-               return budget;
-
        while (1) {
                work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
 
@@ -1792,42 +1884,16 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
                        break;
 
                if (!bnxt_has_work(bp, cpr)) {
-                       napi_complete(napi);
-                       BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+                       if (napi_complete_done(napi, work_done))
+                               BNXT_CP_DB_REARM(cpr->cp_doorbell,
+                                                cpr->cp_raw_cons);
                        break;
                }
        }
        mmiowb();
-       bnxt_unlock_napi(bnapi);
        return work_done;
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int bnxt_busy_poll(struct napi_struct *napi)
-{
-       struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
-       struct bnxt *bp = bnapi->bp;
-       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
-       int rx_work, budget = 4;
-
-       if (atomic_read(&bp->intr_sem) != 0)
-               return LL_FLUSH_FAILED;
-
-       if (!bp->link_info.link_up)
-               return LL_FLUSH_FAILED;
-
-       if (!bnxt_lock_poll(bnapi))
-               return LL_FLUSH_BUSY;
-
-       rx_work = bnxt_poll_work(bp, bnapi, budget);
-
-       BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
-
-       bnxt_unlock_poll(bnapi);
-       return rx_work;
-}
-#endif
-
 static void bnxt_free_tx_skbs(struct bnxt *bp)
 {
        int i, max_idx;
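
With the CONFIG_NET_RX_BUSY_POLL hooks and the per-ring NAPI locks removed, bnxt_poll() is the only polling entry point, and napi_complete_done() both reports the packet count to the core (for busy polling and interrupt moderation) and tells the driver whether it may re-arm its IRQ. A skeleton of that contract, with the device specifics stubbed out:

	#include <linux/netdevice.h>

	static int demo_process_rx(struct napi_struct *napi, int budget)
	{
		return 0;	/* stub: pretend no packets were found */
	}

	static void demo_rearm_irq(struct napi_struct *napi)
	{
		/* stub: write the device's interrupt-enable doorbell */
	}

	static int demo_poll(struct napi_struct *napi, int budget)
	{
		int work_done = demo_process_rx(napi, budget);

		/* Only re-enable interrupts if the core confirms we are done;
		 * napi_complete_done() returns false when, e.g., busy polling
		 * wants to keep this instance scheduled.
		 */
		if (work_done < budget && napi_complete_done(napi, work_done))
			demo_rearm_irq(napi);

		return work_done;
	}
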
@@ -1905,11 +1971,9 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
                                if (!data)
                                        continue;
 
-                               dma_unmap_single(
-                                       &pdev->dev,
-                                       dma_unmap_addr(tpa_info, mapping),
-                                       bp->rx_buf_use_size,
-                                       PCI_DMA_FROMDEVICE);
+                               dma_unmap_single(&pdev->dev, tpa_info->mapping,
+                                                bp->rx_buf_use_size,
+                                                bp->rx_dir);
 
                                tpa_info->data = NULL;
 
@@ -1919,19 +1983,20 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 
                for (j = 0; j < max_idx; j++) {
                        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
-                       u8 *data = rx_buf->data;
+                       void *data = rx_buf->data;
 
                        if (!data)
                                continue;
 
-                       dma_unmap_single(&pdev->dev,
-                                        dma_unmap_addr(rx_buf, mapping),
-                                        bp->rx_buf_use_size,
-                                        PCI_DMA_FROMDEVICE);
+                       dma_unmap_single(&pdev->dev, rx_buf->mapping,
+                                        bp->rx_buf_use_size, bp->rx_dir);
 
                        rx_buf->data = NULL;
 
-                       kfree(data);
+                       if (BNXT_RX_PAGE_MODE(bp))
+                               __free_page(data);
+                       else
+                               kfree(data);
                }
 
                for (j = 0; j < max_agg_idx; j++) {
@@ -1942,8 +2007,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
                        if (!page)
                                continue;
 
-                       dma_unmap_page(&pdev->dev,
-                                      dma_unmap_addr(rx_agg_buf, mapping),
+                       dma_unmap_page(&pdev->dev, rx_agg_buf->mapping,
                                       BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
 
                        rx_agg_buf->page = NULL;
@@ -2034,6 +2098,9 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
                struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
                struct bnxt_ring_struct *ring;
 
+               if (rxr->xdp_prog)
+                       bpf_prog_put(rxr->xdp_prog);
+
                kfree(rxr->rx_tpa);
                rxr->rx_tpa = NULL;
 
@@ -2172,6 +2239,8 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
                        memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
                }
                ring->queue_id = bp->q_info[j].queue_id;
+               if (i < bp->tx_nr_rings_xdp)
+                       continue;
                if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
                        j++;
        }
@@ -2319,6 +2388,15 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
        ring = &rxr->rx_ring_struct;
        bnxt_init_rxbd_pages(ring, type);
 
+       if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+               rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
+               if (IS_ERR(rxr->xdp_prog)) {
+                       int rc = PTR_ERR(rxr->xdp_prog);
+
+                       rxr->xdp_prog = NULL;
+                       return rc;
+               }
+       }
        prod = rxr->rx_prod;
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
@@ -2365,6 +2443,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
                                        return -ENOMEM;
 
                                rxr->rx_tpa[i].data = data;
+                               rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
                                rxr->rx_tpa[i].mapping = mapping;
                        }
                } else {
@@ -2380,6 +2459,14 @@ static int bnxt_init_rx_rings(struct bnxt *bp)
 {
        int i, rc = 0;
 
+       if (BNXT_RX_PAGE_MODE(bp)) {
+               bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
+               bp->rx_dma_offset = XDP_PACKET_HEADROOM;
+       } else {
+               bp->rx_offset = BNXT_RX_OFFSET;
+               bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
+       }
+
        for (i = 0; i < bp->rx_nr_rings; i++) {
                rc = bnxt_init_one_rx_ring(bp, i);
                if (rc)
@@ -2503,9 +2590,11 @@ static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
        return pages;
 }
 
-static void bnxt_set_tpa_flags(struct bnxt *bp)
+void bnxt_set_tpa_flags(struct bnxt *bp)
 {
        bp->flags &= ~BNXT_FLAG_TPA;
+       if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+               return;
        if (bp->dev->features & NETIF_F_LRO)
                bp->flags |= BNXT_FLAG_LRO;
        if (bp->dev->features & NETIF_F_GRO)
@@ -2535,7 +2624,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
                agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
 
        bp->flags &= ~BNXT_FLAG_JUMBO;
-       if (rx_space > PAGE_SIZE) {
+       if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
                u32 jumbo_factor;
 
                bp->flags |= BNXT_FLAG_JUMBO;
@@ -2587,6 +2676,27 @@ void bnxt_set_ring_params(struct bnxt *bp)
        bp->cp_ring_mask = bp->cp_bit - 1;
 }
 
+int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
+{
+       if (page_mode) {
+               if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
+                       return -EOPNOTSUPP;
+               bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
+               bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+               bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
+               bp->dev->hw_features &= ~NETIF_F_LRO;
+               bp->dev->features &= ~NETIF_F_LRO;
+               bp->rx_dir = DMA_BIDIRECTIONAL;
+               bp->rx_skb_func = bnxt_rx_page_skb;
+       } else {
+               bp->dev->max_mtu = BNXT_MAX_MTU;
+               bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
+               bp->rx_dir = DMA_FROM_DEVICE;
+               bp->rx_skb_func = bnxt_rx_skb;
+       }
+       return 0;
+}
+
 static void bnxt_free_vnic_attributes(struct bnxt *bp)
 {
        int i;
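
bnxt_set_rx_skb_mode() above switches the entire RX path by swapping one function pointer: bp->rx_skb_func becomes bnxt_rx_page_skb (page mode for XDP, DMA_BIDIRECTIONAL so programs may rewrite headers) or bnxt_rx_skb (kmalloc'd buffers, DMA_FROM_DEVICE), so bnxt_rx_pkt() never tests the mode per packet. The dispatch reduced to its essentials, with illustrative demo_* types:

	#include <linux/skbuff.h>
	#include <linux/dma-mapping.h>

	struct demo_nic;

	typedef struct sk_buff *(*demo_rx_skb_fn)(struct demo_nic *nic,
						  void *data, unsigned int len);

	struct demo_nic {
		demo_rx_skb_fn		rx_skb_func;	/* chosen at config time */
		enum dma_data_direction	rx_dir;
	};

	static void demo_set_rx_mode(struct demo_nic *nic, bool page_mode,
				     demo_rx_skb_fn page_fn,
				     demo_rx_skb_fn buf_fn)
	{
		nic->rx_dir = page_mode ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
		nic->rx_skb_func = page_mode ? page_fn : buf_fn;
	}

	/* Hot path: a single indirect call, no per-packet mode branch. */
	static struct sk_buff *demo_rx(struct demo_nic *nic, void *data,
				       unsigned int len)
	{
		return nic->rx_skb_func(nic, data, len);
	}
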
@@ -2669,6 +2779,10 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
                        goto out;
                }
 
+               if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
+                   !(vnic->flags & BNXT_VNIC_RSS_FLAG))
+                       continue;
+
                /* Allocate rss table and hash key */
                vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
                                                     &vnic->rss_table_dma_addr,
@@ -2892,6 +3006,8 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
                bnxt_free_stats(bp);
                bnxt_free_ring_grps(bp);
                bnxt_free_vnics(bp);
+               kfree(bp->tx_ring_map);
+               bp->tx_ring_map = NULL;
                kfree(bp->tx_ring);
                bp->tx_ring = NULL;
                kfree(bp->rx_ring);
@@ -2944,6 +3060,12 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
                if (!bp->tx_ring)
                        return -ENOMEM;
 
+               bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
+                                         GFP_KERNEL);
+
+               if (!bp->tx_ring_map)
+                       return -ENOMEM;
+
                if (bp->flags & BNXT_FLAG_SHARED_RINGS)
                        j = 0;
                else
@@ -2952,6 +3074,15 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
                for (i = 0; i < bp->tx_nr_rings; i++, j++) {
                        bp->tx_ring[i].bnapi = bp->bnapi[j];
                        bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
+                       bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
+                       if (i >= bp->tx_nr_rings_xdp) {
+                               bp->tx_ring[i].txq_index = i -
+                                       bp->tx_nr_rings_xdp;
+                               bp->bnapi[j]->tx_int = bnxt_tx_int;
+                       } else {
+                               bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
+                               bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
+                       }
                }
 
                rc = bnxt_alloc_stats(bp);
@@ -2993,6 +3124,45 @@ alloc_mem_err:
        return rc;
 }
 
+static void bnxt_disable_int(struct bnxt *bp)
+{
+       int i;
+
+       if (!bp->bnapi)
+               return;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+               BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+       }
+}
+
+static void bnxt_disable_int_sync(struct bnxt *bp)
+{
+       int i;
+
+       atomic_inc(&bp->intr_sem);
+
+       bnxt_disable_int(bp);
+       for (i = 0; i < bp->cp_nr_rings; i++)
+               synchronize_irq(bp->irq_tbl[i].vector);
+}
+
+static void bnxt_enable_int(struct bnxt *bp)
+{
+       int i;
+
+       atomic_set(&bp->intr_sem, 0);
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+
+               BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+       }
+}
+
 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
                            u16 cmpl_ring, u16 target_id)
 {
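
bnxt_disable_int_sync(), moved up and simplified here, is the standard IRQ quiesce idiom: bump intr_sem so new interrupts see the device as disabled, then synchronize_irq() on every vector to wait out any handler already in flight. A minimal sketch, assuming a driver-private semaphore counter and vector table like bnxt's:

	#include <linux/atomic.h>
	#include <linux/interrupt.h>

	struct demo_dev {
		atomic_t	intr_sem;	/* nonzero: handlers must bail */
		unsigned int	*vectors;	/* one IRQ vector per ring */
		int		nr_vectors;
	};

	static void demo_disable_int_sync(struct demo_dev *dd)
	{
		int i;

		/* New interrupts observe the "disabled" state first ... */
		atomic_inc(&dd->intr_sem);

		/* ... then wait for handlers that are already running. */
		for (i = 0; i < dd->nr_vectors; i++)
			synchronize_irq(dd->vectors[i]);
	}
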
@@ -3292,6 +3462,9 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
 
+#define BNXT_NTP_TUNNEL_FLTR_FLAG                              \
+               CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
+
 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
                                             struct bnxt_ntuple_filter *fltr)
 {
@@ -3312,10 +3485,31 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
        req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
        req.ip_protocol = keys->basic.ip_proto;
 
-       req.src_ipaddr[0] = keys->addrs.v4addrs.src;
-       req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
-       req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
-       req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+       if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
+               int i;
+
+               req.ethertype = htons(ETH_P_IPV6);
+               req.ip_addr_type =
+                       CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
+               *(struct in6_addr *)&req.src_ipaddr[0] =
+                       keys->addrs.v6addrs.src;
+               *(struct in6_addr *)&req.dst_ipaddr[0] =
+                       keys->addrs.v6addrs.dst;
+               for (i = 0; i < 4; i++) {
+                       req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+                       req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
+               }
+       } else {
+               req.src_ipaddr[0] = keys->addrs.v4addrs.src;
+               req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+               req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
+               req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
+       }
+       if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
+               req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
+               req.tunnel_type =
+                       CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
+       }
 
        req.src_port = keys->ports.src;
        req.src_port_mask = cpu_to_be16(0xffff);
@@ -3562,6 +3756,12 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
                req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
                req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
                                           VNIC_CFG_REQ_ENABLES_MRU);
+       } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
+               req.rss_rule =
+                       cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
+               req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
+                                          VNIC_CFG_REQ_ENABLES_MRU);
+               req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
        } else {
                req.rss_rule = cpu_to_le16(0xffff);
        }
@@ -3665,6 +3865,27 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
        return rc;
 }
 
+static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
+{
+       struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_vnic_qcaps_input req = {0};
+       int rc;
+
+       if (bp->hwrm_spec_code < 0x10600)
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc) {
+               if (resp->flags &
+                   cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
+                       bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
 {
        u16 i;
@@ -3768,7 +3989,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
                req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
                break;
        case HWRM_RING_ALLOC_CMPL:
-               req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
+               req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
                req.length = cpu_to_le32(bp->cp_ring_mask + 1);
                if (bp->flags & BNXT_FLAG_USING_MSIX)
                        req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
@@ -3787,7 +4008,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
 
        if (rc || err) {
                switch (ring_type) {
-               case RING_FREE_REQ_RING_TYPE_CMPL:
+               case RING_FREE_REQ_RING_TYPE_L2_CMPL:
                        netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
                                   rc, err);
                        return -1;
@@ -3811,6 +4032,30 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
        return rc;
 }
 
+static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
+{
+       int rc;
+
+       if (BNXT_PF(bp)) {
+               struct hwrm_func_cfg_input req = {0};
+
+               bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+               req.fid = cpu_to_le16(0xffff);
+               req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+               req.async_event_cr = cpu_to_le16(idx);
+               rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       } else {
+               struct hwrm_func_vf_cfg_input req = {0};
+
+               bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+               req.enables =
+                       cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
+               req.async_event_cr = cpu_to_le16(idx);
+               rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       }
+       return rc;
+}
+
 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 {
        int i, rc = 0;
@@ -3827,6 +4072,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
                        goto err_out;
                BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
                bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
+
+               if (!i) {
+                       rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
+                       if (rc)
+                               netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
+               }
        }
 
        for (i = 0; i < bp->tx_nr_rings; i++) {
@@ -3901,7 +4152,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
 
        if (rc || error_code) {
                switch (ring_type) {
-               case RING_FREE_REQ_RING_TYPE_CMPL:
+               case RING_FREE_REQ_RING_TYPE_L2_CMPL:
                        netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
                                   rc);
                        return rc;
@@ -3977,6 +4228,12 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
                }
        }
 
+       /* The completion rings are about to be freed.  After that the
+        * IRQ doorbell will not work anymore.  So we need to disable
+        * IRQ here.
+        */
+       bnxt_disable_int_sync(bp);
+
        for (i = 0; i < bp->cp_nr_rings; i++) {
                struct bnxt_napi *bnapi = bp->bnapi[i];
                struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
@@ -3984,7 +4241,7 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
 
                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        hwrm_ring_free_send_msg(bp, ring,
-                                               RING_FREE_REQ_RING_TYPE_CMPL,
+                                               RING_FREE_REQ_RING_TYPE_L2_CMPL,
                                                INVALID_HW_RING_ID);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
@@ -3992,6 +4249,50 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
        }
 }
 
+/* Caller must hold bp->hwrm_cmd_lock */
+int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
+{
+       struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_func_qcfg_input req = {0};
+       int rc;
+
+       if (bp->hwrm_spec_code < 0x10601)
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+       req.fid = cpu_to_le16(fid);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc)
+               *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
+
+       return rc;
+}
+
+static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
+{
+       struct hwrm_func_cfg_input req = {0};
+       int rc;
+
+       if (bp->hwrm_spec_code < 0x10601)
+               return 0;
+
+       if (BNXT_VF(bp))
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+       req.fid = cpu_to_le16(0xffff);
+       req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
+       req.num_tx_rings = cpu_to_le16(*tx_rings);
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               return rc;
+
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
        u32 buf_tmrs, u16 flags,
        struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
@@ -4463,8 +4764,12 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
 
 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
 {
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
        int rc;
 
+       if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
+               goto skip_rss_ctx;
+
        /* allocate context for vnic */
        rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
        if (rc) {
@@ -4484,6 +4789,7 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
                bp->rsscos_nr_ctxs++;
        }
 
+skip_rss_ctx:
        /* configure default vnic, ring grp */
        rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
        if (rc) {
@@ -4518,13 +4824,17 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
        int i, rc = 0;
 
        for (i = 0; i < bp->rx_nr_rings; i++) {
+               struct bnxt_vnic_info *vnic;
                u16 vnic_id = i + 1;
                u16 ring_id = i;
 
                if (vnic_id >= bp->nr_vnics)
                        break;
 
-               bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
+               vnic = &bp->vnic_info[vnic_id];
+               vnic->flags |= BNXT_VNIC_RFS_FLAG;
+               if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+                       vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
                rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
                if (rc) {
                        netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
@@ -4698,40 +5008,13 @@ static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
        return bnxt_init_chip(bp, irq_re_init);
 }
 
-static void bnxt_disable_int(struct bnxt *bp)
-{
-       int i;
-
-       if (!bp->bnapi)
-               return;
-
-       for (i = 0; i < bp->cp_nr_rings; i++) {
-               struct bnxt_napi *bnapi = bp->bnapi[i];
-               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
-
-               BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
-       }
-}
-
-static void bnxt_enable_int(struct bnxt *bp)
-{
-       int i;
-
-       atomic_set(&bp->intr_sem, 0);
-       for (i = 0; i < bp->cp_nr_rings; i++) {
-               struct bnxt_napi *bnapi = bp->bnapi[i];
-               struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
-
-               BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
-       }
-}
-
 static int bnxt_set_real_num_queues(struct bnxt *bp)
 {
        int rc;
        struct net_device *dev = bp->dev;
 
-       rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
+       rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
+                                         bp->tx_nr_rings_xdp);
        if (rc)
                return rc;
 
@@ -4779,19 +5062,12 @@ static void bnxt_setup_msix(struct bnxt *bp)
 
        tcs = netdev_get_num_tc(dev);
        if (tcs > 1) {
-               bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
-               if (bp->tx_nr_rings_per_tc == 0) {
-                       netdev_reset_tc(dev);
-                       bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
-               } else {
-                       int i, off, count;
+               int i, off, count;
 
-                       bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
-                       for (i = 0; i < tcs; i++) {
-                               count = bp->tx_nr_rings_per_tc;
-                               off = i * count;
-                               netdev_set_tc_queue(dev, i, count, off);
-                       }
+               for (i = 0; i < tcs; i++) {
+                       count = bp->tx_nr_rings_per_tc;
+                       off = i * count;
+                       netdev_set_tc_queue(dev, i, count, off);
                }
        }
 
@@ -4836,6 +5112,26 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
        return rc;
 }
 
+#ifdef CONFIG_RFS_ACCEL
+static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+       if (BNXT_VF(bp))
+               return bp->vf.max_rsscos_ctxs;
+#endif
+       return bp->pf.max_rsscos_ctxs;
+}
+
+static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+       if (BNXT_VF(bp))
+               return bp->vf.max_vnics;
+#endif
+       return bp->pf.max_vnics;
+}
+#endif
+
 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
 {
 #if defined(CONFIG_BNXT_SRIOV)
@@ -5094,10 +5390,8 @@ static void bnxt_disable_napi(struct bnxt *bp)
        if (!bp->bnapi)
                return;
 
-       for (i = 0; i < bp->cp_nr_rings; i++) {
+       for (i = 0; i < bp->cp_nr_rings; i++)
                napi_disable(&bp->bnapi[i]->napi);
-               bnxt_disable_poll(bp->bnapi[i]);
-       }
 }
 
 static void bnxt_enable_napi(struct bnxt *bp)
@@ -5106,7 +5400,6 @@ static void bnxt_enable_napi(struct bnxt *bp)
 
        for (i = 0; i < bp->cp_nr_rings; i++) {
                bp->bnapi[i]->in_reset = false;
-               bnxt_enable_poll(bp->bnapi[i]);
                napi_enable(&bp->bnapi[i]->napi);
        }
 }
@@ -5150,7 +5443,7 @@ static void bnxt_report_link(struct bnxt *bp)
        if (bp->link_info.link_up) {
                const char *duplex;
                const char *flow_ctrl;
-               u16 speed;
+               u16 speed, fec;
 
                netif_carrier_on(bp->dev);
                if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
@@ -5172,6 +5465,12 @@ static void bnxt_report_link(struct bnxt *bp)
                        netdev_info(bp->dev, "EEE is %s\n",
                                    bp->eee.eee_active ? "active" :
                                                         "not active");
+               fec = bp->link_info.fec_cfg;
+               if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
+                       netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
+                                   (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
+                                   (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
+                                    (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
        } else {
                netif_carrier_off(bp->dev);
                netdev_err(bp->dev, "NIC Link is Down\n");
@@ -5296,6 +5595,11 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
                        }
                }
        }
+
+       link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
+       if (bp->hwrm_spec_code >= 0x10504)
+               link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
+
        /* TODO: need to add more logic to report VF link */
        if (chng_link_state) {
                if (link_info->phy_link_status == BNXT_LINK_LINK)
@@ -5384,7 +5688,7 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp,
 {
        u8 autoneg = bp->link_info.autoneg;
        u16 fw_link_speed = bp->link_info.req_link_speed;
-       u32 advertising = bp->link_info.advertising;
+       u16 advertising = bp->link_info.advertising;
 
        if (autoneg & BNXT_AUTONEG_SPEED) {
                req->auto_mode |=
@@ -5489,6 +5793,45 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
        return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 }
 
+static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
+{
+       struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_port_led_qcaps_input req = {0};
+       struct bnxt_pf_info *pf = &bp->pf;
+       int rc;
+
+       if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
+               return 0;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
+       req.port_id = cpu_to_le16(pf->port_id);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc) {
+               mutex_unlock(&bp->hwrm_cmd_lock);
+               return rc;
+       }
+       if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
+               int i;
+
+               bp->num_leds = resp->num_leds;
+               memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
+                                                bp->num_leds);
+               for (i = 0; i < bp->num_leds; i++) {
+                       struct bnxt_led_info *led = &bp->leds[i];
+                       __le16 caps = led->led_state_caps;
+
+                       if (!led->led_group_id ||
+                           !BNXT_LED_ALT_BLINK_CAP(caps)) {
+                               bp->num_leds = 0;
+                               break;
+                       }
+               }
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return 0;
+}
+
 static bool bnxt_eee_config_ok(struct bnxt *bp)
 {
        struct ethtool_eee *eee = &bp->eee;
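
bnxt_hwrm_port_led_qcaps() above follows the driver's standard HWRM query shape: the firmware writes its reply into the shared hwrm_cmd_resp_addr buffer, so hwrm_cmd_lock must be held from _hwrm_send_message() until every response field has been copied out. The locking skeleton in isolation (demo_* names are illustrative; real request/response layouts come from the HWRM spec):

	#include <linux/mutex.h>
	#include <linux/types.h>

	struct demo_hw {
		struct mutex	cmd_lock;	/* guards the shared response buffer */
		void		*resp;		/* buffer the firmware DMAs into */
	};

	static int demo_hw_send(struct demo_hw *hw)
	{
		return 0;	/* stub standing in for _hwrm_send_message() */
	}

	static int demo_hw_query(struct demo_hw *hw, u32 *out)
	{
		int rc;

		mutex_lock(&hw->cmd_lock);
		rc = demo_hw_send(hw);
		if (!rc)
			*out = *(u32 *)hw->resp;	/* read resp before unlock */
		mutex_unlock(&hw->cmd_lock);
		return rc;
	}
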
@@ -5527,6 +5870,9 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
                           rc);
                return rc;
        }
+       if (!BNXT_SINGLE_PF(bp))
+               return 0;
+
        if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
            (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
            link_info->req_flow_ctrl)
@@ -5678,19 +6024,6 @@ static int bnxt_open(struct net_device *dev)
        return __bnxt_open_nic(bp, true, true);
 }
 
-static void bnxt_disable_int_sync(struct bnxt *bp)
-{
-       int i;
-
-       atomic_inc(&bp->intr_sem);
-       if (!netif_running(bp->dev))
-               return;
-
-       bnxt_disable_int(bp);
-       for (i = 0; i < bp->cp_nr_rings; i++)
-               synchronize_irq(bp->irq_tbl[i].vector);
-}
-
 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 {
        int rc = 0;
@@ -5712,13 +6045,12 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
        while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
                msleep(20);
 
-       /* Flush rings before disabling interrupts */
+       /* Flush rings and disable interrupts */
        bnxt_shutdown_nic(bp, irq_re_init);
 
        /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
 
        bnxt_disable_napi(bp);
-       bnxt_disable_int_sync(bp);
        del_timer_sync(&bp->timer);
        bnxt_free_skbs(bp);
 
@@ -5765,16 +6097,14 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        return -EOPNOTSUPP;
 }
 
-static struct rtnl_link_stats64 *
+static void
 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        u32 i;
        struct bnxt *bp = netdev_priv(dev);
 
-       memset(stats, 0, sizeof(struct rtnl_link_stats64));
-
        if (!bp->bnapi)
-               return stats;
+               return;
 
        /* TODO check if we need to synchronize with bnxt_close path */
        for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -5821,8 +6151,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
                stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
                stats->tx_errors = le64_to_cpu(tx->tx_err);
        }
-
-       return stats;
 }
 
 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
@@ -5975,20 +6303,36 @@ skip_uc:
        return rc;
 }
 
+/* If the chip and firmware support RFS */
+static bool bnxt_rfs_supported(struct bnxt *bp)
+{
+       if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
+               return true;
+       if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+               return true;
+       return false;
+}
+
+/* If runtime conditions support RFS */
 static bool bnxt_rfs_capable(struct bnxt *bp)
 {
 #ifdef CONFIG_RFS_ACCEL
-       struct bnxt_pf_info *pf = &bp->pf;
-       int vnics;
+       int vnics, max_vnics, max_rss_ctxs;
 
-       if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
+       if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
                return false;
 
        vnics = 1 + bp->rx_nr_rings;
-       if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) {
+       max_vnics = bnxt_get_max_func_vnics(bp);
+       max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
+
+       /* RSS contexts not a limiting factor */
+       if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+               max_rss_ctxs = max_vnics;
+       if (vnics > max_vnics || vnics > max_rss_ctxs) {
                netdev_warn(bp->dev,
                            "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
-                           min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1));
+                           min(max_rss_ctxs - 1, max_vnics - 1));
                return false;
        }
 
@@ -6044,6 +6388,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
        if (features & NETIF_F_LRO)
                flags |= BNXT_FLAG_LRO;
 
+       if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+               flags &= ~BNXT_FLAG_TPA;
+
        if (features & NETIF_F_HW_VLAN_CTAG_RX)
                flags |= BNXT_FLAG_STRIP_VLAN;
 
@@ -6296,6 +6643,37 @@ static void bnxt_sp_task(struct work_struct *work)
        clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 }
 
+/* Under rtnl_lock */
+int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp)
+{
+       int max_rx, max_tx, tx_sets = 1;
+       int tx_rings_needed;
+       bool sh = true;
+       int rc;
+
+       if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
+               sh = false;
+
+       if (tcs)
+               tx_sets = tcs;
+
+       rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
+       if (rc)
+               return rc;
+
+       if (max_rx < rx)
+               return -ENOMEM;
+
+       tx_rings_needed = tx * tx_sets + tx_xdp;
+       if (max_tx < tx_rings_needed)
+               return -ENOMEM;
+
+       if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) ||
+           tx_rings_needed < (tx * tx_sets + tx_xdp))
+               return -ENOMEM;
+       return 0;
+}
+
 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
 {
        int rc;
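
bnxt_reserve_rings() centralizes the ring arithmetic for every reconfiguration path: with tx rings per traffic class, tcs classes, and tx_xdp XDP rings, the device must supply tx * tcs + tx_xdp TX rings, and the firmware reservation must come back with at least that many. As a worked example, 8 rings per TC with 2 TCs plus 8 XDP rings needs 8 * 2 + 8 = 24 TX rings; if the hardware tops out at 20, or bnxt_hwrm_reserve_tx_rings() returns fewer than 24, the function fails with -ENOMEM before any existing rings are torn down.
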
@@ -6458,9 +6836,10 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
 {
        struct bnxt *bp = netdev_priv(dev);
        bool sh = false;
+       int rc;
 
        if (tc > bp->max_tc) {
-               netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
+               netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
                           tc, bp->max_tc);
                return -EINVAL;
        }
@@ -6471,13 +6850,10 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
        if (bp->flags & BNXT_FLAG_SHARED_RINGS)
                sh = true;
 
-       if (tc) {
-               int max_rx_rings, max_tx_rings, rc;
-
-               rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
-               if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
-                       return -ENOMEM;
-       }
+       rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
+                               tc, bp->tx_nr_rings_xdp);
+       if (rc)
+               return rc;
 
        /* Needs to close the device and do hw resource re-allocations */
        if (netif_running(bp->dev))
@@ -6521,6 +6897,7 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
            keys1->ports.ports == keys2->ports.ports &&
            keys1->basic.ip_proto == keys2->basic.ip_proto &&
            keys1->basic.n_proto == keys2->basic.n_proto &&
+           keys1->control.flags == keys2->control.flags &&
            ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
            ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
                return true;
@@ -6538,9 +6915,6 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
        int rc = 0, idx, bit_id, l2_idx = 0;
        struct hlist_head *head;
 
-       if (skb->encapsulation)
-               return -EPROTONOSUPPORT;
-
        if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
                int off = 0, j;
@@ -6567,12 +6941,23 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
                goto err_free;
        }
 
-       if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
+       if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
+            fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
            ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
             (fkeys->basic.ip_proto != IPPROTO_UDP))) {
                rc = -EPROTONOSUPPORT;
                goto err_free;
        }
+       if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
+           bp->hwrm_spec_code < 0x10601) {
+               rc = -EPROTONOSUPPORT;
+               goto err_free;
+       }
+       if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
+           bp->hwrm_spec_code < 0x10601) {
+               rc = -EPROTONOSUPPORT;
+               goto err_free;
+       }
 
        memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
        memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
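
Collected in one place, the acceptance rules the flow-steering path above now enforces: IPv4 or IPv6, TCP or UDP only, and IPv6 or encapsulated flows only when the firmware speaks HWRM spec 1.6.1 (0x10601) or later. A hedged sketch mirroring those checks; ETH_P_*, IPPROTO_* and htons() are the usual kernel header definitions:

/* Illustrative only; mirrors the checks above. */
static bool bnxt_ntuple_flow_ok(__be16 n_proto, u8 ip_proto,
				bool encap, u16 hwrm_spec_code)
{
	if (n_proto != htons(ETH_P_IP) && n_proto != htons(ETH_P_IPV6))
		return false;
	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
		return false;
	/* IPv6 and tunneled flows need newer firmware */
	if ((n_proto == htons(ETH_P_IPV6) || encap) &&
	    hwrm_spec_code < 0x10601)
		return false;
	return true;
}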
@@ -6779,9 +7164,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
 #endif
        .ndo_udp_tunnel_add     = bnxt_udp_tunnel_add,
        .ndo_udp_tunnel_del     = bnxt_udp_tunnel_del,
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       .ndo_busy_poll          = bnxt_busy_poll,
-#endif
+       .ndo_xdp                = bnxt_xdp,
 };
 
 static void bnxt_remove_one(struct pci_dev *pdev)
@@ -6806,6 +7189,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
        pci_iounmap(pdev, bp->bar0);
        kfree(bp->edev);
        bp->edev = NULL;
+       if (bp->xdp_prog)
+               bpf_prog_put(bp->xdp_prog);
        free_netdev(dev);
 
        pci_release_regions(pdev);
@@ -6920,8 +7305,17 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
        int rc;
 
        rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
-       if (rc)
-               return rc;
+       if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
+               /* Not enough rings, try disabling agg rings. */
+               bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+               rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
+               if (rc)
+                       return rc;
+               bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+               bp->dev->hw_features &= ~NETIF_F_LRO;
+               bp->dev->features &= ~NETIF_F_LRO;
+               bnxt_set_ring_params(bp);
+       }
 
        if (bp->flags & BNXT_FLAG_ROCE_CAP) {
                int max_cp, max_stat, max_irq;
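
The fallback above trades aggregation rings for ring count. With BNXT_FLAG_AGG_RINGS set, each logical RX ring also consumes an aggregation ring, and LRO/TPA places data into those rings, so the retry has to clear LRO as well. Hypothetical numbers, assuming the aggregation ring roughly halves the usable RX ring count:

/* Hypothetical: 16 hardware RX-capable rings available.
 *   with agg rings:    ~16 / 2 = 8 logical RX rings (RX + AGG each)
 *   without agg rings: 16 logical RX rings, but no LRO/TPA
 */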
@@ -6960,6 +7354,11 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
                return rc;
        bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
        bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
+
+       rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
+       if (rc)
+               netdev_warn(bp->dev, "Unable to reserve tx rings\n");
+
        bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
        bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
                               bp->tx_nr_rings + bp->rx_nr_rings;
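
The completion ring count on the last line follows directly from whether RX and TX share completion rings:

/* Hypothetical example with 8 RX and 8 TX rings:
 *   shared   (BNXT_FLAG_SHARED_RINGS): cp = max(8, 8) = 8
 *   separate:                          cp = 8 + 8     = 16
 * Roughly one NAPI context / MSI-X vector per completion ring.
 */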
@@ -7068,7 +7467,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* MTU range: 60 - 9500 */
        dev->min_mtu = ETH_ZLEN;
-       dev->max_mtu = 9500;
+       dev->max_mtu = BNXT_MAX_MTU;
 
        bnxt_dcb_init(bp);
 
@@ -7107,11 +7506,18 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        bnxt_hwrm_func_qcfg(bp);
+       bnxt_hwrm_port_led_qcaps(bp);
 
+       bnxt_set_rx_skb_mode(bp, false);
        bnxt_set_tpa_flags(bp);
        bnxt_set_ring_params(bp);
        bnxt_set_max_func_irqs(bp, max_irqs);
-       bnxt_set_dflt_rings(bp);
+       rc = bnxt_set_dflt_rings(bp);
+       if (rc) {
+               netdev_err(bp->dev, "Not enough rings available.\n");
+               rc = -ENOMEM;
+               goto init_err;
+       }
 
        /* Default RSS hash cfg. */
        bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
@@ -7126,7 +7532,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                                    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
        }
 
-       if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+       bnxt_hwrm_vnic_qcaps(bp);
+       if (bnxt_rfs_supported(bp)) {
                dev->hw_features |= NETIF_F_NTUPLE;
                if (bnxt_rfs_capable(bp)) {
                        bp->flags |= BNXT_FLAG_RFS;
index 16defe9ececc23f867230dd55d5a5143437b2a79..faf26a2f726b808792fd837437bf7abb9279a8c7 100644 (file)
@@ -1,6 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
+ * Copyright (c) 2016-2017 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #define BNXT_H
 
 #define DRV_MODULE_NAME                "bnxt_en"
-#define DRV_MODULE_VERSION     "1.6.0"
+#define DRV_MODULE_VERSION     "1.7.0"
 
 #define DRV_VER_MAJ    1
-#define DRV_VER_MIN    6
+#define DRV_VER_MIN    7
 #define DRV_VER_UPD    0
 
 struct tx_bd {
@@ -416,6 +417,11 @@ struct rx_tpa_end_cmp_ext {
 
 #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
 
+#define BNXT_MAX_MTU           9500
+#define BNXT_MAX_PAGE_MODE_MTU \
+       ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN -       \
+        XDP_PACKET_HEADROOM)
+
 #define BNXT_MIN_PKT_SIZE      52
 
 #define BNXT_NUM_TESTS(bp)     0
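
BNXT_MAX_PAGE_MODE_MTU above is the largest MTU for which one frame plus XDP headroom still fits in a single page. Plugging in common values (configuration dependent; NET_IP_ALIGN, for instance, is 0 on x86):

/* Hypothetical arithmetic for 4 KB pages:
 *   4096 (PAGE_SIZE) - 18 (VLAN_ETH_HLEN) - 2 (NET_IP_ALIGN)
 *        - 256 (XDP_PACKET_HEADROOM) = 3820 bytes
 */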
@@ -507,17 +513,25 @@ struct rx_tpa_end_cmp_ext {
 #define BNXT_HWRM_REQS_PER_PAGE                (BNXT_PAGE_SIZE /       \
                                         BNXT_HWRM_REQ_MAX_SIZE)
 
+#define BNXT_RX_EVENT  1
+#define BNXT_AGG_EVENT 2
+#define BNXT_TX_EVENT  4
+
 struct bnxt_sw_tx_bd {
        struct sk_buff          *skb;
        DEFINE_DMA_UNMAP_ADDR(mapping);
        u8                      is_gso;
        u8                      is_push;
-       unsigned short          nr_frags;
+       union {
+               unsigned short          nr_frags;
+               u16                     rx_prod;
+       };
 };
 
 struct bnxt_sw_rx_bd {
-       u8                      *data;
-       DEFINE_DMA_UNMAP_ADDR(mapping);
+       void                    *data;
+       u8                      *data_ptr;
+       dma_addr_t              mapping;
 };
 
 struct bnxt_sw_rx_agg_bd {
@@ -558,6 +572,7 @@ struct bnxt_tx_ring_info {
        struct bnxt_napi        *bnapi;
        u16                     tx_prod;
        u16                     tx_cons;
+       u16                     txq_index;
        void __iomem            *tx_doorbell;
 
        struct tx_bd            *tx_desc_ring[MAX_TX_PAGES];
@@ -576,7 +591,8 @@ struct bnxt_tx_ring_info {
 };
 
 struct bnxt_tpa_info {
-       u8                      *data;
+       void                    *data;
+       u8                      *data_ptr;
        dma_addr_t              mapping;
        u16                     len;
        unsigned short          gso_type;
@@ -608,6 +624,8 @@ struct bnxt_rx_ring_info {
        void __iomem            *rx_doorbell;
        void __iomem            *rx_agg_doorbell;
 
+       struct bpf_prog         *xdp_prog;
+
        struct rx_bd            *rx_desc_ring[MAX_RX_PAGES];
        struct bnxt_sw_rx_bd    *rx_buf_ring;
 
@@ -654,20 +672,13 @@ struct bnxt_napi {
        struct bnxt_rx_ring_info        *rx_ring;
        struct bnxt_tx_ring_info        *tx_ring;
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       atomic_t                poll_state;
-#endif
-       bool                    in_reset;
-};
+       void                    (*tx_int)(struct bnxt *, struct bnxt_napi *,
+                                         int);
+       u32                     flags;
+#define BNXT_NAPI_FLAG_XDP     0x1
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-enum bnxt_poll_state_t {
-       BNXT_STATE_IDLE = 0,
-       BNXT_STATE_NAPI,
-       BNXT_STATE_POLL,
-       BNXT_STATE_DISABLE,
+       bool                    in_reset;
 };
-#endif
 
 struct bnxt_irq {
        irq_handler_t   handler;
@@ -720,6 +731,7 @@ struct bnxt_vnic_info {
 #define BNXT_VNIC_RFS_FLAG     2
 #define BNXT_VNIC_MCAST_FLAG   4
 #define BNXT_VNIC_UCAST_FLAG   8
+#define BNXT_VNIC_RFS_NEW_RSS_FLAG     0x10
 };
 
 #if defined(CONFIG_BNXT_SRIOV)
@@ -840,7 +852,7 @@ struct bnxt_link_info {
 #define BNXT_LINK_SPEED_40GB   PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
 #define BNXT_LINK_SPEED_50GB   PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
        u16                     support_speeds;
-       u16                     auto_link_speeds;
+       u16                     auto_link_speeds;       /* fw adv setting */
 #define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
 #define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
 #define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB
@@ -855,6 +867,10 @@ struct bnxt_link_info {
        u16                     force_link_speed;
        u32                     preemphasis;
        u8                      module_status;
+       u16                     fec_cfg;
+#define BNXT_FEC_AUTONEG       PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED
+#define BNXT_FEC_ENC_BASE_R    PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED
+#define BNXT_FEC_ENC_RS                PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED
 
        /* copy of requested setting from ethtool cmd */
        u8                      autoneg;
@@ -863,7 +879,7 @@ struct bnxt_link_info {
        u8                      req_duplex;
        u8                      req_flow_ctrl;
        u16                     req_link_speed;
-       u32                     advertising;
+       u16                     advertising;    /* user adv setting */
        bool                    force_link_chng;
 
        /* a copy of phy_qcfg output used to report link
@@ -879,6 +895,20 @@ struct bnxt_queue_info {
        u8      queue_profile;
 };
 
+#define BNXT_MAX_LED                   4
+
+struct bnxt_led_info {
+       u8      led_id;
+       u8      led_type;
+       u8      led_group_id;
+       u8      unused;
+       __le16  led_state_caps;
+#define BNXT_LED_ALT_BLINK_CAP(x)      ((x) &  \
+       cpu_to_le16(PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED))
+
+       __le16  led_color_caps;
+};
+
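BNXT_LED_ALT_BLINK_CAP tests the per-LED state-capability word reported by HWRM_PORT_LED_QCAPS; it is presumably consulted when the qcaps response is parsed (that code is not in this excerpt). A minimal, hedged usage sketch:

/* Illustrative check, assuming a bnxt_led_info filled in from the
 * HWRM_PORT_LED_QCAPS response:
 */
static bool bnxt_led_supports_blink_alt(const struct bnxt_led_info *led)
{
	return BNXT_LED_ALT_BLINK_CAP(led->led_state_caps) != 0;
}
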
 #define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
 #define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
 #define BNXT_CAG_REG_BASE              0x300000
@@ -956,10 +986,13 @@ struct bnxt {
        #define BNXT_FLAG_PORT_STATS    0x400
        #define BNXT_FLAG_UDP_RSS_CAP   0x800
        #define BNXT_FLAG_EEE_CAP       0x1000
+       #define BNXT_FLAG_NEW_RSS_CAP   0x2000
        #define BNXT_FLAG_ROCEV1_CAP    0x8000
        #define BNXT_FLAG_ROCEV2_CAP    0x10000
        #define BNXT_FLAG_ROCE_CAP      (BNXT_FLAG_ROCEV1_CAP | \
                                         BNXT_FLAG_ROCEV2_CAP)
+       #define BNXT_FLAG_NO_AGG_RINGS  0x20000
+       #define BNXT_FLAG_RX_PAGE_MODE  0x40000
        #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
 
        #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |             \
@@ -971,6 +1004,7 @@ struct bnxt {
 #define BNXT_NPAR(bp)          ((bp)->port_partition_type)
 #define BNXT_SINGLE_PF(bp)     (BNXT_PF(bp) && !BNXT_NPAR(bp))
 #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
+#define BNXT_RX_PAGE_MODE(bp)  ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
 
        struct bnxt_en_dev      *edev;
        struct bnxt_en_dev *    (*ulp_probe)(struct net_device *);
@@ -979,12 +1013,21 @@ struct bnxt {
 
        struct bnxt_rx_ring_info        *rx_ring;
        struct bnxt_tx_ring_info        *tx_ring;
+       u16                     *tx_ring_map;
 
        struct sk_buff *        (*gro_func)(struct bnxt_tpa_info *, int, int,
                                            struct sk_buff *);
 
+       struct sk_buff *        (*rx_skb_func)(struct bnxt *,
+                                              struct bnxt_rx_ring_info *,
+                                              u16, void *, u8 *, dma_addr_t,
+                                              unsigned int);
+
        u32                     rx_buf_size;
        u32                     rx_buf_use_size;        /* useable size */
+       u16                     rx_offset;
+       u16                     rx_dma_offset;
+       enum dma_data_direction rx_dir;
        u32                     rx_ring_size;
        u32                     rx_agg_ring_size;
        u32                     rx_copy_thresh;
@@ -1000,6 +1043,7 @@ struct bnxt {
        int                     tx_nr_pages;
        int                     tx_nr_rings;
        int                     tx_nr_rings_per_tc;
+       int                     tx_nr_rings_xdp;
 
        int                     tx_wake_thresh;
        int                     tx_push_thresh;
@@ -1132,6 +1176,11 @@ struct bnxt {
        struct ethtool_eee      eee;
        u32                     lpi_tmr_lo;
        u32                     lpi_tmr_hi;
+
+       u8                      num_leds;
+       struct bnxt_led_info    leds[BNXT_MAX_LED];
+
+       struct bpf_prog         *xdp_prog;
 };
 
 #define BNXT_RX_STATS_OFFSET(counter)                  \
@@ -1141,93 +1190,6 @@ struct bnxt {
        ((offsetof(struct tx_port_stats, counter) +     \
          sizeof(struct rx_port_stats) + 512) / 8)
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
-{
-       atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
-}
-
-/* called from the NAPI poll routine to get ownership of a bnapi */
-static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
-{
-       int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
-                               BNXT_STATE_NAPI);
-
-       return rc == BNXT_STATE_IDLE;
-}
-
-static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
-{
-       atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
-}
-
-/* called from the busy poll routine to get ownership of a bnapi */
-static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
-{
-       int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
-                               BNXT_STATE_POLL);
-
-       return rc == BNXT_STATE_IDLE;
-}
-
-static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
-{
-       atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
-}
-
-static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
-{
-       return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL;
-}
-
-static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
-{
-       int old;
-
-       while (1) {
-               old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
-                                    BNXT_STATE_DISABLE);
-               if (old == BNXT_STATE_IDLE)
-                       break;
-               usleep_range(500, 5000);
-       }
-}
-
-#else
-
-static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
-{
-}
-
-static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
-{
-       return true;
-}
-
-static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
-{
-}
-
-static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
-{
-       return false;
-}
-
-static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
-{
-}
-
-static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
-{
-       return false;
-}
-
-static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
-{
-}
-
-#endif
-
 #define I2C_DEV_ADDR_A0                                0xa0
 #define I2C_DEV_ADDR_A2                                0xa2
 #define SFP_EEPROM_SFF_8472_COMP_ADDR          0x5e
@@ -1238,7 +1200,23 @@ static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
 #define SFF_MODULE_ID_QSFP28                   0x11
 #define BNXT_MAX_PHY_I2C_RESP_SIZE             64
 
+static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
+{
+       /* Tell compiler to fetch tx indices from memory. */
+       barrier();
+
+       return bp->tx_ring_size -
+               ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
+}
+
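bnxt_tx_avail() above counts free descriptors with unsigned mask arithmetic, which stays correct across producer/consumer index wraparound as long as the ring size is a power of two (tx_ring_mask = size - 1):

/* Hypothetical example, 512-entry TX ring (tx_ring_mask = 0x1ff):
 *   tx_prod = 5 (wrapped), tx_cons = 510
 *   in use = (5 - 510) & 0x1ff = 7
 *   avail  = 512 - 7 = 505 descriptors
 */
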
+extern const u16 bnxt_lhint_arr[];
+
+int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
+                      u16 prod, gfp_t gfp);
+void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
+void bnxt_set_tpa_flags(struct bnxt *bp);
 void bnxt_set_ring_params(struct bnxt *);
+int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
 void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
 int _hwrm_send_message(struct bnxt *, void *, u32, int);
 int hwrm_send_message(struct bnxt *, void *, u32, int);
@@ -1246,6 +1224,7 @@ int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
                                     int bmap_size);
 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
+int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
 int bnxt_hwrm_set_coal(struct bnxt *);
 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
@@ -1259,6 +1238,7 @@ int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
 int bnxt_hwrm_fw_set_time(struct bnxt *);
 int bnxt_open_nic(struct bnxt *, bool, bool);
 int bnxt_close_nic(struct bnxt *, bool, bool);
+int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp);
 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
 int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
 void bnxt_restore_pf_fw_resources(struct bnxt *bp);
index 784aa77610bc5756ee8a8d0efe9f37d5679cbf7a..6903a873f072ae14f4a7638514446d6ad7b1c6a0 100644 (file)
@@ -357,7 +357,7 @@ static void bnxt_get_channels(struct net_device *dev,
        int max_rx_rings, max_tx_rings, tcs;
 
        bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
-       channel->max_combined = max_t(int, max_rx_rings, max_tx_rings);
+       channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
 
        if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
                max_rx_rings = 0;
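
The max_t to min_t change above is a real fix, not a cleanup: a combined channel consumes one RX and one TX ring, so the number of combined channels that can actually be allocated is bounded by the scarcer ring type.

/* Hypothetical: firmware allows 16 RX rings but only 8 TX rings.
 *   fixed:  max_combined = min(16, 8) = 8
 *   before: max(16, 8) = 16, which ethtool -L could request but the
 *           driver could never satisfy.
 */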
@@ -387,9 +387,10 @@ static int bnxt_set_channels(struct net_device *dev,
                             struct ethtool_channels *channel)
 {
        struct bnxt *bp = netdev_priv(dev);
-       int max_rx_rings, max_tx_rings, tcs;
-       u32 rc = 0;
+       int req_tx_rings, req_rx_rings, tcs;
        bool sh = false;
+       int tx_xdp = 0;
+       int rc = 0;
 
        if (channel->other_count)
                return -EINVAL;
@@ -409,19 +410,22 @@ static int bnxt_set_channels(struct net_device *dev,
        if (channel->combined_count)
                sh = true;
 
-       bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
-
        tcs = netdev_get_num_tc(dev);
-       if (tcs > 1)
-               max_tx_rings /= tcs;
-
-       if (sh &&
-           channel->combined_count > max_t(int, max_rx_rings, max_tx_rings))
-               return -ENOMEM;
 
-       if (!sh && (channel->rx_count > max_rx_rings ||
-                   channel->tx_count > max_tx_rings))
-               return -ENOMEM;
+       req_tx_rings = sh ? channel->combined_count : channel->tx_count;
+       req_rx_rings = sh ? channel->combined_count : channel->rx_count;
+       if (bp->tx_nr_rings_xdp) {
+               if (!sh) {
+                       netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
+                       return -EINVAL;
+               }
+               tx_xdp = req_rx_rings;
+       }
+       rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, tcs, tx_xdp);
+       if (rc) {
+               netdev_warn(dev, "Unable to allocate the requested rings\n");
+               return rc;
+       }
 
        if (netif_running(dev)) {
                if (BNXT_PF(bp)) {
@@ -439,19 +443,17 @@ static int bnxt_set_channels(struct net_device *dev,
 
        if (sh) {
                bp->flags |= BNXT_FLAG_SHARED_RINGS;
-               bp->rx_nr_rings = min_t(int, channel->combined_count,
-                                       max_rx_rings);
-               bp->tx_nr_rings_per_tc = min_t(int, channel->combined_count,
-                                              max_tx_rings);
+               bp->rx_nr_rings = channel->combined_count;
+               bp->tx_nr_rings_per_tc = channel->combined_count;
        } else {
                bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
                bp->rx_nr_rings = channel->rx_count;
                bp->tx_nr_rings_per_tc = channel->tx_count;
        }
-
-       bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+       bp->tx_nr_rings_xdp = tx_xdp;
+       bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
        if (tcs > 1)
-               bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
+               bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;
 
        bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
                               bp->tx_nr_rings + bp->rx_nr_rings;
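
Tracing the bookkeeping above for a hypothetical configuration makes the XDP ring accounting concrete:

/* Hypothetical: combined_count = 4, tcs = 2, XDP program attached:
 *   tx_xdp             = req_rx_rings = 4  (one XDP TX ring per RX ring)
 *   tx_nr_rings_per_tc = 4
 *   tx_nr_rings        = 4 * 2 + 4 = 12
 *   cp_nr_rings        = max(12, 4) = 12   (shared rings, required by XDP)
 */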
@@ -524,24 +526,49 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
 
 fltr_found:
        fkeys = &fltr->fkeys;
-       if (fkeys->basic.ip_proto == IPPROTO_TCP)
-               fs->flow_type = TCP_V4_FLOW;
-       else if (fkeys->basic.ip_proto == IPPROTO_UDP)
-               fs->flow_type = UDP_V4_FLOW;
-       else
-               goto fltr_err;
+       if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+               if (fkeys->basic.ip_proto == IPPROTO_TCP)
+                       fs->flow_type = TCP_V4_FLOW;
+               else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+                       fs->flow_type = UDP_V4_FLOW;
+               else
+                       goto fltr_err;
 
-       fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
-       fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+               fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
+               fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
 
-       fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
-       fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+               fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
+               fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+
+               fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
+               fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+
+               fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
+               fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+       } else {
+               int i;
 
-       fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
-       fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+               if (fkeys->basic.ip_proto == IPPROTO_TCP)
+                       fs->flow_type = TCP_V6_FLOW;
+               else if (fkeys->basic.ip_proto == IPPROTO_UDP)
+                       fs->flow_type = UDP_V6_FLOW;
+               else
+                       goto fltr_err;
+
+               *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
+                       fkeys->addrs.v6addrs.src;
+               *(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
+                       fkeys->addrs.v6addrs.dst;
+               for (i = 0; i < 4; i++) {
+                       fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
+                       fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
+               }
+               fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
+               fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
 
-       fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
-       fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+               fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
+               fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
+       }
 
        fs->ring_cookie = fltr->rxq;
        rc = 0;
@@ -893,7 +920,7 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
 static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
                                struct ethtool_link_ksettings *lk_ksettings)
 {
-       u16 fw_speeds = link_info->auto_link_speeds;
+       u16 fw_speeds = link_info->advertising;
        u8 fw_pause = 0;
 
        if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
@@ -1090,8 +1117,9 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_link_info *link_info = &bp->link_info;
        const struct ethtool_link_settings *base = &lk_ksettings->base;
-       u32 speed, fw_advertising = 0;
        bool set_pause = false;
+       u16 fw_advertising = 0;
+       u32 speed;
        int rc = 0;
 
        if (!BNXT_SINGLE_PF(bp))
@@ -1550,17 +1578,37 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
        bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
        install.install_type = cpu_to_le32(install_type);
 
-       rc = hwrm_send_message(bp, &install, sizeof(install),
-                              INSTALL_PACKAGE_TIMEOUT);
-       if (rc)
-               return -EOPNOTSUPP;
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &install, sizeof(install),
+                               INSTALL_PACKAGE_TIMEOUT);
+       if (rc) {
+               rc = -EOPNOTSUPP;
+               goto flash_pkg_exit;
+       }
+
+       if (resp->error_code) {
+               u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
+
+               if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
+                       install.flags |= cpu_to_le16(
+                              NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
+                       rc = _hwrm_send_message(bp, &install, sizeof(install),
+                                               INSTALL_PACKAGE_TIMEOUT);
+                       if (rc) {
+                               rc = -EOPNOTSUPP;
+                               goto flash_pkg_exit;
+                       }
+               }
+       }
 
        if (resp->result) {
                netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
                           (s8)resp->result, (int)resp->problem_item);
-               return -ENOPKG;
+               rc = -ENOPKG;
        }
-       return 0;
+flash_pkg_exit:
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
 }
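
Two things are easy to miss in the rework above: the whole exchange now runs under hwrm_cmd_lock, since the firmware response buffer is read twice and must not be overwritten between sends, and a NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR error triggers exactly one retry with the ALLOWED_TO_DEFRAG flag set. From user space this path is reached with "ethtool -f <iface> <package.pkg> <install_type>".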
 
 static int bnxt_flash_device(struct net_device *dev,
@@ -2039,6 +2087,47 @@ static int bnxt_nway_reset(struct net_device *dev)
        return rc;
 }
 
+static int bnxt_set_phys_id(struct net_device *dev,
+                           enum ethtool_phys_id_state state)
+{
+       struct hwrm_port_led_cfg_input req = {0};
+       struct bnxt *bp = netdev_priv(dev);
+       struct bnxt_pf_info *pf = &bp->pf;
+       struct bnxt_led_cfg *led_cfg;
+       u8 led_state;
+       __le16 duration;
+       int i, rc;
+
+       if (!bp->num_leds || BNXT_VF(bp))
+               return -EOPNOTSUPP;
+
+       if (state == ETHTOOL_ID_ACTIVE) {
+               led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
+               duration = cpu_to_le16(500);
+       } else if (state == ETHTOOL_ID_INACTIVE) {
+               led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
+               duration = cpu_to_le16(0);
+       } else {
+               return -EINVAL;
+       }
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
+       req.port_id = cpu_to_le16(pf->port_id);
+       req.num_leds = bp->num_leds;
+       led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
+       for (i = 0; i < bp->num_leds; i++, led_cfg++) {
+               req.enables |= BNXT_LED_DFLT_ENABLES(i);
+               led_cfg->led_id = bp->leds[i].led_id;
+               led_cfg->led_state = led_state;
+               led_cfg->led_blink_on = duration;
+               led_cfg->led_blink_off = duration;
+               led_cfg->led_group_id = bp->leds[i].led_group_id;
+       }
+       rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (rc)
+               rc = -EIO;
+       return rc;
+}
+
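With this hooked into bnxt_ethtool_ops below, "ethtool -p <iface> [seconds]" blinks the port LEDs for identification: ETHTOOL_ID_ACTIVE programs every firmware-reported LED to the alternating-blink state with 500 blink-on/blink-off periods (per the HWRM LED fields, presumably milliseconds), and ETHTOOL_ID_INACTIVE restores the default LED state. The function bails out with -EOPNOTSUPP on VFs and on ports where bnxt_hwrm_port_led_qcaps() reported no LEDs.
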
 const struct ethtool_ops bnxt_ethtool_ops = {
        .get_link_ksettings     = bnxt_get_link_ksettings,
        .set_link_ksettings     = bnxt_set_link_ksettings,
@@ -2070,5 +2159,6 @@ const struct ethtool_ops bnxt_ethtool_ops = {
        .set_eee                = bnxt_set_eee,
        .get_module_info        = bnxt_get_module_info,
        .get_module_eeprom      = bnxt_get_module_eeprom,
-       .nway_reset             = bnxt_nway_reset
+       .nway_reset             = bnxt_nway_reset,
+       .set_phys_id            = bnxt_set_phys_id,
 };
index 3abc03b60dbc8dd48da8e201cea12fadaa3bb0f7..ed1e555292e9ce404b44017eca84fac5edb23d85 100644 (file)
 #ifndef BNXT_ETHTOOL_H
 #define BNXT_ETHTOOL_H
 
+struct bnxt_led_cfg {
+       u8 led_id;
+       u8 led_state;
+       u8 led_color;
+       u8 unused;
+       __le16 led_blink_on;
+       __le16 led_blink_off;
+       u8 led_group_id;
+       u8 rsvd;
+};
+
+#define BNXT_LED_DFLT_ENA                              \
+       (PORT_LED_CFG_REQ_ENABLES_LED0_ID |             \
+        PORT_LED_CFG_REQ_ENABLES_LED0_STATE |          \
+        PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON |       \
+        PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF |      \
+        PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID)
+
+#define BNXT_LED_DFLT_ENA_SHIFT        6
+
+#define BNXT_LED_DFLT_ENABLES(x)                       \
+       cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
+
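The shift macro above relies on each LED owning six consecutive enable bits in the request's enables word; a worked expansion:

/* BNXT_LED_DFLT_ENA sets five of LED0's six enable bits
 * (ID | STATE | BLINK_ON | BLINK_OFF | GROUP_ID
 *  = 0x1 | 0x2 | 0x8 | 0x10 | 0x20 = 0x3b; COLOR is left out).
 * BNXT_LED_DFLT_ENABLES(1) = 0x3b << 6 = 0xec0, which is exactly
 * LED1_ID | LED1_STATE | LED1_BLINK_ON | LED1_BLINK_OFF |
 * LED1_GROUP_ID (0x40 | 0x80 | 0x200 | 0x400 | 0x800).
 */
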
 extern const struct ethtool_ops bnxt_ethtool_ops;
 
 u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
index 2ddfa51519a1a914fb41a779abd23e36e3bd7f32..6e275c23d68bfe22561903e9b1d538a160ab8948 100644 (file)
@@ -1,7 +1,7 @@
 /* Broadcom NetXtreme-C/E network driver.
  *
  * Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016 Broadcom Limited
+ * Copyright (c) 2016-2017 Broadcom Limited
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #ifndef BNXT_HSI_H
 #define BNXT_HSI_H
 
-/* HSI and HWRM Specification 1.6.0 */
+/* HSI and HWRM Specification 1.7.0 */
 #define HWRM_VERSION_MAJOR     1
-#define HWRM_VERSION_MINOR     6
+#define HWRM_VERSION_MINOR     7
 #define HWRM_VERSION_UPDATE    0
 
-#define HWRM_VERSION_STR       "1.6.0"
+#define HWRM_VERSION_STR       "1.7.0"
 /*
 * The following is the signature for an HWRM message field that indicates
 * not applicable (all F's). Cast it to the size of the field if needed.
@@ -549,6 +549,8 @@ struct hwrm_ver_get_output {
        __le32 dev_caps_cfg;
        #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED  0x1UL
        #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED  0x2UL
+       #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED      0x4UL
+       #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED       0x8UL
        u8 roce_fw_maj;
        u8 roce_fw_min;
        u8 roce_fw_bld;
@@ -832,20 +834,32 @@ struct hwrm_func_qcfg_output {
        __le32 min_bw;
        #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK                 0xfffffffUL
        #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT                  0
-       #define FUNC_QCFG_RESP_MIN_BW_RSVD                          0x10000000UL
+       #define FUNC_QCFG_RESP_MIN_BW_SCALE                         0x10000000UL
+       #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS                   (0x0UL << 28)
+       #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES                  (0x1UL << 28)
+       #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST    FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
        #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK            0xe0000000UL
        #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT     29
-       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MBPS           (0x0UL << 29)
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA           (0x0UL << 29)
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO           (0x2UL << 29)
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE           (0x4UL << 29)
+       #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA           (0x6UL << 29)
        #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
        #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
        #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST    FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 max_bw;
        #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK                 0xfffffffUL
        #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT                  0
-       #define FUNC_QCFG_RESP_MAX_BW_RSVD                          0x10000000UL
+       #define FUNC_QCFG_RESP_MAX_BW_SCALE                         0x10000000UL
+       #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS                   (0x0UL << 28)
+       #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES                  (0x1UL << 28)
+       #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST    FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
        #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK            0xe0000000UL
        #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT     29
-       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MBPS           (0x0UL << 29)
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA           (0x0UL << 29)
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO           (0x2UL << 29)
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE           (0x4UL << 29)
+       #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA           (0x6UL << 29)
        #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
        #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
        #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST    FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
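
Every min_bw/max_bw word in these structures uses the same packing: a 28-bit value, a scale bit (bits vs. bytes) at bit 28, and a 3-bit unit field at bits 29-31. A hedged decoding sketch in plain C; the helpers are hypothetical, and real driver code would le32_to_cpu() the field before applying them:

#include <stdint.h>

#define BW_VALUE_MASK	0x0fffffffu
#define BW_SCALE_BYTES	0x10000000u	/* clear: bits, set: bytes */
#define BW_UNIT_SFT	29

static uint32_t bw_value(uint32_t bw) { return bw & BW_VALUE_MASK; }
static int bw_is_bytes(uint32_t bw)   { return !!(bw & BW_SCALE_BYTES); }
static unsigned bw_unit(uint32_t bw)  { return bw >> BW_UNIT_SFT; }
/* unit: 0 mega, 2 kilo, 4 base, 6 giga, 1 percent (0.01%), 7 invalid */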
@@ -921,20 +935,32 @@ struct hwrm_func_cfg_input {
        __le32 min_bw;
        #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK                   0xfffffffUL
        #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT                    0
-       #define FUNC_CFG_REQ_MIN_BW_RSVD                            0x10000000UL
+       #define FUNC_CFG_REQ_MIN_BW_SCALE                           0x10000000UL
+       #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS                     (0x0UL << 28)
+       #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES            (0x1UL << 28)
+       #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST    FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
        #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK              0xe0000000UL
        #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT               29
-       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MBPS             (0x0UL << 29)
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA             (0x0UL << 29)
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO             (0x2UL << 29)
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE             (0x4UL << 29)
+       #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA             (0x6UL << 29)
        #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100    (0x1UL << 29)
        #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID          (0x7UL << 29)
        #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST    FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 max_bw;
        #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK                   0xfffffffUL
        #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT                    0
-       #define FUNC_CFG_REQ_MAX_BW_RSVD                            0x10000000UL
+       #define FUNC_CFG_REQ_MAX_BW_SCALE                           0x10000000UL
+       #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS                     (0x0UL << 28)
+       #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES            (0x1UL << 28)
+       #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST    FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
        #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK              0xe0000000UL
        #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT               29
-       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MBPS             (0x0UL << 29)
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA             (0x0UL << 29)
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO             (0x2UL << 29)
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE             (0x4UL << 29)
+       #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA             (0x6UL << 29)
        #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100    (0x1UL << 29)
        #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID          (0x7UL << 29)
        #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST    FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -1529,6 +1555,20 @@ struct hwrm_port_phy_qcfg_output {
        #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET                  0x8UL
        #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE                 0x9UL
        #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY    0xaUL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L       0xbUL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S       0xcUL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N       0xdUL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR             0xeUL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4           0xfUL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4           0x10UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4           0x11UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4           0x12UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10          0x13UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4    0x14UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4    0x15UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4    0x16UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4    0x17UL
+       #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE      0x18UL
        u8 media_type;
        #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN              0x0UL
        #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP                   0x1UL
@@ -1919,6 +1959,219 @@ struct hwrm_port_phy_i2c_read_output {
        u8 valid;
 };
 
+/* hwrm_port_led_cfg */
+/* Input (64 bytes) */
+struct hwrm_port_led_cfg_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       #define PORT_LED_CFG_REQ_ENABLES_LED0_ID                    0x1UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE                 0x2UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR                 0x4UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON              0x8UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF     0x10UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID              0x20UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED1_ID                    0x40UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE                 0x80UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR                 0x100UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON              0x200UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF     0x400UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID              0x800UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED2_ID                    0x1000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE                 0x2000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR                 0x4000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON              0x8000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF     0x10000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID              0x20000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED3_ID                    0x40000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE                 0x80000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR                 0x100000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON              0x200000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF     0x400000UL
+       #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID              0x800000UL
+       __le16 port_id;
+       u8 num_leds;
+       u8 rsvd;
+       u8 led0_id;
+       u8 led0_state;
+       #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT                0x0UL
+       #define PORT_LED_CFG_REQ_LED0_STATE_OFF            0x1UL
+       #define PORT_LED_CFG_REQ_LED0_STATE_ON                     0x2UL
+       #define PORT_LED_CFG_REQ_LED0_STATE_BLINK                  0x3UL
+       #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT               0x4UL
+       u8 led0_color;
+       #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT                0x0UL
+       #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER                  0x1UL
+       #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN                  0x2UL
+       #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER             0x3UL
+       u8 unused_0;
+       __le16 led0_blink_on;
+       __le16 led0_blink_off;
+       u8 led0_group_id;
+       u8 rsvd0;
+       u8 led1_id;
+       u8 led1_state;
+       #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT                0x0UL
+       #define PORT_LED_CFG_REQ_LED1_STATE_OFF            0x1UL
+       #define PORT_LED_CFG_REQ_LED1_STATE_ON                     0x2UL
+       #define PORT_LED_CFG_REQ_LED1_STATE_BLINK                  0x3UL
+       #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT               0x4UL
+       u8 led1_color;
+       #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT                0x0UL
+       #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER                  0x1UL
+       #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN                  0x2UL
+       #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER             0x3UL
+       u8 unused_1;
+       __le16 led1_blink_on;
+       __le16 led1_blink_off;
+       u8 led1_group_id;
+       u8 rsvd1;
+       u8 led2_id;
+       u8 led2_state;
+       #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT                0x0UL
+       #define PORT_LED_CFG_REQ_LED2_STATE_OFF            0x1UL
+       #define PORT_LED_CFG_REQ_LED2_STATE_ON                     0x2UL
+       #define PORT_LED_CFG_REQ_LED2_STATE_BLINK                  0x3UL
+       #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT               0x4UL
+       u8 led2_color;
+       #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT                0x0UL
+       #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER                  0x1UL
+       #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN                  0x2UL
+       #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER             0x3UL
+       u8 unused_2;
+       __le16 led2_blink_on;
+       __le16 led2_blink_off;
+       u8 led2_group_id;
+       u8 rsvd2;
+       u8 led3_id;
+       u8 led3_state;
+       #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT                0x0UL
+       #define PORT_LED_CFG_REQ_LED3_STATE_OFF            0x1UL
+       #define PORT_LED_CFG_REQ_LED3_STATE_ON                     0x2UL
+       #define PORT_LED_CFG_REQ_LED3_STATE_BLINK                  0x3UL
+       #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT               0x4UL
+       u8 led3_color;
+       #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT                0x0UL
+       #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER                  0x1UL
+       #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN                  0x2UL
+       #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER             0x3UL
+       u8 unused_3;
+       __le16 led3_blink_on;
+       __le16 led3_blink_off;
+       u8 led3_group_id;
+       u8 rsvd3;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_led_cfg_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le32 unused_0;
+       u8 unused_1;
+       u8 unused_2;
+       u8 unused_3;
+       u8 valid;
+};
+
+/* hwrm_port_led_qcaps */
+/* Input (24 bytes) */
+struct hwrm_port_led_qcaps_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le16 port_id;
+       __le16 unused_0[3];
+};
+
+/* Output (48 bytes) */
+struct hwrm_port_led_qcaps_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       u8 num_leds;
+       u8 unused_0[3];
+       u8 led0_id;
+       u8 led0_type;
+       #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED                0x0UL
+       #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY             0x1UL
+       #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID              0xffUL
+       u8 led0_group_id;
+       u8 unused_1;
+       __le16 led0_state_caps;
+       #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED         0x1UL
+       #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED  0x2UL
+       #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED   0x4UL
+       #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+       #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+       __le16 led0_color_caps;
+       #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD            0x1UL
+       #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+       #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+       u8 led1_id;
+       u8 led1_type;
+       #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED                0x0UL
+       #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY             0x1UL
+       #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID              0xffUL
+       u8 led1_group_id;
+       u8 unused_2;
+       __le16 led1_state_caps;
+       #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED         0x1UL
+       #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED  0x2UL
+       #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED   0x4UL
+       #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+       #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+       __le16 led1_color_caps;
+       #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD            0x1UL
+       #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+       #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+       u8 led2_id;
+       u8 led2_type;
+       #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED                0x0UL
+       #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY             0x1UL
+       #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID              0xffUL
+       u8 led2_group_id;
+       u8 unused_3;
+       __le16 led2_state_caps;
+       #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED         0x1UL
+       #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED  0x2UL
+       #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED   0x4UL
+       #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+       #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+       __le16 led2_color_caps;
+       #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD            0x1UL
+       #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+       #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+       u8 led3_id;
+       u8 led3_type;
+       #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED                0x0UL
+       #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY             0x1UL
+       #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID              0xffUL
+       u8 led3_group_id;
+       u8 unused_4;
+       __le16 led3_state_caps;
+       #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED         0x1UL
+       #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED  0x2UL
+       #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED   0x4UL
+       #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+       #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+       __le16 led3_color_caps;
+       #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD            0x1UL
+       #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+       #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+       u8 unused_5;
+       u8 unused_6;
+       u8 unused_7;
+       u8 valid;
+};
+
 /* hwrm_queue_qportcfg */
 /* Input (24 bytes) */
 struct hwrm_queue_qportcfg_input {
@@ -2216,20 +2469,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
        __le32 queue_id0_min_bw;
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_RSVD       0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id0_max_bw;
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_RSVD       0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2244,20 +2509,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
        __le32 queue_id1_min_bw;
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_RSVD       0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id1_max_bw;
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_RSVD       0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2272,20 +2549,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
        __le32 queue_id2_min_bw;
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_RSVD       0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id2_max_bw;
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_RSVD       0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2300,20 +2589,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
        __le32 queue_id3_min_bw;
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_RSVD       0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id3_max_bw;
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_RSVD       0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2328,20 +2629,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
        __le32 queue_id4_min_bw;
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_RSVD       0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id4_max_bw;
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_RSVD       0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2356,20 +2669,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
        __le32 queue_id5_min_bw;
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_RSVD       0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id5_max_bw;
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_RSVD       0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2384,20 +2709,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
        __le32 queue_id6_min_bw;
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_RSVD       0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id6_max_bw;
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_RSVD       0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2412,20 +2749,32 @@ struct hwrm_queue_cos2bw_qcfg_output {
        __le32 queue_id7_min_bw;
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_RSVD       0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id7_max_bw;
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_RSVD       0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE      0x10000000UL
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
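Each min_bw/max_bw word above packs three fields into one __le32: a 28-bit magnitude (BW_VALUE_MASK/SFT), the new bit-28 SCALE flag selecting a bits or bytes scale, and a 3-bit unit at bit 29 (MEGA, PERCENT1_100, KILO, BASE, GIGA, INVALID). A minimal decode sketch using only the queue-7 macros defined above; the helper name is illustrative:

static u32 bnxt_cos2bw_decode(__le32 bw_word, u32 *unit, bool *byte_scaled)
{
	u32 bw = le32_to_cpu(bw_word);

	/* Bit 28: 0 = value scaled in bits, 1 = scaled in bytes. */
	*byte_scaled = !!(bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE);
	/* Bits 29-31: unit selector, e.g. _UNIT_MEGA or _UNIT_PERCENT1_100. */
	*unit = bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK;
	/* Bits 0-27: the magnitude itself. */
	return bw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK;
}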
@@ -2467,20 +2816,32 @@ struct hwrm_queue_cos2bw_cfg_input {
        __le32 queue_id0_min_bw;
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id0_max_bw;
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2495,20 +2856,32 @@ struct hwrm_queue_cos2bw_cfg_input {
        __le32 queue_id1_min_bw;
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id1_max_bw;
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2523,20 +2896,32 @@ struct hwrm_queue_cos2bw_cfg_input {
        __le32 queue_id2_min_bw;
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id2_max_bw;
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2551,20 +2936,32 @@ struct hwrm_queue_cos2bw_cfg_input {
        __le32 queue_id3_min_bw;
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id3_max_bw;
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2579,20 +2976,32 @@ struct hwrm_queue_cos2bw_cfg_input {
        __le32 queue_id4_min_bw;
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id4_max_bw;
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2607,20 +3016,32 @@ struct hwrm_queue_cos2bw_cfg_input {
        __le32 queue_id5_min_bw;
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id5_max_bw;
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2635,20 +3056,32 @@ struct hwrm_queue_cos2bw_cfg_input {
        __le32 queue_id6_min_bw;
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id6_max_bw;
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -2663,20 +3096,32 @@ struct hwrm_queue_cos2bw_cfg_input {
        __le32 queue_id7_min_bw;
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
        __le32 queue_id7_max_bw;
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_RSVD          0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE         0x10000000UL
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS  (0x0UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
-       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MBPS (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+       #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
        #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST    QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
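Going the other way, the old single UNIT_MBPS encoding (value 0) is now factored into UNIT_MEGA plus SCALE_BITS, so a rate in Mbps encodes as below. The helper name is hypothetical; any queue's REQ macros (queue 0's are used here) encode identically:

static __le32 bnxt_cos2bw_mbps(u32 mbps)
{
	/* Megabits per second = MEGA unit on the bits scale. */
	return cpu_to_le32((mbps & QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK) |
			   QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS |
			   QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA);
}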
@@ -2797,6 +3242,41 @@ struct hwrm_vnic_cfg_output {
        u8 valid;
 };
 
+/* hwrm_vnic_qcaps */
+/* Input (24 bytes) */
+struct hwrm_vnic_qcaps_input {
+       __le16 req_type;
+       __le16 cmpl_ring;
+       __le16 seq_id;
+       __le16 target_id;
+       __le64 resp_addr;
+       __le32 enables;
+       __le32 unused_0;
+};
+
+/* Output (24 bytes) */
+struct hwrm_vnic_qcaps_output {
+       __le16 error_code;
+       __le16 req_type;
+       __le16 seq_id;
+       __le16 resp_len;
+       __le16 mru;
+       u8 unused_0;
+       u8 unused_1;
+       __le32 flags;
+       #define VNIC_QCAPS_RESP_FLAGS_UNUSED                        0x1UL
+       #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP                0x2UL
+       #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP                  0x4UL
+       #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP            0x8UL
+       #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP            0x10UL
+       #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP               0x20UL
+       __le32 unused_2;
+       u8 unused_3;
+       u8 unused_4;
+       u8 unused_5;
+       u8 valid;
+};
+
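The new hwrm_vnic_qcaps command lets the driver probe per-VNIC capabilities before configuring them. A minimal sketch of issuing it, assuming the HWRM_VNIC_QCAPS opcode and the usual bnxt helpers (bnxt_hwrm_cmd_hdr_init(), _hwrm_send_message()) from elsewhere in the driver; the wrapper name is illustrative:

static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp, u32 *caps)
{
	struct hwrm_vnic_qcaps_input req = {0};
	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		/* e.g. test VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP */
		*caps = le32_to_cpu(resp->flags);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}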
 /* hwrm_vnic_tpa_cfg */
 /* Input (40 bytes) */
 struct hwrm_vnic_tpa_cfg_input {
@@ -2992,9 +3472,10 @@ struct hwrm_ring_alloc_input {
        #define RING_ALLOC_REQ_ENABLES_RESERVED4                    0x10UL
        #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID                 0x20UL
        u8 ring_type;
-       #define RING_ALLOC_REQ_RING_TYPE_CMPL                      0x0UL
+       #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL                   0x0UL
        #define RING_ALLOC_REQ_RING_TYPE_TX                        0x1UL
        #define RING_ALLOC_REQ_RING_TYPE_RX                        0x2UL
+       #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL                 0x3UL
        u8 unused_0;
        __le16 unused_1;
        __le64 page_tbl_addr;
@@ -3028,10 +3509,16 @@ struct hwrm_ring_alloc_input {
        __le32 max_bw;
        #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK                 0xfffffffUL
        #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT                  0
-       #define RING_ALLOC_REQ_MAX_BW_RSVD                          0x10000000UL
+       #define RING_ALLOC_REQ_MAX_BW_SCALE                         0x10000000UL
+       #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS                   (0x0UL << 28)
+       #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES                  (0x1UL << 28)
+       #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST    RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
        #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK            0xe0000000UL
        #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT     29
-       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MBPS           (0x0UL << 29)
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA           (0x0UL << 29)
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO           (0x2UL << 29)
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE           (0x4UL << 29)
+       #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA           (0x6UL << 29)
        #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100  (0x1UL << 29)
        #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID       (0x7UL << 29)
        #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST    RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
@@ -3066,9 +3553,10 @@ struct hwrm_ring_free_input {
        __le16 target_id;
        __le64 resp_addr;
        u8 ring_type;
-       #define RING_FREE_REQ_RING_TYPE_CMPL                       0x0UL
+       #define RING_FREE_REQ_RING_TYPE_L2_CMPL            0x0UL
        #define RING_FREE_REQ_RING_TYPE_TX                         0x1UL
        #define RING_FREE_REQ_RING_TYPE_RX                         0x2UL
+       #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL                  0x3UL
        u8 unused_0;
        __le16 ring_id;
        __le32 unused_1;
@@ -3166,9 +3654,10 @@ struct hwrm_ring_reset_input {
        __le16 target_id;
        __le64 resp_addr;
        u8 ring_type;
-       #define RING_RESET_REQ_RING_TYPE_CMPL                      0x0UL
+       #define RING_RESET_REQ_RING_TYPE_L2_CMPL                   0x0UL
        #define RING_RESET_REQ_RING_TYPE_TX                        0x1UL
        #define RING_RESET_REQ_RING_TYPE_RX                        0x2UL
+       #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL                 0x3UL
        u8 unused_0;
        __le16 ring_id;
        __le32 unused_1;
@@ -3597,6 +4086,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
        __le32 flags;
        #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK          0x1UL
        #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP              0x2UL
+       #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER     0x4UL
        __le32 enables;
        #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID   0x1UL
        #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE      0x2UL
@@ -3697,7 +4187,7 @@ struct hwrm_cfa_ntuple_filter_free_output {
 };
 
 /* hwrm_cfa_ntuple_filter_cfg */
-/* Input (40 bytes) */
+/* Input (48 bytes) */
 struct hwrm_cfa_ntuple_filter_cfg_input {
        __le16 req_type;
        __le16 cmpl_ring;
@@ -3707,10 +4197,14 @@ struct hwrm_cfa_ntuple_filter_cfg_input {
        __le32 enables;
        #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID       0x1UL
        #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+       #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
        __le32 unused_0;
        __le64 ntuple_filter_id;
        __le32 new_dst_id;
        __le32 new_mirror_vnic_id;
+       __le16 new_meter_instance_id;
+       #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
+       __le16 unused_1[3];
 };
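The widened request carries a new_meter_instance_id, tying an ntuple filter to one of the CFA meter instances whose commands appear further down in cmd_nums. A hypothetical sketch; the helper name and the origin of meter_id (a prior HWRM_CFA_METER_INSTANCE_ALLOC) are illustrative:

static int bnxt_ntuple_set_meter(struct bnxt *bp, __le64 ntuple_filter_id,
				 u16 meter_id)
{
	struct hwrm_cfa_ntuple_filter_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_CFG, -1, -1);
	req.enables =
		cpu_to_le32(CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID);
	req.ntuple_filter_id = ntuple_filter_id;
	req.new_meter_instance_id = cpu_to_le16(meter_id);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}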
 
 /* Output (16 bytes) */
@@ -4058,9 +4552,7 @@ struct hwrm_fw_set_structured_data_input {
        __le64 src_data_addr;
        __le16 data_len;
        u8 hdr_cnt;
-       u8 unused_0;
-       __le16 port_id;
-       __le16 unused_1;
+       u8 unused_0[5];
 };
 
 /* Output (16 bytes) */
@@ -4077,7 +4569,7 @@ struct hwrm_fw_set_structured_data_output {
 };
 
 /* hwrm_fw_get_structured_data */
-/* Input (40 bytes) */
+/* Input (32 bytes) */
 struct hwrm_fw_get_structured_data_input {
        __le16 req_type;
        __le16 cmpl_ring;
@@ -4095,10 +4587,9 @@ struct hwrm_fw_get_structured_data_input {
        #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_ADMIN 0x200UL
        #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_PEER  0x201UL
        #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
+       #define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_HOST_OPERATIONAL 0x300UL
        u8 count;
        u8 unused_0;
-       __le16 port_id;
-       __le16 unused_1[3];
 };
 
 /* Output (16 bytes) */
@@ -4582,7 +5073,11 @@ struct hwrm_nvm_install_update_input {
        __le32 install_type;
        #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL         0x0UL
        #define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL    0xffffffffUL
-       __le32 unused_0;
+       __le16 flags;
+       #define NVM_INSTALL_UPDATE_REQ_FLAGS_ERASE_UNUSED_SPACE    0x1UL
+       #define NVM_INSTALL_UPDATE_REQ_FLAGS_REMOVE_UNUSED_PKG     0x2UL
+       #define NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG     0x4UL
+       __le16 unused_0;
 };
 
 /* Output (24 bytes) */
@@ -4608,6 +5103,15 @@ struct hwrm_nvm_install_update_output {
        u8 valid;
 };
 
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_nvm_install_update_cmd_err {
+       u8 code;
+       #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_UNKNOWN    0x0UL
+       #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR           0x1UL
+       #define NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE           0x2UL
+       u8 unused_0[7];
+};
+
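The new install flags and the command-specific error structure combine naturally: when an install fails for lack of contiguous NVRAM, the caller can retry with ALLOWED_TO_DEFRAG set. A sketch assuming the generic struct hwrm_err_output defined elsewhere in this header (whose cmd_err byte carries the code above); the function name is illustrative:

static int bnxt_flash_install_all(struct bnxt *bp)
{
	struct hwrm_nvm_install_update_input req = {0};
	struct hwrm_err_output *err = bp->hwrm_cmd_resp_addr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_INSTALL_UPDATE, -1, -1);
	req.install_type = cpu_to_le32(NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc && err->cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) {
		/* Retry once, letting firmware defragment unused space. */
		req.flags = cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
		rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}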
 /* Hardware Resource Manager Specification */
 /* Input (16 bytes) */
 struct input {
@@ -4735,11 +5239,26 @@ struct cmd_nums {
        #define HWRM_WOL_FILTER_FREE                               (0xf1UL)
        #define HWRM_WOL_FILTER_QCFG                               (0xf2UL)
        #define HWRM_WOL_REASON_QCFG                               (0xf3UL)
+       #define HWRM_CFA_METER_PROFILE_ALLOC                       (0xf5UL)
+       #define HWRM_CFA_METER_PROFILE_FREE                        (0xf6UL)
+       #define HWRM_CFA_METER_PROFILE_CFG                         (0xf7UL)
+       #define HWRM_CFA_METER_INSTANCE_ALLOC                      (0xf8UL)
+       #define HWRM_CFA_METER_INSTANCE_FREE                       (0xf9UL)
+       #define HWRM_CFA_VF_PAIR_ALLOC                             (0x100UL)
+       #define HWRM_CFA_VF_PAIR_FREE                              (0x101UL)
+       #define HWRM_CFA_VF_PAIR_INFO                              (0x102UL)
+       #define HWRM_CFA_FLOW_ALLOC                                (0x103UL)
+       #define HWRM_CFA_FLOW_FREE                                 (0x104UL)
+       #define HWRM_CFA_FLOW_FLUSH                                (0x105UL)
+       #define HWRM_CFA_FLOW_STATS                                (0x106UL)
+       #define HWRM_CFA_FLOW_INFO                                 (0x107UL)
        #define HWRM_DBG_READ_DIRECT                               (0xff10UL)
        #define HWRM_DBG_READ_INDIRECT                             (0xff11UL)
        #define HWRM_DBG_WRITE_DIRECT                              (0xff12UL)
        #define HWRM_DBG_WRITE_INDIRECT                    (0xff13UL)
        #define HWRM_DBG_DUMP                                      (0xff14UL)
+       #define HWRM_NVM_VALIDATE_OPTION                           (0xffefUL)
+       #define HWRM_NVM_FLUSH                                     (0xfff0UL)
        #define HWRM_NVM_GET_VARIABLE                              (0xfff1UL)
        #define HWRM_NVM_SET_VARIABLE                              (0xfff2UL)
        #define HWRM_NVM_INSTALL_UPDATE                    (0xfff3UL)
@@ -4939,12 +5458,13 @@ struct ctx_hw_stats {
 struct hwrm_struct_hdr {
        __le16 struct_id;
        #define STRUCT_HDR_STRUCT_ID_LLDP_CFG                      0x41bUL
-       #define STRUCT_HDR_STRUCT_ID_DCBX_ETS_CFG                  0x41dUL
-       #define STRUCT_HDR_STRUCT_ID_DCBX_PFC_CFG                  0x41fUL
-       #define STRUCT_HDR_STRUCT_ID_DCBX_APP_CFG                  0x421UL
-       #define STRUCT_HDR_STRUCT_ID_DCBX_STATE_CFG                0x422UL
-       #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC_CFG              0x424UL
-       #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE_CFG               0x426UL
+       #define STRUCT_HDR_STRUCT_ID_DCBX_ETS                      0x41dUL
+       #define STRUCT_HDR_STRUCT_ID_DCBX_PFC                      0x41fUL
+       #define STRUCT_HDR_STRUCT_ID_DCBX_APP                      0x421UL
+       #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE    0x422UL
+       #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC                  0x424UL
+       #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE                   0x426UL
+       #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION              0xaUL
        __le16 len;
        u8 version;
        u8 count;
@@ -4954,14 +5474,14 @@ struct hwrm_struct_hdr {
        __le16 unused_0[3];
 };
 
-/* DCBX Application configuration structure (8 bytes) */
-struct hwrm_struct_data_dcbx_app_cfg {
-       __le16 protocol_id;
+/* DCBX Application configuration structure (1057) (8 bytes) */
+struct hwrm_struct_data_dcbx_app {
+       __be16 protocol_id;
        u8 protocol_selector;
-       #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
-       #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
-       #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
-       #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+       #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+       #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT   0x2UL
+       #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT   0x3UL
+       #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
        u8 priority;
        u8 valid;
        u8 unused_0[3];
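Note that protocol_id changed from __le16 to __be16: DCBX application IDs are carried big-endian on the wire. A sketch of filling the structure from the kernel's struct dcb_app (<net/dcbnl.h>); the helper name is illustrative:

static void bnxt_fill_dcbx_app(struct hwrm_struct_data_dcbx_app *data,
			       const struct dcb_app *app)
{
	/* Big-endian now, hence cpu_to_be16(), not cpu_to_le16(). */
	data->protocol_id = cpu_to_be16(app->protocol);
	data->protocol_selector =
		STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE;
	data->priority = app->priority;
	data->valid = 1;
}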
index c69602508666cbf4a0e14226bf1b0d612ae907ae..0b8cd7443843241efcff2e632869c6ccc7a40fbe 100644
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -15,6 +15,7 @@
 #include <linux/etherdevice.h>
 #include "bnxt_hsi.h"
 #include "bnxt.h"
+#include "bnxt_ulp.h"
 #include "bnxt_sriov.h"
 #include "bnxt_ethtool.h"
 
@@ -416,6 +417,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
        u16 vf_ring_grps;
        struct hwrm_func_cfg_input req = {0};
        struct bnxt_pf_info *pf = &bp->pf;
+       int total_vf_tx_rings = 0;
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 
@@ -429,6 +431,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
                vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
        vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
        vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;
+       vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs;
+       vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
 
        req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
                                  FUNC_CFG_REQ_ENABLES_MRU |
@@ -451,7 +455,6 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
        req.num_rx_rings = cpu_to_le16(vf_rx_rings);
        req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
        req.num_l2_ctxs = cpu_to_le16(4);
-       vf_vnics = 1;
 
        req.num_vnics = cpu_to_le16(vf_vnics);
        /* FIXME spec currently uses 1 bit for stats ctx */
@@ -459,6 +462,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 
        mutex_lock(&bp->hwrm_cmd_lock);
        for (i = 0; i < num_vfs; i++) {
+               int vf_tx_rsvd = vf_tx_rings;
+
                req.fid = cpu_to_le16(pf->first_vf_id + i);
                rc = _hwrm_send_message(bp, &req, sizeof(req),
                                        HWRM_CMD_TIMEOUT);
@@ -466,10 +471,15 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
                        break;
                pf->active_vfs = i + 1;
                pf->vf[i].fw_fid = le16_to_cpu(req.fid);
+               rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
+                                             &vf_tx_rsvd);
+               if (rc)
+                       break;
+               total_vf_tx_rings += vf_tx_rsvd;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        if (!rc) {
-               pf->max_tx_rings -= vf_tx_rings * num_vfs;
+               pf->max_tx_rings -= total_vf_tx_rings;
                pf->max_rx_rings -= vf_rx_rings * num_vfs;
                pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
                pf->max_cp_rings -= vf_cp_rings * num_vfs;
@@ -506,6 +516,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
                            min_rx_rings)
                                rx_ok = 1;
                }
+               if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings)
+                       rx_ok = 0;
 
                if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
                        tx_ok = 1;
@@ -544,6 +556,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
        if (rc)
                goto err_out2;
 
+       bnxt_ulp_sriov_cfg(bp, *num_vfs);
+
        rc = pci_enable_sriov(bp->pdev, *num_vfs);
        if (rc)
                goto err_out2;
@@ -585,6 +599,8 @@ void bnxt_sriov_disable(struct bnxt *bp)
        rtnl_lock();
        bnxt_restore_pf_fw_resources(bp);
        rtnl_unlock();
+
+       bnxt_ulp_sriov_cfg(bp, 0);
 }
 
 int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
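
The reworked bnxt_hwrm_func_cfg() splits the PF's leftover resources evenly across VFs, caps per-VF VNICs at the per-VF RX ring count, and now subtracts only the TX rings firmware actually reserved (summed per VF) rather than the requested total. A plain-C sketch of that arithmetic, with invented example numbers:

    /* Sketch only: even division of leftover PF resources across VFs,
     * VNICs capped at the per-VF RX ring count as in the hunk above.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned int max_rx = 64, used_rx = 8;
            unsigned int max_vnics = 40, used_vnics = 4, num_vfs = 4;
            unsigned int vf_rx = (max_rx - used_rx) / num_vfs;
            unsigned int vf_vnics = (max_vnics - used_vnics) / num_vfs;

            if (vf_vnics > vf_rx)   /* min_t(u16, vf_vnics, vf_rx) */
                    vf_vnics = vf_rx;
            printf("per-VF: %u rx rings, %u vnics\n", vf_rx, vf_vnics);
            return 0;
    }
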
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
new file mode 100644 (file)
index 0000000..899c30f
--- /dev/null
@@ -0,0 +1,240 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/filter.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_xdp.h"
+
+static void bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+                         dma_addr_t mapping, u32 len, u16 rx_prod)
+{
+       struct bnxt_sw_tx_bd *tx_buf;
+       struct tx_bd_ext *txbd1;
+       struct tx_bd *txbd;
+       u32 flags;
+       u16 prod;
+
+       prod = txr->tx_prod;
+       tx_buf = &txr->tx_buf_ring[prod];
+       tx_buf->rx_prod = rx_prod;
+
+       txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+       flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
+               (2 << TX_BD_FLAGS_BD_CNT_SHIFT) | TX_BD_FLAGS_COAL_NOW |
+               TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
+       txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+       txbd->tx_bd_opaque = prod;
+       txbd->tx_bd_haddr = cpu_to_le64(mapping);
+
+       prod = NEXT_TX(prod);
+       txbd1 = (struct tx_bd_ext *)
+               &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
+
+       txbd1->tx_bd_hsize_lflags = cpu_to_le32(0);
+       txbd1->tx_bd_mss = cpu_to_le32(0);
+       txbd1->tx_bd_cfa_action = cpu_to_le32(0);
+       txbd1->tx_bd_cfa_meta = cpu_to_le32(0);
+
+       prod = NEXT_TX(prod);
+       txr->tx_prod = prod;
+}
+
+void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+{
+       struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
+       struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+       struct bnxt_sw_tx_bd *tx_buf;
+       u16 tx_cons = txr->tx_cons;
+       u16 last_tx_cons = tx_cons;
+       u16 rx_prod;
+       int i;
+
+       for (i = 0; i < nr_pkts; i++) {
+               last_tx_cons = tx_cons;
+               tx_cons = NEXT_TX(tx_cons);
+               tx_cons = NEXT_TX(tx_cons);
+       }
+       txr->tx_cons = tx_cons;
+       if (bnxt_tx_avail(bp, txr) == bp->tx_ring_size) {
+               rx_prod = rxr->rx_prod;
+       } else {
+               tx_buf = &txr->tx_buf_ring[last_tx_cons];
+               rx_prod = tx_buf->rx_prod;
+       }
+       writel(DB_KEY_RX | rx_prod, rxr->rx_doorbell);
+}
+
+/* returns the following:
+ * true    - packet consumed by XDP and new buffer is allocated.
+ * false   - packet should be passed to the stack.
+ */
+bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+                struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
+{
+       struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
+       struct bnxt_tx_ring_info *txr;
+       struct bnxt_sw_rx_bd *rx_buf;
+       struct pci_dev *pdev;
+       struct xdp_buff xdp;
+       dma_addr_t mapping;
+       void *orig_data;
+       u32 tx_avail;
+       u32 offset;
+       u32 act;
+
+       if (!xdp_prog)
+               return false;
+
+       pdev = bp->pdev;
+       txr = rxr->bnapi->tx_ring;
+       rx_buf = &rxr->rx_buf_ring[cons];
+       offset = bp->rx_offset;
+
+       xdp.data_hard_start = *data_ptr - offset;
+       xdp.data = *data_ptr;
+       xdp.data_end = *data_ptr + *len;
+       orig_data = xdp.data;
+       mapping = rx_buf->mapping - bp->rx_dma_offset;
+
+       dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
+
+       rcu_read_lock();
+       act = bpf_prog_run_xdp(xdp_prog, &xdp);
+       rcu_read_unlock();
+
+       tx_avail = bnxt_tx_avail(bp, txr);
+       /* If the tx ring is not full, we must not update the rx producer yet
+        * because we may still be transmitting on some BDs.
+        */
+       if (tx_avail != bp->tx_ring_size)
+               *event &= ~BNXT_RX_EVENT;
+
+       if (orig_data != xdp.data) {
+               offset = xdp.data - xdp.data_hard_start;
+               *data_ptr = xdp.data_hard_start + offset;
+               *len = xdp.data_end - xdp.data;
+       }
+       switch (act) {
+       case XDP_PASS:
+               return false;
+
+       case XDP_TX:
+               if (tx_avail < 2) {
+                       trace_xdp_exception(bp->dev, xdp_prog, act);
+                       bnxt_reuse_rx_data(rxr, cons, page);
+                       return true;
+               }
+
+               *event = BNXT_TX_EVENT;
+               dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
+                                          bp->rx_dir);
+               bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
+                             NEXT_RX(rxr->rx_prod));
+               bnxt_reuse_rx_data(rxr, cons, page);
+               return true;
+       default:
+               bpf_warn_invalid_xdp_action(act);
+               /* Fall thru */
+       case XDP_ABORTED:
+               trace_xdp_exception(bp->dev, xdp_prog, act);
+               /* Fall thru */
+       case XDP_DROP:
+               bnxt_reuse_rx_data(rxr, cons, page);
+               break;
+       }
+       return true;
+}
+
+/* Under rtnl_lock */
+static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
+{
+       struct net_device *dev = bp->dev;
+       int tx_xdp = 0, rc, tc;
+       struct bpf_prog *old;
+
+       if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
+               netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
+                           bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
+               return -EOPNOTSUPP;
+       }
+       if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
+               netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
+               return -EOPNOTSUPP;
+       }
+       if (prog)
+               tx_xdp = bp->rx_nr_rings;
+
+       tc = netdev_get_num_tc(dev);
+       if (!tc)
+               tc = 1;
+       rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
+                               tc, tx_xdp);
+       if (rc) {
+               netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
+               return rc;
+       }
+       if (netif_running(dev))
+               bnxt_close_nic(bp, true, false);
+
+       old = xchg(&bp->xdp_prog, prog);
+       if (old)
+               bpf_prog_put(old);
+
+       if (prog) {
+               bnxt_set_rx_skb_mode(bp, true);
+       } else {
+               int rx, tx;
+
+               bnxt_set_rx_skb_mode(bp, false);
+               bnxt_get_max_rings(bp, &rx, &tx, true);
+               if (rx > 1) {
+                       bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
+                       bp->dev->hw_features |= NETIF_F_LRO;
+               }
+       }
+       bp->tx_nr_rings_xdp = tx_xdp;
+       bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
+       bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
+       bp->num_stat_ctxs = bp->cp_nr_rings;
+       bnxt_set_tpa_flags(bp);
+       bnxt_set_ring_params(bp);
+
+       if (netif_running(dev))
+               return bnxt_open_nic(bp, true, false);
+
+       return 0;
+}
+
+int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int rc;
+
+       switch (xdp->command) {
+       case XDP_SETUP_PROG:
+               rc = bnxt_xdp_set(bp, xdp->prog);
+               break;
+       case XDP_QUERY_PROG:
+               xdp->prog_attached = !!bp->xdp_prog;
+               rc = 0;
+               break;
+       default:
+               rc = -EINVAL;
+               break;
+       }
+       return rc;
+}
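
bnxt_rx_xdp() above runs the attached BPF program and dispatches on its verdict. For reference, a minimal XDP program exercising the XDP_PASS/XDP_DROP paths might look like the following; this is a self-contained sketch (SEC() defined locally, built with clang -O2 -target bpf), not anything shipped with this driver:

    #include <linux/bpf.h>

    #define SEC(name) __attribute__((section(name), used))

    SEC("xdp")
    int xdp_drop_ipv6(struct xdp_md *ctx)
    {
            unsigned char *data = (unsigned char *)(long)ctx->data;
            unsigned char *data_end = (unsigned char *)(long)ctx->data_end;

            if (data + 14 > data_end)       /* verifier-required bounds check */
                    return XDP_PASS;
            if (data[12] == 0x86 && data[13] == 0xdd) /* EtherType 0x86DD */
                    return XDP_DROP;        /* drop IPv6 frames */
            return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";
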
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
new file mode 100644 (file)
index 0000000..b529f2c
--- /dev/null
@@ -0,0 +1,19 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_XDP_H
+#define BNXT_XDP_H
+
+void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
+bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
+                struct page *page, u8 **data_ptr, unsigned int *len,
+                u8 *event);
+int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp);
+
+#endif
index b1d2ac818710b36328a6e0d50a77c2436d57a850..cec94bbb2ea5ad17a7bec44a76d34c0d8f9cd128 100644 (file)
@@ -3665,7 +3665,7 @@ static int cnic_cm_destroy(struct cnic_sock *csk)
 static inline u16 cnic_get_vlan(struct net_device *dev,
                                struct net_device **vlan_dev)
 {
-       if (dev->priv_flags & IFF_802_1Q_VLAN) {
+       if (is_vlan_dev(dev)) {
                *vlan_dev = vlan_dev_real_dev(dev);
                return vlan_dev_vlan_id(dev);
        }
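
is_vlan_dev() from <linux/if_vlan.h> replaces the open-coded flag test; it wraps essentially the predicate the old line spelled out, so this is a readability cleanup rather than a behavior change:

    /* Roughly what the helper does, shown standalone for reference: */
    static inline bool vlan_dev_check(const struct net_device *dev)
    {
            return !!(dev->priv_flags & IFF_802_1Q_VLAN);
    }
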
index 435a2e4739d16d721918b55cc54c855fd3f50a56..89d4feba1a9aeafbf639effa3941701c0d9b556f 100644 (file)
@@ -2537,7 +2537,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget)
        sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
 #ifdef CONFIG_SBMAC_COALESCE
                __raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
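
This is the first of many conversions in this merge (sbmac here; bnad, macb, xgmac, liquidio, octeon_mgmt and nicvf below) from napi_complete() to napi_complete_done(), which reports the work actually done so the core can drive interrupt moderation and busy-poll heuristics. A hedged skeleton of the pattern, with hypothetical my_* helpers:

    static int my_napi_poll(struct napi_struct *napi, int budget)
    {
            int work_done = my_clean_rx(napi, budget);   /* hypothetical */

            if (work_done < budget) {
                    napi_complete_done(napi, work_done); /* report real work */
                    my_enable_irqs(napi);                /* hypothetical */
            }
            return work_done;
    }
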
index ae42de4fdddf6b77d2c2cf1606795e139d4b472b..a448177990fe4287b971e23b8b2171ed02f0ff2b 100644 (file)
@@ -14145,8 +14145,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
        .set_link_ksettings     = tg3_set_link_ksettings,
 };
 
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
-                                               struct rtnl_link_stats64 *stats)
+static void tg3_get_stats64(struct net_device *dev,
+                           struct rtnl_link_stats64 *stats)
 {
        struct tg3 *tp = netdev_priv(dev);
 
@@ -14154,13 +14154,11 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
        if (!tp->hw_stats) {
                *stats = tp->net_stats_prev;
                spin_unlock_bh(&tp->lock);
-               return stats;
+               return;
        }
 
        tg3_get_nstats(tp, stats);
        spin_unlock_bh(&tp->lock);
-
-       return stats;
 }
 
 static void tg3_set_rx_mode(struct net_device *dev)
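
tg3 also picks up the tree-wide .ndo_get_stats64 signature change from returning struct rtnl_link_stats64 * to void; the caller always supplies the buffer, so the return value carried no information. The new shape, sketched with hypothetical names and assuming kernel context:

    struct my_priv { u64 rx_packets; };     /* hypothetical private state */

    static void my_get_stats64(struct net_device *dev,
                               struct rtnl_link_stats64 *stats)
    {
            struct my_priv *priv = netdev_priv(dev);

            stats->rx_packets = priv->rx_packets;  /* fill, return nothing */
    }
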
index 112030828c4b7074a00c8475696461fb4a6c97cc..6e13c937d715e03de7191949b7e685be54d7ae5b 100644 (file)
@@ -1881,7 +1881,7 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget)
                return rcvd;
 
 poll_exit:
-       napi_complete(napi);
+       napi_complete_done(napi, rcvd);
 
        rx_ctrl->rx_complete++;
 
@@ -3111,7 +3111,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
  * Used spin_lock to synchronize reading of stats structures, which
  * is written by BNA under the same lock.
  */
-static struct rtnl_link_stats64 *
+static void
 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 {
        struct bnad *bnad = netdev_priv(netdev);
@@ -3123,8 +3123,6 @@ bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
        bnad_netdev_hwstats_fill(bnad, stats);
 
        spin_unlock_irqrestore(&bnad->bna_lock, flags);
-
-       return stats;
 }
 
 static void
@@ -3427,7 +3425,7 @@ static const struct net_device_ops bnad_netdev_ops = {
        .ndo_open               = bnad_open,
        .ndo_stop               = bnad_stop,
        .ndo_start_xmit         = bnad_start_xmit,
-       .ndo_get_stats64                = bnad_get_stats64,
+       .ndo_get_stats64        = bnad_get_stats64,
        .ndo_set_rx_mode        = bnad_set_rx_mode,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = bnad_set_mac_address,
index baba2db9d9c25988da94cb323e5d1a6832a12b51..016d481c6476eddf9fa1586ff8b32df94bae1cc6 100644 (file)
@@ -1146,7 +1146,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
 
        work_done = bp->macbgem_ops.mog_rx(bp, budget);
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                /* Packets received while interrupts were disabled */
                status = macb_readl(bp, RSR);
@@ -2146,6 +2146,9 @@ static int macb_open(struct net_device *dev)
 
        netif_tx_start_all_queues(dev);
 
+       if (bp->ptp_info)
+               bp->ptp_info->ptp_init(dev);
+
        return 0;
 }
 
@@ -2167,6 +2170,9 @@ static int macb_close(struct net_device *dev)
 
        macb_free_consistent(bp);
 
+       if (bp->ptp_info)
+               bp->ptp_info->ptp_remove(dev);
+
        return 0;
 }
 
@@ -2440,6 +2446,17 @@ static int macb_set_ringparam(struct net_device *netdev,
        return 0;
 }
 
+static int macb_get_ts_info(struct net_device *netdev,
+                           struct ethtool_ts_info *info)
+{
+       struct macb *bp = netdev_priv(netdev);
+
+       if (bp->ptp_info)
+               return bp->ptp_info->get_ts_info(netdev, info);
+
+       return ethtool_op_get_ts_info(netdev, info);
+}
+
 static const struct ethtool_ops macb_ethtool_ops = {
        .get_regs_len           = macb_get_regs_len,
        .get_regs               = macb_get_regs,
@@ -2457,7 +2474,7 @@ static const struct ethtool_ops gem_ethtool_ops = {
        .get_regs_len           = macb_get_regs_len,
        .get_regs               = macb_get_regs,
        .get_link               = ethtool_op_get_link,
-       .get_ts_info            = ethtool_op_get_ts_info,
+       .get_ts_info            = macb_get_ts_info,
        .get_ethtool_stats      = gem_get_ethtool_stats,
        .get_strings            = gem_get_ethtool_strings,
        .get_sset_count         = gem_get_sset_count,
@@ -2470,6 +2487,7 @@ static const struct ethtool_ops gem_ethtool_ops = {
 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
        struct phy_device *phydev = dev->phydev;
+       struct macb *bp = netdev_priv(dev);
 
        if (!netif_running(dev))
                return -EINVAL;
@@ -2477,7 +2495,17 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        if (!phydev)
                return -ENODEV;
 
-       return phy_mii_ioctl(phydev, rq, cmd);
+       if (!bp->ptp_info)
+               return phy_mii_ioctl(phydev, rq, cmd);
+
+       switch (cmd) {
+       case SIOCSHWTSTAMP:
+               return bp->ptp_info->set_hwtst(dev, rq, cmd);
+       case SIOCGHWTSTAMP:
+               return bp->ptp_info->get_hwtst(dev, rq);
+       default:
+               return phy_mii_ioctl(phydev, rq, cmd);
+       }
 }
 
 static int macb_set_features(struct net_device *netdev,
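
macb now routes every PTP touchpoint (open/close, ethtool timestamp info, the hwtstamp ioctls) through an optional bp->ptp_info ops table, falling back to the generic path when no platform hooks were registered. The dispatch pattern, sketched under the assumption of the macb_ptp_info struct added in macb.h below:

    static int macb_ts_info_dispatch(struct macb *bp, struct net_device *dev,
                                     struct ethtool_ts_info *info)
    {
            if (bp->ptp_info)                         /* platform hooks set */
                    return bp->ptp_info->get_ts_info(dev, info);
            return ethtool_op_get_ts_info(dev, info); /* generic fallback */
    }
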
index fc8550a5d47f75df540521c27f07f5c2d03995f9..234a49eaccfd2dd2f80f5e402e262af957285cec 100644 (file)
@@ -10,6 +10,8 @@
 #ifndef _MACB_H
 #define _MACB_H
 
+#include <linux/phy.h>
+
 #define MACB_GREGS_NBR 16
 #define MACB_GREGS_VERSION 2
 #define MACB_MAX_QUEUES 8
 #define GEM_RXIPCCNT           0x01a8 /* IP header Checksum Error Counter */
 #define GEM_RXTCPCCNT          0x01ac /* TCP Checksum Error Counter */
 #define GEM_RXUDPCCNT          0x01b0 /* UDP Checksum Error Counter */
+#define GEM_TISUBN             0x01bc /* 1588 Timer Increment Sub-ns */
+#define GEM_TSH                        0x01c0 /* 1588 Timer Seconds High */
+#define GEM_TSL                        0x01d0 /* 1588 Timer Seconds Low */
+#define GEM_TN                 0x01d4 /* 1588 Timer Nanoseconds */
+#define GEM_TA                 0x01d8 /* 1588 Timer Adjust */
+#define GEM_TI                 0x01dc /* 1588 Timer Increment */
+#define GEM_EFTSL              0x01e0 /* PTP Event Frame Tx Seconds Low */
+#define GEM_EFTN               0x01e4 /* PTP Event Frame Tx Nanoseconds */
+#define GEM_EFRSL              0x01e8 /* PTP Event Frame Rx Seconds Low */
+#define GEM_EFRN               0x01ec /* PTP Event Frame Rx Nanoseconds */
+#define GEM_PEFTSL             0x01f0 /* PTP Peer Event Frame Tx Secs Low */
+#define GEM_PEFTN              0x01f4 /* PTP Peer Event Frame Tx Ns */
+#define GEM_PEFRSL             0x01f8 /* PTP Peer Event Frame Rx Sec Low */
+#define GEM_PEFRN              0x01fc /* PTP Peer Event Frame Rx Ns */
 #define GEM_DCFG1              0x0280 /* Design Config 1 */
 #define GEM_DCFG2              0x0284 /* Design Config 2 */
 #define GEM_DCFG3              0x0288 /* Design Config 3 */
 #define MACB_NCR_TPF_SIZE      1
 #define MACB_TZQ_OFFSET                12 /* Transmit zero quantum pause frame */
 #define MACB_TZQ_SIZE          1
+#define MACB_SRTSM_OFFSET      15
 
 /* Bitfields in NCFGR */
 #define MACB_SPD_OFFSET                0 /* Speed */
 #define MACB_PTZ_SIZE          1
 #define MACB_WOL_OFFSET                14 /* Enable wake-on-lan interrupt */
 #define MACB_WOL_SIZE          1
+#define MACB_DRQFR_OFFSET      18 /* PTP Delay Request Frame Received */
+#define MACB_DRQFR_SIZE                1
+#define MACB_SFR_OFFSET                19 /* PTP Sync Frame Received */
+#define MACB_SFR_SIZE          1
+#define MACB_DRQFT_OFFSET      20 /* PTP Delay Request Frame Transmitted */
+#define MACB_DRQFT_SIZE                1
+#define MACB_SFT_OFFSET                21 /* PTP Sync Frame Transmitted */
+#define MACB_SFT_SIZE          1
+#define MACB_PDRQFR_OFFSET     22 /* PDelay Request Frame Received */
+#define MACB_PDRQFR_SIZE       1
+#define MACB_PDRSFR_OFFSET     23 /* PDelay Response Frame Received */
+#define MACB_PDRSFR_SIZE       1
+#define MACB_PDRQFT_OFFSET     24 /* PDelay Request Frame Transmitted */
+#define MACB_PDRQFT_SIZE       1
+#define MACB_PDRSFT_OFFSET     25 /* PDelay Response Frame Transmitted */
+#define MACB_PDRSFT_SIZE       1
+#define MACB_SRI_OFFSET                26 /* TSU Seconds Register Increment */
+#define MACB_SRI_SIZE          1
+
+/* Timer increment fields */
+#define MACB_TI_CNS_OFFSET     0
+#define MACB_TI_CNS_SIZE       8
+#define MACB_TI_ACNS_OFFSET    8
+#define MACB_TI_ACNS_SIZE      8
+#define MACB_TI_NIT_OFFSET     16
+#define MACB_TI_NIT_SIZE       8
 
 /* Bitfields in MAN */
 #define MACB_DATA_OFFSET       0 /* data */
 #define GEM_DAW64_OFFSET                       23
 #define GEM_DAW64_SIZE                         1
 
+/* Bitfields in TISUBN */
+#define GEM_SUBNSINCR_OFFSET                   0
+#define GEM_SUBNSINCR_SIZE                     16
+
+/* Bitfields in TI */
+#define GEM_NSINCR_OFFSET                      0
+#define GEM_NSINCR_SIZE                                8
+
+/* Bitfields in ADJ */
+#define GEM_ADDSUB_OFFSET                      31
+#define GEM_ADDSUB_SIZE                                1
 /* Constants for CLK */
 #define MACB_CLK_DIV8                          0
 #define MACB_CLK_DIV16                         1
 #define MACB_CAPS_NO_GIGABIT_HALF              0x00000008
 #define MACB_CAPS_USRIO_DISABLED               0x00000010
 #define MACB_CAPS_JUMBO                                0x00000020
+#define MACB_CAPS_GEM_HAS_PTP                  0x00000040
 #define MACB_CAPS_FIFO_MODE                    0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE       0x20000000
 #define MACB_CAPS_SG_DISABLED                  0x40000000
@@ -792,6 +847,20 @@ struct macb_or_gem_ops {
        int     (*mog_rx)(struct macb *bp, int budget);
 };
 
+/* MACB-PTP interface: adapt to platform needs. */
+struct macb_ptp_info {
+       void (*ptp_init)(struct net_device *ndev);
+       void (*ptp_remove)(struct net_device *ndev);
+       s32 (*get_ptp_max_adj)(void);
+       unsigned int (*get_tsu_rate)(struct macb *bp);
+       int (*get_ts_info)(struct net_device *dev,
+                          struct ethtool_ts_info *info);
+       int (*get_hwtst)(struct net_device *netdev,
+                        struct ifreq *ifr);
+       int (*set_hwtst)(struct net_device *netdev,
+                        struct ifreq *ifr, int cmd);
+};
+
 struct macb_config {
        u32                     caps;
        unsigned int            dma_burst_length;
@@ -885,6 +954,7 @@ struct macb {
 
        u32                     wol;
 
+       struct macb_ptp_info    *ptp_info;      /* macb-ptp interface */
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        enum macb_hw_dma_cap hw_dma_cap;
 #endif
@@ -895,4 +965,9 @@ static inline bool macb_is_gem(struct macb *bp)
        return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
 }
 
+static inline bool gem_has_ptp(struct macb *bp)
+{
+       return !!(bp->caps & MACB_CAPS_GEM_HAS_PTP);
+}
+
 #endif /* _MACB_H */
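
The new PTP registers and bits above follow macb.h's existing *_OFFSET/*_SIZE convention, which the driver's field macros (MACB_BF()/MACB_BFEXT() and friends) consume. A hedged, standalone sketch of the mask-and-shift such a pair implies:

    /* Generic shape of the driver's bitfield macros: */
    #define FIELD_INSERT(off, sz, val) \
            (((val) & ((1u << (sz)) - 1)) << (off))
    #define FIELD_EXTRACT(off, sz, reg) \
            (((reg) >> (off)) & ((1u << (sz)) - 1))

    /* e.g. FIELD_EXTRACT(GEM_SUBNSINCR_OFFSET, GEM_SUBNSINCR_SIZE, tisubn) */
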
index ce7de6f725129b53728571fc69edb0830f948726..2bd7c638b178d5801bf2f3e4525e02734454ff6d 100644 (file)
@@ -1247,7 +1247,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
        work_done = xgmac_rx(priv, budget);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
        }
        return work_done;
@@ -1446,9 +1446,9 @@ static void xgmac_poll_controller(struct net_device *dev)
 }
 #endif
 
-static struct rtnl_link_stats64 *
+static void
 xgmac_get_stats64(struct net_device *dev,
-                      struct rtnl_link_stats64 *storage)
+                 struct rtnl_link_stats64 *storage)
 {
        struct xgmac_priv *priv = netdev_priv(dev);
        void __iomem *base = priv->base;
@@ -1476,7 +1476,6 @@ xgmac_get_stats64(struct net_device *dev,
 
        writel(0, base + XGMAC_MMC_CTRL);
        spin_unlock_bh(&priv->stats_lock);
-       return storage;
 }
 
 static int xgmac_set_mac_address(struct net_device *dev, void *p)
index b00c3002360e6725f2b8ffef51535ce957f79bbc..50384cede8be9b84431690074022bbff4bbc9199 100644 (file)
@@ -296,12 +296,16 @@ lio_ethtool_get_channels(struct net_device *dev,
                rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
                tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
        } else if (OCTEON_CN23XX_PF(oct)) {
-               struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);
 
-               max_rx = CFG_GET_OQ_MAX_Q(conf23);
-               max_tx = CFG_GET_IQ_MAX_Q(conf23);
-               rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf23, lio->ifidx);
-               tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf23, lio->ifidx);
+               max_rx = oct->sriov_info.num_pf_rings;
+               max_tx = oct->sriov_info.num_pf_rings;
+               rx_count = lio->linfo.num_rxpciq;
+               tx_count = lio->linfo.num_txpciq;
+       } else if (OCTEON_CN23XX_VF(oct)) {
+               max_tx = oct->sriov_info.rings_per_vf;
+               max_rx = oct->sriov_info.rings_per_vf;
+               rx_count = lio->linfo.num_rxpciq;
+               tx_count = lio->linfo.num_txpciq;
        }
 
        channel->max_rx = max_rx;
index 39a9665c9d004581121f727bdb2117b9052819b7..be9c0e3f5ade7d4e61694da214702f0223ab5d59 100644 (file)
@@ -15,6 +15,7 @@
  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  * NONINFRINGEMENT.  See the GNU General Public License for more details.
  ***********************************************************************/
+#include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/firmware.h>
 #include <net/vxlan.h>
@@ -2223,25 +2224,6 @@ static void if_cfg_callback(struct octeon_device *oct,
        wake_up_interruptible(&ctx->wc);
 }
 
-/**
- * \brief Select queue based on hash
- * @param dev Net device
- * @param skb sk_buff structure
- * @returns selected queue number
- */
-static u16 select_q(struct net_device *dev, struct sk_buff *skb,
-                   void *accel_priv __attribute__((unused)),
-                   select_queue_fallback_t fallback __attribute__((unused)))
-{
-       u32 qindex = 0;
-       struct lio *lio;
-
-       lio = GET_LIO(dev);
-       qindex = skb_tx_hash(dev, skb);
-
-       return (u16)(qindex % (lio->linfo.num_txpciq));
-}
-
 /** Routine to push packets arriving on Octeon interface up to the network layer.
  * @param oct_id   - octeon device id.
  * @param skbuff   - skbuff struct to be passed to network layer.
@@ -2263,6 +2245,7 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
        struct skb_shared_hwtstamps *shhwtstamps;
        u64 ns;
        u16 vtag = 0;
+       u32 r_dh_off;
        struct net_device *netdev = (struct net_device *)arg;
        struct octeon_droq *droq = container_of(param, struct octeon_droq,
                                                napi);
@@ -2308,6 +2291,8 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
                        put_page(pg_info->page);
                }
 
+               r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
+
                if (((oct->chip_id == OCTEON_CN66XX) ||
                     (oct->chip_id == OCTEON_CN68XX)) &&
                    ptp_enable) {
@@ -2320,16 +2305,27 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
                                        /* Nanoseconds are in the first 64-bits
                                         * of the packet.
                                         */
-                                       memcpy(&ns, (skb->data), sizeof(ns));
+                                       memcpy(&ns, (skb->data + r_dh_off),
+                                              sizeof(ns));
+                                       r_dh_off -= BYTES_PER_DHLEN_UNIT;
                                        shhwtstamps = skb_hwtstamps(skb);
                                        shhwtstamps->hwtstamp =
                                                ns_to_ktime(ns +
                                                            lio->ptp_adjust);
                                }
-                               skb_pull(skb, sizeof(ns));
                        }
                }
 
+               if (rh->r_dh.has_hash) {
+                       __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
+                       u32 hash = be32_to_cpu(*hash_be);
+
+                       skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+                       r_dh_off -= BYTES_PER_DHLEN_UNIT;
+               }
+
+               skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
+
                skb->protocol = eth_type_trans(skb, skb->dev);
                if ((netdev->features & NETIF_F_RXCSUM) &&
                    (((rh->r_dh.encap_on) &&
@@ -2365,7 +2361,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
                if (packet_was_received) {
                        droq->stats.rx_bytes_received += len;
                        droq->stats.rx_pkts_received++;
-                       netdev->last_rx = jiffies;
                } else {
                        droq->stats.rx_dropped++;
                        netif_info(lio, rx_err, lio->netdev,
@@ -2441,7 +2436,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
        iq = oct->instr_queue[iq_no];
        if (iq) {
                /* Process iq buffers within the budget limits */
-               tx_done = octeon_flush_iq(oct, iq, 1, budget);
+               tx_done = octeon_flush_iq(oct, iq, budget);
                /* Update iq read-index rather than waiting for next interrupt.
                 * Return back if tx_done is false.
                 */
@@ -2451,8 +2446,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
                        __func__, iq_no);
        }
 
-       if ((work_done < budget) && (tx_done)) {
-               napi_complete(napi);
+       /* force enable interrupt if reg cnts are high to avoid wraparound */
+       if ((work_done < budget && tx_done) ||
+           (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
+           (droq->pkt_count >= MAX_REG_CNT)) {
+               tx_done = 1;
+               napi_complete_done(napi, work_done);
                octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
                                             POLL_EVENT_ENABLE_INTR, 0);
                return 0;
@@ -2629,7 +2628,9 @@ static int liquidio_open(struct net_device *netdev)
                        oct->droq[0]->ops.poll_mode = 1;
        }
 
-       oct_ptp_open(netdev);
+       if ((oct->chip_id == OCTEON_CN66XX || oct->chip_id == OCTEON_CN68XX) &&
+           ptp_enable)
+               oct_ptp_open(netdev);
 
        ifstate_set(lio, LIO_IFSTATE_RUNNING);
 
@@ -2677,13 +2678,7 @@ static int liquidio_stop(struct net_device *netdev)
        lio->linfo.link.s.link_up = 0;
        lio->link_changes++;
 
-       /* Pause for a moment and wait for Octeon to flush out (to the wire) any
-        * egress packets that are in-flight.
-        */
-       set_current_state(TASK_INTERRUPTIBLE);
-       schedule_timeout(msecs_to_jiffies(100));
-
-       /* Now it should be safe to tell Octeon that nic interface is down. */
+       /* Tell Octeon that nic interface is down. */
        send_rx_ctrl_cmd(lio, 0);
 
        if (OCTEON_CN23XX_PF(oct)) {
@@ -2973,9 +2968,13 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
  */
 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 {
+       struct lio *lio = GET_LIO(netdev);
+
        switch (cmd) {
        case SIOCSHWTSTAMP:
-               return hwtstamp_ioctl(netdev, ifr);
+               if ((lio->oct_dev->chip_id == OCTEON_CN66XX ||
+                    lio->oct_dev->chip_id == OCTEON_CN68XX) && ptp_enable)
+                       return hwtstamp_ioctl(netdev, ifr);
        default:
                return -EOPNOTSUPP;
        }
@@ -3322,11 +3321,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        netif_trans_update(netdev);
 
-       if (skb_shinfo(skb)->gso_size)
-               stats->tx_done += skb_shinfo(skb)->gso_segs;
+       if (tx_info->s.gso_segs)
+               stats->tx_done += tx_info->s.gso_segs;
        else
                stats->tx_done++;
-       stats->tx_tot_bytes += skb->len;
+       stats->tx_tot_bytes += ndata.datasize;
 
        return NETDEV_TX_OK;
 
@@ -3741,7 +3740,6 @@ static const struct net_device_ops lionetdevops = {
        .ndo_set_vf_vlan        = liquidio_set_vf_vlan,
        .ndo_get_vf_config      = liquidio_get_vf_config,
        .ndo_set_vf_link_state  = liquidio_set_vf_link_state,
-       .ndo_select_queue       = select_q
 };
 
 /** \brief Entry point for the liquidio module
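
Both liquidio RX paths now walk the Octeon response header from its last 8-byte unit backwards: r_dh_off starts at (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT and steps back one unit per optional field (timestamp, then hash) before the whole header is pulled off the skb. A plain-C sketch of the offset arithmetic, with an invented header length:

    #include <stdio.h>

    #define BYTES_PER_UNIT 8                /* BYTES_PER_DHLEN_UNIT */

    int main(void)
    {
            unsigned int rh_len = 3;        /* illustrative header length */
            unsigned int off = (rh_len - 1) * BYTES_PER_UNIT;

            printf("timestamp at byte %u\n", off);
            off -= BYTES_PER_UNIT;          /* step past the timestamp */
            printf("hash at byte %u\n", off);
            printf("pull %u header bytes\n", rh_len * BYTES_PER_UNIT);
            return 0;
    }
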
index 70d96c10c673db101ef0eb87a46bb1598499cbcc..9d5e03502c76cbfe3c8372a5d3e73c67e07e3a03 100644 (file)
@@ -15,6 +15,7 @@
  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  * NONINFRINGEMENT.  See the GNU General Public License for more details.
  ***********************************************************************/
+#include <linux/module.h>
 #include <linux/pci.h>
 #include <net/vxlan.h>
 #include "liquidio_common.h"
@@ -1455,26 +1456,6 @@ static void if_cfg_callback(struct octeon_device *oct,
        wake_up_interruptible(&ctx->wc);
 }
 
-/**
- * \brief Select queue based on hash
- * @param dev Net device
- * @param skb sk_buff structure
- * @returns selected queue number
- */
-static u16 select_q(struct net_device *dev, struct sk_buff *skb,
-                   void *accel_priv __attribute__((unused)),
-                   select_queue_fallback_t fallback __attribute__((unused)))
-{
-       struct lio *lio;
-       u32 qindex;
-
-       lio = GET_LIO(dev);
-
-       qindex = skb_tx_hash(dev, skb);
-
-       return (u16)(qindex % (lio->linfo.num_txpciq));
-}
-
 /** Routine to push packets arriving on Octeon interface up to the network layer.
  * @param oct_id   - octeon device id.
  * @param skbuff   - skbuff struct to be passed to network layer.
@@ -1497,6 +1478,7 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
        struct net_device *netdev = (struct net_device *)arg;
        struct sk_buff *skb = (struct sk_buff *)skbuff;
        u16 vtag = 0;
+       u32 r_dh_off;
 
        if (netdev) {
                struct lio *lio = GET_LIO(netdev);
@@ -1540,7 +1522,20 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
                        put_page(pg_info->page);
                }
 
-               skb_pull(skb, rh->r_dh.len * 8);
+               r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
+
+               if (rh->r_dh.has_hwtstamp)
+                       r_dh_off -= BYTES_PER_DHLEN_UNIT;
+
+               if (rh->r_dh.has_hash) {
+                       __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
+                       u32 hash = be32_to_cpu(*hash_be);
+
+                       skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+                       r_dh_off -= BYTES_PER_DHLEN_UNIT;
+               }
+
+               skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
                skb->protocol = eth_type_trans(skb, skb->dev);
 
                if ((netdev->features & NETIF_F_RXCSUM) &&
@@ -1577,7 +1572,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
                if (packet_was_received) {
                        droq->stats.rx_bytes_received += len;
                        droq->stats.rx_pkts_received++;
-                       netdev->last_rx = jiffies;
                } else {
                        droq->stats.rx_dropped++;
                        netif_info(lio, rx_err, lio->netdev,
@@ -1627,7 +1621,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
        iq = oct->instr_queue[iq_no];
        if (iq) {
                /* Process iq buffers within the budget limits */
-               tx_done = octeon_flush_iq(oct, iq, 1, budget);
+               tx_done = octeon_flush_iq(oct, iq, budget);
                /* Update iq read-index rather than waiting for next interrupt.
                 * Return back if tx_done is false.
                 */
@@ -1637,8 +1631,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
                        __func__, iq_no);
        }
 
-       if ((work_done < budget) && (tx_done)) {
-               napi_complete(napi);
+       /* force enable interrupt if reg cnts are high to avoid wraparound */
+       if ((work_done < budget && tx_done) ||
+           (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
+           (droq->pkt_count >= MAX_REG_CNT)) {
+               tx_done = 1;
+               napi_complete_done(napi, work_done);
                octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
                                             POLL_EVENT_ENABLE_INTR, 0);
                return 0;
@@ -2440,11 +2438,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        netif_trans_update(netdev);
 
-       if (skb_shinfo(skb)->gso_size)
-               stats->tx_done += skb_shinfo(skb)->gso_segs;
+       if (tx_info->s.gso_segs)
+               stats->tx_done += tx_info->s.gso_segs;
        else
                stats->tx_done++;
-       stats->tx_tot_bytes += skb->len;
+       stats->tx_tot_bytes += ndata.datasize;
 
        return NETDEV_TX_OK;
 
@@ -2703,7 +2701,6 @@ static const struct net_device_ops lionetdevops = {
        .ndo_set_features       = liquidio_set_features,
        .ndo_udp_tunnel_add     = liquidio_add_vxlan_port,
        .ndo_udp_tunnel_del     = liquidio_del_vxlan_port,
-       .ndo_select_queue       = select_q,
 };
 
 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
index ba329f6ca779d5c11925ba7b42dae9d51e015652..294c6f3c6b48254044c610c78625c9c3c86e9b1f 100644 (file)
@@ -98,6 +98,9 @@ enum octeon_tag_type {
 #define CVM_DRV_INVALID_APP         (CVM_DRV_APP_START + 0x2)
 #define CVM_DRV_APP_END             (CVM_DRV_INVALID_APP - 1)
 
+#define BYTES_PER_DHLEN_UNIT        8
+#define MAX_REG_CNT                 2000000U
+
 static inline u32 incr_index(u32 index, u32 count, u32 max)
 {
        if ((index + count) >= max)
index 1cb3514fc949dafbb1f4280b92416c85f6aac6f0..b3dc2e9651a8e205d7e6e451109f98e96065de2c 100644 (file)
@@ -429,15 +429,11 @@ struct octeon_config {
 
 /* The following config values are fixed and should not be modified. */
 
-/* Maximum address space to be mapped for Octeon's BAR1 index-based access. */
-#define  MAX_BAR1_MAP_INDEX                     2
+#define  BAR1_INDEX_DYNAMIC_MAP          2
+#define  BAR1_INDEX_STATIC_MAP          15
 #define  OCTEON_BAR1_ENTRY_SIZE         (4 * 1024 * 1024)
 
-/* BAR1 Index 0 to (MAX_BAR1_MAP_INDEX - 1) for normal mapped memory access.
- * Bar1 register at MAX_BAR1_MAP_INDEX used by driver for dynamic access.
- */
-#define  MAX_BAR1_IOREMAP_SIZE  ((MAX_BAR1_MAP_INDEX + 1) * \
-                                OCTEON_BAR1_ENTRY_SIZE)
+#define  MAX_BAR1_IOREMAP_SIZE  (16 * OCTEON_BAR1_ENTRY_SIZE)
 
 /* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking
  * NoResponse Lists are now maintained with each IQ. (Dec' 2007).
index 3265e0b7923ee4712190696e49abe011461c2386..53f38d05f7c2ecb5952e74a7b87ca18d7209b960 100644 (file)
@@ -18,6 +18,7 @@
 /**
  * @file octeon_console.c
  */
+#include <linux/moduleparam.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/crc32.h>
@@ -549,6 +550,16 @@ int octeon_init_consoles(struct octeon_device *oct)
                return ret;
        }
 
+       /* Dedicate one of Octeon's BAR1 index registers to create a static
+        * mapping to a region of Octeon DRAM that contains the PCI console
+        * named block.
+        */
+       oct->console_nb_info.bar1_index = BAR1_INDEX_STATIC_MAP;
+       oct->fn_list.bar1_idx_setup(oct, addr, oct->console_nb_info.bar1_index,
+                                   true);
+       oct->console_nb_info.dram_region_base = addr
+               & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL);
+
        /* num_consoles > 0, is an indication that the consoles
         * are accessible
         */
index a8df493a50127d6a23e899bd95b582f0157f13ae..9675ffbf25e6bd9bf34d346204f1b0fbcbcfa185 100644 (file)
@@ -1361,6 +1361,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
                spin_lock_bh(&droq->lock);
                writel(droq->pkt_count, droq->pkts_sent_reg);
                droq->pkt_count = 0;
+               /* this write needs to be flushed before we release the lock */
+               mmiowb();
                spin_unlock_bh(&droq->lock);
                oct = droq->oct_dev;
        }
@@ -1368,6 +1370,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
                spin_lock_bh(&iq->lock);
                writel(iq->pkt_in_done, iq->inst_cnt_reg);
                iq->pkt_in_done = 0;
+               /* this write needs to be flushed before we release the lock */
+               mmiowb();
                spin_unlock_bh(&iq->lock);
                oct = iq->oct_dev;
        }
index 18f6836250a6c04c7ba98f85ca884e679b3d341e..c301a3852482845ee65bf260c48dbd69853e9522 100644 (file)
@@ -477,6 +477,12 @@ struct octeon_device {
        /* Console caches */
        struct octeon_console console[MAX_OCTEON_MAPS];
 
+       /* Console named block info */
+       struct {
+               u64 dram_region_base;
+               int bar1_index;
+       } console_nb_info;
+
        /* Coprocessor clock rate. */
        u64 coproc_clock_rate;
 
index e04ca8f0b4a75a0af4950400fb0cba93d9ccddf8..4608a5af35a3204b54378dc03eef94c976370ac1 100644 (file)
@@ -369,5 +369,5 @@ int octeon_setup_iq(struct octeon_device *oct, int ifidx,
                    void *app_ctx);
 int
 octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
-               u32 pending_thresh, u32 napi_budget);
+               u32 napi_budget);
 #endif                         /* __OCTEON_IQ_H__ */
index 73696b427f068372a747d0689b3828799c2c36d2..201b9875f9bbef778d2687b7750424ba55320de8 100644 (file)
@@ -131,6 +131,7 @@ int octeon_mbox_write(struct octeon_device *oct,
 {
        struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no];
        u32 count, i, ret = OCTEON_MBOX_STATUS_SUCCESS;
+       long timeout = LIO_MBOX_WRITE_WAIT_TIME;
        unsigned long flags;
 
        spin_lock_irqsave(&mbox->lock, flags);
@@ -158,7 +159,7 @@ int octeon_mbox_write(struct octeon_device *oct,
        count = 0;
 
        while (readq(mbox->mbox_write_reg) != OCTEON_PFVFSIG) {
-               schedule_timeout_uninterruptible(LIO_MBOX_WRITE_WAIT_TIME);
+               schedule_timeout_uninterruptible(timeout);
                if (count++ == LIO_MBOX_WRITE_WAIT_CNT) {
                        ret = OCTEON_MBOX_STATUS_FAILED;
                        break;
@@ -171,7 +172,7 @@ int octeon_mbox_write(struct octeon_device *oct,
                        count = 0;
                        while (readq(mbox->mbox_write_reg) !=
                               OCTEON_PFVFACK) {
-                               schedule_timeout_uninterruptible(10);
+                               schedule_timeout_uninterruptible(timeout);
                                if (count++ == LIO_MBOX_WRITE_WAIT_CNT) {
                                        ret = OCTEON_MBOX_STATUS_FAILED;
                                        break;
index fe60a3e6247bf0b377ffea038d35d0e7d36bc2ce..c9376fe075bc0afaf74ffc6279c376c7271e8825 100644 (file)
@@ -31,8 +31,8 @@
 #define OCTEON_PFVFSIG                 0x1122334455667788
 #define OCTEON_PFVFERR                 0xDEADDEADDEADDEAD
 
-#define LIO_MBOX_WRITE_WAIT_CNT          1000
-#define LIO_MBOX_WRITE_WAIT_TIME           10
+#define LIO_MBOX_WRITE_WAIT_CNT         1000
+#define LIO_MBOX_WRITE_WAIT_TIME        msecs_to_jiffies(1)
 
 enum octeon_mbox_cmd_status {
        OCTEON_MBOX_STATUS_SUCCESS = 0,
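
The mailbox wait constant is now expressed in jiffies: schedule_timeout_uninterruptible() takes a jiffies count, so the old bare 10 meant ten ticks and scaled with HZ, while msecs_to_jiffies(1) pins the step to roughly one millisecond. A hedged sketch of the idiom, assuming kernel context and a hypothetical predicate:

    static void wait_for_peer(void)
    {
            long timeout = msecs_to_jiffies(1);  /* ~1 ms regardless of HZ */

            while (!ack_received())              /* hypothetical predicate */
                    schedule_timeout_uninterruptible(timeout);
    }
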
index 13a18c9a7a5160d0a28e916ba3939b2a9fd0d952..5cd96e7d426ca5bbb98b1575ac0e74d153b854b1 100644 (file)
@@ -23,7 +23,7 @@
 #include "response_manager.h"
 #include "octeon_device.h"
 
-#define MEMOPS_IDX   MAX_BAR1_MAP_INDEX
+#define MEMOPS_IDX   BAR1_INDEX_DYNAMIC_MAP
 
 #ifdef __BIG_ENDIAN_BITFIELD
 static inline void
@@ -96,6 +96,25 @@ __octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr,
        u32 copy_len = 0, index_reg_val = 0;
        unsigned long flags;
        u8 __iomem *mapped_addr;
+       u64 static_mapping_base;
+
+       static_mapping_base = oct->console_nb_info.dram_region_base;
+
+       if (static_mapping_base &&
+           static_mapping_base == (addr & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL))) {
+               int bar1_index = oct->console_nb_info.bar1_index;
+
+               mapped_addr = oct->mmio[1].hw_addr
+                       + (bar1_index << ilog2(OCTEON_BAR1_ENTRY_SIZE))
+                       + (addr & (OCTEON_BAR1_ENTRY_SIZE - 1ULL));
+
+               if (op)
+                       octeon_pci_fastread(oct, mapped_addr, hostbuf, len);
+               else
+                       octeon_pci_fastwrite(oct, mapped_addr, hostbuf, len);
+
+               return;
+       }
 
        spin_lock_irqsave(&oct->mem_access_lock, flags);
 
index c3d6a822836222e48cae80d10aa2549cb3a4f074..0243be8dd56fc32736c13cef0487514efcfe6a68 100644 (file)
@@ -49,7 +49,7 @@ octeon_alloc_soft_command_resp(struct octeon_device    *oct,
        /* Add in the response related fields. Opcode and Param are already
         * there.
         */
-       if (OCTEON_CN23XX_PF(oct)) {
+       if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
                ih3      = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
                rdp     = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
                irh     = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
@@ -70,7 +70,7 @@ octeon_alloc_soft_command_resp(struct octeon_device    *oct,
 
        *sc->status_word = COMPLETION_WORD_INIT;
 
-       if (OCTEON_CN23XX_PF(oct))
+       if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))
                sc->cmd.cmd3.rptr =  sc->dmarptr;
        else
                sc->cmd.cmd2.rptr =  sc->dmarptr;
index 3ce66759e80adab406bf234926846a72828cf878..707bc15adec61351c1384b8454c85a87a2c4b437 100644 (file)
@@ -455,7 +455,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
 /* Can only be called from process context */
 int
 octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
-               u32 pending_thresh, u32 napi_budget)
+               u32 napi_budget)
 {
        u32 inst_processed = 0;
        u32 tot_inst_processed = 0;
@@ -468,33 +468,32 @@ octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
 
        iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);
 
-       if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
-               do {
-                       /* Process any outstanding IQ packets. */
-                       if (iq->flush_index == iq->octeon_read_index)
-                               break;
-
-                       if (napi_budget)
-                               inst_processed = lio_process_iq_request_list
-                                       (oct, iq,
-                                        napi_budget - tot_inst_processed);
-                       else
-                               inst_processed =
-                                       lio_process_iq_request_list(oct, iq, 0);
+       do {
+               /* Process any outstanding IQ packets. */
+               if (iq->flush_index == iq->octeon_read_index)
+                       break;
 
-                       if (inst_processed) {
-                               atomic_sub(inst_processed, &iq->instr_pending);
-                               iq->stats.instr_processed += inst_processed;
-                       }
+               if (napi_budget)
+                       inst_processed =
+                               lio_process_iq_request_list(oct, iq,
+                                                           napi_budget -
+                                                           tot_inst_processed);
+               else
+                       inst_processed =
+                               lio_process_iq_request_list(oct, iq, 0);
+
+               if (inst_processed) {
+                       atomic_sub(inst_processed, &iq->instr_pending);
+                       iq->stats.instr_processed += inst_processed;
+               }
 
-                       tot_inst_processed += inst_processed;
-                       inst_processed = 0;
+               tot_inst_processed += inst_processed;
+               inst_processed = 0;
 
-               } while (tot_inst_processed < napi_budget);
+       } while (tot_inst_processed < napi_budget);
 
-               if (napi_budget && (tot_inst_processed >= napi_budget))
-                       tx_done = 0;
-       }
+       if (napi_budget && (tot_inst_processed >= napi_budget))
+               tx_done = 0;
 
        iq->last_db_time = jiffies;
 
@@ -530,7 +529,7 @@ static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
        iq->last_db_time = jiffies;
 
        /* Flush the instruction queue */
-       octeon_flush_iq(oct, iq, 1, 0);
+       octeon_flush_iq(oct, iq, 0);
 
        lio_enable_irq(NULL, iq);
 }
index 21f80f5744ba2f048b4d7e0a8a8862e45977a339..a2138686c6055d3d18d9294a14d2468b638cd9c3 100644 (file)
@@ -501,7 +501,7 @@ static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
 
        if (work_done < budget) {
                /* We stopped because no more packets were available. */
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                octeon_mgmt_enable_rx_irq(p);
        }
        octeon_mgmt_update_rx_stats(netdev);
index 2e74bbaa38e1e41ceae25885586424cd74cfa07a..02a986cdbb39cf96ed8f28c6a9326d05af60a1f3 100644 (file)
@@ -471,12 +471,46 @@ static void nicvf_get_ringparam(struct net_device *netdev,
        struct nicvf *nic = netdev_priv(netdev);
        struct queue_set *qs = nic->qs;
 
-       ring->rx_max_pending = MAX_RCV_BUF_COUNT;
-       ring->rx_pending = qs->rbdr_len;
+       ring->rx_max_pending = MAX_CMP_QUEUE_LEN;
+       ring->rx_pending = qs->cq_len;
        ring->tx_max_pending = MAX_SND_QUEUE_LEN;
        ring->tx_pending = qs->sq_len;
 }
 
+static int nicvf_set_ringparam(struct net_device *netdev,
+                              struct ethtool_ringparam *ring)
+{
+       struct nicvf *nic = netdev_priv(netdev);
+       struct queue_set *qs = nic->qs;
+       u32 rx_count, tx_count;
+
+       /* Due to HW errata this is not supported on T88 pass 1.x silicon */
+       if (pass1_silicon(nic->pdev))
+               return -EINVAL;
+
+       if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+               return -EINVAL;
+
+       tx_count = clamp_t(u32, ring->tx_pending,
+                          MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN);
+       rx_count = clamp_t(u32, ring->rx_pending,
+                          MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN);
+
+       if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len))
+               return 0;
+
+       /* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */
+       qs->sq_len = rounddown_pow_of_two(tx_count);
+       qs->cq_len = rounddown_pow_of_two(rx_count);
+
+       if (netif_running(netdev)) {
+               nicvf_stop(netdev);
+               nicvf_open(netdev);
+       }
+
+       return 0;
+}
+
 static int nicvf_get_rss_hash_opts(struct nicvf *nic,
                                   struct ethtool_rxnfc *info)
 {
@@ -635,7 +669,7 @@ static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
 }
 
 static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
-                         const u8 *hkey, u8 hfunc)
+                         const u8 *hkey, const u8 hfunc)
 {
        struct nicvf *nic = netdev_priv(dev);
        struct nicvf_rss_info *rss = &nic->rss_info;
@@ -787,6 +821,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
        .get_regs               = nicvf_get_regs,
        .get_coalesce           = nicvf_get_coalesce,
        .get_ringparam          = nicvf_get_ringparam,
+       .set_ringparam          = nicvf_set_ringparam,
        .get_rxnfc              = nicvf_get_rxnfc,
        .set_rxnfc              = nicvf_set_rxnfc,
        .get_rxfh_key_size      = nicvf_get_rxfh_key_size,
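
nicvf_set_ringparam() above clamps the requested lengths into the supported window and rounds down to a power of two, since the hardware only accepts the power-of-two queue sizes between 1K and 64K. The normalization, sketched in plain C:

    #include <stdio.h>

    static unsigned int pow2_floor(unsigned int v)
    {
            unsigned int p = 1;

            while (p * 2 <= v)          /* rounddown_pow_of_two() */
                    p *= 2;
            return p;
    }

    int main(void)
    {
            unsigned int req = 3000, min = 1024, max = 65536;

            if (req < min) req = min;   /* clamp_t(u32, req, min, max) */
            if (req > max) req = max;
            printf("ring len: %u\n", pow2_floor(req));  /* -> 2048 */
            return 0;
    }
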
index 2006f58b14b17ec3c5262b7244b3375539a8fae3..6feaa24bcfd42bb9647298a0b665e6bf3b11d496 100644 (file)
@@ -749,7 +749,7 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
 
        if (work_done < budget) {
                /* Slow packet rate, exit polling */
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                /* Re-enable interrupts */
                cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
                                               cq->cq_idx);
@@ -1274,7 +1274,8 @@ int nicvf_open(struct net_device *netdev)
        /* Configure receive side scaling and MTU */
        if (!nic->sqs_mode) {
                nicvf_rss_init(nic);
-               if (nicvf_update_hw_max_frs(nic, netdev->mtu))
+               err = nicvf_update_hw_max_frs(nic, netdev->mtu);
+               if (err)
                        goto cleanup;
 
                /* Clear percpu stats */
@@ -1461,8 +1462,8 @@ void nicvf_update_stats(struct nicvf *nic)
                nicvf_update_sq_stats(nic, qidx);
 }
 
-static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
-                                           struct rtnl_link_stats64 *stats)
+static void nicvf_get_stats64(struct net_device *netdev,
+                             struct rtnl_link_stats64 *stats)
 {
        struct nicvf *nic = netdev_priv(netdev);
        struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
@@ -1478,7 +1479,6 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
        stats->tx_packets = hw_stats->tx_frames;
        stats->tx_dropped = hw_stats->tx_drops;
 
-       return stats;
 }
 
 static void nicvf_tx_timeout(struct net_device *dev)
index d2ac133e36f177aa548a1a5e21964737e4b1e27e..ac0390be3b126e957071bde64daebdd29b536c34 100644 (file)
@@ -603,7 +603,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
        cq_cfg.ena = 1;
        cq_cfg.reset = 0;
        cq_cfg.caching = 0;
-       cq_cfg.qsize = CMP_QSIZE;
+       cq_cfg.qsize = ilog2(qs->cq_len >> 10);
        cq_cfg.avg_con = 0;
        nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
 
@@ -652,9 +652,12 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
        sq_cfg.ena = 1;
        sq_cfg.reset = 0;
        sq_cfg.ldwb = 0;
-       sq_cfg.qsize = SND_QSIZE;
+       sq_cfg.qsize = ilog2(qs->sq_len >> 10);
        sq_cfg.tstmp_bgx_intf = 0;
-       sq_cfg.cq_limit = 0;
+       /* CQ's level at which HW will stop processing SQEs to avoid
+        * transmitting a pkt with no space in CQ to post CQE_TX.
+        */
+       sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
        nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
 
        /* Set threshold value for interrupt generation */
@@ -816,11 +819,21 @@ int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
 {
        bool disable = false;
        struct queue_set *qs = nic->qs;
+       struct queue_set *pqs = nic->pnicvf->qs;
        int qidx;
 
        if (!qs)
                return 0;
 
+       /* Take primary VF's queue lengths.
+        * This is needed to take queue lengths set from ethtool
+        * into consideration.
+        */
+       if (nic->sqs_mode && pqs) {
+               qs->cq_len = pqs->cq_len;
+               qs->sq_len = pqs->sq_len;
+       }
+
        if (enable) {
                if (nicvf_alloc_resources(nic))
                        return -ENOMEM;
index 9e2104675bc9dc0cbae020f73476d04836c01870..5cb84da99a2de5bc594464db8759c8359d20447f 100644 (file)
@@ -59,8 +59,9 @@
 /* Default queue count per QS, its lengths and threshold values */
 #define DEFAULT_RBDR_CNT       1
 
-#define SND_QSIZE              SND_QUEUE_SIZE2
+#define SND_QSIZE              SND_QUEUE_SIZE0
 #define SND_QUEUE_LEN          (1ULL << (SND_QSIZE + 10))
+#define MIN_SND_QUEUE_LEN      (1ULL << (SND_QUEUE_SIZE0 + 10))
 #define MAX_SND_QUEUE_LEN      (1ULL << (SND_QUEUE_SIZE6 + 10))
 #define SND_QUEUE_THRESH       2ULL
 #define MIN_SQ_DESC_PER_PKT_XMIT       2
 /* Keep CQ and SQ sizes same, if timestamping
  * is enabled this equation will change.
  */
-#define CMP_QSIZE              CMP_QUEUE_SIZE2
+#define CMP_QSIZE              CMP_QUEUE_SIZE0
 #define CMP_QUEUE_LEN          (1ULL << (CMP_QSIZE + 10))
+#define MIN_CMP_QUEUE_LEN      (1ULL << (CMP_QUEUE_SIZE0 + 10))
+#define MAX_CMP_QUEUE_LEN      (1ULL << (CMP_QUEUE_SIZE6 + 10))
 #define CMP_QUEUE_CQE_THRESH   (NAPI_POLL_WEIGHT / 2)
 #define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */
 
+/* Number of CQEs that may be consumed by HW anyway, due to pipelining
+ * effects, irrespective of the PASS/DROP levels configured
+ */
+#define CMP_QUEUE_PIPELINE_RSVD 544
+
 #define RBDR_SIZE              RBDR_SIZE0
 #define RCV_BUF_COUNT          (1ULL << (RBDR_SIZE + 13))
 #define MAX_RCV_BUF_COUNT      (1ULL << (RBDR_SIZE6 + 13))
  * RED accepts pkt if unused CQE < 2560 & >= 2304
  * DROPs pkts if unused CQE < 2304
  */
-#define RQ_PASS_CQ_LVL         160ULL
-#define RQ_DROP_CQ_LVL         144ULL
+#define RQ_PASS_CQ_LVL         192ULL
+#define RQ_DROP_CQ_LVL         184ULL
 
 /* RED and Backpressure levels of RBDR for pkt reception
  * For RBDR, level is a measure of fullness i.e 0x0 means empty
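
A quick sanity check on the new PASS/DROP levels, assuming (as the example
above suggests) that a CQ level is expressed in units of cq_len / 256 unused
CQEs:

/* standalone sketch, not driver code */
static unsigned int cq_lvl_to_cqes(unsigned int lvl, unsigned int cq_len)
{
	return lvl * (cq_len / 256);	/* one level unit = cq_len / 256 */
}

/* e.g. for a 4K CQ: the old 160/144 levels correspond to 2560/2304
 * unused CQEs (matching the comment above); the new 192/184 levels
 * raise those thresholds to 3072/2944.
 */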
index 1e4695270da6cc422c441542783a0ae24dd943a6..4c8e8cf730bbc2ee1d488d42d9d42163d442fb75 100644 (file)
@@ -978,17 +978,15 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
        struct device *dev = &bgx->pdev->dev;
        struct lmac *lmac;
        char str[20];
-       u8 dlm;
 
-       if (lmacid > bgx->max_lmac)
+       if (!bgx->is_dlm && lmacid)
                return;
 
        lmac = &bgx->lmac[lmacid];
-       dlm = (lmacid / 2) + (bgx->bgx_id * 2);
        if (!bgx->is_dlm)
                sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
        else
-               sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm);
+               sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid);
 
        switch (lmac->lmac_type) {
        case BGX_MODE_SGMII:
@@ -1074,7 +1072,6 @@ static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
 static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
 {
        struct lmac *lmac;
-       struct lmac *olmac;
        u64 cmr_cfg;
        u8 lmac_type;
        u8 lane_to_sds;
@@ -1094,62 +1091,26 @@ static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
                return;
        }
 
-       /* On 81xx BGX can be split across 2 DLMs
-        * firmware programs lmac_type of LMAC0 and LMAC2
+       /* For DLMs or SLMs on 80/81/83xx, many lane configurations are
+        * possible and they vary across boards. The kernel has no way to
+        * identify the board type/info, but firmware does, so just take
+        * the lmac type and serdes lane config exactly as programmed by
+        * firmware.
+        */
-       if ((idx == 0) || (idx == 2)) {
-               cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
-               lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
-               lane_to_sds = (u8)(cmr_cfg & 0xFF);
-               /* Check if config is not reset value */
-               if ((lmac_type == 0) && (lane_to_sds == 0xE4))
-                       lmac->lmac_type = BGX_MODE_INVALID;
-               else
-                       lmac->lmac_type = lmac_type;
-               lmac_set_training(bgx, lmac, lmac->lmacid);
-               lmac_set_lane2sds(bgx, lmac);
-
-               olmac = &bgx->lmac[idx + 1];
-               /*  Check if other LMAC on the same DLM is already configured by
-                *  firmware, if so use the same config or else set as same, as
-                *  that of LMAC 0/2.
-                *  This check is needed as on 80xx only one lane of each of the
-                *  DLM of BGX0 is used, so have to rely on firmware for
-                *  distingushing 80xx from 81xx.
-                */
-               cmr_cfg = bgx_reg_read(bgx, idx + 1, BGX_CMRX_CFG);
-               lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
-               lane_to_sds = (u8)(cmr_cfg & 0xFF);
-               if ((lmac_type == 0) && (lane_to_sds == 0xE4)) {
-                       olmac->lmac_type = lmac->lmac_type;
-                       lmac_set_lane2sds(bgx, olmac);
-               } else {
-                       olmac->lmac_type = lmac_type;
-                       olmac->lane_to_sds = lane_to_sds;
-               }
-               lmac_set_training(bgx, olmac, olmac->lmacid);
-       }
-}
-
-static bool is_dlm0_in_bgx_mode(struct bgx *bgx)
-{
-       struct lmac *lmac;
-
-       if (!bgx->is_dlm)
-               return true;
-
-       lmac = &bgx->lmac[0];
-       if (lmac->lmac_type == BGX_MODE_INVALID)
-               return false;
-
-       return true;
+       cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
+       lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
+       lane_to_sds = (u8)(cmr_cfg & 0xFF);
+       /* Check if config is reset value */
+       if ((lmac_type == 0) && (lane_to_sds == 0xE4))
+               lmac->lmac_type = BGX_MODE_INVALID;
+       else
+               lmac->lmac_type = lmac_type;
+       lmac->lane_to_sds = lane_to_sds;
+       lmac_set_training(bgx, lmac, lmac->lmacid);
 }
 
 static void bgx_get_qlm_mode(struct bgx *bgx)
 {
        struct lmac *lmac;
-       struct lmac *lmac01;
-       struct lmac *lmac23;
        u8  idx;
 
        /* Init all LMAC's type to invalid */
@@ -1165,29 +1126,9 @@ static void bgx_get_qlm_mode(struct bgx *bgx)
        if (bgx->lmac_count > bgx->max_lmac)
                bgx->lmac_count = bgx->max_lmac;
 
-       for (idx = 0; idx < bgx->max_lmac; idx++)
-               bgx_set_lmac_config(bgx, idx);
-
-       if (!bgx->is_dlm || bgx->is_rgx) {
-               bgx_print_qlm_mode(bgx, 0);
-               return;
-       }
-
-       if (bgx->lmac_count) {
-               bgx_print_qlm_mode(bgx, 0);
-               bgx_print_qlm_mode(bgx, 2);
-       }
-
-       /* If DLM0 is not in BGX mode then LMAC0/1 have
-        * to be configured with serdes lanes of DLM1
-        */
-       if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2))
-               return;
        for (idx = 0; idx < bgx->lmac_count; idx++) {
-               lmac01 = &bgx->lmac[idx];
-               lmac23 = &bgx->lmac[idx + 2];
-               lmac01->lmac_type = lmac23->lmac_type;
-               lmac01->lane_to_sds = lmac23->lane_to_sds;
+               bgx_set_lmac_config(bgx, idx);
+               bgx_print_qlm_mode(bgx, idx);
        }
 }
 
index 86f467a2c4859608cc0337e57534d4a2d5e54582..d56142b985349380068a1c313c71b62110598c84 100644 (file)
@@ -1605,7 +1605,7 @@ int t1_poll(struct napi_struct *napi, int budget)
        int work_done = process_responses(adapter, budget);
 
        if (likely(work_done < budget)) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                writel(adapter->sge->respQ.cidx,
                       adapter->regs + A_SG_SLEEPING);
        }
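
This napi_complete() -> napi_complete_done() conversion recurs throughout the
merge: reporting how much work was done lets the core apply its interrupt
deferral and busy-poll heuristics, and the boolean result tells the driver
whether it is safe to re-arm interrupts. The general shape, as a sketch (the
mydev_* names are hypothetical):

static int mydev_poll(struct napi_struct *napi, int budget)
{
	int work_done = mydev_process_rx(napi, budget);	/* hypothetical */

	/* only re-enable IRQs if NAPI really completed; the core may
	 * keep the queue in polled mode and return false here
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		mydev_enable_rx_irq(napi);		/* hypothetical */

	return work_done;
}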
index 5f226eda8cd68751aa7360534f74f59604f88374..52063587e1e9a55b5afc2d261d4e713ad9d0b381 100644 (file)
@@ -351,7 +351,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
                e->smt_idx = smt_idx;
                atomic_set(&e->refcnt, 1);
                neigh_replace(e, neigh);
-               if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
+               if (is_vlan_dev(neigh->dev))
                        e->vlan = vlan_dev_vlan_id(neigh->dev);
                else
                        e->vlan = VLAN_NONE;
index e4b5b057f41786733ea9883de572702aeec78e68..1b9d154f114923841702ee67af2e0f17d94e39e9 100644 (file)
@@ -1843,7 +1843,7 @@ static int ofld_poll(struct napi_struct *napi, int budget)
                __skb_queue_head_init(&queue);
                skb_queue_splice_init(&q->rx_queue, &queue);
                if (skb_queue_empty(&queue)) {
-                       napi_complete(napi);
+                       napi_complete_done(napi, work_done);
                        spin_unlock_irq(&q->lock);
                        return work_done;
                }
@@ -2414,7 +2414,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
        int work_done = process_responses(adap, qs, budget);
 
        if (likely(work_done < budget)) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                /*
                 * Because we don't atomically flush the following
index 0bce1bf9ca0fc587f84b8f2f5f8bd28227b22362..163543b1ea0bf1bec215f0613a6bce2a729fa337 100644 (file)
@@ -263,6 +263,11 @@ struct tp_params {
        u32 vlan_pri_map;               /* cached TP_VLAN_PRI_MAP */
        u32 ingress_config;             /* cached TP_INGRESS_CONFIG */
 
+       /* cached TP_OUT_CONFIG: whether the compressed error vector
+        * and outer header info are passed for encapsulated packets.
+        */
+       int rx_pkt_encap;
+
        /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
         * subset of the set of fields which may be present in the Compressed
         * Filter Tuple portion of filters and TCP TCB connections.  The
@@ -581,22 +586,6 @@ struct sge_rspq {                   /* state for an SGE response queue */
        rspq_handler_t handler;
        rspq_flush_handler_t flush_handler;
        struct t4_lro_mgr lro_mgr;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#define CXGB_POLL_STATE_IDLE           0
-#define CXGB_POLL_STATE_NAPI           BIT(0) /* NAPI owns this poll */
-#define CXGB_POLL_STATE_POLL           BIT(1) /* poll owns this poll */
-#define CXGB_POLL_STATE_NAPI_YIELD     BIT(2) /* NAPI yielded this poll */
-#define CXGB_POLL_STATE_POLL_YIELD     BIT(3) /* poll yielded this poll */
-#define CXGB_POLL_YIELD                        (CXGB_POLL_STATE_NAPI_YIELD |   \
-                                        CXGB_POLL_STATE_POLL_YIELD)
-#define CXGB_POLL_LOCKED               (CXGB_POLL_STATE_NAPI |         \
-                                        CXGB_POLL_STATE_POLL)
-#define CXGB_POLL_USER_PEND            (CXGB_POLL_STATE_POLL |         \
-                                        CXGB_POLL_STATE_POLL_YIELD)
-       unsigned int bpoll_state;
-       spinlock_t bpoll_lock;          /* lock for busy poll */
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 };
 
 struct sge_eth_stats {              /* Ethernet queue statistics */
@@ -782,6 +771,10 @@ struct vf_info {
        bool pf_set_mac;
 };
 
+struct mbox_list {
+       struct list_head list;
+};
+
 struct adapter {
        void __iomem *regs;
        void __iomem *bar2;
@@ -844,6 +837,10 @@ struct adapter {
        struct work_struct db_drop_task;
        bool tid_release_task_busy;
 
+       /* lock for mailbox cmd list */
+       spinlock_t mbox_lock;
+       struct mbox_list mlist;
+
        /* support for mailbox command/reply logging */
 #define T4_OS_LOG_MBOX_CMDS 256
        struct mbox_cmd_log *mbox_log;
@@ -1160,102 +1157,6 @@ static inline struct adapter *netdev2adap(const struct net_device *dev)
        return netdev2pinfo(dev)->adapter;
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
-{
-       spin_lock_init(&q->bpoll_lock);
-       q->bpoll_state = CXGB_POLL_STATE_IDLE;
-}
-
-static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
-{
-       bool rc = true;
-
-       spin_lock(&q->bpoll_lock);
-       if (q->bpoll_state & CXGB_POLL_LOCKED) {
-               q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD;
-               rc = false;
-       } else {
-               q->bpoll_state = CXGB_POLL_STATE_NAPI;
-       }
-       spin_unlock(&q->bpoll_lock);
-       return rc;
-}
-
-static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
-{
-       bool rc = false;
-
-       spin_lock(&q->bpoll_lock);
-       if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
-               rc = true;
-       q->bpoll_state = CXGB_POLL_STATE_IDLE;
-       spin_unlock(&q->bpoll_lock);
-       return rc;
-}
-
-static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
-{
-       bool rc = true;
-
-       spin_lock_bh(&q->bpoll_lock);
-       if (q->bpoll_state & CXGB_POLL_LOCKED) {
-               q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD;
-               rc = false;
-       } else {
-               q->bpoll_state |= CXGB_POLL_STATE_POLL;
-       }
-       spin_unlock_bh(&q->bpoll_lock);
-       return rc;
-}
-
-static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
-{
-       bool rc = false;
-
-       spin_lock_bh(&q->bpoll_lock);
-       if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
-               rc = true;
-       q->bpoll_state = CXGB_POLL_STATE_IDLE;
-       spin_unlock_bh(&q->bpoll_lock);
-       return rc;
-}
-
-static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
-{
-       return q->bpoll_state & CXGB_POLL_USER_PEND;
-}
-#else
-static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
-{
-}
-
-static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
-{
-       return true;
-}
-
-static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
-{
-       return false;
-}
-
-static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
-{
-       return false;
-}
-
-static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
-{
-       return false;
-}
-
-static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
-{
-       return false;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 /* Return a version number to identify the type of adapter.  The scheme is:
  * - bits 0..9: chip version
  * - bits 10..15: chip revision
@@ -1312,7 +1213,6 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
 int t4_sge_init(struct adapter *adap);
 void t4_sge_start(struct adapter *adap);
 void t4_sge_stop(struct adapter *adap);
-int cxgb_busy_poll(struct napi_struct *napi);
 void cxgb4_set_ethtool_ops(struct net_device *netdev);
 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
 extern int dbfifo_int_thresh;
@@ -1488,6 +1388,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
               const u8 *fw_data, unsigned int fw_size,
               struct fw_hdr *card_fw, enum dev_state state, int *reset);
 int t4_prep_adapter(struct adapter *adapter);
+int t4_shutdown_adapter(struct adapter *adapter);
 
 enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
 int t4_bar2_sge_qregs(struct adapter *adapter,
index 6f951877430bd51db55c0075b176f79480d1e4b4..afb0967d2ce60cafab81701e057017b96a700bbc 100644 (file)
@@ -188,18 +188,24 @@ static void link_report(struct net_device *dev)
                const struct port_info *p = netdev_priv(dev);
 
                switch (p->link_cfg.speed) {
-               case 10000:
-                       s = "10Gbps";
+               case 100:
+                       s = "100Mbps";
                        break;
                case 1000:
-                       s = "1000Mbps";
+                       s = "1Gbps";
                        break;
-               case 100:
-                       s = "100Mbps";
+               case 10000:
+                       s = "10Gbps";
+                       break;
+               case 25000:
+                       s = "25Gbps";
                        break;
                case 40000:
                        s = "40Gbps";
                        break;
+               case 100000:
+                       s = "100Gbps";
+                       break;
                default:
                        pr_info("%s: unsupported speed: %d\n",
                                dev->name, p->link_cfg.speed);
@@ -738,14 +744,8 @@ static void quiesce_rx(struct adapter *adap)
        for (i = 0; i < adap->sge.ingr_sz; i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];
 
-               if (q && q->handler) {
+               if (q && q->handler)
                        napi_disable(&q->napi);
-                       local_bh_disable();
-                       while (!cxgb_poll_lock_napi(q))
-                               mdelay(1);
-                       local_bh_enable();
-               }
-
        }
 }
 
@@ -776,10 +776,9 @@ static void enable_rx(struct adapter *adap)
 
                if (!q)
                        continue;
-               if (q->handler) {
-                       cxgb_busy_poll_init_lock(q);
+               if (q->handler)
                        napi_enable(&q->napi);
-               }
+
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
                             SEINTARM_V(q->intr_params) |
@@ -1806,7 +1805,7 @@ static void check_neigh_update(struct neighbour *neigh)
        const struct device *parent;
        const struct net_device *netdev = neigh->dev;
 
-       if (netdev->priv_flags & IFF_802_1Q_VLAN)
+       if (is_vlan_dev(netdev))
                netdev = vlan_dev_real_dev(netdev);
        parent = netdev->dev.parent;
        if (parent && parent->driver == &cxgb4_driver.driver)
@@ -2112,7 +2111,7 @@ static int cxgb4_inet6addr_handler(struct notifier_block *this,
 #if IS_ENABLED(CONFIG_BONDING)
        struct adapter *adap;
 #endif
-       if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+       if (is_vlan_dev(event_dev))
                event_dev = vlan_dev_real_dev(event_dev);
 #if IS_ENABLED(CONFIG_BONDING)
        if (event_dev->flags & IFF_MASTER) {
@@ -2369,8 +2368,8 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
 }
 EXPORT_SYMBOL(cxgb4_remove_server_filter);
 
-static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
-                                               struct rtnl_link_stats64 *ns)
+static void cxgb_get_stats(struct net_device *dev,
+                          struct rtnl_link_stats64 *ns)
 {
        struct port_stats stats;
        struct port_info *p = netdev_priv(dev);
@@ -2383,7 +2382,7 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
        spin_lock(&adapter->stats_lock);
        if (!netif_device_present(dev)) {
                spin_unlock(&adapter->stats_lock);
-               return ns;
+               return;
        }
        t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
                                 &p->stats_base);
@@ -2401,7 +2400,7 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
        ns->rx_over_errors   = 0;
        ns->rx_crc_errors    = stats.rx_fcs_err;
        ns->rx_frame_errors  = stats.rx_symbol_err;
-       ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
+       ns->rx_dropped       = stats.rx_ovflow0 + stats.rx_ovflow1 +
                               stats.rx_ovflow2 + stats.rx_ovflow3 +
                               stats.rx_trunc0 + stats.rx_trunc1 +
                               stats.rx_trunc2 + stats.rx_trunc3;
@@ -2417,7 +2416,6 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
        ns->tx_errors = stats.tx_error_frames;
        ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
                ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
-       return ns;
 }
 
 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
@@ -2578,6 +2576,19 @@ static int cxgb_get_vf_config(struct net_device *dev,
        ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr);
        return 0;
 }
+
+static int cxgb_get_phys_port_id(struct net_device *dev,
+                                struct netdev_phys_item_id *ppid)
+{
+       struct port_info *pi = netdev_priv(dev);
+       unsigned int phy_port_id;
+
+       phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
+       ppid->id_len = sizeof(phy_port_id);
+       memcpy(ppid->id, &phy_port_id, ppid->id_len);
+       return 0;
+}
+
 #endif
 
 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
@@ -2745,9 +2756,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
        .ndo_fcoe_enable      = cxgb_fcoe_enable,
        .ndo_fcoe_disable     = cxgb_fcoe_disable,
 #endif /* CONFIG_CHELSIO_T4_FCOE */
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       .ndo_busy_poll        = cxgb_busy_poll,
-#endif
        .ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
        .ndo_setup_tc         = cxgb_setup_tc,
 };
@@ -2757,6 +2765,7 @@ static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
        .ndo_open             = dummy_open,
        .ndo_set_vf_mac       = cxgb_set_vf_mac,
        .ndo_get_vf_config    = cxgb_get_vf_config,
+       .ndo_get_phys_port_id = cxgb_get_phys_port_id,
 };
 #endif
 
@@ -2777,8 +2786,24 @@ static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
 
 void t4_fatal_err(struct adapter *adap)
 {
-       t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
-       t4_intr_disable(adap);
+       int port;
+
+       /* Disable the SGE since ULDs are going to free resources that
+        * could be exposed to the adapter.  RDMA MWs for example...
+        */
+       t4_shutdown_adapter(adap);
+       for_each_port(adap, port) {
+               struct net_device *dev = adap->port[port];
+
+               /* If we get here in very early initialization the network
+                * devices may not have been set up yet.
+                */
+               if (!dev)
+                       continue;
+
+               netif_tx_stop_all_queues(dev);
+               netif_carrier_off(dev);
+       }
        dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
 }
 
@@ -4397,9 +4422,9 @@ static void print_port_info(const struct net_device *dev)
                spd = " 8 GT/s";
 
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
-               bufp += sprintf(bufp, "100/");
+               bufp += sprintf(bufp, "100M/");
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
-               bufp += sprintf(bufp, "1000/");
+               bufp += sprintf(bufp, "1G/");
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
                bufp += sprintf(bufp, "10G/");
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
@@ -4511,12 +4536,14 @@ static int config_mgmt_dev(struct pci_dev *pdev)
        int err;
 
        snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
-       netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, dummy_setup);
+       netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN,
+                             dummy_setup);
        if (!netdev)
                return -ENOMEM;
 
        pi = netdev_priv(netdev);
        pi->adapter = adap;
+       pi->port_id = adap->pf % adap->params.nports;
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
        adap->port[0] = netdev;
@@ -4606,6 +4633,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        u32 whoami, pl_rev;
        enum chip_type chip;
        static int adap_idx = 1;
+#ifdef CONFIG_PCI_IOV
+       u32 v, port_vec;
+#endif
 
        printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
 
@@ -4707,6 +4737,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        spin_lock_init(&adapter->stats_lock);
        spin_lock_init(&adapter->tid_release_lock);
        spin_lock_init(&adapter->win0_lock);
+       spin_lock_init(&adapter->mbox_lock);
+
+       INIT_LIST_HEAD(&adapter->mlist.list);
 
        INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
        INIT_WORK(&adapter->db_full_task, process_db_full);
@@ -4874,8 +4907,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                         "continuing\n");
                adapter->params.offload = 0;
        } else {
-               adapter->tc_u32 = cxgb4_init_tc_u32(adapter,
-                                                   CXGB4_MAX_LINK_HANDLE);
+               adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
                if (!adapter->tc_u32)
                        dev_warn(&pdev->dev,
                                 "could not offload tc u32, continuing\n");
@@ -4982,6 +5014,19 @@ sriov:
                err = -ENOMEM;
                goto free_adapter;
        }
+       spin_lock_init(&adapter->mbox_lock);
+       INIT_LIST_HEAD(&adapter->mlist.list);
+
+       v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+           FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
+       err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
+                             &v, &port_vec);
+       if (err < 0) {
+               dev_err(adapter->pdev_dev, "Could not fetch port params\n");
+               goto free_adapter;
+       }
+
+       adapter->params.nports = hweight32(port_vec);
        pci_set_drvdata(pdev, adapter);
        return 0;
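
The PORTVEC device parameter queried above is a bitmap with one bit set per
physical port, so the port count is simply its population count. A standalone
illustration using the compiler builtin in place of the kernel's hweight32():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t port_vec = 0x5;	/* example: ports 0 and 2 present */
	unsigned int nports = (unsigned int)__builtin_popcount(port_vec);

	printf("nports = %u\n", nports);	/* prints 2 */
	return 0;
}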
 
index 52af62e0ecb6bf01cdeffa0efc15e9870c134de2..a1b19422b3395fa0645d7d0ede4ae461af644599 100644 (file)
@@ -437,28 +437,26 @@ void cxgb4_cleanup_tc_u32(struct adapter *adap)
        t4_free_mem(adap->tc_u32);
 }
 
-struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
-                                            unsigned int size)
+struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap)
 {
+       unsigned int max_tids = adap->tids.nftids;
        struct cxgb4_tc_u32_table *t;
        unsigned int i;
 
-       if (!size)
+       if (!max_tids)
                return NULL;
 
        t = t4_alloc_mem(sizeof(*t) +
-                        (size * sizeof(struct cxgb4_link)));
+                        (max_tids * sizeof(struct cxgb4_link)));
        if (!t)
                return NULL;
 
-       t->size = size;
+       t->size = max_tids;
 
        for (i = 0; i < t->size; i++) {
                struct cxgb4_link *link = &t->table[i];
                unsigned int bmap_size;
-               unsigned int max_tids;
 
-               max_tids = adap->tids.nftids;
                bmap_size = BITS_TO_LONGS(max_tids);
                link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size);
                if (!link->tid_map)
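
The per-link tid bitmap above is sized with BITS_TO_LONGS(), which rounds a
bit count up to whole unsigned longs. Equivalent arithmetic as a sketch (the
MY_ prefixes mark these as illustrations, not the kernel's definitions):

#define MY_BITS_PER_LONG	(8 * sizeof(unsigned long))
#define MY_BITS_TO_LONGS(n)	(((n) + MY_BITS_PER_LONG - 1) / MY_BITS_PER_LONG)

/* e.g. max_tids = 496 -> MY_BITS_TO_LONGS(496) = 8 on a 64-bit kernel,
 * i.e. an 8 * sizeof(unsigned long) = 64 byte bitmap per link
 */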
index 6bdc885eff2253f5ea27673200a8d633e2dbf491..021261a41c1312f6bd43fa3cc58dddc73eb023db 100644 (file)
@@ -37,8 +37,6 @@
 
 #include <net/pkt_cls.h>
 
-#define CXGB4_MAX_LINK_HANDLE 32
-
 static inline bool can_tc_u32_offload(struct net_device *dev)
 {
        struct adapter *adap = netdev2adap(dev);
@@ -52,6 +50,5 @@ int cxgb4_delete_knode(struct net_device *dev, __be16 protocol,
                       struct tc_cls_u32_offload *cls);
 
 void cxgb4_cleanup_tc_u32(struct adapter *adapter);
-struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
-                                            unsigned int size);
+struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap);
 #endif /* __CXGB4_TC_U32_H */
index 8098902c094a1d6e9e340dfbd54102079823e488..d0868c2320da843eab0ea441b0f6848fc5acda95 100644 (file)
@@ -408,10 +408,9 @@ static void enable_rx(struct adapter *adap, struct sge_rspq *q)
        if (!q)
                return;
 
-       if (q->handler) {
-               cxgb_busy_poll_init_lock(q);
+       if (q->handler)
                napi_enable(&q->napi);
-       }
+
        /* 0-increment GTS to start the timer and enable interrupts */
        t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
                     SEINTARM_V(q->intr_params) |
@@ -420,13 +419,8 @@ static void enable_rx(struct adapter *adap, struct sge_rspq *q)
 
 static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
 {
-       if (q && q->handler) {
+       if (q && q->handler)
                napi_disable(&q->napi);
-               local_bh_disable();
-               while (!cxgb_poll_lock_napi(q))
-                       mdelay(1);
-               local_bh_enable();
-       }
 }
 
 static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
@@ -597,7 +591,6 @@ void t4_uld_mem_free(struct adapter *adap)
 
 void t4_uld_clean_up(struct adapter *adap)
 {
-       struct sge_uld_rxq_info *rxq_info;
        unsigned int i;
 
        if (!adap->uld)
@@ -605,7 +598,6 @@ void t4_uld_clean_up(struct adapter *adap)
        for (i = 0; i < CXGB4_ULD_MAX; i++) {
                if (!adap->uld[i].handle)
                        continue;
-               rxq_info = adap->sge.uld_rxq_info[i];
                if (adap->flags & FULL_INIT_DONE)
                        quiesce_rx_uld(adap, i);
                if (adap->flags & USING_MSIX)
index 60a26037a1c675453a8261822c113712c129874a..7c8c5b9a3c22ebb5edb67ab3be378f46bbb83baf 100644 (file)
@@ -432,7 +432,7 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
        else
                lport = netdev2pinfo(physdev)->lport;
 
-       if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
+       if (is_vlan_dev(neigh->dev))
                vlan = vlan_dev_vlan_id(neigh->dev);
        else
                vlan = VLAN_NONE;
index cbd68a8fe2e48b54bd5a9296eac7e8cca32063e4..c9026352a842191d51913f62f4424bb74f1bf7a0 100644 (file)
@@ -397,9 +397,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
                struct ch_sched_params info;
                struct ch_sched_params tp;
 
-               memset(&info, 0, sizeof(info));
-               memset(&tp, 0, sizeof(tp));
-
                memcpy(&tp, p, sizeof(tp));
                /* Don't try to match class parameter */
                tp.u.params.class = SCHED_CLS_NONE;
@@ -409,7 +406,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
                        if (e->state == SCHED_STATE_UNUSED)
                                continue;
 
-                       memset(&info, 0, sizeof(info));
                        memcpy(&info, &e->info, sizeof(info));
                        /* Don't try to match class parameter */
                        info.u.params.class = SCHED_CLS_NONE;
@@ -458,7 +454,6 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
                if (!e)
                        goto out;
 
-               memset(&np, 0, sizeof(np));
                memcpy(&np, p, sizeof(np));
                np.u.params.class = e->idx;
 
index 9f606478c29cb2ad7fd445b9c6709eb2c92d5e8c..f05f0d400324e878ff1ac0a7545e46e218028f5f 100644 (file)
@@ -43,9 +43,7 @@
 #include <linux/export.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
-#ifdef CONFIG_NET_RX_BUSY_POLL
 #include <net/busy_poll.h>
-#endif /* CONFIG_NET_RX_BUSY_POLL */
 #ifdef CONFIG_CHELSIO_T4_FCOE
 #include <scsi/fc/fc_fcoe.h>
 #endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1774,15 +1772,20 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
        struct sge_uld_txq *txq;
        unsigned int idx = skb_txq(skb);
 
-       txq_info = adap->sge.uld_txq_info[tx_uld_type];
-       txq = &txq_info->uldtxq[idx];
-
        if (unlikely(is_ctrl_pkt(skb))) {
                /* Single ctrl queue is a requirement for LE workaround path */
                if (adap->tids.nsftids)
                        idx = 0;
                return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
        }
+
+       txq_info = adap->sge.uld_txq_info[tx_uld_type];
+       if (unlikely(!txq_info)) {
+               WARN_ON(true);
+               return NET_XMIT_DROP;
+       }
+
+       txq = &txq_info->uldtxq[idx];
        return ofld_xmit(txq, skb);
 }
 
@@ -2038,16 +2041,22 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
        struct sge *s = &q->adap->sge;
        int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
                            CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
+       u16 err_vec;
        struct port_info *pi;
 
        if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
                return handle_trace_pkt(q->adap, si);
 
        pkt = (const struct cpl_rx_pkt *)rsp;
-       csum_ok = pkt->csum_calc && !pkt->err_vec &&
+       /* Compressed error vector is enabled for T6 only */
+       if (q->adap->params.tp.rx_pkt_encap)
+               err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
+       else
+               err_vec = be16_to_cpu(pkt->err_vec);
+
+       csum_ok = pkt->csum_calc && !err_vec &&
                  (q->netdev->features & NETIF_F_RXCSUM);
        if ((pkt->l2info & htonl(RXF_TCP_F)) &&
-           !(cxgb_poll_busy_polling(q)) &&
            (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
                do_gro(rxq, si, pkt);
                return 0;
@@ -2092,7 +2101,12 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
                        if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
                            (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
-                               if (!(pkt->err_vec & cpu_to_be16(RXERR_CSUM_F)))
+                               if (q->adap->params.tp.rx_pkt_encap)
+                                       csum_ok = err_vec &
+                                                 T6_COMPR_RXERR_SUM_F;
+                               else
+                                       csum_ok = err_vec & RXERR_CSUM_F;
+                               if (!csum_ok)
                                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        }
                }
@@ -2273,38 +2287,6 @@ static int process_responses(struct sge_rspq *q, int budget)
        return budget - budget_left;
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-int cxgb_busy_poll(struct napi_struct *napi)
-{
-       struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
-       unsigned int params, work_done;
-       u32 val;
-
-       if (!cxgb_poll_lock_poll(q))
-               return LL_FLUSH_BUSY;
-
-       work_done = process_responses(q, 4);
-       params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1);
-       q->next_intr_params = params;
-       val = CIDXINC_V(work_done) | SEINTARM_V(params);
-
-       /* If we don't have access to the new User GTS (T5+), use the old
-        * doorbell mechanism; otherwise use the new BAR2 mechanism.
-        */
-       if (unlikely(!q->bar2_addr))
-               t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
-                            val | INGRESSQID_V((u32)q->cntxt_id));
-       else {
-               writel(val | INGRESSQID_V(q->bar2_qid),
-                      q->bar2_addr + SGE_UDB_GTS);
-               wmb();
-       }
-
-       cxgb_poll_unlock_poll(q);
-       return work_done;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 /**
  *     napi_rx_handler - the NAPI handler for Rx processing
  *     @napi: the napi instance
@@ -2323,9 +2305,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
        int work_done;
        u32 val;
 
-       if (!cxgb_poll_lock_napi(q))
-               return budget;
-
        work_done = process_responses(q, budget);
        if (likely(work_done < budget)) {
                int timer_index;
@@ -2365,7 +2344,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
                       q->bar2_addr + SGE_UDB_GTS);
                wmb();
        }
-       cxgb_poll_unlock_napi(q);
        return work_done;
 }
 
index e8139514d32ca5ee2df37167d480681e381e37ae..87000cd397372ab999d25ea2c935f0385fa941ee 100644 (file)
@@ -284,6 +284,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
                1, 1, 3, 5, 10, 10, 20, 50, 100, 200
        };
 
+       struct mbox_list entry;
        u16 access = 0;
        u16 execute = 0;
        u32 v;
@@ -311,11 +312,62 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
                timeout = -timeout;
        }
 
+       /* Queue ourselves onto the mailbox access list.  When our entry is at
+        * the front of the list, we have rights to access the mailbox.  So we
+        * wait [for a while] till we're at the front [or bail out with an
+        * EBUSY] ...
+        */
+       spin_lock(&adap->mbox_lock);
+       list_add_tail(&entry.list, &adap->mlist.list);
+       spin_unlock(&adap->mbox_lock);
+
+       delay_idx = 0;
+       ms = delay[0];
+
+       for (i = 0; ; i += ms) {
+               /* If we've waited too long, return a busy indication.  This
+                * really ought to be based on our initial position in the
+                * mailbox access list but this is a start.  We very rarely
+                * contend on access to the mailbox ...
+                */
+               pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+               if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
+                       spin_lock(&adap->mbox_lock);
+                       list_del(&entry.list);
+                       spin_unlock(&adap->mbox_lock);
+                       ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
+                       t4_record_mbox(adap, cmd, size, access, ret);
+                       return ret;
+               }
+
+               /* If we're at the head, break out and start the mailbox
+                * protocol.
+                */
+               if (list_first_entry(&adap->mlist.list, struct mbox_list,
+                                    list) == &entry)
+                       break;
+
+               /* Delay for a bit before checking again ... */
+               if (sleep_ok) {
+                       ms = delay[delay_idx];  /* last element may repeat */
+                       if (delay_idx < ARRAY_SIZE(delay) - 1)
+                               delay_idx++;
+                       msleep(ms);
+               } else {
+                       mdelay(ms);
+               }
+       }
+
+       /* Loop trying to get ownership of the mailbox.  Return an error
+        * if we can't gain ownership.
+        */
        v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
        for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
                v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
-
        if (v != MBOX_OWNER_DRV) {
+               spin_lock(&adap->mbox_lock);
+               list_del(&entry.list);
+               spin_unlock(&adap->mbox_lock);
                ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
                t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
                return ret;
@@ -366,6 +418,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
                        execute = i + ms;
                        t4_record_mbox(adap, cmd_rpl,
                                       MBOX_LEN, access, execute);
+                       spin_lock(&adap->mbox_lock);
+                       list_del(&entry.list);
+                       spin_unlock(&adap->mbox_lock);
                        return -FW_CMD_RETVAL_G((int)res);
                }
        }
@@ -375,6 +430,10 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
        dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
                *(const u8 *)cmd, mbox);
        t4_report_fw_error(adap);
+       spin_lock(&adap->mbox_lock);
+       list_del(&entry.list);
+       spin_unlock(&adap->mbox_lock);
+       t4_fatal_err(adap);
        return ret;
 }
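
The list-based admission scheme added above is essentially a FIFO ticket
lock: each caller links a stack-allocated entry onto a shared list and waits
until that entry reaches the head, which grants it exclusive use of the
mailbox. A simplified userspace analog (pthread mutex in place of the
spinlock; the real code additionally deletes its entry from the middle of the
list when it times out or fails):

#include <pthread.h>
#include <stddef.h>
#include <unistd.h>

struct node { struct node *next; };

static struct node *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void fifo_enter(struct node *me)
{
	struct node **pp;

	me->next = NULL;
	pthread_mutex_lock(&lock);
	for (pp = &head; *pp; pp = &(*pp)->next)
		;			/* find the tail: list_add_tail() */
	*pp = me;
	pthread_mutex_unlock(&lock);

	for (;;) {			/* wait until we reach the head */
		pthread_mutex_lock(&lock);
		if (head == me) {
			pthread_mutex_unlock(&lock);
			return;
		}
		pthread_mutex_unlock(&lock);
		usleep(1000);		/* cf. the delay[] backoff above */
	}
}

static void fifo_exit(struct node *me)
{
	pthread_mutex_lock(&lock);
	head = me->next;		/* list_del() of the head entry */
	pthread_mutex_unlock(&lock);
}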
 
@@ -5382,22 +5441,28 @@ unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
 const char *t4_get_port_type_description(enum fw_port_type port_type)
 {
        static const char *const port_type_description[] = {
-               "XFI",
-               "XAUI",
-               "SGMII",
-               "XFI",
-               "XAUI",
+               "Fiber_XFI",
+               "Fiber_XAUI",
+               "BT_SGMII",
+               "BT_XFI",
+               "BT_XAUI",
                "KX4",
                "CX4",
                "KX",
                "KR",
-               "R SFP+",
-               "KR/KX",
-               "KR/KX/KX4",
-               "R QSFP_10G",
-               "R QSA",
-               "R QSFP",
-               "R BP40_BA",
+               "SFP",
+               "BP_AP",
+               "BP4_AP",
+               "QSFP_10G",
+               "QSA",
+               "QSFP",
+               "BP40_BA",
+               "KR4_100G",
+               "CR4_QSFP",
+               "CR_QSFP",
+               "CR2_QSFP",
+               "SFP28",
+               "KR_SFP28",
        };
 
        if (port_type < ARRAY_SIZE(port_type_description))
@@ -5438,6 +5503,7 @@ void t4_get_port_stats_offset(struct adapter *adap, int idx,
 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
 {
        u32 bgmap = t4_get_mps_bg_map(adap, idx);
+       u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);
 
 #define GET_STAT(name) \
        t4_read_reg64(adap, \
@@ -5469,6 +5535,14 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
        p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
        p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
 
+       if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+               if (stat_ctl & COUNTPAUSESTATTX_F) {
+                       p->tx_frames -= p->tx_pause;
+                       p->tx_octets -= p->tx_pause * 64;
+               }
+               if (stat_ctl & COUNTPAUSEMCTX_F)
+                       p->tx_mcast_frames -= p->tx_pause;
+       }
        p->rx_octets           = GET_STAT(RX_PORT_BYTES);
        p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
        p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
@@ -5497,6 +5571,15 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
        p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
        p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
 
+       if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+               if (stat_ctl & COUNTPAUSESTATRX_F) {
+                       p->rx_frames -= p->rx_pause;
+                       p->rx_octets -= p->rx_pause * 64;
+               }
+               if (stat_ctl & COUNTPAUSEMCRX_F)
+                       p->rx_mcast_frames -= p->rx_pause;
+       }
+
        p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
        p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
        p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
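
The MPS_STAT_CTL checks above discount 802.3x pause frames from the raw MAC
counters when the hardware indicates it counted them; the 64-octet multiplier
assumes each pause frame is a minimum-size 64-byte frame. As a sketch:

#include <stdint.h>

/* sketch of the discount applied above */
static void discount_pause(uint64_t *frames, uint64_t *octets, uint64_t pause)
{
	*frames -= pause;	/* pause frames were counted as frames */
	*octets -= pause * 64;	/* each pause frame is 64 octets long  */
}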
@@ -7476,6 +7559,39 @@ int t4_prep_adapter(struct adapter *adapter)
        return 0;
 }
 
+/**
+ *     t4_shutdown_adapter - shut down adapter, host & wire
+ *     @adapter: the adapter
+ *
+ *     Perform an emergency shutdown of the adapter and stop it from
+ *     continuing any further communication on the ports or DMA to the
+ *     host.  This is typically used when the adapter and/or firmware
+ *     have crashed and we want to prevent any further accidental
+ *     communication with the rest of the world.  This will also force
+ *     the port Link Status to go down -- if register writes work --
+ *     which should help our peers figure out that we're down.
+ */
+int t4_shutdown_adapter(struct adapter *adapter)
+{
+       int port;
+
+       t4_intr_disable(adapter);
+       t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
+       for_each_port(adapter, port) {
+               u32 a_port_cfg = PORT_REG(port,
+                                         is_t4(adapter->params.chip)
+                                         ? XGMAC_PORT_CFG_A
+                                         : MAC_PORT_CFG_A);
+
+               t4_write_reg(adapter, a_port_cfg,
+                            t4_read_reg(adapter, a_port_cfg)
+                            & ~SIGNAL_DET_V(1));
+       }
+       t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
+
+       return 0;
+}
+
 /**
  *     t4_bar2_sge_qregs - return BAR2 SGE Queue register information
  *     @adapter: the adapter
@@ -7686,6 +7802,13 @@ int t4_init_tp_params(struct adapter *adap)
                                 &adap->params.tp.ingress_config, 1,
                                 TP_INGRESS_CONFIG_A);
        }
+       /* For T6, cache whether the adapter passes the compressed error
+        * vector and outer header info for encapsulated packets.
+        */
+       if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+               v = t4_read_reg(adap, TP_OUT_CONFIG_A);
+               adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
+       }
 
        /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
         * shift positions of several elements of the Compressed Filter Tuple
index a267173f59972f174947578935189b1e474dc94b..5043b64805f0b2fb74a4535131f731f7dc7fa302 100644 (file)
@@ -1175,6 +1175,21 @@ struct cpl_rx_pkt {
 #define RXERR_CSUM_V(x) ((x) << RXERR_CSUM_S)
 #define RXERR_CSUM_F    RXERR_CSUM_V(1U)
 
+#define T6_COMPR_RXERR_LEN_S    1
+#define T6_COMPR_RXERR_LEN_V(x) ((x) << T6_COMPR_RXERR_LEN_S)
+#define T6_COMPR_RXERR_LEN_F    T6_COMPR_RXERR_LEN_V(1U)
+
+#define T6_COMPR_RXERR_VEC_S    0
+#define T6_COMPR_RXERR_VEC_M    0x3F
+#define T6_COMPR_RXERR_VEC_V(x) ((x) << T6_COMPR_RXERR_VEC_S)
+#define T6_COMPR_RXERR_VEC_G(x) \
+               (((x) >> T6_COMPR_RXERR_VEC_S) & T6_COMPR_RXERR_VEC_M)
+
+/* Logical OR of RX_ERROR_CSUM, RX_ERROR_CSIP */
+#define T6_COMPR_RXERR_SUM_S    4
+#define T6_COMPR_RXERR_SUM_V(x) ((x) << T6_COMPR_RXERR_SUM_S)
+#define T6_COMPR_RXERR_SUM_F    T6_COMPR_RXERR_SUM_V(1U)
+
 struct cpl_trace_pkt {
        u8 opcode;
        u8 intf;
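
The new T6_COMPR_RXERR_* macros follow the driver's usual field convention:
_S is the bit shift, _M the mask after shifting, _V(x) packs a value into the
register word, and _G(x) extracts it. A generic sketch of the pattern (FIELD
is a placeholder name):

#define FIELD_S		0
#define FIELD_M		0x3FU
#define FIELD_V(x)	((x) << FIELD_S)		/* pack */
#define FIELD_G(x)	(((x) >> FIELD_S) & FIELD_M)	/* extract */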
index ecf3ccc257bcc4fc81e8cf35647982f8c4186763..a323185507ec248dbab8dc345cdaf73ec935f38d 100644 (file)
@@ -169,6 +169,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
        CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR*/
        CH_PCI_ID_TABLE_FENTRY(0x509d), /* Custom T540-CR*/
+       CH_PCI_ID_TABLE_FENTRY(0x509e), /* Custom T520-CR */
+       CH_PCI_ID_TABLE_FENTRY(0x509f), /* Custom T540-CR */
+       CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */
 
        /* T6 adapters:
         */
@@ -185,6 +188,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
        CH_PCI_ID_TABLE_FENTRY(0x6011),
        CH_PCI_ID_TABLE_FENTRY(0x6014),
        CH_PCI_ID_TABLE_FENTRY(0x6015),
+       CH_PCI_ID_TABLE_FENTRY(0x6080),
+       CH_PCI_ID_TABLE_FENTRY(0x6081),
 CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
 
 #endif /* __T4_PCI_ID_TBL_H__ */
index 9fea255c7e87acbad0a8a1277a4782868e7d2c18..3348d33c36faca900e92bca25607ac79b0246908 100644 (file)
 #define PERR_INT_CAUSE_V(x) ((x) << PERR_INT_CAUSE_S)
 #define PERR_INT_CAUSE_F    PERR_INT_CAUSE_V(1U)
 
+#define DBG_GPIO_EN_A          0x6010
+#define XGMAC_PORT_CFG_A       0x1000
+#define MAC_PORT_CFG_A         0x800
+
+#define SIGNAL_DET_S    14
+#define SIGNAL_DET_V(x) ((x) << SIGNAL_DET_S)
+#define SIGNAL_DET_F    SIGNAL_DET_V(1U)
+
 #define MC_ECC_STATUS_A                0x751c
 #define MC_P_ECC_STATUS_A      0x4131c
 
 #define DBGLARPTR_M    0x7fU
 #define DBGLARPTR_V(x) ((x) << DBGLARPTR_S)
 
+#define CRXPKTENC_S    3
+#define CRXPKTENC_V(x) ((x) << CRXPKTENC_S)
+#define CRXPKTENC_F    CRXPKTENC_V(1U)
+
 #define TP_DBG_LA_DATAL_A      0x7ed8
 #define TP_DBG_LA_CONFIG_A     0x7ed4
 #define TP_OUT_CONFIG_A                0x7d04
 
 #define MPS_CMN_CTL_A  0x9000
 
+#define COUNTPAUSEMCRX_S    5
+#define COUNTPAUSEMCRX_V(x) ((x) << COUNTPAUSEMCRX_S)
+#define COUNTPAUSEMCRX_F    COUNTPAUSEMCRX_V(1U)
+
+#define COUNTPAUSESTATRX_S    4
+#define COUNTPAUSESTATRX_V(x) ((x) << COUNTPAUSESTATRX_S)
+#define COUNTPAUSESTATRX_F    COUNTPAUSESTATRX_V(1U)
+
+#define COUNTPAUSEMCTX_S    3
+#define COUNTPAUSEMCTX_V(x) ((x) << COUNTPAUSEMCTX_S)
+#define COUNTPAUSEMCTX_F    COUNTPAUSEMCTX_V(1U)
+
+#define COUNTPAUSESTATTX_S    2
+#define COUNTPAUSESTATTX_V(x) ((x) << COUNTPAUSESTATTX_S)
+#define COUNTPAUSESTATTX_F    COUNTPAUSESTATTX_V(1U)
+
 #define NUMPORTS_S    0
 #define NUMPORTS_M    0x3U
 #define NUMPORTS_G(x) (((x) >> NUMPORTS_S) & NUMPORTS_M)
 
 #define MPS_INT_CAUSE_A 0x9008
 #define MPS_TX_INT_CAUSE_A 0x9408
+#define MPS_STAT_CTL_A 0x9600
 
 #define FRMERR_S    15
 #define FRMERR_V(x) ((x) << FRMERR_S)
index 2accab38632327ae007baf1ef293dca1ef7da7d9..5fdaa16426c50effd2798c213a9f3d083c7bc440 100644 (file)
@@ -36,8 +36,8 @@
 #define __T4FW_VERSION_H__
 
 #define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0F
-#define T4FW_VERSION_MICRO 0x25
+#define T4FW_VERSION_MINOR 0x10
+#define T4FW_VERSION_MICRO 0x1A
 #define T4FW_VERSION_BUILD 0x00
 
 #define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
 #define T4FW_MIN_VERSION_MICRO 0x00
 
 #define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0F
-#define T5FW_VERSION_MICRO 0x25
+#define T5FW_VERSION_MINOR 0x10
+#define T5FW_VERSION_MICRO 0x1A
 #define T5FW_VERSION_BUILD 0x00
 
 #define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
 #define T5FW_MIN_VERSION_MICRO 0x00
 
 #define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0F
-#define T6FW_VERSION_MICRO 0x25
+#define T6FW_VERSION_MINOR 0x10
+#define T6FW_VERSION_MICRO 0x1A
 #define T6FW_VERSION_BUILD 0x00
 
 #define T6FW_MIN_VERSION_MAJOR 0x00
index 0d1a134c817434a2c4f3d12aa34e5aae88d0a021..ac7a150c54e9b4e93b751821eb09d80669eecc69 100644 (file)
@@ -158,20 +158,23 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
                netif_carrier_on(dev);
 
                switch (pi->link_cfg.speed) {
-               case 40000:
-                       s = "40Gbps";
+               case 100:
+                       s = "100Mbps";
+                       break;
+               case 1000:
+                       s = "1Gbps";
                        break;
-
                case 10000:
                        s = "10Gbps";
                        break;
-
-               case 1000:
-                       s = "1000Mbps";
+               case 25000:
+                       s = "25Gbps";
                        break;
-
-               case 100:
-                       s = "100Mbps";
+               case 40000:
+                       s = "40Gbps";
+                       break;
+               case 100000:
+                       s = "100Gbps";
                        break;
 
                default:
index f3ed9ce99e5e7023a93aa22fc85398f4057148dd..e37dde2ba97f6d529177d475be2a6d001071de0b 100644 (file)
@@ -1889,7 +1889,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
        u32 val;
 
        if (likely(work_done < budget)) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                intr_params = rspq->next_intr_params;
                rspq->next_intr_params = rspq->intr_params;
        } else
index 396c88678eabfec556536ca4a81f862245632d76..7a7c02f1f8b955f28206855f7084630cf8b35c2a 100644 (file)
@@ -228,9 +228,10 @@ static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int d
                pr_info("mdio write timed out\n");
 }
 
-static int ep93xx_rx(struct net_device *dev, int processed, int budget)
+static int ep93xx_rx(struct net_device *dev, int budget)
 {
        struct ep93xx_priv *ep = netdev_priv(dev);
+       int processed = 0;
 
        while (processed < budget) {
                int entry;
@@ -294,7 +295,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
                        skb_put(skb, length);
                        skb->protocol = eth_type_trans(skb, dev);
 
-                       netif_receive_skb(skb);
+                       napi_gro_receive(&ep->napi, skb);
 
                        dev->stats.rx_packets++;
                        dev->stats.rx_bytes += length;
@@ -310,35 +311,17 @@ err:
        return processed;
 }
 
-static int ep93xx_have_more_rx(struct ep93xx_priv *ep)
-{
-       struct ep93xx_rstat *rstat = ep->descs->rstat + ep->rx_pointer;
-       return !!((rstat->rstat0 & RSTAT0_RFP) && (rstat->rstat1 & RSTAT1_RFP));
-}
-
 static int ep93xx_poll(struct napi_struct *napi, int budget)
 {
        struct ep93xx_priv *ep = container_of(napi, struct ep93xx_priv, napi);
        struct net_device *dev = ep->dev;
-       int rx = 0;
-
-poll_some_more:
-       rx = ep93xx_rx(dev, rx, budget);
-       if (rx < budget) {
-               int more = 0;
+       int rx;
 
+       rx = ep93xx_rx(dev, budget);
+       if (rx < budget && napi_complete_done(napi, rx)) {
                spin_lock_irq(&ep->rx_lock);
-               __napi_complete(napi);
                wrl(ep, REG_INTEN, REG_INTEN_TX | REG_INTEN_RX);
-               if (ep93xx_have_more_rx(ep)) {
-                       wrl(ep, REG_INTEN, REG_INTEN_TX);
-                       wrl(ep, REG_INTSTSP, REG_INTSTS_RX);
-                       more = 1;
-               }
                spin_unlock_irq(&ep->rx_lock);
-
-               if (more && napi_reschedule(napi))
-                       goto poll_some_more;
        }
 
        if (rx) {
index 9023c858715d5759064081eee54b120ddaace673..2b23f46b34d3a8dff54bb93ce76d26834fe7a8a8 100644 (file)
@@ -135,6 +135,11 @@ struct enic_rfs_flw_tbl {
        struct timer_list rfs_may_expire;
 };
 
+struct vxlan_offload {
+       u16 vxlan_udp_port_number;
+       u8 patch_level;
+};
+
 /* Per-instance private data structure */
 struct enic {
        struct net_device *netdev;
@@ -175,6 +180,7 @@ struct enic {
        /* receive queue cache line section */
        ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX];
        unsigned int rq_count;
+       struct vxlan_offload vxlan;
        u64 rq_truncated_pkts;
        u64 rq_bad_fcs;
        struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
index cdd7a1a59aa7b4f8c1d13a41a02b3e20694270ff..4b87beeabce12a6e55e41c5beb3d3e0a39b6ecea 100644 (file)
 #ifdef CONFIG_RFS_ACCEL
 #include <linux/cpu_rmap.h>
 #endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#include <net/busy_poll.h>
-#endif
 #include <linux/crash_dump.h>
+#include <net/busy_poll.h>
+#include <net/vxlan.h>
 
 #include "cq_enet_desc.h"
 #include "vnic_dev.h"
@@ -178,6 +177,134 @@ static void enic_unset_affinity_hint(struct enic *enic)
                irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
 }
 
+static void enic_udp_tunnel_add(struct net_device *netdev,
+                               struct udp_tunnel_info *ti)
+{
+       struct enic *enic = netdev_priv(netdev);
+       __be16 port = ti->port;
+       int err;
+
+       spin_lock_bh(&enic->devcmd_lock);
+
+       if (ti->type != UDP_TUNNEL_TYPE_VXLAN) {
+               netdev_info(netdev, "udp_tnl: only vxlan tunnel offload supported");
+               goto error;
+       }
+
+       if (ti->sa_family != AF_INET) {
+               netdev_info(netdev, "vxlan: only IPv4 offload supported");
+               goto error;
+       }
+
+       if (enic->vxlan.vxlan_udp_port_number) {
+               if (ntohs(port) == enic->vxlan.vxlan_udp_port_number)
+                       netdev_warn(netdev, "vxlan: udp port already offloaded");
+               else
+                       netdev_info(netdev, "vxlan: offload supported for only one UDP port");
+
+               goto error;
+       }
+
+       err = vnic_dev_overlay_offload_cfg(enic->vdev,
+                                          OVERLAY_CFG_VXLAN_PORT_UPDATE,
+                                          ntohs(port));
+       if (err)
+               goto error;
+
+       err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
+                                           enic->vxlan.patch_level);
+       if (err)
+               goto error;
+
+       enic->vxlan.vxlan_udp_port_number = ntohs(port);
+
+       netdev_info(netdev, "vxlan fw-vers-%d: offload enabled for udp port: %d, sa_family: %d ",
+                   (int)enic->vxlan.patch_level, ntohs(port), ti->sa_family);
+
+       goto unlock;
+
+error:
+       netdev_info(netdev, "failed to offload udp port: %d, sa_family: %d, type: %d",
+                   ntohs(port), ti->sa_family, ti->type);
+unlock:
+       spin_unlock_bh(&enic->devcmd_lock);
+}
+
+static void enic_udp_tunnel_del(struct net_device *netdev,
+                               struct udp_tunnel_info *ti)
+{
+       struct enic *enic = netdev_priv(netdev);
+       int err;
+
+       spin_lock_bh(&enic->devcmd_lock);
+
+       if (ti->sa_family != AF_INET ||
+           ntohs(ti->port) != enic->vxlan.vxlan_udp_port_number ||
+           ti->type != UDP_TUNNEL_TYPE_VXLAN) {
+               netdev_info(netdev, "udp_tnl: port:%d, sa_family: %d, type: %d not offloaded",
+                           ntohs(ti->port), ti->sa_family, ti->type);
+               goto unlock;
+       }
+
+       err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
+                                           OVERLAY_OFFLOAD_DISABLE);
+       if (err) {
+               netdev_err(netdev, "vxlan: del offload udp port: %d failed",
+                          ntohs(ti->port));
+               goto unlock;
+       }
+
+       enic->vxlan.vxlan_udp_port_number = 0;
+
+       netdev_info(netdev, "vxlan: del offload udp port %d, family %d\n",
+                   ntohs(ti->port), ti->sa_family);
+
+unlock:
+       spin_unlock_bh(&enic->devcmd_lock);
+}
+
+static netdev_features_t enic_features_check(struct sk_buff *skb,
+                                            struct net_device *dev,
+                                            netdev_features_t features)
+{
+       const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
+       struct enic *enic = netdev_priv(dev);
+       struct udphdr *udph;
+       u16 port = 0;
+       u16 proto;
+
+       if (!skb->encapsulation)
+               return features;
+
+       features = vxlan_features_check(skb, features);
+
+       /* hardware only supports IPv4 vxlan tunnel */
+       if (vlan_get_protocol(skb) != htons(ETH_P_IP))
+               goto out;
+
+       /* hardware does not support offload of ipv6 inner pkt */
+       if (eth->h_proto != htons(ETH_P_IP))
+               goto out;
+
+       proto = ip_hdr(skb)->protocol;
+
+       if (proto == IPPROTO_UDP) {
+               udph = udp_hdr(skb);
+               port = be16_to_cpu(udph->dest);
+       }
+
+       /* HW supports offload of only one UDP port. Remove CSUM and GSO MASK
+        * for other UDP port tunnels
+        */
+       if (port != enic->vxlan.vxlan_udp_port_number)
+               goto out;
+
+       return features;
+
+out:
+       return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
 int enic_is_dynamic(struct enic *enic)
 {
        return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -506,20 +633,19 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
        return err;
 }
 
-static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
-                                struct sk_buff *skb, unsigned int mss,
-                                int vlan_tag_insert, unsigned int vlan_tag,
-                                int loopback)
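+/* For an IPv4 inner packet, preload the inner TCP csum field with the pseudo
+ * hdr sum calculated with the length set to zero; hw later adds in the length
+ * of each TSO segment.
+ */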
+static void enic_preload_tcp_csum_encap(struct sk_buff *skb)
 {
-       unsigned int frag_len_left = skb_headlen(skb);
-       unsigned int len_left = skb->len - frag_len_left;
-       unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-       int eop = (len_left == 0);
-       unsigned int len;
-       dma_addr_t dma_addr;
-       unsigned int offset = 0;
-       skb_frag_t *frag;
+       if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+               inner_ip_hdr(skb)->check = 0;
+               inner_tcp_hdr(skb)->check =
+                       ~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
+                                          inner_ip_hdr(skb)->daddr, 0,
+                                          IPPROTO_TCP, 0);
+       }
+}
 
+static void enic_preload_tcp_csum(struct sk_buff *skb)
+{
        /* Preload TCP csum field with IP pseudo hdr calculated
         * with IP length set to zero.  HW will later add in length
         * to each TCP segment resulting from the TSO.
@@ -533,6 +659,30 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
                tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                        &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
        }
+}
+
+static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
+                                struct sk_buff *skb, unsigned int mss,
+                                int vlan_tag_insert, unsigned int vlan_tag,
+                                int loopback)
+{
+       unsigned int frag_len_left = skb_headlen(skb);
+       unsigned int len_left = skb->len - frag_len_left;
+       int eop = (len_left == 0);
+       unsigned int offset = 0;
+       unsigned int hdr_len;
+       dma_addr_t dma_addr;
+       unsigned int len;
+       skb_frag_t *frag;
+
+       if (skb->encapsulation) {
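+               /* hdr_len covers the outer headers plus the inner headers up
+                * to the end of the inner TCP header
+                */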
+               hdr_len = skb_inner_transport_header(skb) - skb->data;
+               hdr_len += inner_tcp_hdrlen(skb);
+               enic_preload_tcp_csum_encap(skb);
+       } else {
+               hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+               enic_preload_tcp_csum(skb);
+       }
 
        /* Queue WQ_ENET_MAX_DESC_LEN length descriptors
         * for the main skb fragment
@@ -581,6 +731,38 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
        return 0;
 }
 
+static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
+                                         struct sk_buff *skb,
+                                         int vlan_tag_insert,
+                                         unsigned int vlan_tag, int loopback)
+{
+       unsigned int head_len = skb_headlen(skb);
+       unsigned int len_left = skb->len - head_len;
+       /* Hardware will overwrite the checksum fields, calculating from
+        * scratch and ignoring the value placed by software.
+        * Offload mode = 00
+        * mss[2], mss[1], mss[0] bits are set
+        */
+       unsigned int mss_or_csum = 7;
+       int eop = (len_left == 0);
+       dma_addr_t dma_addr;
+       int err = 0;
+
+       dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+                                 PCI_DMA_TODEVICE);
+       if (unlikely(enic_dma_map_check(enic, dma_addr)))
+               return -ENOMEM;
+
+       enic_queue_wq_desc_ex(wq, skb, dma_addr, head_len, mss_or_csum, 0,
+                             vlan_tag_insert, vlan_tag,
+                             WQ_ENET_OFFLOAD_MODE_CSUM, eop, 1 /* SOP */, eop,
+                             loopback);
+       if (!eop)
+               err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+       return err;
+}
+
 static inline void enic_queue_wq_skb(struct enic *enic,
        struct vnic_wq *wq, struct sk_buff *skb)
 {
@@ -603,6 +785,9 @@ static inline void enic_queue_wq_skb(struct enic *enic,
                err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
                                            vlan_tag_insert, vlan_tag,
                                            loopback);
+       else if (skb->encapsulation)
+               err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
+                                             vlan_tag, loopback);
        else if (skb->ip_summed == CHECKSUM_PARTIAL)
                err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
                                                vlan_tag, loopback);
@@ -680,8 +865,8 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 }
 
 /* dev_base_lock rwlock held, nominally process context */
-static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
-                                               struct rtnl_link_stats64 *net_stats)
+static void enic_get_stats(struct net_device *netdev,
+                          struct rtnl_link_stats64 *net_stats)
 {
        struct enic *enic = netdev_priv(netdev);
        struct vnic_stats *stats;
@@ -693,7 +878,7 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
         * recorded stats.
         */
        if (err == -ENOMEM)
-               return net_stats;
+               return;
 
        net_stats->tx_packets = stats->tx.tx_frames_ok;
        net_stats->tx_bytes = stats->tx.tx_bytes_ok;
@@ -707,8 +892,6 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
        net_stats->rx_over_errors = enic->rq_truncated_pkts;
        net_stats->rx_crc_errors = enic->rq_bad_fcs;
        net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;
-
-       return net_stats;
 }
 
 static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
@@ -1117,6 +1300,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
        u8 packet_error;
        u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
        u32 rss_hash;
+       bool outer_csum_ok = true, encap = false;
 
        if (skipped)
                return;
@@ -1165,7 +1349,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
                skb_put(skb, bytes_written);
                skb->protocol = eth_type_trans(skb, netdev);
                skb_record_rx_queue(skb, q_number);
-               if (netdev->features & NETIF_F_RXHASH) {
+               if ((netdev->features & NETIF_F_RXHASH) && rss_hash &&
+                   (type == 3)) {
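+                       /* the hash is only valid for this completion type;
+                        * encap completions reuse these bits as flags (see
+                        * the patch_level handling below)
+                        */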
                        switch (rss_type) {
                        case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv4:
                        case CQ_ENET_RQ_DESC_RSS_TYPE_TCP_IPv6:
@@ -1179,22 +1364,45 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
                                break;
                        }
                }
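+               /* decode the hw encap/outer-csum indications; the per
+                * patch_level bit meanings are documented in enic_probe()
+                */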
+               if (enic->vxlan.vxlan_udp_port_number) {
+                       switch (enic->vxlan.patch_level) {
+                       case 0:
+                               if (fcoe) {
+                                       encap = true;
+                                       outer_csum_ok = fcoe_fc_crc_ok;
+                               }
+                               break;
+                       case 2:
+                               if ((type == 7) &&
+                                   (rss_hash & BIT(0))) {
+                                       encap = true;
+                                       outer_csum_ok = (rss_hash & BIT(1)) &&
+                                                       (rss_hash & BIT(2));
+                               }
+                               break;
+                       }
+               }
 
                /* Hardware does not provide whole packet checksum. It only
                 * provides pseudo checksum. Since hw validates the packet
                 * checksum but does not provide us the checksum value, use
                 * CHECKSUM_UNNECESSARY.
+                *
+                * In case of an encap pkt, tcp_udp_csum_ok/ipv4_csum_ok is the
+                * inner csum_ok. outer_csum_ok is set by hw when the outer udp
+                * csum is correct or is zero.
                 */
-               if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
-                   ipv4_csum_ok)
+               if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
+                   tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       skb->csum_level = encap;
+               }
 
                if (vlan_stripped)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
 
                skb_mark_napi_id(skb, &enic->napi[rq->index]);
-               if (enic_poll_busy_polling(rq) ||
-                   !(netdev->features & NETIF_F_GRO))
+               if (!(netdev->features & NETIF_F_GRO))
                        netif_receive_skb(skb);
                else
                        napi_gro_receive(&enic->napi[q_number], skb);
@@ -1298,15 +1506,6 @@ static int enic_poll(struct napi_struct *napi, int budget)
        wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
                                       enic_wq_service, NULL);
 
-       if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
-               if (wq_work_done > 0)
-                       vnic_intr_return_credits(&enic->intr[intr],
-                                                wq_work_done,
-                                                0 /* dont unmask intr */,
-                                                0 /* dont reset intr timer */);
-               return budget;
-       }
-
        if (budget > 0)
                rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
                        rq_work_to_do, enic_rq_service, NULL);
@@ -1325,7 +1524,6 @@ static int enic_poll(struct napi_struct *napi, int budget)
                        0 /* don't reset intr timer */);
 
        err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
-       enic_poll_unlock_napi(&enic->rq[cq_rq], napi);
 
        /* Buffer allocation failed. Stay in polling
         * mode so we can try to fill the ring again.
@@ -1345,7 +1543,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
                 * exit polling
                 */
 
-               napi_complete(napi);
+               napi_complete_done(napi, rq_work_done);
                if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                        enic_set_int_moderation(enic, &enic->rq[0]);
                vnic_intr_unmask(&enic->intr[intr]);
@@ -1392,34 +1590,6 @@ static void enic_set_rx_cpu_rmap(struct enic *enic)
 
 #endif /* CONFIG_RFS_ACCEL */
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int enic_busy_poll(struct napi_struct *napi)
-{
-       struct net_device *netdev = napi->dev;
-       struct enic *enic = netdev_priv(netdev);
-       unsigned int rq = (napi - &enic->napi[0]);
-       unsigned int cq = enic_cq_rq(enic, rq);
-       unsigned int intr = enic_msix_rq_intr(enic, rq);
-       unsigned int work_to_do = -1; /* clean all pkts possible */
-       unsigned int work_done;
-
-       if (!enic_poll_lock_poll(&enic->rq[rq]))
-               return LL_FLUSH_BUSY;
-       work_done = vnic_cq_service(&enic->cq[cq], work_to_do,
-                                   enic_rq_service, NULL);
-
-       if (work_done > 0)
-               vnic_intr_return_credits(&enic->intr[intr],
-                                        work_done, 0, 0);
-       vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
-       if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
-               enic_calc_int_moderation(enic, &enic->rq[rq]);
-       enic_poll_unlock_poll(&enic->rq[rq]);
-
-       return work_done;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
 {
        struct net_device *netdev = napi->dev;
@@ -1461,8 +1631,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
        unsigned int work_done = 0;
        int err;
 
-       if (!enic_poll_lock_napi(&enic->rq[rq]))
-               return budget;
        /* Service RQ
         */
 
@@ -1495,14 +1663,13 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
                 */
                enic_calc_int_moderation(enic, &enic->rq[rq]);
 
-       enic_poll_unlock_napi(&enic->rq[rq], napi);
        if (work_done < work_to_do) {
 
                /* Some work done, but not enough to stay in polling,
                 * exit polling
                 */
 
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                        enic_set_int_moderation(enic, &enic->rq[rq]);
                vnic_intr_unmask(&enic->intr[intr]);
@@ -1753,10 +1920,9 @@ static int enic_open(struct net_device *netdev)
 
        netif_tx_wake_all_queues(netdev);
 
-       for (i = 0; i < enic->rq_count; i++) {
-               enic_busy_poll_init_lock(&enic->rq[i]);
+       for (i = 0; i < enic->rq_count; i++)
                napi_enable(&enic->napi[i]);
-       }
+
        if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
                for (i = 0; i < enic->wq_count; i++)
                        napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
@@ -1800,13 +1966,8 @@ static int enic_stop(struct net_device *netdev)
 
        enic_dev_disable(enic);
 
-       for (i = 0; i < enic->rq_count; i++) {
+       for (i = 0; i < enic->rq_count; i++)
                napi_disable(&enic->napi[i]);
-               local_bh_disable();
-               while (!enic_poll_lock_napi(&enic->rq[i]))
-                       mdelay(1);
-               local_bh_enable();
-       }
 
        netif_carrier_off(netdev);
        netif_tx_disable(netdev);
@@ -2337,9 +2498,9 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = enic_rx_flow_steer,
 #endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       .ndo_busy_poll          = enic_busy_poll,
-#endif
+       .ndo_udp_tunnel_add     = enic_udp_tunnel_add,
+       .ndo_udp_tunnel_del     = enic_udp_tunnel_del,
+       .ndo_features_check     = enic_features_check,
 };
 
 static const struct net_device_ops enic_netdev_ops = {
@@ -2363,9 +2524,9 @@ static const struct net_device_ops enic_netdev_ops = {
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = enic_rx_flow_steer,
 #endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       .ndo_busy_poll          = enic_busy_poll,
-#endif
+       .ndo_udp_tunnel_add     = enic_udp_tunnel_add,
+       .ndo_udp_tunnel_del     = enic_udp_tunnel_del,
+       .ndo_features_check     = enic_features_check,
 };
 
 static void enic_dev_deinit(struct enic *enic)
@@ -2741,6 +2902,39 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                netdev->hw_features |= NETIF_F_RXHASH;
        if (ENIC_SETTING(enic, RXCSUM))
                netdev->hw_features |= NETIF_F_RXCSUM;
+       if (ENIC_SETTING(enic, VXLAN)) {
+               u64 patch_level;
+
+               netdev->hw_enc_features |= NETIF_F_RXCSUM               |
+                                          NETIF_F_TSO                  |
+                                          NETIF_F_TSO_ECN              |
+                                          NETIF_F_GSO_UDP_TUNNEL       |
+                                          NETIF_F_HW_CSUM              |
+                                          NETIF_F_GSO_UDP_TUNNEL_CSUM;
+               netdev->hw_features |= netdev->hw_enc_features;
+               /* get a bitmask from hw describing the supported offload patch levels
+                * BIT(0) = fw supports patch_level 0
+                *          fcoe bit = encap
+                *          fcoe_fc_crc_ok = outer csum ok
+                * BIT(1) = always set by fw
+                * BIT(2) = fw supports patch_level 2
+                *          BIT(0) in rss_hash = encap
+                *          BIT(1,2) in rss_hash = outer_ip_csum_ok/
+                *                                 outer_tcp_csum_ok
+                * used in enic_rq_indicate_buf
+                */
+               err = vnic_dev_get_supported_feature_ver(enic->vdev,
+                                                        VIC_FEATURE_VXLAN,
+                                                        &patch_level);
+               if (err)
+                       patch_level = 0;
+               /* keep only the patch levels the driver supports */
+               patch_level &= BIT_ULL(0) | BIT_ULL(2);
+               patch_level = fls(patch_level);
+               patch_level = patch_level ? patch_level - 1 : 0;
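+               /* e.g. a fw bitmap of 0x7 (levels 0, 1 and 2) is masked to
+                * 0x5; fls() returns 3, so the driver settles on level 2
+                */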
+               enic->vxlan.patch_level = patch_level;
+       }
 
        netdev->features |= netdev->hw_features;
        netdev->vlan_features |= netdev->features;
index 8f27df3207bc0799d78c36b8b539caef1ad84ea4..1841ad45d2157c0e5635f013dc4165787476bfbf 100644 (file)
@@ -1247,3 +1247,37 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
 
        return ret;
 }
+
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config)
+{
+       u64 a0 = overlay;
+       u64 a1 = config;
+       int wait = 1000;
+
+       return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CTRL, &a0, &a1, wait);
+}
+
+int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
+                                u16 vxlan_udp_port_number)
+{
+       u64 a1 = vxlan_udp_port_number;
+       u64 a0 = overlay;
+       int wait = 1000;
+
+       return vnic_dev_cmd(vdev, CMD_OVERLAY_OFFLOAD_CFG, &a0, &a1, wait);
+}
+
+int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature,
+                                      u64 *supported_versions)
+{
+       u64 a0 = feature;
+       int wait = 1000;
+       u64 a1 = 0;
+       int ret;
+
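+       /* on success the supported-version bitmap is returned in a0 */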
+       ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
+       if (!ret)
+               *supported_versions = a0;
+
+       return ret;
+}
index 54156c48442484dcdcc78a027eac0173ab1cd62f..9d43d6bb9907ec323d9c1520720447bb7731c808 100644 (file)
@@ -179,5 +179,10 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
 int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
                        struct filter *data);
 int vnic_devcmd_init(struct vnic_dev *vdev);
+int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config);
+int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
+                                u16 vxlan_udp_port_number);
+int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature,
+                                      u64 *supported_versions);
 
 #endif /* _VNIC_DEV_H_ */
index 2a812880b884f35e8ebc51d971be3639c8f71c74..d83880b0d46852d51ddaec76f59fbed19cefba0c 100644 (file)
@@ -406,6 +406,31 @@ enum vnic_devcmd_cmd {
         * in: (u32) a0=Queue Pair number
         */
        CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
+
+       /* Use this devcmd for agreeing on the highest common version supported
+        * by both driver and fw for features that need such a facility.
+        * in:  (u64) a0 = feature (driver requests the supported versions
+        *      for this feature)
+        * out: (u64) a0 = bitmap of all supported versions for that feature
+        */
+       CMD_GET_SUPP_FEATURE_VER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 69),
+
+       /* Control (Enable/Disable) overlay offloads on the given vnic
+        * in: (u8) a0 = OVERLAY_FEATURE_NVGRE : NVGRE
+        *          a0 = OVERLAY_FEATURE_VXLAN : VxLAN
+        * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable or
+        *          a1 = OVERLAY_OFFLOAD_DISABLE : Disable or
+        *          a1 = OVERLAY_OFFLOAD_ENABLE_P2 : Enable with version 2
+        */
+       CMD_OVERLAY_OFFLOAD_CTRL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72),
+
+       /* Configuration of the overlay offloads feature on a given vNIC
+        * in: (u8) a0 = OVERLAY_FEATURE_NVGRE : NVGRE
+        *          a0 = OVERLAY_FEATURE_VXLAN : VxLAN
+        * in: (u8) a1 = OVERLAY_CFG_VXLAN_PORT_UPDATE : update the VxLAN port
+        * in: (u16) a2 = UDP port number
+        */
+       CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
 };
 
 /* CMD_ENABLE2 flags */
@@ -657,4 +682,30 @@ struct devcmd2_result {
 #define DEVCMD2_RING_SIZE      32
 #define DEVCMD2_DESC_SIZE      128
 
+enum overlay_feature_t {
+       OVERLAY_FEATURE_NVGRE = 1,
+       OVERLAY_FEATURE_VXLAN,
+       OVERLAY_FEATURE_MAX,
+};
+
+enum overlay_ofld_cmd {
+       OVERLAY_OFFLOAD_ENABLE,
+       OVERLAY_OFFLOAD_DISABLE,
+       OVERLAY_OFFLOAD_ENABLE_P2,
+       OVERLAY_OFFLOAD_MAX,
+};
+
+#define OVERLAY_CFG_VXLAN_PORT_UPDATE  0
+
+/* Use this enum to get the supported versions for each of these features
+ * If you need to use devcmd_get_supported_feature_version(), add the new
+ * feature to this enum and install a function handler in devcmd.c
+ */
+enum vic_feature_t {
+       VIC_FEATURE_VXLAN,
+       VIC_FEATURE_RDMA,
+       VIC_FEATURE_VXLAN_PATCH,
+       VIC_FEATURE_MAX,
+};
+
 #endif /* _VNIC_DEVCMD_H_ */
index 75aced2de86987b6a96205ca6d079f4d5cfd77be..7d6fbb5635a47ca21fd2bd1274948bd4687bd221 100644 (file)
@@ -48,6 +48,7 @@ struct vnic_enet_config {
 #define VENETF_RSSHASH_IPV6_EX 0x200   /* Hash on IPv6 extended fields */
 #define VENETF_RSSHASH_TCPIPV6_EX 0x400        /* Hash on TCP + IPv6 ext. fields */
 #define VENETF_LOOP            0x800   /* Loopback enabled */
+#define VENETF_VXLAN           0x10000 /* VxLAN offload */
 
 #define VENET_INTR_TYPE_MIN    0       /* Timer specs min interrupt spacing */
 #define VENET_INTR_TYPE_IDLE   1       /* Timer specs idle time before irq */
index b9c82f143d7e099948c9bd5e540fee64eeb68b46..0413103ebe94a8436b43e4f9ad04b7800c4d1853 100644 (file)
@@ -92,9 +92,6 @@ struct vnic_rq {
        struct vnic_rq_buf *to_clean;
        void *os_buf_head;
        unsigned int pkts_outstanding;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       atomic_t bpoll_state;
-#endif /* CONFIG_NET_RX_BUSY_POLL */
 };
 
 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
@@ -207,81 +204,6 @@ static inline int vnic_rq_fill(struct vnic_rq *rq,
        return 0;
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
-{
-       atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
-}
-
-static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
-{
-       int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE,
-                               ENIC_POLL_STATE_NAPI);
-
-       return (rc == ENIC_POLL_STATE_IDLE);
-}
-
-static inline void enic_poll_unlock_napi(struct vnic_rq *rq,
-                                        struct napi_struct *napi)
-{
-       WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_NAPI);
-       napi_gro_flush(napi, false);
-       atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
-}
-
-static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
-{
-       int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE,
-                               ENIC_POLL_STATE_POLL);
-
-       return (rc == ENIC_POLL_STATE_IDLE);
-}
-
-
-static inline void enic_poll_unlock_poll(struct vnic_rq *rq)
-{
-       WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_POLL);
-       atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE);
-}
-
-static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
-{
-       return atomic_read(&rq->bpoll_state) & ENIC_POLL_STATE_POLL;
-}
-
-#else
-
-static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
-{
-}
-
-static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
-{
-       return true;
-}
-
-static inline bool enic_poll_unlock_napi(struct vnic_rq *rq,
-                                        struct napi_struct *napi)
-{
-       return false;
-}
-
-static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
-{
-       return false;
-}
-
-static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
-{
-       return false;
-}
-
-static inline bool enic_poll_ll_polling(struct vnic_rq *rq)
-{
-       return false;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 void vnic_rq_free(struct vnic_rq *rq);
 int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
        unsigned int desc_count, unsigned int desc_size);
index 57c17e797ae3a060fb6215be8bf7de372b54e1ac..127ce9707378c151c3037c92dcfc6eb8da16dcb3 100644 (file)
@@ -1485,95 +1485,104 @@ static void __de_get_regs(struct de_private *de, u8 *buf)
        de_rx_missed(de, rbuf[8]);
 }
 
-static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
+static int __de_get_link_ksettings(struct de_private *de,
+                                  struct ethtool_link_ksettings *cmd)
 {
-       ecmd->supported = de->media_supported;
-       ecmd->transceiver = XCVR_INTERNAL;
-       ecmd->phy_address = 0;
-       ecmd->advertising = de->media_advertise;
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               de->media_supported);
+       cmd->base.phy_address = 0;
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               de->media_advertise);
 
        switch (de->media_type) {
        case DE_MEDIA_AUI:
-               ecmd->port = PORT_AUI;
+               cmd->base.port = PORT_AUI;
                break;
        case DE_MEDIA_BNC:
-               ecmd->port = PORT_BNC;
+               cmd->base.port = PORT_BNC;
                break;
        default:
-               ecmd->port = PORT_TP;
+               cmd->base.port = PORT_TP;
                break;
        }
 
-       ethtool_cmd_speed_set(ecmd, 10);
+       cmd->base.speed = 10;
 
        if (dr32(MacMode) & FullDuplex)
-               ecmd->duplex = DUPLEX_FULL;
+               cmd->base.duplex = DUPLEX_FULL;
        else
-               ecmd->duplex = DUPLEX_HALF;
+               cmd->base.duplex = DUPLEX_HALF;
 
        if (de->media_lock)
-               ecmd->autoneg = AUTONEG_DISABLE;
+               cmd->base.autoneg = AUTONEG_DISABLE;
        else
-               ecmd->autoneg = AUTONEG_ENABLE;
+               cmd->base.autoneg = AUTONEG_ENABLE;
 
        /* ignore maxtxpkt, maxrxpkt for now */
 
        return 0;
 }
 
-static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
+static int __de_set_link_ksettings(struct de_private *de,
+                                  const struct ethtool_link_ksettings *cmd)
 {
        u32 new_media;
        unsigned int media_lock;
+       u8 duplex = cmd->base.duplex;
+       u8 port = cmd->base.port;
+       u8 autoneg = cmd->base.autoneg;
+       u32 advertising;
 
-       if (ethtool_cmd_speed(ecmd) != 10)
-               return -EINVAL;
-       if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
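+       /* de_private keeps legacy u32 media masks, so narrow the link-mode
+        * bitmap back to a u32 before validating it
+        */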
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
+
+       if (cmd->base.speed != 10)
                return -EINVAL;
-       if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
+       if (duplex != DUPLEX_HALF && duplex != DUPLEX_FULL)
                return -EINVAL;
-       if (de->de21040 && ecmd->port == PORT_BNC)
+       if (port != PORT_TP && port != PORT_AUI && port != PORT_BNC)
                return -EINVAL;
-       if (ecmd->transceiver != XCVR_INTERNAL)
+       if (de->de21040 && port == PORT_BNC)
                return -EINVAL;
-       if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
+       if (autoneg != AUTONEG_DISABLE && autoneg != AUTONEG_ENABLE)
                return -EINVAL;
-       if (ecmd->advertising & ~de->media_supported)
+       if (advertising & ~de->media_supported)
                return -EINVAL;
-       if (ecmd->autoneg == AUTONEG_ENABLE &&
-           (!(ecmd->advertising & ADVERTISED_Autoneg)))
+       if (autoneg == AUTONEG_ENABLE &&
+           (!(advertising & ADVERTISED_Autoneg)))
                return -EINVAL;
 
-       switch (ecmd->port) {
+       switch (port) {
        case PORT_AUI:
                new_media = DE_MEDIA_AUI;
-               if (!(ecmd->advertising & ADVERTISED_AUI))
+               if (!(advertising & ADVERTISED_AUI))
                        return -EINVAL;
                break;
        case PORT_BNC:
                new_media = DE_MEDIA_BNC;
-               if (!(ecmd->advertising & ADVERTISED_BNC))
+               if (!(advertising & ADVERTISED_BNC))
                        return -EINVAL;
                break;
        default:
-               if (ecmd->autoneg == AUTONEG_ENABLE)
+               if (autoneg == AUTONEG_ENABLE)
                        new_media = DE_MEDIA_TP_AUTO;
-               else if (ecmd->duplex == DUPLEX_FULL)
+               else if (duplex == DUPLEX_FULL)
                        new_media = DE_MEDIA_TP_FD;
                else
                        new_media = DE_MEDIA_TP;
-               if (!(ecmd->advertising & ADVERTISED_TP))
+               if (!(advertising & ADVERTISED_TP))
                        return -EINVAL;
-               if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
+               if (!(advertising & (ADVERTISED_10baseT_Full |
+                                    ADVERTISED_10baseT_Half)))
                        return -EINVAL;
                break;
        }
 
-       media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
+       media_lock = (autoneg == AUTONEG_ENABLE) ? 0 : 1;
 
        if ((new_media == de->media_type) &&
            (media_lock == de->media_lock) &&
-           (ecmd->advertising == de->media_advertise))
+           (advertising == de->media_advertise))
                return 0; /* nothing to change */
 
        de_link_down(de);
@@ -1582,7 +1591,7 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
 
        de->media_type = new_media;
        de->media_lock = media_lock;
-       de->media_advertise = ecmd->advertising;
+       de->media_advertise = advertising;
        de_set_media(de);
        if (netif_running(de->dev))
                de_start_rxtx(de);
@@ -1604,25 +1613,27 @@ static int de_get_regs_len(struct net_device *dev)
        return DE_REGS_SIZE;
 }
 
-static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int de_get_link_ksettings(struct net_device *dev,
+                                struct ethtool_link_ksettings *cmd)
 {
        struct de_private *de = netdev_priv(dev);
        int rc;
 
        spin_lock_irq(&de->lock);
-       rc = __de_get_settings(de, ecmd);
+       rc = __de_get_link_ksettings(de, cmd);
        spin_unlock_irq(&de->lock);
 
        return rc;
 }
 
-static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int de_set_link_ksettings(struct net_device *dev,
+                                const struct ethtool_link_ksettings *cmd)
 {
        struct de_private *de = netdev_priv(dev);
        int rc;
 
        spin_lock_irq(&de->lock);
-       rc = __de_set_settings(de, ecmd);
+       rc = __de_set_link_ksettings(de, cmd);
        spin_unlock_irq(&de->lock);
 
        return rc;
@@ -1690,13 +1701,13 @@ static const struct ethtool_ops de_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
        .get_drvinfo            = de_get_drvinfo,
        .get_regs_len           = de_get_regs_len,
-       .get_settings           = de_get_settings,
-       .set_settings           = de_set_settings,
        .get_msglevel           = de_get_msglevel,
        .set_msglevel           = de_set_msglevel,
        .get_eeprom             = de_get_eeprom,
        .nway_reset             = de_nway_reset,
        .get_regs               = de_get_regs,
+       .get_link_ksettings     = de_get_link_ksettings,
+       .set_link_ksettings     = de_set_link_ksettings,
 };
 
 static void de21040_get_mac_address(struct de_private *de)
index 92306b320840289e710502dcee938282c4a92e5e..ba6ae24acf62238e22c162f9ee294159f41d5cc0 100644 (file)
@@ -319,8 +319,8 @@ int tulip_poll(struct napi_struct *napi, int budget)
 
          /* Remove us from polling list and enable RX intr. */
 
-         napi_complete(napi);
-         iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
+       napi_complete_done(napi, work_done);
+       iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
 
          /* The last op happens after poll completion. Which means the following:
           * 1. it can race with disabling irqs in irq handler
@@ -355,7 +355,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
           * before we did napi_complete(). See? We would lose it. */
 
          /* remove ourselves from the polling list */
-         napi_complete(napi);
+         napi_complete_done(napi, work_done);
 
          return work_done;
 }
index f82ebe5d89ee726851678d6972c42e01945d2503..8d98b259d1baa21da41a278b4614fd2d4f4b749d 100644 (file)
@@ -926,48 +926,53 @@ static void uli526x_set_filter_mode(struct net_device * dev)
 }
 
 static void
-ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
+ULi_ethtool_get_link_ksettings(struct uli526x_board_info *db,
+                              struct ethtool_link_ksettings *cmd)
 {
-       ecmd->supported = (SUPPORTED_10baseT_Half |
+       u32 supported, advertising;
+
+       supported = (SUPPORTED_10baseT_Half |
                           SUPPORTED_10baseT_Full |
                           SUPPORTED_100baseT_Half |
                           SUPPORTED_100baseT_Full |
                           SUPPORTED_Autoneg |
                           SUPPORTED_MII);
 
-       ecmd->advertising = (ADVERTISED_10baseT_Half |
+       advertising = (ADVERTISED_10baseT_Half |
                           ADVERTISED_10baseT_Full |
                           ADVERTISED_100baseT_Half |
                           ADVERTISED_100baseT_Full |
                           ADVERTISED_Autoneg |
                           ADVERTISED_MII);
 
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
 
-       ecmd->port = PORT_MII;
-       ecmd->phy_address = db->phy_addr;
-
-       ecmd->transceiver = XCVR_EXTERNAL;
+       cmd->base.port = PORT_MII;
+       cmd->base.phy_address = db->phy_addr;
 
-       ethtool_cmd_speed_set(ecmd, SPEED_10);
-       ecmd->duplex = DUPLEX_HALF;
+       cmd->base.speed = SPEED_10;
+       cmd->base.duplex = DUPLEX_HALF;
 
        if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
        {
-               ethtool_cmd_speed_set(ecmd, SPEED_100);
+               cmd->base.speed = SPEED_100;
        }
        if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
        {
-               ecmd->duplex = DUPLEX_FULL;
+               cmd->base.duplex = DUPLEX_FULL;
        }
        if(db->link_failed)
        {
-               ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
-               ecmd->duplex = DUPLEX_UNKNOWN;
+               cmd->base.speed = SPEED_UNKNOWN;
+               cmd->base.duplex = DUPLEX_UNKNOWN;
        }
 
        if (db->media_mode & ULI526X_AUTO)
        {
-               ecmd->autoneg = AUTONEG_ENABLE;
+               cmd->base.autoneg = AUTONEG_ENABLE;
        }
 }
 
@@ -981,10 +986,12 @@ static void netdev_get_drvinfo(struct net_device *dev,
        strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
 }
 
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
+static int netdev_get_link_ksettings(struct net_device *dev,
+                                    struct ethtool_link_ksettings *cmd)
+{
        struct uli526x_board_info *np = netdev_priv(dev);
 
-       ULi_ethtool_gset(np, cmd);
+       ULi_ethtool_get_link_ksettings(np, cmd);
 
        return 0;
 }
@@ -1006,9 +1013,9 @@ static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 
 static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo            = netdev_get_drvinfo,
-       .get_settings           = netdev_get_settings,
        .get_link               = netdev_get_link,
        .get_wol                = uli526x_get_wol,
+       .get_link_ksettings     = netdev_get_link_ksettings,
 };
 
 /*
index bc9bf88e5831a8ca7b3286a1a97c3b6b01ca6eb7..d1f2f3cc7cfaa760a826fb8820c91c4a07ed18cd 100644 (file)
@@ -1391,25 +1391,27 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+                                    struct ethtool_link_ksettings *cmd)
 {
        struct netdev_private *np = netdev_priv(dev);
        int rc;
 
        spin_lock_irq(&np->lock);
-       rc = mii_ethtool_gset(&np->mii_if, cmd);
+       rc = mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
        spin_unlock_irq(&np->lock);
 
        return rc;
 }
 
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_set_link_ksettings(struct net_device *dev,
+                                    const struct ethtool_link_ksettings *cmd)
 {
        struct netdev_private *np = netdev_priv(dev);
        int rc;
 
        spin_lock_irq(&np->lock);
-       rc = mii_ethtool_sset(&np->mii_if, cmd);
+       rc = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
        spin_unlock_irq(&np->lock);
 
        return rc;
@@ -1439,12 +1441,12 @@ static void netdev_set_msglevel(struct net_device *dev, u32 value)
 
 static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo            = netdev_get_drvinfo,
-       .get_settings           = netdev_get_settings,
-       .set_settings           = netdev_set_settings,
        .nway_reset             = netdev_nway_reset,
        .get_link               = netdev_get_link,
        .get_msglevel           = netdev_get_msglevel,
        .set_msglevel           = netdev_set_msglevel,
+       .get_link_ksettings     = netdev_get_link_ksettings,
+       .set_link_ksettings     = netdev_set_link_ksettings,
 };
 
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
index 8c95a8a81e3c237b61a2d564951819ea88f68eb7..1e350135f11d9a3b13b8e0300f063d927f65a365 100644 (file)
@@ -1256,52 +1256,63 @@ static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
        strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
 }
 
-static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rio_get_link_ksettings(struct net_device *dev,
+                                 struct ethtool_link_ksettings *cmd)
 {
        struct netdev_private *np = netdev_priv(dev);
+       u32 supported, advertising;
+
        if (np->phy_media) {
                /* fiber device */
-               cmd->supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
-               cmd->advertising= ADVERTISED_Autoneg | ADVERTISED_FIBRE;
-               cmd->port = PORT_FIBRE;
-               cmd->transceiver = XCVR_INTERNAL;
+               supported = SUPPORTED_Autoneg | SUPPORTED_FIBRE;
+               advertising = ADVERTISED_Autoneg | ADVERTISED_FIBRE;
+               cmd->base.port = PORT_FIBRE;
        } else {
                /* copper device */
-               cmd->supported = SUPPORTED_10baseT_Half |
+               supported = SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
                        | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full |
                        SUPPORTED_Autoneg | SUPPORTED_MII;
-               cmd->advertising = ADVERTISED_10baseT_Half |
+               advertising = ADVERTISED_10baseT_Half |
                        ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half |
-                       ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full|
+                       ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full |
                        ADVERTISED_Autoneg | ADVERTISED_MII;
-               cmd->port = PORT_MII;
-               cmd->transceiver = XCVR_INTERNAL;
+               cmd->base.port = PORT_MII;
        }
-       if ( np->link_status ) {
-               ethtool_cmd_speed_set(cmd, np->speed);
-               cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+       if (np->link_status) {
+               cmd->base.speed = np->speed;
+               cmd->base.duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
        } else {
-               ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
-               cmd->duplex = DUPLEX_UNKNOWN;
+               cmd->base.speed = SPEED_UNKNOWN;
+               cmd->base.duplex = DUPLEX_UNKNOWN;
        }
-       if ( np->an_enable)
-               cmd->autoneg = AUTONEG_ENABLE;
+       if (np->an_enable)
+               cmd->base.autoneg = AUTONEG_ENABLE;
        else
-               cmd->autoneg = AUTONEG_DISABLE;
+               cmd->base.autoneg = AUTONEG_DISABLE;
+
+       cmd->base.phy_address = np->phy_addr;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
 
-       cmd->phy_address = np->phy_addr;
        return 0;
 }
 
-static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int rio_set_link_ksettings(struct net_device *dev,
+                                 const struct ethtool_link_ksettings *cmd)
 {
        struct netdev_private *np = netdev_priv(dev);
+       u32 speed = cmd->base.speed;
+       u8 duplex = cmd->base.duplex;
+
        netif_carrier_off(dev);
-       if (cmd->autoneg == AUTONEG_ENABLE) {
-               if (np->an_enable)
+       if (cmd->base.autoneg == AUTONEG_ENABLE) {
+               if (np->an_enable) {
                        return 0;
-               else {
+               } else {
                        np->an_enable = 1;
                        mii_set_media(dev);
                        return 0;
@@ -1309,18 +1320,18 @@ static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        } else {
                np->an_enable = 0;
                if (np->speed == 1000) {
-                       ethtool_cmd_speed_set(cmd, SPEED_100);
-                       cmd->duplex = DUPLEX_FULL;
+                       speed = SPEED_100;
+                       duplex = DUPLEX_FULL;
                        printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
                }
-               switch (ethtool_cmd_speed(cmd)) {
+               switch (speed) {
                case SPEED_10:
                        np->speed = 10;
-                       np->full_duplex = (cmd->duplex == DUPLEX_FULL);
+                       np->full_duplex = (duplex == DUPLEX_FULL);
                        break;
                case SPEED_100:
                        np->speed = 100;
-                       np->full_duplex = (cmd->duplex == DUPLEX_FULL);
+                       np->full_duplex = (duplex == DUPLEX_FULL);
                        break;
                case SPEED_1000: /* not supported */
                default:
@@ -1339,9 +1350,9 @@ static u32 rio_get_link(struct net_device *dev)
 
 static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo = rio_get_drvinfo,
-       .get_settings = rio_get_settings,
-       .set_settings = rio_set_settings,
        .get_link = rio_get_link,
+       .get_link_ksettings = rio_get_link_ksettings,
+       .set_link_ksettings = rio_set_link_ksettings,
 };
 
 static int
index 2e5b66762e152fb1d68ba3fea4bcfac0cc06e47e..2704bcf023be771872b14ee71a04acbdea0446cf 100644 (file)
@@ -1664,21 +1664,23 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
-static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int get_link_ksettings(struct net_device *dev,
+                             struct ethtool_link_ksettings *cmd)
 {
        struct netdev_private *np = netdev_priv(dev);
        spin_lock_irq(&np->lock);
-       mii_ethtool_gset(&np->mii_if, ecmd);
+       mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
        spin_unlock_irq(&np->lock);
        return 0;
 }
 
-static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int set_link_ksettings(struct net_device *dev,
+                             const struct ethtool_link_ksettings *cmd)
 {
        struct netdev_private *np = netdev_priv(dev);
        int res;
        spin_lock_irq(&np->lock);
-       res = mii_ethtool_sset(&np->mii_if, ecmd);
+       res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
        spin_unlock_irq(&np->lock);
        return res;
 }
@@ -1800,8 +1802,6 @@ static int sundance_set_wol(struct net_device *dev,
 static const struct ethtool_ops ethtool_ops = {
        .begin = check_if_running,
        .get_drvinfo = get_drvinfo,
-       .get_settings = get_settings,
-       .set_settings = set_settings,
        .nway_reset = nway_reset,
        .get_link = get_link,
        .get_wol = sundance_get_wol,
@@ -1811,6 +1811,8 @@ static const struct ethtool_ops ethtool_ops = {
        .get_strings = get_strings,
        .get_sset_count = get_sset_count,
        .get_ethtool_stats = get_ethtool_stats,
+       .get_link_ksettings = get_link_ksettings,
+       .set_link_ksettings = set_link_ksettings,
 };
 
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
index 2a17c59f69f9fb0208f7bf6c23b88af1f082ea0a..3e77dd863175f7a6c5d5c31cbcb922589c00fafd 100644 (file)
@@ -415,7 +415,7 @@ static int dnet_poll(struct napi_struct *napi, int budget)
                /* We processed all packets available.  Tell NAPI it can
                 * stop polling then re-enable rx interrupts.
                 */
-               napi_complete(napi);
+               napi_complete_done(napi, npackets);
                int_enable = dnet_readl(bp, INTR_ENB);
                int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
                dnet_writel(bp, int_enable, INTR_ENB);
index 7bf78a0d322cc0efe9c4c793359ae5980b55e29d..278f139f2a22355dbb49674b28f6f54609c195cc 100644 (file)
@@ -457,7 +457,7 @@ static int ec_bhf_stop(struct net_device *net_dev)
        return 0;
 }
 
-static struct rtnl_link_stats64 *
+static void
 ec_bhf_get_stats(struct net_device *net_dev,
                 struct rtnl_link_stats64 *stats)
 {
@@ -472,8 +472,6 @@ ec_bhf_get_stats(struct net_device *net_dev,
 
        stats->tx_bytes = priv->stat_tx_bytes;
        stats->rx_bytes = priv->stat_rx_bytes;
-
-       return stats;
 }
 
 static const struct net_device_ops ec_bhf_netdev_ops = {
index 4c30c44b242e67e59d52f5cc00a230ba81ba68d4..d49528ad7821d202355ef9b1a18d58e56794b162 100644 (file)
@@ -226,11 +226,6 @@ struct be_aic_obj {                /* Adaptive interrupt coalescing (AIC) info */
        u64 tx_reqs_prev;       /* Used to calculate TX pps */
 };
 
-enum {
-       NAPI_POLLING,
-       BUSY_POLLING
-};
-
 struct be_mcc_obj {
        struct be_queue_info q;
        struct be_queue_info cq;
index 0a48a31225e6430e4f732d46723cdffdaf0c439c..7d1819c9e8ccf5093c331ee610cf3678323cd04b 100644 (file)
@@ -606,7 +606,8 @@ bool be_pause_supported(struct be_adapter *adapter)
                false : true;
 }
 
-static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+static int be_get_link_ksettings(struct net_device *netdev,
+                                struct ethtool_link_ksettings *cmd)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        u8 link_status;
@@ -614,13 +615,14 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
        int status;
        u32 auto_speeds;
        u32 fixed_speeds;
+       u32 supported = 0, advertising = 0;
 
        if (adapter->phy.link_speed < 0) {
                status = be_cmd_link_status_query(adapter, &link_speed,
                                                  &link_status, 0);
                if (!status)
                        be_link_status_update(adapter, link_status);
-               ethtool_cmd_speed_set(ecmd, link_speed);
+               cmd->base.speed = link_speed;
 
                status = be_cmd_get_phy_info(adapter);
                if (!status) {
@@ -629,58 +631,51 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
                        be_cmd_query_cable_type(adapter);
 
-                       ecmd->supported =
+                       supported =
                                convert_to_et_setting(adapter,
                                                      auto_speeds |
                                                      fixed_speeds);
-                       ecmd->advertising =
+                       advertising =
                                convert_to_et_setting(adapter, auto_speeds);
 
-                       ecmd->port = be_get_port_type(adapter);
+                       cmd->base.port = be_get_port_type(adapter);
 
                        if (adapter->phy.auto_speeds_supported) {
-                               ecmd->supported |= SUPPORTED_Autoneg;
-                               ecmd->autoneg = AUTONEG_ENABLE;
-                               ecmd->advertising |= ADVERTISED_Autoneg;
+                               supported |= SUPPORTED_Autoneg;
+                               cmd->base.autoneg = AUTONEG_ENABLE;
+                               advertising |= ADVERTISED_Autoneg;
                        }
 
-                       ecmd->supported |= SUPPORTED_Pause;
+                       supported |= SUPPORTED_Pause;
                        if (be_pause_supported(adapter))
-                               ecmd->advertising |= ADVERTISED_Pause;
-
-                       switch (adapter->phy.interface_type) {
-                       case PHY_TYPE_KR_10GB:
-                       case PHY_TYPE_KX4_10GB:
-                               ecmd->transceiver = XCVR_INTERNAL;
-                               break;
-                       default:
-                               ecmd->transceiver = XCVR_EXTERNAL;
-                               break;
-                       }
+                               advertising |= ADVERTISED_Pause;
                } else {
-                       ecmd->port = PORT_OTHER;
-                       ecmd->autoneg = AUTONEG_DISABLE;
-                       ecmd->transceiver = XCVR_DUMMY1;
+                       cmd->base.port = PORT_OTHER;
+                       cmd->base.autoneg = AUTONEG_DISABLE;
                }
 
                /* Save for future use */
-               adapter->phy.link_speed = ethtool_cmd_speed(ecmd);
-               adapter->phy.port_type = ecmd->port;
-               adapter->phy.transceiver = ecmd->transceiver;
-               adapter->phy.autoneg = ecmd->autoneg;
-               adapter->phy.advertising = ecmd->advertising;
-               adapter->phy.supported = ecmd->supported;
+               adapter->phy.link_speed = cmd->base.speed;
+               adapter->phy.port_type = cmd->base.port;
+               adapter->phy.autoneg = cmd->base.autoneg;
+               adapter->phy.advertising = advertising;
+               adapter->phy.supported = supported;
        } else {
-               ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed);
-               ecmd->port = adapter->phy.port_type;
-               ecmd->transceiver = adapter->phy.transceiver;
-               ecmd->autoneg = adapter->phy.autoneg;
-               ecmd->advertising = adapter->phy.advertising;
-               ecmd->supported = adapter->phy.supported;
+               cmd->base.speed = adapter->phy.link_speed;
+               cmd->base.port = adapter->phy.port_type;
+               cmd->base.autoneg = adapter->phy.autoneg;
+               advertising = adapter->phy.advertising;
+               supported = adapter->phy.supported;
        }
 
-       ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN;
-       ecmd->phy_address = adapter->port_num;
+       cmd->base.duplex = netif_carrier_ok(netdev) ?
+               DUPLEX_FULL : DUPLEX_UNKNOWN;
+       cmd->base.phy_address = adapter->port_num;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
 
        return 0;
 }
@@ -1399,7 +1394,6 @@ static int be_set_priv_flags(struct net_device *netdev, u32 flags)
 }
 
 const struct ethtool_ops be_ethtool_ops = {
-       .get_settings = be_get_settings,
        .get_drvinfo = be_get_drvinfo,
        .get_wol = be_get_wol,
        .set_wol = be_set_wol,
@@ -1433,5 +1427,6 @@ const struct ethtool_ops be_ethtool_ops = {
        .get_channels = be_get_channels,
        .set_channels = be_set_channels,
        .get_module_info = be_get_module_info,
-       .get_module_eeprom = be_get_module_eeprom
+       .get_module_eeprom = be_get_module_eeprom,
+       .get_link_ksettings = be_get_link_ksettings,
 };
index cd49a54c538d5202f1bb0cb632b8fdb306a66989..6be3b9aba8ed38875574f9e2fa018e80cc948cab 100644 (file)
@@ -647,8 +647,8 @@ void be_parse_stats(struct be_adapter *adapter)
        }
 }
 
-static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
-                                               struct rtnl_link_stats64 *stats)
+static void be_get_stats64(struct net_device *netdev,
+                          struct rtnl_link_stats64 *stats)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
@@ -712,7 +712,6 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
-       return stats;
 }
 
 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
@@ -3064,7 +3063,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
 }
 
 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
-                        int budget, int polling)
+                        int budget)
 {
        struct be_adapter *adapter = rxo->adapter;
        struct be_queue_info *rx_cq = &rxo->cq;
@@ -3096,8 +3095,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
                        goto loop_continue;
                }
 
-               /* Don't do gro when we're busy_polling */
-               if (do_gro(rxcp) && polling != BUSY_POLLING)
+               if (do_gro(rxcp))
                        be_rx_compl_process_gro(rxo, napi, rxcp);
                else
                        be_rx_compl_process(rxo, napi, rxcp);
@@ -3195,106 +3193,6 @@ static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
        }
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline bool be_lock_napi(struct be_eq_obj *eqo)
-{
-       bool status = true;
-
-       spin_lock(&eqo->lock); /* BH is already disabled */
-       if (eqo->state & BE_EQ_LOCKED) {
-               WARN_ON(eqo->state & BE_EQ_NAPI);
-               eqo->state |= BE_EQ_NAPI_YIELD;
-               status = false;
-       } else {
-               eqo->state = BE_EQ_NAPI;
-       }
-       spin_unlock(&eqo->lock);
-       return status;
-}
-
-static inline void be_unlock_napi(struct be_eq_obj *eqo)
-{
-       spin_lock(&eqo->lock); /* BH is already disabled */
-
-       WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
-       eqo->state = BE_EQ_IDLE;
-
-       spin_unlock(&eqo->lock);
-}
-
-static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
-{
-       bool status = true;
-
-       spin_lock_bh(&eqo->lock);
-       if (eqo->state & BE_EQ_LOCKED) {
-               eqo->state |= BE_EQ_POLL_YIELD;
-               status = false;
-       } else {
-               eqo->state |= BE_EQ_POLL;
-       }
-       spin_unlock_bh(&eqo->lock);
-       return status;
-}
-
-static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
-{
-       spin_lock_bh(&eqo->lock);
-
-       WARN_ON(eqo->state & (BE_EQ_NAPI));
-       eqo->state = BE_EQ_IDLE;
-
-       spin_unlock_bh(&eqo->lock);
-}
-
-static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
-{
-       spin_lock_init(&eqo->lock);
-       eqo->state = BE_EQ_IDLE;
-}
-
-static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
-{
-       local_bh_disable();
-
-       /* It's enough to just acquire napi lock on the eqo to stop
-        * be_busy_poll() from processing any queueus.
-        */
-       while (!be_lock_napi(eqo))
-               mdelay(1);
-
-       local_bh_enable();
-}
-
-#else /* CONFIG_NET_RX_BUSY_POLL */
-
-static inline bool be_lock_napi(struct be_eq_obj *eqo)
-{
-       return true;
-}
-
-static inline void be_unlock_napi(struct be_eq_obj *eqo)
-{
-}
-
-static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
-{
-       return false;
-}
-
-static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
-{
-}
-
-static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
-{
-}
-
-static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
-{
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 int be_poll(struct napi_struct *napi, int budget)
 {
        struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
@@ -3309,25 +3207,20 @@ int be_poll(struct napi_struct *napi, int budget)
        for_all_tx_queues_on_eq(adapter, eqo, txo, i)
                be_process_tx(adapter, txo, i);
 
-       if (be_lock_napi(eqo)) {
-               /* This loop will iterate twice for EQ0 in which
-                * completions of the last RXQ (default one) are also processed
-                * For other EQs the loop iterates only once
-                */
-               for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
-                       work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
-                       max_work = max(work, max_work);
-               }
-               be_unlock_napi(eqo);
-       } else {
-               max_work = budget;
+       /* This loop will iterate twice for EQ0 in which
+        * completions of the last RXQ (default one) are also processed
+        * For other EQs the loop iterates only once
+        */
+       for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
+               work = be_process_rx(rxo, napi, budget);
+               max_work = max(work, max_work);
        }
 
        if (is_mcc_eqo(eqo))
                be_process_mcc(adapter);
 
        if (max_work < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, max_work);
 
                /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
                 * delay via a delay multiplier encoding value
@@ -3344,28 +3237,6 @@ int be_poll(struct napi_struct *napi, int budget)
        return max_work;
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int be_busy_poll(struct napi_struct *napi)
-{
-       struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
-       struct be_adapter *adapter = eqo->adapter;
-       struct be_rx_obj *rxo;
-       int i, work = 0;
-
-       if (!be_lock_busy_poll(eqo))
-               return LL_FLUSH_BUSY;
-
-       for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
-               work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
-               if (work)
-                       break;
-       }
-
-       be_unlock_busy_poll(eqo);
-       return work;
-}
-#endif
-
 void be_detect_error(struct be_adapter *adapter)
 {
        u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
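
The two deleted blocks above belong together: once be_poll() reports its real work count through napi_complete_done(), the core NAPI machinery can busy-poll the instance itself, so the driver-private ndo_busy_poll entry point and the BE_EQ_* lock/yield state machine become dead weight. The canonical poll loop after this conversion looks roughly like the sketch below (qux_* names are illustrative); napi_complete_done() returns true only when the instance really came off the poll list, which is the only time it is safe to re-arm the device interrupt:

static int qux_poll(struct napi_struct *napi, int budget)
{
	struct qux_queue *q = container_of(napi, struct qux_queue, napi);
	int work_done = qux_process_rx(q, budget);

	if (work_done < budget && napi_complete_done(napi, work_done))
		qux_irq_enable(q);	/* done and off the poll list */

	return work_done;
}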
@@ -3670,7 +3541,6 @@ static int be_close(struct net_device *netdev)
        if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
                for_all_evt_queues(adapter, eqo, i) {
                        napi_disable(&eqo->napi);
-                       be_disable_busy_poll(eqo);
                }
                adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
        }
@@ -3840,7 +3710,6 @@ static int be_open(struct net_device *netdev)
 
        for_all_evt_queues(adapter, eqo, i) {
                napi_enable(&eqo->napi);
-               be_enable_busy_poll(eqo);
                be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
        }
        adapter->flags |= BE_FLAGS_NAPI_ENABLED;
@@ -5246,9 +5115,6 @@ static const struct net_device_ops be_netdev_ops = {
 #endif
        .ndo_bridge_setlink     = be_ndo_bridge_setlink,
        .ndo_bridge_getlink     = be_ndo_bridge_getlink,
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       .ndo_busy_poll          = be_busy_poll,
-#endif
        .ndo_udp_tunnel_add     = be_add_vxlan_port,
        .ndo_udp_tunnel_del     = be_del_vxlan_port,
        .ndo_features_check     = be_features_check,
index 45abc81f6f55efe7644897ea36fa3cd9dfa40e4c..23d82748f52b9aa19ecd17226444f3326dfaaed4 100644 (file)
@@ -180,8 +180,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
  * struct ethoc - driver-private device structure
  * @iobase:    pointer to I/O memory region
  * @membase:   pointer to buffer memory region
- * @dma_alloc: dma allocated buffer size
- * @io_region_size:    I/O memory region size
  * @num_bd:    number of buffer descriptors
  * @num_tx:    number of send buffers
  * @cur_tx:    last send buffer written
@@ -199,8 +197,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
 struct ethoc {
        void __iomem *iobase;
        void __iomem *membase;
-       int dma_alloc;
-       resource_size_t io_region_size;
        bool big_endian;
 
        unsigned int num_bd;
@@ -618,7 +614,7 @@ static int ethoc_poll(struct napi_struct *napi, int budget)
        tx_work_done = ethoc_tx(priv->netdev, budget);
 
        if (rx_work_done < budget && tx_work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rx_work_done);
                ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
        }
 
@@ -999,7 +995,7 @@ static int ethoc_set_ringparam(struct net_device *dev,
        return 0;
 }
 
-const struct ethtool_ops ethoc_ethtool_ops = {
+static const struct ethtool_ops ethoc_ethtool_ops = {
        .get_regs_len = ethoc_get_regs_len,
        .get_regs = ethoc_get_regs,
        .nway_reset = phy_ethtool_nway_reset,
@@ -1035,7 +1031,6 @@ static int ethoc_probe(struct platform_device *pdev)
        struct ethoc *priv = NULL;
        int num_bd;
        int ret = 0;
-       bool random_mac = false;
        struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
        u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0;
 
@@ -1096,8 +1091,6 @@ static int ethoc_probe(struct platform_device *pdev)
        /* setup driver-private data */
        priv = netdev_priv(netdev);
        priv->netdev = netdev;
-       priv->dma_alloc = 0;
-       priv->io_region_size = resource_size(mmio);
 
        priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
                        resource_size(mmio));
@@ -1127,7 +1120,6 @@ static int ethoc_probe(struct platform_device *pdev)
                        goto free;
                }
                netdev->mem_end = netdev->mem_start + buffer_size;
-               priv->dma_alloc = buffer_size;
        }
 
        priv->big_endian = pdata ? pdata->big_endian :
@@ -1176,16 +1168,11 @@ static int ethoc_probe(struct platform_device *pdev)
        /* Check the MAC again for validity, if it still isn't choose and
         * program a random one.
         */
-       if (!is_valid_ether_addr(netdev->dev_addr)) {
-               eth_random_addr(netdev->dev_addr);
-               random_mac = true;
-       }
+       if (!is_valid_ether_addr(netdev->dev_addr))
+               eth_hw_addr_random(netdev);
 
        ethoc_do_set_mac_address(netdev);
 
-       if (random_mac)
-               netdev->addr_assign_type = NET_ADDR_RANDOM;
-
        /* Allow the platform setup code to adjust MII management bus clock. */
        if (!eth_clkfreq) {
                struct clk *clk = devm_clk_get(&pdev->dev, NULL);
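
eth_hw_addr_random() bundles exactly the two steps the old ethoc code performed by hand; as of this era it is essentially the following inline from <linux/etherdevice.h> (quoted from memory, so treat it as a sketch):

static inline void eth_hw_addr_random(struct net_device *dev)
{
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->dev_addr);
}

Using the helper also removes the local random_mac bookkeeping above, since the assign type is recorded at the moment the address is generated.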
index 223f35cc034cf4f9846856d1cbb4c551a0014747..992ebe973d25bfbccff7b5c42dc1801ea41fc9ea 100644 (file)
@@ -192,7 +192,7 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
        if (work_done < budget) {
                u32 buf_int_enable_value = 0;
 
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                /* set tx_done and rx_rdy bits */
                buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
index dce5f7b7f7729225d9bb9456a8c5d7e69be3bb2a..c0ddbbe6c2268939bf994bf6d2559f8995d26a84 100644 (file)
@@ -825,16 +825,18 @@ static void ftmac100_get_drvinfo(struct net_device *netdev,
        strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
 }
 
-static int ftmac100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+static int ftmac100_get_link_ksettings(struct net_device *netdev,
+                                      struct ethtool_link_ksettings *cmd)
 {
        struct ftmac100 *priv = netdev_priv(netdev);
-       return mii_ethtool_gset(&priv->mii, cmd);
+       return mii_ethtool_get_link_ksettings(&priv->mii, cmd);
 }
 
-static int ftmac100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+static int ftmac100_set_link_ksettings(struct net_device *netdev,
+                                      const struct ethtool_link_ksettings *cmd)
 {
        struct ftmac100 *priv = netdev_priv(netdev);
-       return mii_ethtool_sset(&priv->mii, cmd);
+       return mii_ethtool_set_link_ksettings(&priv->mii, cmd);
 }
 
 static int ftmac100_nway_reset(struct net_device *netdev)
@@ -850,11 +852,11 @@ static u32 ftmac100_get_link(struct net_device *netdev)
 }
 
 static const struct ethtool_ops ftmac100_ethtool_ops = {
-       .set_settings           = ftmac100_set_settings,
-       .get_settings           = ftmac100_get_settings,
        .get_drvinfo            = ftmac100_get_drvinfo,
        .nway_reset             = ftmac100_nway_reset,
        .get_link               = ftmac100_get_link,
+       .get_link_ksettings     = ftmac100_get_link_ksettings,
+       .set_link_ksettings     = ftmac100_set_link_ksettings,
 };
 
 /******************************************************************************
index 9cb436cb37454c78c75e5724a7b51a119c654b45..766636a7c25e23167e5185cbe6f2c6bd6d9da043 100644 (file)
@@ -1817,25 +1817,27 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+                                    struct ethtool_link_ksettings *cmd)
 {
        struct netdev_private *np = netdev_priv(dev);
        int rc;
 
        spin_lock_irq(&np->lock);
-       rc = mii_ethtool_gset(&np->mii, cmd);
+       rc = mii_ethtool_get_link_ksettings(&np->mii, cmd);
        spin_unlock_irq(&np->lock);
 
        return rc;
 }
 
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_set_link_ksettings(struct net_device *dev,
+                                    const struct ethtool_link_ksettings *cmd)
 {
        struct netdev_private *np = netdev_priv(dev);
        int rc;
 
        spin_lock_irq(&np->lock);
-       rc = mii_ethtool_sset(&np->mii, cmd);
+       rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
        spin_unlock_irq(&np->lock);
 
        return rc;
@@ -1865,12 +1867,12 @@ static void netdev_set_msglevel(struct net_device *dev, u32 value)
 
 static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo            = netdev_get_drvinfo,
-       .get_settings           = netdev_get_settings,
-       .set_settings           = netdev_set_settings,
        .nway_reset             = netdev_nway_reset,
        .get_link               = netdev_get_link,
        .get_msglevel           = netdev_get_msglevel,
        .set_msglevel           = netdev_set_msglevel,
+       .get_link_ksettings     = netdev_get_link_ksettings,
+       .set_link_ksettings     = netdev_set_link_ksettings,
 };
 
 static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
index 726b5693ae8a5a3a9364e5de13195d36c5b1d06f..bc5a3347fd4aaf8f5166f17e379f660186bb7f49 100644 (file)
@@ -313,8 +313,8 @@ static void dpaa_tx_timeout(struct net_device *net_dev)
 /* Calculates the statistics for the given device by adding the statistics
  * collected by each CPU.
  */
-static struct rtnl_link_stats64 *dpaa_get_stats64(struct net_device *net_dev,
-                                                 struct rtnl_link_stats64 *s)
+static void dpaa_get_stats64(struct net_device *net_dev,
+                            struct rtnl_link_stats64 *s)
 {
        int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
        struct dpaa_priv *priv = netdev_priv(net_dev);
@@ -332,8 +332,6 @@ static struct rtnl_link_stats64 *dpaa_get_stats64(struct net_device *net_dev,
                for (j = 0; j < numstats; j++)
                        netstats[j] += cpustats[j];
        }
-
-       return s;
 }
 
 static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
@@ -2003,7 +2001,7 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
        int cleaned = qman_p_poll_dqrr(np->p, budget);
 
        if (cleaned < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, cleaned);
                qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
 
        } else if (np->down) {
index 27e7044667d1f0dc5c7da518c509dd4b7a3812f3..15571e251fb910c3f17d70c643c33ef894438a58 100644 (file)
@@ -72,8 +72,8 @@ static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
 #define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
 #define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)
 
-static int dpaa_get_settings(struct net_device *net_dev,
-                            struct ethtool_cmd *et_cmd)
+static int dpaa_get_link_ksettings(struct net_device *net_dev,
+                                  struct ethtool_link_ksettings *cmd)
 {
        int err;
 
@@ -82,13 +82,13 @@ static int dpaa_get_settings(struct net_device *net_dev,
                return 0;
        }
 
-       err = phy_ethtool_gset(net_dev->phydev, et_cmd);
+       err = phy_ethtool_ksettings_get(net_dev->phydev, cmd);
 
        return err;
 }
 
-static int dpaa_set_settings(struct net_device *net_dev,
-                            struct ethtool_cmd *et_cmd)
+static int dpaa_set_link_ksettings(struct net_device *net_dev,
+                                  const struct ethtool_link_ksettings *cmd)
 {
        int err;
 
@@ -97,9 +97,9 @@ static int dpaa_set_settings(struct net_device *net_dev,
                return -ENODEV;
        }
 
-       err = phy_ethtool_sset(net_dev->phydev, et_cmd);
+       err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
        if (err < 0)
-               netdev_err(net_dev, "phy_ethtool_sset() = %d\n", err);
+               netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);
 
        return err;
 }
@@ -402,8 +402,6 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
 }
 
 const struct ethtool_ops dpaa_ethtool_ops = {
-       .get_settings = dpaa_get_settings,
-       .set_settings = dpaa_set_settings,
        .get_drvinfo = dpaa_get_drvinfo,
        .get_msglevel = dpaa_get_msglevel,
        .set_msglevel = dpaa_set_msglevel,
@@ -414,4 +412,6 @@ const struct ethtool_ops dpaa_ethtool_ops = {
        .get_sset_count = dpaa_get_sset_count,
        .get_ethtool_stats = dpaa_get_ethtool_stats,
        .get_strings = dpaa_get_strings,
+       .get_link_ksettings = dpaa_get_link_ksettings,
+       .set_link_ksettings = dpaa_set_link_ksettings,
 };
index 8be7034b2e7ba4168ab962b951419a498ca9cc7d..91a16641e8514b7f1f99be03fe280d71e54de37d 100644 (file)
@@ -1615,7 +1615,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
        fec_enet_tx(ndev);
 
        if (pkts < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, pkts);
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
        }
        return pkts;
index 71a5ded9d1de0fda2f08c32985cc7661a3af3966..cd6a53eaf1614f7ffbeaadc7bbf6b28d8163e4c0 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/phy.h>
+#include <linux/phy_fixed.h>
 #include <linux/of_mdio.h>
 
 /* PCS registers */
index 1f98838f32b7772ebb1920f5f5e5644fa7b6e77e..753259091b227937b5915d34734780033f9cf05c 100644 (file)
@@ -301,7 +301,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
 
        if (received < budget && tx_left) {
                /* done */
-               napi_complete(napi);
+               napi_complete_done(napi, received);
                (*fep->ops->napi_enable)(dev);
 
                return received;
@@ -964,11 +964,10 @@ static int fs_enet_probe(struct platform_device *ofdev)
         */
        clk = devm_clk_get(&ofdev->dev, "per");
        if (!IS_ERR(clk)) {
-               err = clk_prepare_enable(clk);
-               if (err) {
-                       ret = err;
+               ret = clk_prepare_enable(clk);
+               if (ret)
                        goto out_deregister_fixed_link;
-               }
+
                fpi->clk_per = clk;
        }
 
@@ -1045,10 +1044,10 @@ out_cleanup_data:
 out_free_dev:
        free_netdev(ndev);
 out_put:
-       of_node_put(fpi->phy_node);
        if (fpi->clk_per)
                clk_disable_unprepare(fpi->clk_per);
 out_deregister_fixed_link:
+       of_node_put(fpi->phy_node);
        if (of_phy_is_fixed_link(ofdev->dev.of_node))
                of_phy_deregister_fixed_link(ofdev->dev.of_node);
 out_free_fpi:
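
The fs_enet hunk is an error-unwind fix: of_node_put(fpi->phy_node) used to sit under the out_put label, so error paths that jumped straight to out_deregister_fixed_link (such as the clk_prepare_enable() failure above) leaked the phy_node reference. Moving the put below that label restores the usual goto-unwind invariant: labels release resources in reverse order of acquisition, and every jump target frees exactly what was taken before the failure. A generic sketch of the idiom (names illustrative):

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = foo_take_a(pdev);		/* acquired first ... */
	if (ret)
		return ret;

	ret = foo_take_b(pdev);
	if (ret)
		goto out_release_a;	/* ... so released last */

	return 0;

out_release_a:
	foo_release_a(pdev);
	return ret;
}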
index 957bfc220978479a5ccee32b58ae26d4236fe939..0ff166ec3e7ebf265af2e20453c72a918e911336 100644 (file)
@@ -3183,7 +3183,7 @@ static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
 
        if (work_done < budget) {
                u32 imask;
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                /* Clear the halt bit in RSTAT */
                gfar_write(&regs->rstat, gfargrp->rstat);
 
@@ -3272,7 +3272,7 @@ static int gfar_poll_rx(struct napi_struct *napi, int budget)
 
        if (!num_act_queues) {
                u32 imask;
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                /* Clear the halt bit in RSTAT */
                gfar_write(&regs->rstat, gfargrp->rstat);
index 9d660888510f3f007905eaa7f8b5dede6eea9d85..3f7ae9f64cd8771a834b104cec0b395c473a7bf2 100644 (file)
@@ -3303,7 +3303,7 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
                howmany += ucc_geth_rx(ugeth, i, budget - howmany);
 
        if (howmany < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, howmany);
                setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
        }
 
index 97b184774784beb603c27d62486e771f3a8af80b..0cec06bec63ee1c0085019f0f1ee456b675c2ece 100644 (file)
@@ -555,7 +555,7 @@ refill:
                priv->reg_inten |= RCV_INT;
                writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
        }
-       napi_complete(napi);
+       napi_complete_done(napi, rx);
 done:
        /* clean up tx descriptors and start a new timer if necessary */
        tx_remaining = hip04_tx_reclaim(ndev, false);
@@ -701,11 +701,6 @@ static void hip04_tx_timeout_task(struct work_struct *work)
        hip04_mac_open(priv->ndev);
 }
 
-static struct net_device_stats *hip04_get_stats(struct net_device *ndev)
-{
-       return &ndev->stats;
-}
-
 static int hip04_get_coalesce(struct net_device *netdev,
                              struct ethtool_coalesce *ec)
 {
@@ -764,7 +759,6 @@ static const struct ethtool_ops hip04_ethtool_ops = {
 static const struct net_device_ops hip04_netdev_ops = {
        .ndo_open               = hip04_mac_open,
        .ndo_stop               = hip04_mac_stop,
-       .ndo_get_stats          = hip04_get_stats,
        .ndo_start_xmit         = hip04_mac_start_xmit,
        .ndo_set_mac_address    = hip04_set_mac_address,
        .ndo_tx_timeout         = hip04_timeout,
index 979852d56f31ddb7301c7be19f2f91d922ef5e33..2c2808830e95723604b7356514ea64ca123fac3a 100644 (file)
@@ -330,7 +330,7 @@ static int hisi_femac_poll(struct napi_struct *napi, int budget)
        } while (ints & DEF_INT_MASK);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                hisi_femac_irq_enable(priv, DEF_INT_MASK &
                                        (~IRQ_INT_TX_PER_PACKET));
        }
index 418ca1f3774aabbd1575c934243ac938124b54ad..25a6c8722ecacc981ff8320276c3b41943e8fa31 100644 (file)
@@ -662,7 +662,7 @@ static int hix5hd2_poll(struct napi_struct *napi, int budget)
        } while (ints & DEF_INT_MASK);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                hix5hd2_irq_enable(priv);
        }
 
index 8aed72860e7c0eece690c97ae38b1fbedfa58557..fca37e2c7f017d76aa537daede5f7af14cb8e152 100644 (file)
@@ -797,7 +797,6 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
 
        skb->protocol = eth_type_trans(skb, ndev);
        (void)napi_gro_receive(&ring_data->napi, skb);
-       ndev->last_rx = jiffies;
 }
 
 static int hns_desc_unused(struct hnae_ring *ring)
@@ -1203,43 +1202,48 @@ static void hns_set_irq_affinity(struct hns_nic_priv *priv)
        struct hns_nic_ring_data *rd;
        int i;
        int cpu;
-       cpumask_t mask;
+       cpumask_var_t mask;
+
+       if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+               return;
 
        /*different irq balance for 16core and 32core*/
        if (h->q_num == num_possible_cpus()) {
                for (i = 0; i < h->q_num * 2; i++) {
                        rd = &priv->ring_data[i];
                        if (cpu_online(rd->queue_index)) {
-                               cpumask_clear(&mask);
+                               cpumask_clear(mask);
                                cpu = rd->queue_index;
-                               cpumask_set_cpu(cpu, &mask);
+                               cpumask_set_cpu(cpu, mask);
                                (void)irq_set_affinity_hint(rd->ring->irq,
-                                                           &mask);
+                                                           mask);
                        }
                }
        } else {
                for (i = 0; i < h->q_num; i++) {
                        rd = &priv->ring_data[i];
                        if (cpu_online(rd->queue_index * 2)) {
-                               cpumask_clear(&mask);
+                               cpumask_clear(mask);
                                cpu = rd->queue_index * 2;
-                               cpumask_set_cpu(cpu, &mask);
+                               cpumask_set_cpu(cpu, mask);
                                (void)irq_set_affinity_hint(rd->ring->irq,
-                                                           &mask);
+                                                           mask);
                        }
                }
 
                for (i = h->q_num; i < h->q_num * 2; i++) {
                        rd = &priv->ring_data[i];
                        if (cpu_online(rd->queue_index * 2 + 1)) {
-                               cpumask_clear(&mask);
+                               cpumask_clear(mask);
                                cpu = rd->queue_index * 2 + 1;
-                               cpumask_set_cpu(cpu, &mask);
+                               cpumask_set_cpu(cpu, mask);
                                (void)irq_set_affinity_hint(rd->ring->irq,
-                                                           &mask);
+                                                           mask);
                        }
                }
        }
+
+       free_cpumask_var(mask);
 }
 
 static int hns_nic_init_irq(struct hns_nic_priv *priv)
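
With CONFIG_CPUMASK_OFFSTACK=y (large NR_CPUS builds), struct cpumask is too big to live on the kernel stack, which is why the hns hunk converts the local cpumask_t into a cpumask_var_t. On offstack kernels alloc_cpumask_var() heap-allocates the mask; otherwise cpumask_var_t is an on-stack array and the alloc/free pair compiles away and always succeeds. The pattern, sketched to mirror the driver's usage (foo_* names illustrative):

static void foo_set_irq_affinity(unsigned int irq, int cpu)
{
	cpumask_var_t mask;	/* pointer when offstack, array otherwise */

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);
	irq_set_affinity_hint(irq, mask);

	free_cpumask_var(mask);
}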
@@ -1625,8 +1629,8 @@ void hns_nic_set_rx_mode(struct net_device *ndev)
                netdev_err(ndev, "sync uc address fail\n");
 }
 
-struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
-                                             struct rtnl_link_stats64 *stats)
+static void hns_nic_get_stats64(struct net_device *ndev,
+                               struct rtnl_link_stats64 *stats)
 {
        int idx = 0;
        u64 tx_bytes = 0;
@@ -1668,8 +1672,6 @@ struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
        stats->tx_window_errors = ndev->stats.tx_window_errors;
        stats->rx_compressed = ndev->stats.rx_compressed;
        stats->tx_compressed = ndev->stats.tx_compressed;
-
-       return stats;
 }
 
 static u16
index 85a3866459cf4a3bebc6bf6ee3fbe3ff254d48a4..4f58d338d739da0f7309718353d72871aa01c7cc 100644 (file)
 #include "ehea.h"
 #include "ehea_phyp.h"
 
-static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ehea_get_link_ksettings(struct net_device *dev,
+                                  struct ethtool_link_ksettings *cmd)
 {
        struct ehea_port *port = netdev_priv(dev);
+       u32 supported, advertising;
        u32 speed;
        int ret;
 
@@ -60,68 +62,75 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        speed = -1;
                        break; /* BUG */
                }
-               cmd->duplex = port->full_duplex == 1 ?
+               cmd->base.duplex = port->full_duplex == 1 ?
                                                     DUPLEX_FULL : DUPLEX_HALF;
        } else {
                speed = SPEED_UNKNOWN;
-               cmd->duplex = DUPLEX_UNKNOWN;
+               cmd->base.duplex = DUPLEX_UNKNOWN;
        }
-       ethtool_cmd_speed_set(cmd, speed);
+       cmd->base.speed = speed;
 
-       if (cmd->speed == SPEED_10000) {
-               cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
-               cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
-               cmd->port = PORT_FIBRE;
+       if (cmd->base.speed == SPEED_10000) {
+               supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+               advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+               cmd->base.port = PORT_FIBRE;
        } else {
-               cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full
+               supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full
                               | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full
                               | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg
                               | SUPPORTED_TP);
-               cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg
+               advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg
                                 | ADVERTISED_TP);
-               cmd->port = PORT_TP;
+               cmd->base.port = PORT_TP;
        }
 
-       cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+       cmd->base.autoneg = port->autoneg == 1 ?
+               AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
 
        return 0;
 }
 
-static int ehea_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ehea_set_link_ksettings(struct net_device *dev,
+                                  const struct ethtool_link_ksettings *cmd)
 {
        struct ehea_port *port = netdev_priv(dev);
        int ret = 0;
        u32 sp;
 
-       if (cmd->autoneg == AUTONEG_ENABLE) {
+       if (cmd->base.autoneg == AUTONEG_ENABLE) {
                sp = EHEA_SPEED_AUTONEG;
                goto doit;
        }
 
-       switch (cmd->speed) {
+       switch (cmd->base.speed) {
        case SPEED_10:
-               if (cmd->duplex == DUPLEX_FULL)
+               if (cmd->base.duplex == DUPLEX_FULL)
                        sp = H_SPEED_10M_F;
                else
                        sp = H_SPEED_10M_H;
                break;
 
        case SPEED_100:
-               if (cmd->duplex == DUPLEX_FULL)
+               if (cmd->base.duplex == DUPLEX_FULL)
                        sp = H_SPEED_100M_F;
                else
                        sp = H_SPEED_100M_H;
                break;
 
        case SPEED_1000:
-               if (cmd->duplex == DUPLEX_FULL)
+               if (cmd->base.duplex == DUPLEX_FULL)
                        sp = H_SPEED_1G_F;
                else
                        ret = -EINVAL;
                break;
 
        case SPEED_10000:
-               if (cmd->duplex == DUPLEX_FULL)
+               if (cmd->base.duplex == DUPLEX_FULL)
                        sp = H_SPEED_10G_F;
                else
                        ret = -EINVAL;
@@ -264,7 +273,6 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
 }
 
 static const struct ethtool_ops ehea_ethtool_ops = {
-       .get_settings = ehea_get_settings,
        .get_drvinfo = ehea_get_drvinfo,
        .get_msglevel = ehea_get_msglevel,
        .set_msglevel = ehea_set_msglevel,
@@ -272,8 +280,9 @@ static const struct ethtool_ops ehea_ethtool_ops = {
        .get_strings = ehea_get_strings,
        .get_sset_count = ehea_get_sset_count,
        .get_ethtool_stats = ehea_get_ethtool_stats,
-       .set_settings = ehea_set_settings,
        .nway_reset = ehea_nway_reset,          /* Restart autonegotiation */
+       .get_link_ksettings = ehea_get_link_ksettings,
+       .set_link_ksettings = ehea_set_link_ksettings,
 };
 
 void ehea_set_ethtool_ops(struct net_device *netdev)
index 702446a93697a42a94cae938ac303d58350461cf..1e53d7a82675f3e7fee8db985922331ecf2cadc3 100644 (file)
@@ -328,8 +328,8 @@ out:
        spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
 }
 
-static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
-                                       struct rtnl_link_stats64 *stats)
+static void ehea_get_stats64(struct net_device *dev,
+                            struct rtnl_link_stats64 *stats)
 {
        struct ehea_port *port = netdev_priv(dev);
        u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
@@ -352,7 +352,6 @@ static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
 
        stats->multicast = port->stats.multicast;
        stats->rx_errors = port->stats.rx_errors;
-       return stats;
 }
 
 static void ehea_update_stats(struct work_struct *work)
index 5909615c27f7cb2629217a87e00537ca694b1680..6ead2335a1694110136cc10fb36a0038e5e71365 100644 (file)
@@ -1991,69 +1991,79 @@ static struct mal_commac_ops emac_commac_sg_ops = {
 };
 
 /* Ethtool support */
-static int emac_ethtool_get_settings(struct net_device *ndev,
-                                    struct ethtool_cmd *cmd)
+static int emac_ethtool_get_link_ksettings(struct net_device *ndev,
+                                          struct ethtool_link_ksettings *cmd)
 {
        struct emac_instance *dev = netdev_priv(ndev);
+       u32 supported, advertising;
 
-       cmd->supported = dev->phy.features;
-       cmd->port = PORT_MII;
-       cmd->phy_address = dev->phy.address;
-       cmd->transceiver =
-           dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
+       supported = dev->phy.features;
+       cmd->base.port = PORT_MII;
+       cmd->base.phy_address = dev->phy.address;
 
        mutex_lock(&dev->link_lock);
-       cmd->advertising = dev->phy.advertising;
-       cmd->autoneg = dev->phy.autoneg;
-       cmd->speed = dev->phy.speed;
-       cmd->duplex = dev->phy.duplex;
+       advertising = dev->phy.advertising;
+       cmd->base.autoneg = dev->phy.autoneg;
+       cmd->base.speed = dev->phy.speed;
+       cmd->base.duplex = dev->phy.duplex;
        mutex_unlock(&dev->link_lock);
 
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
+
        return 0;
 }
 
-static int emac_ethtool_set_settings(struct net_device *ndev,
-                                    struct ethtool_cmd *cmd)
+static int
+emac_ethtool_set_link_ksettings(struct net_device *ndev,
+                               const struct ethtool_link_ksettings *cmd)
 {
        struct emac_instance *dev = netdev_priv(ndev);
        u32 f = dev->phy.features;
+       u32 advertising;
+
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
 
        DBG(dev, "set_settings(%d, %d, %d, 0x%08x)" NL,
-           cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
+           cmd->base.autoneg, cmd->base.speed, cmd->base.duplex, advertising);
 
        /* Basic sanity checks */
        if (dev->phy.address < 0)
                return -EOPNOTSUPP;
-       if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
+       if (cmd->base.autoneg != AUTONEG_ENABLE &&
+           cmd->base.autoneg != AUTONEG_DISABLE)
                return -EINVAL;
-       if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
+       if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0)
                return -EINVAL;
-       if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
+       if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL)
                return -EINVAL;
 
-       if (cmd->autoneg == AUTONEG_DISABLE) {
-               switch (cmd->speed) {
+       if (cmd->base.autoneg == AUTONEG_DISABLE) {
+               switch (cmd->base.speed) {
                case SPEED_10:
-                       if (cmd->duplex == DUPLEX_HALF &&
+                       if (cmd->base.duplex == DUPLEX_HALF &&
                            !(f & SUPPORTED_10baseT_Half))
                                return -EINVAL;
-                       if (cmd->duplex == DUPLEX_FULL &&
+                       if (cmd->base.duplex == DUPLEX_FULL &&
                            !(f & SUPPORTED_10baseT_Full))
                                return -EINVAL;
                        break;
                case SPEED_100:
-                       if (cmd->duplex == DUPLEX_HALF &&
+                       if (cmd->base.duplex == DUPLEX_HALF &&
                            !(f & SUPPORTED_100baseT_Half))
                                return -EINVAL;
-                       if (cmd->duplex == DUPLEX_FULL &&
+                       if (cmd->base.duplex == DUPLEX_FULL &&
                            !(f & SUPPORTED_100baseT_Full))
                                return -EINVAL;
                        break;
                case SPEED_1000:
-                       if (cmd->duplex == DUPLEX_HALF &&
+                       if (cmd->base.duplex == DUPLEX_HALF &&
                            !(f & SUPPORTED_1000baseT_Half))
                                return -EINVAL;
-                       if (cmd->duplex == DUPLEX_FULL &&
+                       if (cmd->base.duplex == DUPLEX_FULL &&
                            !(f & SUPPORTED_1000baseT_Full))
                                return -EINVAL;
                        break;
@@ -2062,8 +2072,8 @@ static int emac_ethtool_set_settings(struct net_device *ndev,
                }
 
                mutex_lock(&dev->link_lock);
-               dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
-                                               cmd->duplex);
+               dev->phy.def->ops->setup_forced(&dev->phy, cmd->base.speed,
+                                               cmd->base.duplex);
                mutex_unlock(&dev->link_lock);
 
        } else {
@@ -2072,7 +2082,7 @@ static int emac_ethtool_set_settings(struct net_device *ndev,
 
                mutex_lock(&dev->link_lock);
                dev->phy.def->ops->setup_aneg(&dev->phy,
-                                             (cmd->advertising & f) |
+                                             (advertising & f) |
                                              (dev->phy.advertising &
                                               (ADVERTISED_Pause |
                                                ADVERTISED_Asym_Pause)));
@@ -2234,8 +2244,6 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
 }
 
 static const struct ethtool_ops emac_ethtool_ops = {
-       .get_settings = emac_ethtool_get_settings,
-       .set_settings = emac_ethtool_set_settings,
        .get_drvinfo = emac_ethtool_get_drvinfo,
 
        .get_regs_len = emac_ethtool_get_regs_len,
@@ -2251,6 +2259,8 @@ static const struct ethtool_ops emac_ethtool_ops = {
        .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
 
        .get_link = ethtool_op_get_link,
+       .get_link_ksettings = emac_ethtool_get_link_ksettings,
+       .set_link_ksettings = emac_ethtool_set_link_ksettings,
 };
 
 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
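
On the set side the conversion runs the other way: the emac PHY layer still thinks in legacy u32 ADVERTISED_* masks, so the new callback flattens the link-mode bitmap with ethtool_convert_link_mode_to_legacy_u32() before validating and programming it. Reduced to its skeleton (foo_* names illustrative, not from this commit):

static int foo_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0)
		return -EINVAL;

	/* ... program the PHY from cmd->base.speed / duplex / autoneg ... */
	return 0;
}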
index aaf6fec566b5175a834c74cee494ebcf13e1c10c..cd3227b088b73f51b68951401eefc7e2d1e11470 100644 (file)
@@ -421,20 +421,20 @@ static int mal_poll(struct napi_struct *napi, int budget)
                int n;
                if (unlikely(test_bit(MAL_COMMAC_POLL_DISABLED, &mc->flags)))
                        continue;
-               n = mc->ops->poll_rx(mc->dev, budget);
+               n = mc->ops->poll_rx(mc->dev, budget - received);
                if (n) {
                        received += n;
-                       budget -= n;
-                       if (budget <= 0)
-                               goto more_work; // XXX What if this is the last one ?
+                       if (received >= budget)
+                               return budget;
                }
        }
 
-       /* We need to disable IRQs to protect from RXDE IRQ here */
-       spin_lock_irqsave(&mal->lock, flags);
-       __napi_complete(napi);
-       mal_enable_eob_irq(mal);
-       spin_unlock_irqrestore(&mal->lock, flags);
+       if (napi_complete_done(napi, received)) {
+               /* We need to disable IRQs to protect from RXDE IRQ here */
+               spin_lock_irqsave(&mal->lock, flags);
+               mal_enable_eob_irq(mal);
+               spin_unlock_irqrestore(&mal->lock, flags);
+       }
 
        /* Check for "rotting" packet(s) */
        list_for_each(l, &mal->poll_list) {
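
The mal_poll hunk fixes two things at once: each commac poller is now handed only the remaining budget, so the sum of sub-poll results can no longer exceed what NAPI granted, and the open-coded __napi_complete() under the driver lock is replaced by napi_complete_done(), whose return value gates the IRQ re-enable. The budget bookkeeping, distilled (names illustrative):

static int foo_poll_channels(struct foo_chan *ch, int nchans, int budget)
{
	int received = 0, i;

	for (i = 0; i < nchans; i++) {
		received += foo_chan_rx(&ch[i], budget - received);
		if (received >= budget)
			return budget;	/* work left: stay on the poll list */
	}
	return received;
}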
index 309f5c66083cf504e8beaeff9f4575e40c646063..72ab7b6bf20b7a93eb4a6c18b14f75b2b063aaa5 100644 (file)
@@ -729,20 +729,26 @@ static int ibmveth_close(struct net_device *netdev)
        return 0;
 }
 
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+                                    struct ethtool_link_ksettings *cmd)
 {
-       cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+       u32 supported, advertising;
+
+       supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
                                SUPPORTED_FIBRE);
-       cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
+       advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
                                ADVERTISED_FIBRE);
-       ethtool_cmd_speed_set(cmd, SPEED_1000);
-       cmd->duplex = DUPLEX_FULL;
-       cmd->port = PORT_FIBRE;
-       cmd->phy_address = 0;
-       cmd->transceiver = XCVR_INTERNAL;
-       cmd->autoneg = AUTONEG_ENABLE;
-       cmd->maxtxpkt = 0;
-       cmd->maxrxpkt = 1;
+       cmd->base.speed = SPEED_1000;
+       cmd->base.duplex = DUPLEX_FULL;
+       cmd->base.port = PORT_FIBRE;
+       cmd->base.phy_address = 0;
+       cmd->base.autoneg = AUTONEG_ENABLE;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
+
        return 0;
 }
 
@@ -978,11 +984,11 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev,
 
 static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo            = netdev_get_drvinfo,
-       .get_settings           = netdev_get_settings,
        .get_link               = ethtool_op_get_link,
        .get_strings            = ibmveth_get_strings,
        .get_sset_count         = ibmveth_get_sset_count,
        .get_ethtool_stats      = ibmveth_get_ethtool_stats,
+       .get_link_ksettings     = netdev_get_link_ksettings,
 };
 
 static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1320,7 +1326,7 @@ restart_poll:
        ibmveth_replenish_task(adapter);
 
        if (frames_processed < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, frames_processed);
 
                /* We think we are done - reenable interrupts,
                 * then check once more to make sure we are done.
index a07b8d79174cd0aa4f338df7214ea4e9601076dc..0c94e23985be9451d3e99691221f992850608e24 100644 (file)
@@ -988,7 +988,7 @@ restart_poll:
 
        if (frames_processed < budget) {
                enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
-               napi_complete(napi);
+               napi_complete_done(napi, frames_processed);
                if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
                    napi_reschedule(napi)) {
                        disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1026,21 +1026,26 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
 
 /* ethtool functions */
 
-static int ibmvnic_get_settings(struct net_device *netdev,
-                               struct ethtool_cmd *cmd)
+static int ibmvnic_get_link_ksettings(struct net_device *netdev,
+                                     struct ethtool_link_ksettings *cmd)
 {
-       cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+       u32 supported, advertising;
+
+       supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
                          SUPPORTED_FIBRE);
-       cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
+       advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
                            ADVERTISED_FIBRE);
-       ethtool_cmd_speed_set(cmd, SPEED_1000);
-       cmd->duplex = DUPLEX_FULL;
-       cmd->port = PORT_FIBRE;
-       cmd->phy_address = 0;
-       cmd->transceiver = XCVR_INTERNAL;
-       cmd->autoneg = AUTONEG_ENABLE;
-       cmd->maxtxpkt = 0;
-       cmd->maxrxpkt = 1;
+       cmd->base.speed = SPEED_1000;
+       cmd->base.duplex = DUPLEX_FULL;
+       cmd->base.port = PORT_FIBRE;
+       cmd->base.phy_address = 0;
+       cmd->base.autoneg = AUTONEG_ENABLE;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
+
        return 0;
 }
 
@@ -1133,7 +1138,6 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
 }
 
 static const struct ethtool_ops ibmvnic_ethtool_ops = {
-       .get_settings           = ibmvnic_get_settings,
        .get_drvinfo            = ibmvnic_get_drvinfo,
        .get_msglevel           = ibmvnic_get_msglevel,
        .set_msglevel           = ibmvnic_set_msglevel,
@@ -1142,6 +1146,7 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = {
        .get_strings            = ibmvnic_get_strings,
        .get_sset_count         = ibmvnic_get_sset_count,
        .get_ethtool_stats      = ibmvnic_get_ethtool_stats,
+       .get_link_ksettings     = ibmvnic_get_link_ksettings,
 };
 
 /* Routines for managing CRQs/sCRQs  */
index 25c6dfd500b4cde27310305770074deac6487b06..2b7323d392dcc542430ced19c4ed5448bc274cdc 100644 (file)
@@ -2253,7 +2253,7 @@ static int e100_poll(struct napi_struct *napi, int budget)
 
        /* If budget not fully consumed, exit the polling mode */
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                e100_enable_irq(nic);
        }
 
@@ -2426,19 +2426,21 @@ err_clean_rx:
 #define E100_82552_LED_ON       0x000F /* LEDTX and LED_RX both on */
 #define E100_82552_LED_OFF      0x000A /* LEDTX and LED_RX both off */
 
-static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+static int e100_get_link_ksettings(struct net_device *netdev,
+                                  struct ethtool_link_ksettings *cmd)
 {
        struct nic *nic = netdev_priv(netdev);
-       return mii_ethtool_gset(&nic->mii, cmd);
+       return mii_ethtool_get_link_ksettings(&nic->mii, cmd);
 }
 
-static int e100_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+static int e100_set_link_ksettings(struct net_device *netdev,
+                                  const struct ethtool_link_ksettings *cmd)
 {
        struct nic *nic = netdev_priv(netdev);
        int err;
 
        mdio_write(netdev, nic->mii.phy_id, MII_BMCR, BMCR_RESET);
-       err = mii_ethtool_sset(&nic->mii, cmd);
+       err = mii_ethtool_set_link_ksettings(&nic->mii, cmd);
        e100_exec_cb(nic, NULL, e100_configure);
 
        return err;
@@ -2741,8 +2743,6 @@ static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 }
 
 static const struct ethtool_ops e100_ethtool_ops = {
-       .get_settings           = e100_get_settings,
-       .set_settings           = e100_set_settings,
        .get_drvinfo            = e100_get_drvinfo,
        .get_regs_len           = e100_get_regs_len,
        .get_regs               = e100_get_regs,
@@ -2763,6 +2763,8 @@ static const struct ethtool_ops e100_ethtool_ops = {
        .get_ethtool_stats      = e100_get_ethtool_stats,
        .get_sset_count         = e100_get_sset_count,
        .get_ts_info            = ethtool_op_get_ts_info,
+       .get_link_ksettings     = e100_get_link_ksettings,
+       .set_link_ksettings     = e100_set_link_ksettings,
 };
 
 static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
index 879cca47b021446565f30ea4e877e569ec17531c..a29b12e80855b1f13b7f6c86af460dc2e661c5e6 100644 (file)
@@ -493,8 +493,8 @@ int e1000e_setup_rx_resources(struct e1000_ring *ring);
 int e1000e_setup_tx_resources(struct e1000_ring *ring);
 void e1000e_free_rx_resources(struct e1000_ring *ring);
 void e1000e_free_tx_resources(struct e1000_ring *ring);
-struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
-                                            struct rtnl_link_stats64 *stats);
+void e1000e_get_stats64(struct net_device *netdev,
+                       struct rtnl_link_stats64 *stats);
 void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
 void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
 void e1000e_get_hw_control(struct e1000_adapter *adapter);
index eccf1da9356badc85df416cca1a519bb1ec417bc..2175cced402f7fe84dd260eecc5f3f0ab712132a 100644 (file)
@@ -240,9 +240,9 @@ static void e1000e_dump(struct e1000_adapter *adapter)
        /* Print netdevice Info */
        if (netdev) {
                dev_info(&adapter->pdev->dev, "Net device Info\n");
-               pr_info("Device Name     state            trans_start      last_rx\n");
-               pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
-                       netdev->state, dev_trans_start(netdev), netdev->last_rx);
+               pr_info("Device Name     state            trans_start\n");
+               pr_info("%-15s %016lX %016lX\n", netdev->name,
+                       netdev->state, dev_trans_start(netdev));
        }
 
        /* Print Registers */
@@ -5920,12 +5920,11 @@ static void e1000_reset_task(struct work_struct *work)
  *
  * Returns the address of the device statistics structure.
  **/
-struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
-                                            struct rtnl_link_stats64 *stats)
+void e1000e_get_stats64(struct net_device *netdev,
+                       struct rtnl_link_stats64 *stats)
 {
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
-       memset(stats, 0, sizeof(struct rtnl_link_stats64));
        spin_lock(&adapter->stats64_lock);
        e1000e_update_stats(adapter);
        /* Fill out the OS statistics structure */
@@ -5958,7 +5957,6 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
        /* Tx Dropped needs to be maintained elsewhere */
 
        spin_unlock(&adapter->stats64_lock);
-       return stats;
 }
 
 /**
@@ -6276,8 +6274,8 @@ static int e1000e_pm_freeze(struct device *dev)
                /* Quiesce the device without resetting the hardware */
                e1000e_down(adapter, false);
                e1000_free_irq(adapter);
+               e1000e_reset_interrupt_capability(adapter);
        }
-       e1000e_reset_interrupt_capability(adapter);
 
        /* Allow time for pending master requests to run */
        e1000e_disable_pcie_master(&adapter->hw);
index 4d19e46f7c5573857327d280f97a352985583bea..52b979443cdecd702177ff40bf71c1162a0d0783 100644 (file)
@@ -260,9 +260,7 @@ struct fm10k_intfc {
 #define FM10K_FLAG_RESET_REQUESTED             (u32)(BIT(0))
 #define FM10K_FLAG_RSS_FIELD_IPV4_UDP          (u32)(BIT(1))
 #define FM10K_FLAG_RSS_FIELD_IPV6_UDP          (u32)(BIT(2))
-#define FM10K_FLAG_RX_TS_ENABLED               (u32)(BIT(3))
-#define FM10K_FLAG_SWPRI_CONFIG                        (u32)(BIT(4))
-#define FM10K_FLAG_DEBUG_STATS                 (u32)(BIT(5))
+#define FM10K_FLAG_SWPRI_CONFIG                        (u32)(BIT(3))
        int xcast_mode;
 
        /* Tx fast path data */
index dd95ac4f4c64ad951ee9998aef337242f38453be..62a6ad9b3eeda3dcf2da0d840bfbbbf909b442b6 100644 (file)
@@ -506,7 +506,7 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
                goto out;
 
        /* if we somehow dropped the Tx enable we should reset */
-       if (hw->mac.tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
+       if (mac->tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
                ret_val = FM10K_ERR_RESET_REQUESTED;
                goto out;
        }
@@ -523,8 +523,8 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
 
        /* interface cannot receive traffic without logical ports */
        if (mac->dglort_map == FM10K_DGLORTMAP_NONE) {
-               if (hw->mac.ops.request_lport_map)
-                       ret_val = hw->mac.ops.request_lport_map(hw);
+               if (mac->ops.request_lport_map)
+                       ret_val = mac->ops.request_lport_map(hw);
 
                goto out;
        }
index 5241e0873397692d6365a907ffe95f16e8739b11..0c84fef750f43a2c9230a580003bbd9b9a5d4ef9 100644 (file)
@@ -148,7 +148,7 @@ enum {
 static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
 };
 
-static void fm10k_add_stat_strings(char **p, const char *prefix,
+static void fm10k_add_stat_strings(u8 **p, const char *prefix,
                                   const struct fm10k_stats stats[],
                                   const unsigned int size)
 {
@@ -164,32 +164,31 @@ static void fm10k_add_stat_strings(char **p, const char *prefix,
 static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
 {
        struct fm10k_intfc *interface = netdev_priv(dev);
-       char *p = (char *)data;
        unsigned int i;
 
-       fm10k_add_stat_strings(&p, "", fm10k_gstrings_net_stats,
+       fm10k_add_stat_strings(&data, "", fm10k_gstrings_net_stats,
                               FM10K_NETDEV_STATS_LEN);
 
-       fm10k_add_stat_strings(&p, "", fm10k_gstrings_global_stats,
+       fm10k_add_stat_strings(&data, "", fm10k_gstrings_global_stats,
                               FM10K_GLOBAL_STATS_LEN);
 
-       fm10k_add_stat_strings(&p, "", fm10k_gstrings_mbx_stats,
+       fm10k_add_stat_strings(&data, "", fm10k_gstrings_mbx_stats,
                               FM10K_MBX_STATS_LEN);
 
        if (interface->hw.mac.type != fm10k_mac_vf)
-               fm10k_add_stat_strings(&p, "", fm10k_gstrings_pf_stats,
+               fm10k_add_stat_strings(&data, "", fm10k_gstrings_pf_stats,
                                       FM10K_PF_STATS_LEN);
 
        for (i = 0; i < interface->hw.mac.max_queues; i++) {
                char prefix[ETH_GSTRING_LEN];
 
                snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i);
-               fm10k_add_stat_strings(&p, prefix,
+               fm10k_add_stat_strings(&data, prefix,
                                       fm10k_gstrings_queue_stats,
                                       FM10K_QUEUE_STATS_LEN);
 
                snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i);
-               fm10k_add_stat_strings(&p, prefix,
+               fm10k_add_stat_strings(&data, prefix,
                                       fm10k_gstrings_queue_stats,
                                       FM10K_QUEUE_STATS_LEN);
        }
@@ -198,18 +197,16 @@ static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
 static void fm10k_get_strings(struct net_device *dev,
                              u32 stringset, u8 *data)
 {
-       char *p = (char *)data;
-
        switch (stringset) {
        case ETH_SS_TEST:
-               memcpy(data, *fm10k_gstrings_test,
+               memcpy(data, fm10k_gstrings_test,
                       FM10K_TEST_LEN * ETH_GSTRING_LEN);
                break;
        case ETH_SS_STATS:
                fm10k_get_stat_strings(dev, data);
                break;
        case ETH_SS_PRIV_FLAGS:
-               memcpy(p, fm10k_prv_flags,
+               memcpy(data, fm10k_prv_flags,
                       FM10K_PRV_FLAG_LEN * ETH_GSTRING_LEN);
                break;
        }
index 5de937852436acbdafda6036d913339d4b8d5728..5bb233a9614c1cc172d87a7f40bc68003f4af673 100644 (file)
@@ -28,7 +28,7 @@
 
 #include "fm10k.h"
 
-#define DRV_VERSION    "0.21.2-k"
+#define DRV_VERSION    "0.21.7-k"
 #define DRV_SUMMARY    "Intel(R) Ethernet Switch Host Interface Driver"
 const char fm10k_driver_version[] = DRV_VERSION;
 char fm10k_driver_name[] = "fm10k";
@@ -251,6 +251,7 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
 /**
  * fm10k_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_buffer: buffer containing page to add
+ * @size: packet size from rx_desc
  * @rx_desc: descriptor containing length of buffer written by hardware
  * @skb: sk_buff to place the data into
  *
@@ -263,12 +264,12 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
  * true if the buffer can be reused by the interface.
  **/
 static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
+                             unsigned int size,
                              union fm10k_rx_desc *rx_desc,
                              struct sk_buff *skb)
 {
        struct page *page = rx_buffer->page;
        unsigned char *va = page_address(page) + rx_buffer->page_offset;
-       unsigned int size = le16_to_cpu(rx_desc->w.length);
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = FM10K_RX_BUFSZ;
 #else
@@ -314,6 +315,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
                                             union fm10k_rx_desc *rx_desc,
                                             struct sk_buff *skb)
 {
+       unsigned int size = le16_to_cpu(rx_desc->w.length);
        struct fm10k_rx_buffer *rx_buffer;
        struct page *page;
 
@@ -350,11 +352,11 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
        dma_sync_single_range_for_cpu(rx_ring->dev,
                                      rx_buffer->dma,
                                      rx_buffer->page_offset,
-                                     FM10K_RX_BUFSZ,
+                                     size,
                                      DMA_FROM_DEVICE);
 
        /* pull page into skb */
-       if (fm10k_add_rx_frag(rx_buffer, rx_desc, skb)) {
+       if (fm10k_add_rx_frag(rx_buffer, size, rx_desc, skb)) {
                /* hand second half of page back to the ring */
                fm10k_reuse_rx_page(rx_ring, rx_buffer);
        } else {
@@ -473,6 +475,8 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
 
        fm10k_rx_checksum(rx_ring, rx_desc, skb);
 
+       FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;
+
        FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
 
        skb_record_rx_queue(skb, rx_ring->queue_index);
index c9dfa6564fcf54eb8a3315cec39343590f64121c..334088a101c3d64049283ac5b04f6a973150c68c 100644
@@ -2011,9 +2011,10 @@ static void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw,
  *  function can also be used to respond to an error as the connection
  *  resetting would also be a means of dealing with errors.
  **/
-static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
-                                      struct fm10k_mbx_info *mbx)
+static s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
+                                     struct fm10k_mbx_info *mbx)
 {
+       s32 err = 0;
        const enum fm10k_mbx_state state = mbx->state;
 
        switch (state) {
@@ -2026,6 +2027,7 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
        case FM10K_STATE_OPEN:
                /* flush any incomplete work */
                fm10k_sm_mbx_connect_reset(mbx);
+               err = FM10K_ERR_RESET_REQUESTED;
                break;
        case FM10K_STATE_CONNECT:
                /* Update remote value to match local value */
@@ -2035,6 +2037,8 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
        }
 
        fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail);
+
+       return err;
 }
 
 /**
@@ -2115,7 +2119,7 @@ static s32 fm10k_sm_mbx_process(struct fm10k_hw *hw,
 
        switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) {
        case 0:
-               fm10k_sm_mbx_process_reset(hw, mbx);
+               err = fm10k_sm_mbx_process_reset(hw, mbx);
                break;
        case FM10K_SM_MBX_VERSION:
                err = fm10k_sm_mbx_process_version_1(hw, mbx);
index bc5ef6eb3dd6ed82786cfe65b6bd9bebaec6b081..01db688cf5398d434e81c2d4016e5764bbd49820 100644
@@ -1118,8 +1118,8 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface)
  * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This
  * function replaces fm10k_get_stats for kernels which support it.
  */
-static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
-                                                  struct rtnl_link_stats64 *stats)
+static void fm10k_get_stats64(struct net_device *netdev,
+                             struct rtnl_link_stats64 *stats)
 {
        struct fm10k_intfc *interface = netdev_priv(netdev);
        struct fm10k_ring *ring;
@@ -1164,8 +1164,6 @@ static struct rtnl_link_stats64 *fm10k_get_stats64(struct net_device *netdev,
 
        /* following stats updated by fm10k_service_task() */
        stats->rx_missed_errors = netdev->stats.rx_missed_errors;
-
-       return stats;
 }
 
 int fm10k_setup_tc(struct net_device *dev, u8 tc)
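
The signature change above follows the core API: ndo_get_stats64 callbacks no longer return the stats pointer, they simply fill in the caller-provided structure. A hedged sketch of the new callback shape (names here are placeholders, not fm10k's code):

static void example_get_stats64(struct net_device *netdev,
                                struct rtnl_link_stats64 *stats)
{
        /* Fill in the caller-provided structure; nothing is returned,
         * so the old "return stats;" tail goes away. */
        stats->rx_packets = 0;  /* populate from driver counters */
        stats->tx_packets = 0;
}

static const struct net_device_ops example_netdev_ops = {
        .ndo_get_stats64 = example_get_stats64,
};
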
index b1a2f8437d5965535e9e779053ed1bdb8fd22edd..e372a582348015355e5406eae522fbda148558bd 100644
@@ -1144,6 +1144,7 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
        struct fm10k_hw *hw = &interface->hw;
        struct fm10k_mbx_info *mbx = &hw->mbx;
        u32 eicr;
+       s32 err = 0;
 
        /* unmask any set bits related to this interrupt */
        eicr = fm10k_read_reg(hw, FM10K_EICR);
@@ -1159,12 +1160,15 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
 
        /* service mailboxes */
        if (fm10k_mbx_trylock(interface)) {
-               mbx->ops.process(hw, mbx);
+               err = mbx->ops.process(hw, mbx);
                /* handle VFLRE events */
                fm10k_iov_event(interface);
                fm10k_mbx_unlock(interface);
        }
 
+       if (err == FM10K_ERR_RESET_REQUESTED)
+               interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+
        /* if switch toggled state we should reset GLORTs */
        if (eicr & FM10K_EICR_SWITCHNOTREADY) {
                /* force link down for at least 4 seconds */
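
Read together, the mailbox hunks above form one propagation path: fm10k_sm_mbx_process_reset() now reports FM10K_ERR_RESET_REQUESTED, fm10k_sm_mbx_process() passes it through as its return value, and the PF mailbox interrupt handler latches it into the interface flags for the service task to act on. A condensed sketch of the interrupt-side latch (surrounding handling elided):

s32 err = 0;

if (fm10k_mbx_trylock(interface)) {
        err = mbx->ops.process(hw, mbx);        /* may request a reset */
        fm10k_iov_event(interface);
        fm10k_mbx_unlock(interface);
}

/* interrupt context cannot reset the device itself; flag it instead */
if (err == FM10K_ERR_RESET_REQUESTED)
        interface->flags |= FM10K_FLAG_RESET_REQUESTED;
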
index 23fb319fd2a048e7c8d788ce87636b97dbeea7b3..40ee0242a80ae49e10f595462ba3c05bb6880edc 100644
@@ -72,10 +72,6 @@ force_reset:
        fm10k_write_flush(hw);
        udelay(FM10K_RESET_TIMEOUT);
 
-       /* Reset mailbox global interrupts */
-       reg = FM10K_MBX_GLOBAL_REQ_INTERRUPT | FM10K_MBX_GLOBAL_ACK_INTERRUPT;
-       fm10k_write_reg(hw, FM10K_GMBX, reg);
-
        /* Verify we made it out of reset */
        reg = fm10k_read_reg(hw, FM10K_IP);
        if (!(reg & FM10K_IP_NOTINRESET))
index ba8d30984beea742e17345df31f6f333b4989604..7a23d3e47c6fac2f38668a132a23b3b745086325 100644
 /* default to trying for four seconds */
 #define I40E_TRY_LINK_TIMEOUT  (4 * HZ)
 
-/**
- * i40e_is_mac_710 - Return true if MAC is X710/XL710
- * @hw: ptr to the hardware info
- **/
-static inline bool i40e_is_mac_710(struct i40e_hw *hw)
-{
-       if ((hw->mac.type == I40E_MAC_X710) ||
-           (hw->mac.type == I40E_MAC_XL710))
-               return true;
-
-       return false;
-}
-
 /* driver state flags */
 enum i40e_state_t {
        __I40E_TESTING,
@@ -480,6 +467,22 @@ struct i40e_mac_filter {
        enum i40e_filter_state state;
 };
 
+/* Wrapper structure to keep track of filters while we are preparing to send
+ * firmware commands. We cannot send firmware commands while holding a
+ * spinlock, since sending a command may sleep. To avoid this, we wrap the
+ * added filters in a separate structure, which will track the state change
+ * and update the real filter while under lock. We can't simply hold the
+ * filters in a separate list, as this opens a window for a race condition
+ * when adding new MAC addresses to all VLANs, or when adding new VLANs to
+ * all MAC addresses.
+ */
+struct i40e_new_mac_filter {
+       struct hlist_node hlist;
+       struct i40e_mac_filter *f;
+
+       /* Track future changes to state separately */
+       enum i40e_filter_state state;
+};
+
 struct i40e_veb {
        struct i40e_pf *pf;
        u16 idx;
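
The comment above describes a three-phase scheme that the i40e_main.c hunks later in this diff implement; condensed here for orientation (declarations and error handling elided, code mirrors the hunks below):

/* Phase 1: under the lock, snapshot NEW filters into wrappers */
spin_lock_bh(&vsi->mac_filter_hash_lock);
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
        if (f->state == I40E_FILTER_NEW) {
                new = kzalloc(sizeof(*new), GFP_ATOMIC);
                new->f = f;
                new->state = f->state;
                hlist_add_head(&new->hlist, &tmp_add_list);
        }
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);

/* Phase 2: lock dropped; firmware admin-queue commands may sleep here,
 * recording their results only in new->state */

/* Phase 3: re-take the lock and commit the tracked state changes */
spin_lock_bh(&vsi->mac_filter_hash_lock);
hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
        if (new->f->state == I40E_FILTER_NEW)
                new->f->state = new->state;
        hlist_del(&new->hlist);
        kfree(new);
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
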
@@ -762,6 +765,7 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
 void i40e_set_ethtool_ops(struct net_device *netdev);
 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                                        const u8 *macaddr, s16 vlan);
+void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f);
 void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan);
 int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
@@ -804,7 +808,6 @@ int i40e_lan_add_device(struct i40e_pf *pf);
 int i40e_lan_del_device(struct i40e_pf *pf);
 void i40e_client_subtask(struct i40e_pf *pf);
 void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
-void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi);
 void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
 void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
 void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
@@ -834,9 +837,8 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);
 #ifdef I40E_FCOE
-struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
-                                            struct net_device *netdev,
-                                            struct rtnl_link_stats64 *storage);
+void i40e_get_netdev_stats_struct(struct net_device *netdev,
+                                 struct rtnl_link_stats64 *storage);
 int i40e_set_mac(struct net_device *netdev, void *p);
 void i40e_set_rx_mode(struct net_device *netdev);
 #endif
@@ -853,12 +855,12 @@ int i40e_close(struct net_device *netdev);
 int i40e_vsi_open(struct i40e_vsi *vsi);
 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
 int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid);
-int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid);
 void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid);
-void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
-struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
-                                            const u8 *macaddr);
-int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr);
+void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid);
+struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
+                                           const u8 *macaddr);
+int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
 #ifdef I40E_FCOE
index 7fe72abc0b4a817afd99d57516b49b573f25bed5..d570219efd9f33b8934fdfe8ad3e256fb78a2e97 100644
@@ -174,8 +174,6 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
 
        if (!vsi)
                return;
-       memset(&params, 0, sizeof(params));
-       i40e_client_get_params(vsi, &params);
        mutex_lock(&i40e_client_instance_mutex);
        list_for_each_entry(cdev, &i40e_client_instances, list) {
                if (cdev->lan_info.pf == vsi->back) {
@@ -186,6 +184,8 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
                                        "Cannot locate client instance l2_param_change routine\n");
                                continue;
                        }
+                       memset(&params, 0, sizeof(params));
+                       i40e_client_get_params(vsi, &params);
                        if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED,
                                      &cdev->state)) {
                                dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
@@ -200,41 +200,6 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
        mutex_unlock(&i40e_client_instance_mutex);
 }
 
-/**
- * i40e_notify_client_of_netdev_open - call the client open callback
- * @vsi: the VSI with netdev opened
- *
- * If there is a client to this netdev, call the client with open
- **/
-void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
-{
-       struct i40e_client_instance *cdev;
-       int ret = 0;
-
-       if (!vsi)
-               return;
-       mutex_lock(&i40e_client_instance_mutex);
-       list_for_each_entry(cdev, &i40e_client_instances, list) {
-               if (cdev->lan_info.netdev == vsi->netdev) {
-                       if (!cdev->client ||
-                           !cdev->client->ops || !cdev->client->ops->open) {
-                               dev_dbg(&vsi->back->pdev->dev,
-                                       "Cannot locate client instance open routine\n");
-                               continue;
-                       }
-                       if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED,
-                                      &cdev->state))) {
-                               ret = cdev->client->ops->open(&cdev->lan_info,
-                                                             cdev->client);
-                               if (!ret)
-                                       set_bit(__I40E_CLIENT_INSTANCE_OPENED,
-                                               &cdev->state);
-                       }
-               }
-       }
-       mutex_unlock(&i40e_client_instance_mutex);
-}
-
 /**
  * i40e_client_release_qvlist
  * @ldev: pointer to L2 context.
@@ -545,9 +510,10 @@ void i40e_client_subtask(struct i40e_pf *pf)
                        continue;
 
                if (!existing) {
-                       dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
+                       dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x dev=0x%02x func=0x%02x\n",
                                 client->name, pf->hw.pf_id,
-                                pf->hw.bus.device, pf->hw.bus.func);
+                                pf->hw.bus.bus_id, pf->hw.bus.device,
+                                pf->hw.bus.func);
                }
 
                mutex_lock(&i40e_client_instance_mutex);
@@ -596,8 +562,9 @@ int i40e_lan_add_device(struct i40e_pf *pf)
        ldev->pf = pf;
        INIT_LIST_HEAD(&ldev->list);
        list_add(&ldev->list, &i40e_devices);
-       dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x func=0x%02x\n",
-                pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func);
+       dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x dev=0x%02x func=0x%02x\n",
+                pf->hw.pf_id, pf->hw.bus.bus_id,
+                pf->hw.bus.device, pf->hw.bus.func);
 
        /* Since in some cases registration may have happened before a device gets
         * added, we can schedule a subtask to go initiate the clients if
@@ -625,9 +592,9 @@ int i40e_lan_del_device(struct i40e_pf *pf)
        mutex_lock(&i40e_device_mutex);
        list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {
                if (ldev->pf == pf) {
-                       dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x func=0x%02x\n",
-                                pf->hw.pf_id, pf->hw.bus.device,
-                                pf->hw.bus.func);
+                       dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x dev=0x%02x func=0x%02x\n",
+                                pf->hw.pf_id, pf->hw.bus.bus_id,
+                                pf->hw.bus.device, pf->hw.bus.func);
                        list_del(&ldev->list);
                        kfree(ldev);
                        ret = 0;
@@ -688,13 +655,11 @@ static int i40e_client_release(struct i40e_client *client)
  * i40e_client_prepare - prepare client specific resources
  * @client: pointer to the registered client
  *
- * Return 0 on success or < 0 on error
  **/
-static int i40e_client_prepare(struct i40e_client *client)
+static void i40e_client_prepare(struct i40e_client *client)
 {
        struct i40e_device *ldev;
        struct i40e_pf *pf;
-       int ret = 0;
 
        mutex_lock(&i40e_device_mutex);
        list_for_each_entry(ldev, &i40e_devices, list) {
@@ -704,7 +669,6 @@ static int i40e_client_prepare(struct i40e_client *client)
                i40e_service_event_schedule(pf);
        }
        mutex_unlock(&i40e_device_mutex);
-       return ret;
 }
 
 /**
@@ -961,13 +925,9 @@ int i40e_register_client(struct i40e_client *client)
        set_bit(__I40E_CLIENT_REGISTERED, &client->state);
        mutex_unlock(&i40e_client_mutex);
 
-       if (i40e_client_prepare(client)) {
-               ret = -EIO;
-               goto out;
-       }
+       i40e_client_prepare(client);
 
-       pr_info("i40e: Registered client %s with return code %d\n",
-               client->name, ret);
+       pr_info("i40e: Registered client %s\n", client->name);
 out:
        return ret;
 }
index 128735975caa2472cd93a4f9162f857e459f16cd..fc73e4ef27aca7568ae65af20425b7bcaf416f07 100644
@@ -1838,6 +1838,8 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
        hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
        hw_link_info->link_info = resp->link_info;
        hw_link_info->an_info = resp->an_info;
+       hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
+                                                I40E_AQ_CONFIG_FEC_RS_ENA);
        hw_link_info->ext_info = resp->ext_info;
        hw_link_info->loopback = resp->loopback;
        hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
index cc1465aac2efb9a858fe0ef7750b28b5a47584f4..a22e26200bccb1a7e79716a7429e0eb7fe700ecc 100644
@@ -803,9 +803,12 @@ static int i40e_set_settings(struct net_device *netdev,
        if (change || (abilities.link_speed != config.link_speed)) {
                /* copy over the rest of the abilities */
                config.phy_type = abilities.phy_type;
+               config.phy_type_ext = abilities.phy_type_ext;
                config.eee_capability = abilities.eee_capability;
                config.eeer = abilities.eeer_val;
                config.low_power_ctrl = abilities.d3_lpan;
+               config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+                                   I40E_AQ_PHY_FEC_CONFIG_MASK;
 
                /* save the requested speeds */
                hw->phy.link_info.requested_speeds = config.link_speed;
@@ -2072,7 +2075,7 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
        struct i40e_q_vector *q_vector;
        u16 vector, intrl;
 
-       intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit);
+       intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);
 
        vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs;
        vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs;
@@ -2116,6 +2119,7 @@ static int __i40e_set_coalesce(struct net_device *netdev,
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
+       u16 intrl_reg;
        int i;
 
        if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
@@ -2127,8 +2131,9 @@ static int __i40e_set_coalesce(struct net_device *netdev,
                return -EINVAL;
        }
 
-       if (ec->rx_coalesce_usecs_high >= INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
-               netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-235\n");
+       if (ec->rx_coalesce_usecs_high > INTRL_REG_TO_USEC(I40E_MAX_INTRL)) {
+               netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-%lu\n",
+                          INTRL_REG_TO_USEC(I40E_MAX_INTRL));
                return -EINVAL;
        }
 
@@ -2141,7 +2146,12 @@ static int __i40e_set_coalesce(struct net_device *netdev,
                        return -EINVAL;
        }
 
-       vsi->int_rate_limit = ec->rx_coalesce_usecs_high;
+       intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high);
+       vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg);
+       if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) {
+               netif_info(pf, drv, netdev, "Interrupt rate limit rounded down to %d\n",
+                          vsi->int_rate_limit);
+       }
 
        if (ec->tx_coalesce_usecs == 0) {
                if (ec->use_adaptive_tx_coalesce)
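
The rounding warning above falls out of the round trip through i40e_intrl_usec_to_reg() and INTRL_REG_TO_USEC(): the INTRL register counts in units coarser than one microsecond, so converting the requested value to register units and back yields the value the hardware will actually use. A sketch of the mechanism, assuming (for illustration only) a 4-microsecond register granularity:

#define INTRL_GRANULARITY_USEC 4        /* assumed granularity, for the example */

static u16 usec_to_reg(u32 usecs)
{
        return usecs / INTRL_GRANULARITY_USEC;  /* truncates, i.e. rounds down */
}

static u32 reg_to_usec(u16 reg)
{
        return reg * INTRL_GRANULARITY_USEC;
}

/* e.g. a request of 10 usecs becomes 8: usec_to_reg(10) == 2, reg_to_usec(2) == 8 */
u32 requested = ec->rx_coalesce_usecs_high;
u32 effective = reg_to_usec(usec_to_reg(requested));

if (effective != requested)
        netif_info(pf, drv, netdev,
                   "Interrupt rate limit rounded down to %u\n", effective);
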
index ad4cf639430eacf8e746d2f23bda1be8b28af4d4..e83a8ca5dd65480d21cf4a5198b4b9a1954b47b8 100644
@@ -41,7 +41,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 25
+#define DRV_VERSION_BUILD 27
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -409,15 +409,11 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
  * Returns the address of the device statistics structure.
  * The statistics are actually updated from the service task.
  **/
-#ifdef I40E_FCOE
-struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
-                                            struct net_device *netdev,
-                                            struct rtnl_link_stats64 *stats)
-#else
-static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
-                                            struct net_device *netdev,
-                                            struct rtnl_link_stats64 *stats)
+#ifndef I40E_FCOE
+static
 #endif
+void i40e_get_netdev_stats_struct(struct net_device *netdev,
+                                 struct rtnl_link_stats64 *stats)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_ring *tx_ring, *rx_ring;
@@ -426,10 +422,10 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
        int i;
 
        if (test_bit(__I40E_DOWN, &vsi->state))
-               return stats;
+               return;
 
        if (!vsi->tx_rings)
-               return stats;
+               return;
 
        rcu_read_lock();
        for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -469,8 +465,6 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
        stats->rx_dropped       = vsi_stats->rx_dropped;
        stats->rx_crc_errors    = vsi_stats->rx_crc_errors;
        stats->rx_length_errors = vsi_stats->rx_length_errors;
-
-       return stats;
 }
 
 /**
@@ -1261,6 +1255,7 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
                                         int vlan_filters)
 {
        struct i40e_mac_filter *f, *add_head;
+       struct i40e_new_mac_filter *new;
        struct hlist_node *h;
        int bkt, new_vlan;
 
@@ -1279,13 +1274,13 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
         */
 
        /* Update the filters about to be added in place */
-       hlist_for_each_entry(f, tmp_add_list, hlist) {
-               if (vsi->info.pvid && f->vlan != vsi->info.pvid)
-                       f->vlan = vsi->info.pvid;
-               else if (vlan_filters && f->vlan == I40E_VLAN_ANY)
-                       f->vlan = 0;
-               else if (!vlan_filters && f->vlan == 0)
-                       f->vlan = I40E_VLAN_ANY;
+       hlist_for_each_entry(new, tmp_add_list, hlist) {
+               if (vsi->info.pvid && new->f->vlan != vsi->info.pvid)
+                       new->f->vlan = vsi->info.pvid;
+               else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
+                       new->f->vlan = 0;
+               else if (!vlan_filters && new->f->vlan == 0)
+                       new->f->vlan = I40E_VLAN_ANY;
        }
 
        /* Update the remaining active filters */
@@ -1311,9 +1306,16 @@ static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
                        if (!add_head)
                                return -ENOMEM;
 
-                       /* Put the replacement filter into the add list */
-                       hash_del(&add_head->hlist);
-                       hlist_add_head(&add_head->hlist, tmp_add_list);
+                       /* Create a temporary i40e_new_mac_filter */
+                       new = kzalloc(sizeof(*new), GFP_ATOMIC);
+                       if (!new)
+                               return -ENOMEM;
+
+                       new->f = add_head;
+                       new->state = add_head->state;
+
+                       /* Add the new filter to the tmp list */
+                       hlist_add_head(&new->hlist, tmp_add_list);
 
                        /* Put the original filter into the delete list */
                        f->state = I40E_FILTER_REMOVE;
@@ -1440,7 +1442,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
  * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
  * instead of list_for_each_entry().
  **/
-static void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
+void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
 {
        if (!f)
                return;
@@ -1483,18 +1485,19 @@ void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
 }
 
 /**
- * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
+ * i40e_add_mac_filter - Add a MAC filter for all active VLANs
  * @vsi: the VSI to be searched
  * @macaddr: the mac address to be filtered
  *
- * Goes through all the macvlan filters and adds a macvlan filter for each
+ * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
+ * go through all the macvlan filters and add a macvlan filter for each
  * unique vlan that already exists. If a PVID has been assigned, instead only
  * add the macaddr to that VLAN.
  *
  * Returns last filter added on success, else NULL
  **/
-struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
-                                            const u8 *macaddr)
+struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
+                                           const u8 *macaddr)
 {
        struct i40e_mac_filter *f, *add = NULL;
        struct hlist_node *h;
@@ -1504,6 +1507,9 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
                return i40e_add_filter(vsi, macaddr,
                                       le16_to_cpu(vsi->info.pvid));
 
+       if (!i40e_is_vsi_in_vlan(vsi))
+               return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);
+
        hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
                if (f->state == I40E_FILTER_REMOVE)
                        continue;
@@ -1516,15 +1522,16 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi,
 }
 
 /**
- * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
+ * i40e_del_mac_filter - Remove a MAC filter from all VLANs
  * @vsi: the VSI to be searched
  * @macaddr: the mac address to be removed
  *
- * Removes a given MAC address from a VSI, regardless of VLAN
+ * Removes a given MAC address from a VSI regardless of what VLAN it has been
+ * associated with.
  *
  * Returns 0 for success, or error
  **/
-int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, const u8 *macaddr)
+int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
 {
        struct i40e_mac_filter *f;
        struct hlist_node *h;
@@ -1585,8 +1592,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
                netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
 
        spin_lock_bh(&vsi->mac_filter_hash_lock);
-       i40e_del_mac_all_vlan(vsi, netdev->dev_addr);
-       i40e_put_mac_in_vlan(vsi, addr->sa_data);
+       i40e_del_mac_filter(vsi, netdev->dev_addr);
+       i40e_add_mac_filter(vsi, addr->sa_data);
        spin_unlock_bh(&vsi->mac_filter_hash_lock);
        ether_addr_copy(netdev->dev_addr, addr->sa_data);
        if (vsi->type == I40E_VSI_MAIN) {
@@ -1762,14 +1769,8 @@ static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
-       struct i40e_mac_filter *f;
-
-       if (i40e_is_vsi_in_vlan(vsi))
-               f = i40e_put_mac_in_vlan(vsi, addr);
-       else
-               f = i40e_add_filter(vsi, addr, I40E_VLAN_ANY);
 
-       if (f)
+       if (i40e_add_mac_filter(vsi, addr))
                return 0;
        else
                return -ENOMEM;
@@ -1788,10 +1789,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
 
-       if (i40e_is_vsi_in_vlan(vsi))
-               i40e_del_mac_all_vlan(vsi, addr);
-       else
-               i40e_del_filter(vsi, addr, I40E_VLAN_ANY);
+       i40e_del_mac_filter(vsi, addr);
 
        return 0;
 }
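
With both callbacks reduced to the unified add/del helpers, i40e_addr_sync() and i40e_addr_unsync() are the one-line hooks the core address-sync machinery expects. For context, a sketch of how such a pair is typically wired up from ndo_set_rx_mode (illustrative, not necessarily this driver's exact code):

static void example_set_rx_mode(struct net_device *netdev)
{
        /* The core walks the netdev's address lists, calling the sync
         * callback for new entries and unsync for removed ones. */
        __dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
        __dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
}
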
@@ -1829,16 +1827,15 @@ static void i40e_set_rx_mode(struct net_device *netdev)
 }
 
 /**
- * i40e_undo_filter_entries - Undo the changes made to MAC filter entries
+ * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
  * @vsi: Pointer to VSI struct
  * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries need to be undone.
  *
- * MAC filter entries from list were slated to be sent to firmware, either for
- * addition or deletion.
+ * MAC filter entries from this list were slated for deletion.
  **/
-static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
-                                    struct hlist_head *from)
+static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
+                                        struct hlist_head *from)
 {
        struct i40e_mac_filter *f;
        struct hlist_node *h;
@@ -1852,6 +1849,53 @@ static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
        }
 }
 
+/**
+ * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
+ * @vsi: Pointer to vsi struct
+ * @from: Pointer to list which contains MAC filter entries - changes to
+ *        those entries need to be undone.
+ *
+ * MAC filter entries from this list were slated for addition.
+ **/
+static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
+                                        struct hlist_head *from)
+{
+       struct i40e_new_mac_filter *new;
+       struct hlist_node *h;
+
+       hlist_for_each_entry_safe(new, h, from, hlist) {
+               /* We can simply free the wrapper structure */
+               hlist_del(&new->hlist);
+               kfree(new);
+       }
+}
+
+/**
+ * i40e_next_filter - Get the next non-broadcast filter from a list
+ * @next: pointer to filter in list
+ *
+ * Returns the next non-broadcast filter in the list. Required so that we
+ * ignore broadcast filters within the list, since these are not handled via
+ * the normal firmware update path.
+ **/
+static
+struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
+{
+       while (next) {
+               next = hlist_entry(next->hlist.next,
+                                  typeof(struct i40e_new_mac_filter),
+                                  hlist);
+
+               /* keep going if we found a broadcast filter */
+               if (next && is_broadcast_ether_addr(next->f->macaddr))
+                       continue;
+
+               break;
+       }
+
+       return next;
+}
+
 /**
  * i40e_update_filter_state - Update filter state based on return data
  * from firmware
@@ -1865,7 +1909,7 @@ static void i40e_undo_filter_entries(struct i40e_vsi *vsi,
 static int
 i40e_update_filter_state(int count,
                         struct i40e_aqc_add_macvlan_element_data *add_list,
-                        struct i40e_mac_filter *add_head)
+                        struct i40e_new_mac_filter *add_head)
 {
        int retval = 0;
        int i;
@@ -1884,9 +1928,9 @@ i40e_update_filter_state(int count,
                        retval++;
                }
 
-               add_head = hlist_entry(add_head->hlist.next,
-                                      typeof(struct i40e_mac_filter),
-                                      hlist);
+               add_head = i40e_next_filter(add_head);
+               if (!add_head)
+                       break;
        }
 
        return retval;
@@ -1943,7 +1987,7 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
 static
 void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
                          struct i40e_aqc_add_macvlan_element_data *list,
-                         struct i40e_mac_filter *add_head,
+                         struct i40e_new_mac_filter *add_head,
                          int num_add, bool *promisc_changed)
 {
        struct i40e_hw *hw = &vsi->back->hw;
@@ -1971,10 +2015,12 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
  * This function sets or clears the promiscuous broadcast flags for VLAN
  * filters in order to properly receive broadcast frames. Assumes that only
  * broadcast filters are passed.
+ *
+ * Returns status indicating success or failure.
  **/
-static
-void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
-                              struct i40e_mac_filter *f)
+static i40e_status
+i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
+                         struct i40e_mac_filter *f)
 {
        bool enable = f->state == I40E_FILTER_NEW;
        struct i40e_hw *hw = &vsi->back->hw;
@@ -1993,15 +2039,13 @@ void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
                                                            NULL);
        }
 
-       if (aq_ret) {
+       if (aq_ret)
                dev_warn(&vsi->back->pdev->dev,
                         "Error %s setting broadcast promiscuous mode on %s\n",
                         i40e_aq_str(hw, hw->aq.asq_last_status),
                         vsi_name);
-               f->state = I40E_FILTER_FAILED;
-       } else if (enable) {
-               f->state = I40E_FILTER_ACTIVE;
-       }
+
+       return aq_ret;
 }
 
 /**
@@ -2015,7 +2059,8 @@ void i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
 int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 {
        struct hlist_head tmp_add_list, tmp_del_list;
-       struct i40e_mac_filter *f, *add_head = NULL;
+       struct i40e_mac_filter *f;
+       struct i40e_new_mac_filter *new, *add_head = NULL;
        struct i40e_hw *hw = &vsi->back->hw;
        unsigned int failed_filters = 0;
        unsigned int vlan_filters = 0;
@@ -2069,8 +2114,17 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                                continue;
                        }
                        if (f->state == I40E_FILTER_NEW) {
-                               hash_del(&f->hlist);
-                               hlist_add_head(&f->hlist, &tmp_add_list);
+                               /* Create a temporary i40e_new_mac_filter */
+                               new = kzalloc(sizeof(*new), GFP_ATOMIC);
+                               if (!new)
+                                       goto err_no_memory_locked;
+
+                               /* Store pointer to the real filter */
+                               new->f = f;
+                               new->state = f->state;
+
+                               /* Add it to the hash list */
+                               hlist_add_head(&new->hlist, &tmp_add_list);
                        }
 
                        /* Count the number of active (current and new) VLAN
@@ -2105,7 +2159,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                        cmd_flags = 0;
 
                        /* handle broadcast filters by updating the broadcast
-                        * promiscuous flag instead of deleting a MAC filter.
+                        * promiscuous flag and releasing the filter.
                         */
                        if (is_broadcast_ether_addr(f->macaddr)) {
                                i40e_aqc_broadcast_filter(vsi, vsi_name, f);
@@ -2163,36 +2217,37 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                        goto err_no_memory;
 
                num_add = 0;
-               hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) {
+               hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
                        if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                     &vsi->state)) {
-                               f->state = I40E_FILTER_FAILED;
+                               new->state = I40E_FILTER_FAILED;
                                continue;
                        }
 
                        /* handle broadcast filters by updating the broadcast
                         * promiscuous flag instead of adding a MAC filter.
                         */
-                       if (is_broadcast_ether_addr(f->macaddr)) {
-                               u64 key = i40e_addr_to_hkey(f->macaddr);
-                               i40e_aqc_broadcast_filter(vsi, vsi_name, f);
-
-                               hlist_del(&f->hlist);
-                               hash_add(vsi->mac_filter_hash, &f->hlist, key);
+                       if (is_broadcast_ether_addr(new->f->macaddr)) {
+                               if (i40e_aqc_broadcast_filter(vsi, vsi_name,
+                                                             new->f))
+                                       new->state = I40E_FILTER_FAILED;
+                               else
+                                       new->state = I40E_FILTER_ACTIVE;
                                continue;
                        }
 
                        /* add to add array */
                        if (num_add == 0)
-                               add_head = f;
+                               add_head = new;
                        cmd_flags = 0;
-                       ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
-                       if (f->vlan == I40E_VLAN_ANY) {
+                       ether_addr_copy(add_list[num_add].mac_addr,
+                                       new->f->macaddr);
+                       if (new->f->vlan == I40E_VLAN_ANY) {
                                add_list[num_add].vlan_tag = 0;
                                cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
                        } else {
                                add_list[num_add].vlan_tag =
-                                       cpu_to_le16((u16)(f->vlan));
+                                       cpu_to_le16((u16)(new->f->vlan));
                        }
                        add_list[num_add].queue_number = 0;
                        /* set invalid match method for later detection */
@@ -2218,11 +2273,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                 * the VSI's list.
                 */
                spin_lock_bh(&vsi->mac_filter_hash_lock);
-               hlist_for_each_entry_safe(f, h, &tmp_add_list, hlist) {
-                       u64 key = i40e_addr_to_hkey(f->macaddr);
-
-                       hlist_del(&f->hlist);
-                       hash_add(vsi->mac_filter_hash, &f->hlist, key);
+               hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
+                       /* Only update the state if we're still NEW */
+                       if (new->f->state == I40E_FILTER_NEW)
+                               new->f->state = new->state;
+                       hlist_del(&new->hlist);
+                       kfree(new);
                }
                spin_unlock_bh(&vsi->mac_filter_hash_lock);
                kfree(add_list);
@@ -2383,8 +2439,8 @@ err_no_memory:
        /* Restore elements on the temporary add and delete lists */
        spin_lock_bh(&vsi->mac_filter_hash_lock);
 err_no_memory_locked:
-       i40e_undo_filter_entries(vsi, &tmp_del_list);
-       i40e_undo_filter_entries(vsi, &tmp_add_list);
+       i40e_undo_del_filter_entries(vsi, &tmp_del_list);
+       i40e_undo_add_filter_entries(vsi, &tmp_add_list);
        spin_unlock_bh(&vsi->mac_filter_hash_lock);
 
        vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
@@ -2574,12 +2630,15 @@ int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
 /**
  * i40e_vsi_add_vlan - Add VSI membership for given VLAN
  * @vsi: the VSI being configured
- * @vid: VLAN id to be added (0 = untagged only , -1 = any)
+ * @vid: VLAN id to be added
  **/
-int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
+int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
 {
        int err;
 
+       if (!vid || vsi->info.pvid)
+               return -EINVAL;
+
        /* Locked once because all functions invoked below iterate the list */
        spin_lock_bh(&vsi->mac_filter_hash_lock);
        err = i40e_add_vlan_all_mac(vsi, vid);
@@ -2622,10 +2681,13 @@ void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
 /**
  * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
  * @vsi: the VSI being configured
- * @vid: VLAN id to be removed (0 = untagged only , -1 = any)
+ * @vid: VLAN id to be removed
  **/
-void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
+void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
 {
+       if (!vid || vsi->info.pvid)
+               return;
+
        spin_lock_bh(&vsi->mac_filter_hash_lock);
        i40e_rm_vlan_all_mac(vsi, vid);
        spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -3272,7 +3334,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
                wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
                     q_vector->tx.itr);
                wr32(hw, I40E_PFINT_RATEN(vector - 1),
-                    INTRL_USEC_TO_REG(vsi->int_rate_limit));
+                    i40e_intrl_usec_to_reg(vsi->int_rate_limit));
 
                /* Linked list for the queuepairs assigned to this vector */
                wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
@@ -5276,6 +5338,8 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
        enum i40e_aq_link_speed new_speed;
        char *speed = "Unknown";
        char *fc = "Unknown";
+       char *fec = "";
+       char *an = "";
 
        new_speed = vsi->back->hw.phy.link_info.link_speed;
 
@@ -5335,8 +5399,23 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
                break;
        }
 
-       netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
-                   speed, fc);
+       if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
+               fec = ", FEC: None";
+               an = ", Autoneg: False";
+
+               if (vsi->back->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
+                       an = ", Autoneg: True";
+
+               if (vsi->back->hw.phy.link_info.fec_info &
+                   I40E_AQ_CONFIG_FEC_KR_ENA)
+                       fec = ", FEC: CL74 FC-FEC/BASE-R";
+               else if (vsi->back->hw.phy.link_info.fec_info &
+                        I40E_AQ_CONFIG_FEC_RS_ENA)
+                       fec = ", FEC: CL108 RS-FEC";
+       }
+
+       netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s, Flow Control: %s\n",
+                   speed, fec, an, fc);
 }
 
 /**
@@ -8688,7 +8767,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
                                 pf->hw.func_caps.fd_filters_best_effort;
        }
 
-       if (i40e_is_mac_710(&pf->hw) &&
+       if ((pf->hw.mac.type == I40E_MAC_XL710) &&
            (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
            (pf->hw.aq.fw_maj_ver < 4))) {
                pf->flags |= I40E_FLAG_RESTART_AUTONEG;
@@ -8697,13 +8776,13 @@ static int i40e_sw_init(struct i40e_pf *pf)
        }
 
        /* Disable FW LLDP if FW < v4.3 */
-       if (i40e_is_mac_710(&pf->hw) &&
+       if ((pf->hw.mac.type == I40E_MAC_XL710) &&
            (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
            (pf->hw.aq.fw_maj_ver < 4)))
                pf->flags |= I40E_FLAG_STOP_FW_LLDP;
 
        /* Use the FW Set LLDP MIB API if FW > v4.40 */
-       if (i40e_is_mac_710(&pf->hw) &&
+       if ((pf->hw.mac.type == I40E_MAC_XL710) &&
            (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
            (pf->hw.aq.fw_maj_ver >= 5)))
                pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;
@@ -9345,7 +9424,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
                 */
                i40e_rm_default_mac_filter(vsi, mac_addr);
                spin_lock_bh(&vsi->mac_filter_hash_lock);
-               i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
+               i40e_add_mac_filter(vsi, mac_addr);
                spin_unlock_bh(&vsi->mac_filter_hash_lock);
        } else {
                /* relate the VSI_VMDQ name to the VSI_MAIN name */
@@ -9354,7 +9433,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
                random_ether_addr(mac_addr);
 
                spin_lock_bh(&vsi->mac_filter_hash_lock);
-               i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY);
+               i40e_add_mac_filter(vsi, mac_addr);
                spin_unlock_bh(&vsi->mac_filter_hash_lock);
        }
 
@@ -9373,7 +9452,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
         */
        eth_broadcast_addr(broadcast);
        spin_lock_bh(&vsi->mac_filter_hash_lock);
-       i40e_add_filter(vsi, broadcast, I40E_VLAN_ANY);
+       i40e_add_mac_filter(vsi, broadcast);
        spin_unlock_bh(&vsi->mac_filter_hash_lock);
 
        ether_addr_copy(netdev->dev_addr, mac_addr);
@@ -10994,6 +11073,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw->subsystem_device_id = pdev->subsystem_device;
        hw->bus.device = PCI_SLOT(pdev->devfn);
        hw->bus.func = PCI_FUNC(pdev->devfn);
+       hw->bus.bus_id = pdev->bus->number;
        pf->instance = pfs_found;
 
        /* set up the locks for the AQ, do this only once in probe
index 5b6feb7edeb167a75fc0c6c93c82e2b7e0c735d8..fea81ed065db8a57a26d89e31b87758b5c069d20 100644
@@ -55,7 +55,7 @@ struct i40e_dma_mem {
        void *va;
        dma_addr_t pa;
        u32 size;
-} __packed;
+};
 
 #define i40e_allocate_dma_mem(h, m, unused, s, a) \
                        i40e_allocate_dma_mem_d(h, m, s, a)
@@ -64,17 +64,17 @@ struct i40e_dma_mem {
 struct i40e_virt_mem {
        void *va;
        u32 size;
-} __packed;
+};
 
 #define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
 #define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
 
-#define i40e_debug(h, m, s, ...)                                \
-do {                                                            \
-       if (((m) & (h)->debug_mask))                            \
-               pr_info("i40e %02x.%x " s,                      \
-                       (h)->bus.device, (h)->bus.func,         \
-                       ##__VA_ARGS__);                         \
+#define i40e_debug(h, m, s, ...)                               \
+do {                                                           \
+       if (((m) & (h)->debug_mask))                            \
+               pr_info("i40e %02x:%02x.%x " s,                 \
+                       (h)->bus.bus_id, (h)->bus.device,       \
+                       (h)->bus.func, ##__VA_ARGS__);          \
 } while (0)
 
 typedef enum i40e_status_code i40e_status;
index 9e49ffafce28f030ea3108e3d7f60cce36aa73fa..2caee35528fad6151f2c6f1a4eeac4d336596a8f 100644
@@ -280,7 +280,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
-       int i;
+       unsigned int i, cleared = 0;
 
        /* Since we cannot turn off the Rx timestamp logic if the device is
         * configured for Tx timestamping, we check if Rx timestamping is
@@ -306,14 +306,25 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
                    time_is_before_jiffies(pf->latch_events[i] + HZ)) {
                        rd32(hw, I40E_PRTTSYN_RXTIME_H(i));
                        pf->latch_event_flags &= ~BIT(i);
-                       pf->rx_hwtstamp_cleared++;
-                       dev_warn(&pf->pdev->dev,
-                                "Clearing a missed Rx timestamp event for RXTIME[%d]\n",
-                                i);
+                       cleared++;
                }
        }
 
        spin_unlock_bh(&pf->ptp_rx_lock);
+
+       /* Log a warning if more than 2 timestamps got dropped in the same
+        * check. We don't want to warn about every drop because drops can
+        * occur in normal scenarios such as PTP frames on multicast addresses
+        * we aren't listening to. However, the administrator should know if
+        * this is the reason packets aren't receiving timestamps.
+        */
+       if (cleared > 2)
+               dev_dbg(&pf->pdev->dev,
+                       "Dropped %d missed RXTIME timestamp events\n",
+                       cleared);
+
+       /* Finally, update the rx_hwtstamp_cleared counter */
+       pf->rx_hwtstamp_cleared += cleared;
 }
 
 /**
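
The reworked hang check above swaps a per-event dev_warn() for a counter flushed once per pass, so a burst of missed timestamps produces one debug line instead of a log flood. The shape of the pattern, condensed (latch count and the stale-check helper are illustrative):

unsigned int i, cleared = 0;

spin_lock_bh(&pf->ptp_rx_lock);
for (i = 0; i < n_latches; i++)
        if (latch_is_stale(pf, i))      /* illustrative helper */
                cleared++;
spin_unlock_bh(&pf->ptp_rx_lock);

if (cleared > 2)
        dev_dbg(&pf->pdev->dev,
                "Dropped %d missed RXTIME timestamp events\n", cleared);

pf->rx_hwtstamp_cleared += cleared;
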
index 352cf7cd2ef4bd3fbc81819b5cc3bdc095f52b8d..09f09ea7a5e55b4e5da927f6a78947d9fe5e47d8 100644
@@ -432,7 +432,12 @@ unsupported_flow:
                ret = -EINVAL;
        }
 
-       /* The buffer allocated here is freed by the i40e_clean_tx_ring() */
+       /* The buffer allocated here will normally be freed by
+        * i40e_clean_fdir_tx_irq() as it reclaims resources after transmit
+        * completion. In the event of an error adding the buffer to the FDIR
+        * ring, it will immediately be freed. It may also be freed by
+        * i40e_clean_tx_ring() when closing the VSI.
+        */
        return ret;
 }
 
@@ -1013,14 +1018,15 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
        if (!rx_ring->rx_bi)
                return;
 
+       if (rx_ring->skb) {
+               dev_kfree_skb(rx_ring->skb);
+               rx_ring->skb = NULL;
+       }
+
        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
                struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
 
-               if (rx_bi->skb) {
-                       dev_kfree_skb(rx_bi->skb);
-                       rx_bi->skb = NULL;
-               }
                if (!rx_bi->page)
                        continue;
 
@@ -1424,45 +1430,6 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
        skb_record_rx_queue(skb, rx_ring->queue_index);
 }
 
-/**
- * i40e_pull_tail - i40e specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being adjusted
- *
- * This function is an i40e specific version of __pskb_pull_tail.  The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
-{
-       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-       unsigned char *va;
-       unsigned int pull_len;
-
-       /* it is valid to use page_address instead of kmap since we are
-        * working with pages allocated out of the lomem pool per
-        * alloc_page(GFP_ATOMIC)
-        */
-       va = skb_frag_address(frag);
-
-       /* we need the header to contain the greater of either ETH_HLEN or
-        * 60 bytes if the skb->len is less than 60 for skb_pad.
-        */
-       pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
-
-       /* align pull length to size of long to optimize memcpy performance */
-       skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-       /* update all of the pointers */
-       skb_frag_size_sub(frag, pull_len);
-       frag->page_offset += pull_len;
-       skb->data_len -= pull_len;
-       skb->tail += pull_len;
-}
-
 /**
  * i40e_cleanup_headers - Correct empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -1478,10 +1445,6 @@ static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
  **/
 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
 {
-       /* place header in linear portion of buffer */
-       if (skb_is_nonlinear(skb))
-               i40e_pull_tail(rx_ring, skb);
-
        /* if eth_skb_pad returns an error the skb was freed */
        if (eth_skb_pad(skb))
                return true;
@@ -1513,19 +1476,85 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
 }
 
 /**
- * i40e_page_is_reserved - check if reuse is possible
+ * i40e_page_is_reusable - check if any reuse is possible
  * @page: page struct to check
+ *
+ * A page is not reusable if it was allocated under low memory
+ * conditions, or it's not in the same NUMA node as this CPU.
  */
-static inline bool i40e_page_is_reserved(struct page *page)
+static inline bool i40e_page_is_reusable(struct page *page)
 {
-       return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+       return (page_to_nid(page) == numa_mem_id()) &&
+               !page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_can_reuse_rx_page - Determine if this page can be reused by
+ * the adapter for another receive
+ *
+ * @rx_buffer: buffer containing the page
+ * @page: page address from rx_buffer
+ * @truesize: actual size of the buffer in this page
+ *
+ * If page is reusable, rx_buffer->page_offset is adjusted to point to
+ * an unused region in the page.
+ *
+ * For small pages, @truesize will be a constant value, half the size
+ * of the memory at page.  We'll attempt to alternate between high and
+ * low halves of the page, with one half ready for use by the hardware
+ * and the other half being consumed by the stack.  We use the page
+ * ref count to determine whether the stack has finished consuming the
+ * portion of this page that was passed up with a previous packet.  If
+ * the page ref count is >1, we'll assume the "other" half page is
+ * still busy, and this page cannot be reused.
+ *
+ * For larger pages, @truesize will be the actual space used by the
+ * received packet (adjusted upward to an even multiple of the cache
+ * line size).  This will advance through the page by the amount
+ * actually consumed by the received packets while there is still
+ * space for a buffer.  Each region of larger pages will be used at
+ * most once, after which the page will not be reused.
+ *
+ * In either case, if the page is reusable its refcount is increased.
+ **/
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+                                  struct page *page,
+                                  const unsigned int truesize)
+{
+#if (PAGE_SIZE >= 8192)
+       unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
+#endif
+
+       /* Is any reuse possible? */
+       if (unlikely(!i40e_page_is_reusable(page)))
+               return false;
+
+#if (PAGE_SIZE < 8192)
+       /* if we are only owner of page we can reuse it */
+       if (unlikely(page_count(page) != 1))
+               return false;
+
+       /* flip page offset to other buffer */
+       rx_buffer->page_offset ^= truesize;
+#else
+       /* move offset up to the next cache line */
+       rx_buffer->page_offset += truesize;
+
+       if (rx_buffer->page_offset > last_offset)
+               return false;
+#endif
+
+       /* Inc ref count on page before passing it up to the stack */
+       get_page(page);
+
+       return true;
 }
 
 /**
  * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
  * @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
+ * @size: packet length from rx_desc
  * @skb: sk_buff to place the data into
  *
  * This function will add the data contained in rx_buffer->page to the skb.
@@ -1538,30 +1567,29 @@ static inline bool i40e_page_is_reserved(struct page *page)
  **/
 static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
                             struct i40e_rx_buffer *rx_buffer,
-                            union i40e_rx_desc *rx_desc,
+                            unsigned int size,
                             struct sk_buff *skb)
 {
        struct page *page = rx_buffer->page;
-       u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-       unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-                           I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+       unsigned char *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = I40E_RXBUFFER_2048;
 #else
        unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
-       unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
 #endif
+       unsigned int pull_len;
+
+       if (unlikely(skb_is_nonlinear(skb)))
+               goto add_tail_frag;
 
        /* will the data fit in the skb we allocated? if so, just
         * copy it as it is pretty small anyway
         */
-       if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
-               unsigned char *va = page_address(page) + rx_buffer->page_offset;
-
+       if (size <= I40E_RX_HDR_SIZE) {
                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-               /* page is not reserved, we can reuse buffer as-is */
-               if (likely(!i40e_page_is_reserved(page)))
+               /* page is reusable, we can reuse buffer as-is */
+               if (likely(i40e_page_is_reusable(page)))
                        return true;
 
                /* this page cannot be reused so discard it */
@@ -1569,34 +1597,26 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
                return false;
        }
 
-       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                       rx_buffer->page_offset, size, truesize);
-
-       /* avoid re-using remote pages */
-       if (unlikely(i40e_page_is_reserved(page)))
-               return false;
-
-#if (PAGE_SIZE < 8192)
-       /* if we are only owner of page we can reuse it */
-       if (unlikely(page_count(page) != 1))
-               return false;
+       /* the linear header must contain the greater of ETH_HLEN or
+        * 60 bytes, so that skb_pad() can pad the frame when skb->len
+        * is less than 60.
+        */
+       pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
 
-       /* flip page offset to other buffer */
-       rx_buffer->page_offset ^= truesize;
-#else
-       /* move offset up to the next cache line */
-       rx_buffer->page_offset += truesize;
+       /* align pull length to size of long to optimize
+        * memcpy performance
+        */
+       memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
 
-       if (rx_buffer->page_offset > last_offset)
-               return false;
-#endif
+       /* update all of the pointers */
+       va += pull_len;
+       size -= pull_len;
 
-       /* Even if we own the page, we are not allowed to use atomic_set()
-        * This would break get_page_unless_zero() users.
-        */
-       get_page(rx_buffer->page);
+add_tail_frag:
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                       (unsigned long)va & ~PAGE_MASK, size, truesize);
 
-       return true;
+       return i40e_can_reuse_rx_page(rx_buffer, page, truesize);
 }
 
 /**
@@ -1611,18 +1631,21 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
  */
 static inline
 struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
-                                    union i40e_rx_desc *rx_desc)
+                                    union i40e_rx_desc *rx_desc,
+                                    struct sk_buff *skb)
 {
+       u64 local_status_error_len =
+               le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+       unsigned int size =
+               (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+               I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
        struct i40e_rx_buffer *rx_buffer;
-       struct sk_buff *skb;
        struct page *page;
 
        rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
        page = rx_buffer->page;
        prefetchw(page);
 
-       skb = rx_buffer->skb;
-
        if (likely(!skb)) {
                void *page_addr = page_address(page) + rx_buffer->page_offset;
 
@@ -1646,19 +1669,17 @@ struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
                 * it now to avoid a possible cache miss
                 */
                prefetchw(skb->data);
-       } else {
-               rx_buffer->skb = NULL;
        }
 
        /* we are reusing so sync this buffer for CPU use */
        dma_sync_single_range_for_cpu(rx_ring->dev,
                                      rx_buffer->dma,
                                      rx_buffer->page_offset,
-                                     I40E_RXBUFFER_2048,
+                                     size,
                                      DMA_FROM_DEVICE);
 
        /* pull page into skb */
-       if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+       if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
                /* hand second half of page back to the ring */
                i40e_reuse_rx_page(rx_ring, rx_buffer);
                rx_ring->rx_stats.page_reuse_count++;
@@ -1700,7 +1721,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
 #define staterrlen rx_desc->wb.qword1.status_error_len
        if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
                i40e_clean_programming_status(rx_ring, rx_desc);
-               rx_ring->rx_bi[ntc].skb = skb;
                return true;
        }
        /* if we are the last buffer then there is nothing else to do */
@@ -1708,8 +1728,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
        if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
                return false;
 
-       /* place skb in next buffer to be received */
-       rx_ring->rx_bi[ntc].skb = skb;
        rx_ring->rx_stats.non_eop_descs++;
 
        return true;
@@ -1730,12 +1748,12 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 {
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+       struct sk_buff *skb = rx_ring->skb;
        u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
        bool failure = false;
 
        while (likely(total_rx_packets < budget)) {
                union i40e_rx_desc *rx_desc;
-               struct sk_buff *skb;
                u16 vlan_tag;
                u8 rx_ptype;
                u64 qword;
@@ -1764,7 +1782,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                 */
                dma_rmb();
 
-               skb = i40e_fetch_rx_buffer(rx_ring, rx_desc);
+               skb = i40e_fetch_rx_buffer(rx_ring, rx_desc, skb);
                if (!skb)
                        break;
 
@@ -1783,8 +1801,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                        continue;
                }
 
-               if (i40e_cleanup_headers(rx_ring, skb))
+               if (i40e_cleanup_headers(rx_ring, skb)) {
+                       skb = NULL;
                        continue;
+               }
 
                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
@@ -1809,11 +1829,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                           le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
 
                i40e_receive_skb(rx_ring, skb, vlan_tag);
+               skb = NULL;
 
                /* update budget accounting */
                total_rx_packets++;
        }
 
+       rx_ring->skb = skb;
+
        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
        rx_ring->stats.bytes += total_rx_bytes;
@@ -2251,14 +2274,16 @@ out:
 
 /**
  * i40e_tso - set up the tso context descriptor
- * @skb:      ptr to the skb we're sending
+ * @first:    pointer to first Tx buffer for xmit
  * @hdr_len:  ptr to the size of the packet header
  * @cd_type_cmd_tso_mss: Quad Word 1
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
-static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
+                   u64 *cd_type_cmd_tso_mss)
 {
+       struct sk_buff *skb = first->skb;
        u64 cd_cmd, cd_tso_len, cd_mss;
        union {
                struct iphdr *v4;
@@ -2271,6 +2296,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
                unsigned char *hdr;
        } l4;
        u32 paylen, l4_offset;
+       u16 gso_segs, gso_size;
        int err;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -2335,10 +2361,18 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
        /* compute length of segmentation header */
        *hdr_len = (l4.tcp->doff * 4) + l4_offset;
 
+       /* pull values out of skb_shinfo */
+       gso_size = skb_shinfo(skb)->gso_size;
+       gso_segs = skb_shinfo(skb)->gso_segs;
+
+       /* update GSO size and bytecount with header size */
+       first->gso_segs = gso_segs;
+       first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
        /* find the field values */
        cd_cmd = I40E_TX_CTX_DESC_TSO;
        cd_tso_len = skb->len - *hdr_len;
-       cd_mss = skb_shinfo(skb)->gso_size;
+       cd_mss = gso_size;
        *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
                                (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
                                (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
@@ -2699,7 +2733,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
        u16 i = tx_ring->next_to_use;
        u32 td_tag = 0;
        dma_addr_t dma;
-       u16 gso_segs;
        u16 desc_count = 1;
 
        if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
@@ -2708,15 +2741,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                         I40E_TX_FLAGS_VLAN_SHIFT;
        }
 
-       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
-               gso_segs = skb_shinfo(skb)->gso_segs;
-       else
-               gso_segs = 1;
-
-       /* multiply data chunks by size of headers */
-       first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
-       first->gso_segs = gso_segs;
-       first->skb = skb;
        first->tx_flags = tx_flags;
 
        dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
@@ -2902,8 +2926,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 
        count = i40e_xmit_descriptor_count(skb);
        if (i40e_chk_linearize(skb, count)) {
-               if (__skb_linearize(skb))
-                       goto out_drop;
+               if (__skb_linearize(skb)) {
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
                count = i40e_txd_use_count(skb->len);
                tx_ring->tx_stats.tx_linearize++;
        }
@@ -2919,6 +2945,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
                return NETDEV_TX_BUSY;
        }
 
+       /* record the location of the first descriptor for this packet */
+       first = &tx_ring->tx_bi[tx_ring->next_to_use];
+       first->skb = skb;
+       first->bytecount = skb->len;
+       first->gso_segs = 1;
+
        /* prepare the xmit flags */
        if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
                goto out_drop;
@@ -2926,16 +2958,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        /* obtain protocol of skb */
        protocol = vlan_get_protocol(skb);
 
-       /* record the location of the first descriptor for this packet */
-       first = &tx_ring->tx_bi[tx_ring->next_to_use];
-
        /* setup IPv4/IPv6 offloads */
        if (protocol == htons(ETH_P_IP))
                tx_flags |= I40E_TX_FLAGS_IPV4;
        else if (protocol == htons(ETH_P_IPV6))
                tx_flags |= I40E_TX_FLAGS_IPV6;
 
-       tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
+       tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
 
        if (tso < 0)
                goto out_drop;
@@ -2973,7 +3002,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        return NETDEV_TX_OK;
 
 out_drop:
-       dev_kfree_skb_any(skb);
+       dev_kfree_skb_any(first->skb);
+       first->skb = NULL;
        return NETDEV_TX_OK;
 }
 
index e065321ce8ed9666748695a01d63d1e9e38c323c..f80979025c0131a07e7b956826f17877b8b7081a 100644 (file)
  */
 #define INTRL_ENA                  BIT(6)
 #define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
-#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
+/**
+ * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
+ * @intrl: interrupt rate limit, in microseconds, to convert
+ *
+ * This function converts an interrupt rate limit in microseconds to the
+ * register format expected by the firmware when setting the interrupt rate
+ * limit.
+ */
+static inline u16 i40e_intrl_usec_to_reg(int intrl)
+{
+       if (intrl >> 2)
+               return ((intrl >> 2) | INTRL_ENA);
+       else
+               return 0;
+}
 #define I40E_INTRL_8K              125     /* 8000 ints/sec */
 #define I40E_INTRL_62K             16      /* 62500 ints/sec */
 #define I40E_INTRL_83K             12      /* 83333 ints/sec */
@@ -240,7 +253,6 @@ struct i40e_tx_buffer {
 };
 
 struct i40e_rx_buffer {
-       struct sk_buff *skb;
        dma_addr_t dma;
        struct page *page;
        unsigned int page_offset;
@@ -341,6 +353,14 @@ struct i40e_ring {
 
        struct rcu_head rcu;            /* to avoid race on free */
        u16 next_to_alloc;
+       struct sk_buff *skb;            /* When i40e_clean_rx_irq() must
+                                        * return before it sees the EOP for
+                                        * the current packet, we save that skb
+                                        * here and resume receiving this
+                                        * packet the next time
+                                        * i40e_clean_rx_irq() is called
+                                        * for this ring.
+                                        */
 } ____cacheline_internodealigned_in_smp;
 
 enum i40e_latency_range {
index edc0abdf4783bb7a7affcbf5fc4e428873be9d73..939f9fdc8f8573fa35554d6917e1722348d2da29 100644 (file)
@@ -125,7 +125,6 @@ enum i40e_debug_mask {
  */
 enum i40e_mac_type {
        I40E_MAC_UNKNOWN = 0,
-       I40E_MAC_X710,
        I40E_MAC_XL710,
        I40E_MAC_VF,
        I40E_MAC_X722,
@@ -185,6 +184,7 @@ struct i40e_link_status {
        enum i40e_aq_link_speed link_speed;
        u8 link_info;
        u8 an_info;
+       u8 fec_info;
        u8 ext_info;
        u8 loopback;
        /* is Link Status Event notification to SW enabled */
@@ -470,6 +470,7 @@ struct i40e_bus_info {
        u16 func;
        u16 device;
        u16 lan_id;
+       u16 bus_id;
 };
 
 /* Flow control (FC) parameters */
index a6198b727e243bbb8ec4584638856632bcba3aec..cbbf8648307a1a4f14d77b39d469484b0913d99d 100644 (file)
@@ -689,17 +689,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
 
                spin_lock_bh(&vsi->mac_filter_hash_lock);
                if (is_valid_ether_addr(vf->default_lan_addr.addr)) {
-                       f = i40e_add_filter(vsi, vf->default_lan_addr.addr,
-                                      vf->port_vlan_id ?
-                                      vf->port_vlan_id : -1);
+                       f = i40e_add_mac_filter(vsi,
+                                               vf->default_lan_addr.addr);
                        if (!f)
                                dev_info(&pf->pdev->dev,
                                         "Could not add MAC filter %pM for VF %d\n",
                                        vf->default_lan_addr.addr, vf->vf_id);
                }
                eth_broadcast_addr(broadcast);
-               f = i40e_add_filter(vsi, broadcast,
-                                   vf->port_vlan_id ? vf->port_vlan_id : -1);
+               f = i40e_add_mac_filter(vsi, broadcast);
                if (!f)
                        dev_info(&pf->pdev->dev,
                                 "Could not allocate VF broadcast filter\n");
@@ -1942,12 +1940,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
                struct i40e_mac_filter *f;
 
                f = i40e_find_mac(vsi, al->list[i].addr);
-               if (!f) {
-                       if (i40e_is_vsi_in_vlan(vsi))
-                               f = i40e_put_mac_in_vlan(vsi, al->list[i].addr);
-                       else
-                               f = i40e_add_filter(vsi, al->list[i].addr, -1);
-               }
+               if (!f)
+                       f = i40e_add_mac_filter(vsi, al->list[i].addr);
 
                if (!f) {
                        dev_err(&pf->pdev->dev,
@@ -2012,7 +2006,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
        spin_lock_bh(&vsi->mac_filter_hash_lock);
        /* delete addresses from the list */
        for (i = 0; i < al->num_elements; i++)
-               if (i40e_del_mac_all_vlan(vsi, al->list[i].addr)) {
+               if (i40e_del_mac_filter(vsi, al->list[i].addr)) {
                        ret = I40E_ERR_INVALID_MAC_ADDR;
                        spin_unlock_bh(&vsi->mac_filter_hash_lock);
                        goto error_param;
@@ -2722,14 +2716,13 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 
        /* delete the temporary mac address */
        if (!is_zero_ether_addr(vf->default_lan_addr.addr))
-               i40e_del_filter(vsi, vf->default_lan_addr.addr,
-                               vf->port_vlan_id ? vf->port_vlan_id : -1);
+               i40e_del_mac_filter(vsi, vf->default_lan_addr.addr);
 
        /* Delete all the filters for this VSI - we're going to kill it
         * anyway.
         */
        hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
-               i40e_del_filter(vsi, f->macaddr, f->vlan);
+               __i40e_del_filter(vsi, f);
 
        spin_unlock_bh(&vsi->mac_filter_hash_lock);
 
index aa63b7fb993daef8c94579bf1a0821840e39b4b1..b5a59dd72a0c88760ce0cfaa816de8f8778cbc5c 100644 (file)
@@ -64,7 +64,6 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
                        hw->mac.type = I40E_MAC_X722;
                        break;
                case I40E_DEV_ID_X722_VF:
-               case I40E_DEV_ID_X722_VF_HV:
                        hw->mac.type = I40E_MAC_X722_VF;
                        break;
                case I40E_DEV_ID_VF:
index 21dcaee1ad1d13f466ec075e77604a13026b146c..d76393c9505606a815d54833ec9f11dc2f64576f 100644 (file)
@@ -48,7 +48,6 @@
 #define I40E_DEV_ID_10G_BASE_T_X722    0x37D2
 #define I40E_DEV_ID_SFP_I_X722         0x37D3
 #define I40E_DEV_ID_X722_VF            0x37CD
-#define I40E_DEV_ID_X722_VF_HV         0x37D9
 
 #define i40e_is_40G_device(d)          ((d) == I40E_DEV_ID_QSFP_A  || \
                                         (d) == I40E_DEV_ID_QSFP_B  || \
index df67ef37b7f3c8c7f1e94fdb8ffe492a406b5049..b758846d4dc5d29858799beea8de3b8f0eec5f66 100644 (file)
@@ -501,14 +501,15 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
        if (!rx_ring->rx_bi)
                return;
 
+       if (rx_ring->skb) {
+               dev_kfree_skb(rx_ring->skb);
+               rx_ring->skb = NULL;
+       }
+
        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
                struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
 
-               if (rx_bi->skb) {
-                       dev_kfree_skb(rx_bi->skb);
-                       rx_bi->skb = NULL;
-               }
                if (!rx_bi->page)
                        continue;
 
@@ -902,45 +903,6 @@ void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
        skb_record_rx_queue(skb, rx_ring->queue_index);
 }
 
-/**
- * i40e_pull_tail - i40e specific version of skb_pull_tail
- * @rx_ring: rx descriptor ring packet is being transacted on
- * @skb: pointer to current skb being adjusted
- *
- * This function is an i40e specific version of __pskb_pull_tail.  The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
- */
-static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
-{
-       struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-       unsigned char *va;
-       unsigned int pull_len;
-
-       /* it is valid to use page_address instead of kmap since we are
-        * working with pages allocated out of the lomem pool per
-        * alloc_page(GFP_ATOMIC)
-        */
-       va = skb_frag_address(frag);
-
-       /* we need the header to contain the greater of either ETH_HLEN or
-        * 60 bytes if the skb->len is less than 60 for skb_pad.
-        */
-       pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
-
-       /* align pull length to size of long to optimize memcpy performance */
-       skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
-
-       /* update all of the pointers */
-       skb_frag_size_sub(frag, pull_len);
-       frag->page_offset += pull_len;
-       skb->data_len -= pull_len;
-       skb->tail += pull_len;
-}
-
 /**
  * i40e_cleanup_headers - Correct empty headers
  * @rx_ring: rx descriptor ring packet is being transacted on
@@ -956,10 +918,6 @@ static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
  **/
 static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
 {
-       /* place header in linear portion of buffer */
-       if (skb_is_nonlinear(skb))
-               i40e_pull_tail(rx_ring, skb);
-
        /* if eth_skb_pad returns an error the skb was freed */
        if (eth_skb_pad(skb))
                return true;
@@ -991,19 +949,85 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
 }
 
 /**
- * i40e_page_is_reserved - check if reuse is possible
+ * i40e_page_is_reusable - check if any reuse is possible
  * @page: page struct to check
+ *
+ * A page is not reusable if it was allocated under low memory
+ * conditions, or it's not in the same NUMA node as this CPU.
  */
-static inline bool i40e_page_is_reserved(struct page *page)
+static inline bool i40e_page_is_reusable(struct page *page)
 {
-       return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+       return (page_to_nid(page) == numa_mem_id()) &&
+               !page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_can_reuse_rx_page - Determine if this page can be reused by
+ * the adapter for another receive
+ *
+ * @rx_buffer: buffer containing the page
+ * @page: page address from rx_buffer
+ * @truesize: actual size of the buffer in this page
+ *
+ * If page is reusable, rx_buffer->page_offset is adjusted to point to
+ * an unused region in the page.
+ *
+ * For small pages, @truesize will be a constant value, half the page
+ * size.  We'll attempt to alternate between high and
+ * low halves of the page, with one half ready for use by the hardware
+ * and the other half being consumed by the stack.  We use the page
+ * ref count to determine whether the stack has finished consuming the
+ * portion of this page that was passed up with a previous packet.  If
+ * the page ref count is >1, we'll assume the "other" half page is
+ * still busy, and this page cannot be reused.
+ *
+ * For larger pages, @truesize will be the actual space used by the
+ * received packet (rounded up to a multiple of the cache line
+ * size).  The offset advances through the page by the amount each
+ * packet actually consumed, for as long as there is still room for
+ * another buffer.  Each region of a larger page is used at most
+ * once, after which the page is not reused.
+ *
+ * In either case, if the page is reusable its refcount is increased.
+ **/
+static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
+                                  struct page *page,
+                                  const unsigned int truesize)
+{
+#if (PAGE_SIZE >= 8192)
+       unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
+#endif
+
+       /* Is any reuse possible? */
+       if (unlikely(!i40e_page_is_reusable(page)))
+               return false;
+
+#if (PAGE_SIZE < 8192)
+       /* if we are the only owner of the page we can reuse it */
+       if (unlikely(page_count(page) != 1))
+               return false;
+
+       /* flip page offset to other buffer */
+       rx_buffer->page_offset ^= truesize;
+#else
+       /* move offset up to the next cache line */
+       rx_buffer->page_offset += truesize;
+
+       if (rx_buffer->page_offset > last_offset)
+               return false;
+#endif
+
+       /* Inc ref count on page before passing it up to the stack */
+       get_page(page);
+
+       return true;
 }
 
 /**
  * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
  * @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
+ * @size: packet length from rx_desc
  * @skb: sk_buff to place the data into
  *
  * This function will add the data contained in rx_buffer->page to the skb.
@@ -1016,30 +1040,29 @@ static inline bool i40e_page_is_reserved(struct page *page)
  **/
 static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
                             struct i40e_rx_buffer *rx_buffer,
-                            union i40e_rx_desc *rx_desc,
+                            unsigned int size,
                             struct sk_buff *skb)
 {
        struct page *page = rx_buffer->page;
-       u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
-       unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
-                           I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+       unsigned char *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = I40E_RXBUFFER_2048;
 #else
        unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
-       unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
 #endif
+       unsigned int pull_len;
+
+       if (unlikely(skb_is_nonlinear(skb)))
+               goto add_tail_frag;
 
        /* will the data fit in the skb we allocated? if so, just
         * copy it as it is pretty small anyway
         */
-       if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
-               unsigned char *va = page_address(page) + rx_buffer->page_offset;
-
+       if (size <= I40E_RX_HDR_SIZE) {
                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-               /* page is not reserved, we can reuse buffer as-is */
-               if (likely(!i40e_page_is_reserved(page)))
+               /* page is reusable, we can reuse buffer as-is */
+               if (likely(i40e_page_is_reusable(page)))
                        return true;
 
                /* this page cannot be reused so discard it */
@@ -1047,34 +1070,26 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
                return false;
        }
 
-       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                       rx_buffer->page_offset, size, truesize);
-
-       /* avoid re-using remote pages */
-       if (unlikely(i40e_page_is_reserved(page)))
-               return false;
-
-#if (PAGE_SIZE < 8192)
-       /* if we are only owner of page we can reuse it */
-       if (unlikely(page_count(page) != 1))
-               return false;
+       /* the linear header must contain the greater of ETH_HLEN or
+        * 60 bytes, so that skb_pad() can pad the frame when skb->len
+        * is less than 60.
+        */
+       pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
 
-       /* flip page offset to other buffer */
-       rx_buffer->page_offset ^= truesize;
-#else
-       /* move offset up to the next cache line */
-       rx_buffer->page_offset += truesize;
+       /* align pull length to size of long to optimize
+        * memcpy performance
+        */
+       memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
 
-       if (rx_buffer->page_offset > last_offset)
-               return false;
-#endif
+       /* update all of the pointers */
+       va += pull_len;
+       size -= pull_len;
 
-       /* Even if we own the page, we are not allowed to use atomic_set()
-        * This would break get_page_unless_zero() users.
-        */
-       get_page(rx_buffer->page);
+add_tail_frag:
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+                       (unsigned long)va & ~PAGE_MASK, size, truesize);
 
-       return true;
+       return i40e_can_reuse_rx_page(rx_buffer, page, truesize);
 }
 
 /**
@@ -1089,18 +1104,21 @@ static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
  */
 static inline
 struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
-                                      union i40e_rx_desc *rx_desc)
+                                      union i40e_rx_desc *rx_desc,
+                                      struct sk_buff *skb)
 {
+       u64 local_status_error_len =
+               le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+       unsigned int size =
+               (local_status_error_len & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+               I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
        struct i40e_rx_buffer *rx_buffer;
-       struct sk_buff *skb;
        struct page *page;
 
        rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
        page = rx_buffer->page;
        prefetchw(page);
 
-       skb = rx_buffer->skb;
-
        if (likely(!skb)) {
                void *page_addr = page_address(page) + rx_buffer->page_offset;
 
@@ -1124,19 +1142,17 @@ struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
                 * it now to avoid a possible cache miss
                 */
                prefetchw(skb->data);
-       } else {
-               rx_buffer->skb = NULL;
        }
 
        /* we are reusing so sync this buffer for CPU use */
        dma_sync_single_range_for_cpu(rx_ring->dev,
                                      rx_buffer->dma,
                                      rx_buffer->page_offset,
-                                     I40E_RXBUFFER_2048,
+                                     size,
                                      DMA_FROM_DEVICE);
 
        /* pull page into skb */
-       if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+       if (i40e_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
                /* hand second half of page back to the ring */
                i40e_reuse_rx_page(rx_ring, rx_buffer);
                rx_ring->rx_stats.page_reuse_count++;
@@ -1180,8 +1196,6 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
        if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
                return false;
 
-       /* place skb in next buffer to be received */
-       rx_ring->rx_bi[ntc].skb = skb;
        rx_ring->rx_stats.non_eop_descs++;
 
        return true;
@@ -1202,12 +1216,12 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
 static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 {
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+       struct sk_buff *skb = rx_ring->skb;
        u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
        bool failure = false;
 
        while (likely(total_rx_packets < budget)) {
                union i40e_rx_desc *rx_desc;
-               struct sk_buff *skb;
                u16 vlan_tag;
                u8 rx_ptype;
                u64 qword;
@@ -1236,7 +1250,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                 */
                dma_rmb();
 
-               skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc);
+               skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc, skb);
                if (!skb)
                        break;
 
@@ -1255,8 +1269,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                        continue;
                }
 
-               if (i40e_cleanup_headers(rx_ring, skb))
+               if (i40e_cleanup_headers(rx_ring, skb)) {
+                       skb = NULL;
                        continue;
+               }
 
                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
@@ -1273,11 +1289,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                           le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
 
                i40e_receive_skb(rx_ring, skb, vlan_tag);
+               skb = NULL;
 
                /* update budget accounting */
                total_rx_packets++;
        }
 
+       rx_ring->skb = skb;
+
        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
        rx_ring->stats.bytes += total_rx_bytes;
@@ -1549,14 +1568,16 @@ out:
 
 /**
  * i40e_tso - set up the tso context descriptor
- * @skb:      ptr to the skb we're sending
+ * @first:    pointer to first Tx buffer for xmit
  * @hdr_len:  ptr to the size of the packet header
  * @cd_type_cmd_tso_mss: Quad Word 1
  *
  * Returns 0 if no TSO can happen, 1 if tso is going, or error
  **/
-static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
+                   u64 *cd_type_cmd_tso_mss)
 {
+       struct sk_buff *skb = first->skb;
        u64 cd_cmd, cd_tso_len, cd_mss;
        union {
                struct iphdr *v4;
@@ -1569,6 +1590,7 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
                unsigned char *hdr;
        } l4;
        u32 paylen, l4_offset;
+       u16 gso_segs, gso_size;
        int err;
 
        if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1633,10 +1655,18 @@ static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
        /* compute length of segmentation header */
        *hdr_len = (l4.tcp->doff * 4) + l4_offset;
 
+       /* pull values out of skb_shinfo */
+       gso_size = skb_shinfo(skb)->gso_size;
+       gso_segs = skb_shinfo(skb)->gso_segs;
+
+       /* update GSO size and bytecount with header size */
+       first->gso_segs = gso_segs;
+       first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
        /* find the field values */
        cd_cmd = I40E_TX_CTX_DESC_TSO;
        cd_tso_len = skb->len - *hdr_len;
-       cd_mss = skb_shinfo(skb)->gso_size;
+       cd_mss = gso_size;
        *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
                                (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
                                (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
@@ -1949,7 +1979,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
        u16 i = tx_ring->next_to_use;
        u32 td_tag = 0;
        dma_addr_t dma;
-       u16 gso_segs;
        u16 desc_count = 1;
 
        if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
@@ -1958,15 +1987,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                         I40E_TX_FLAGS_VLAN_SHIFT;
        }
 
-       if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO))
-               gso_segs = skb_shinfo(skb)->gso_segs;
-       else
-               gso_segs = 1;
-
-       /* multiply data chunks by size of headers */
-       first->bytecount = skb->len - hdr_len + (gso_segs * hdr_len);
-       first->gso_segs = gso_segs;
-       first->skb = skb;
        first->tx_flags = tx_flags;
 
        dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
@@ -2151,8 +2171,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 
        count = i40e_xmit_descriptor_count(skb);
        if (i40e_chk_linearize(skb, count)) {
-               if (__skb_linearize(skb))
-                       goto out_drop;
+               if (__skb_linearize(skb)) {
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
                count = i40e_txd_use_count(skb->len);
                tx_ring->tx_stats.tx_linearize++;
        }
@@ -2168,6 +2190,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
                return NETDEV_TX_BUSY;
        }
 
+       /* record the location of the first descriptor for this packet */
+       first = &tx_ring->tx_bi[tx_ring->next_to_use];
+       first->skb = skb;
+       first->bytecount = skb->len;
+       first->gso_segs = 1;
+
        /* prepare the xmit flags */
        if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
                goto out_drop;
@@ -2175,16 +2203,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        /* obtain protocol of skb */
        protocol = vlan_get_protocol(skb);
 
-       /* record the location of the first descriptor for this packet */
-       first = &tx_ring->tx_bi[tx_ring->next_to_use];
-
        /* setup IPv4/IPv6 offloads */
        if (protocol == htons(ETH_P_IP))
                tx_flags |= I40E_TX_FLAGS_IPV4;
        else if (protocol == htons(ETH_P_IPV6))
                tx_flags |= I40E_TX_FLAGS_IPV6;
 
-       tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
+       tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);
 
        if (tso < 0)
                goto out_drop;
@@ -2211,7 +2236,8 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        return NETDEV_TX_OK;
 
 out_drop:
-       dev_kfree_skb_any(skb);
+       dev_kfree_skb_any(first->skb);
+       first->skb = NULL;
        return NETDEV_TX_OK;
 }
 
index a5fc789f78eba64c26a2ec322a94914ad339d41a..8274ba68bd32a6583538f7af6fd80417dc25e6ec 100644 (file)
@@ -239,7 +239,6 @@ struct i40e_tx_buffer {
 };
 
 struct i40e_rx_buffer {
-       struct sk_buff *skb;
        dma_addr_t dma;
        struct page *page;
        unsigned int page_offset;
@@ -340,6 +339,14 @@ struct i40e_ring {
 
        struct rcu_head rcu;            /* to avoid race on free */
        u16 next_to_alloc;
+       struct sk_buff *skb;            /* When i40e_clean_rx_irq() must
+                                        * return before it sees the EOP for
+                                        * the current packet, we save that skb
+                                        * here and resume receiving this
+                                        * packet the next time
+                                        * i40e_clean_rx_irq() is called
+                                        * for this ring.
+                                        */
 } ____cacheline_internodealigned_in_smp;
 
 enum i40e_latency_range {
index c85e8a31c0724d81a9c2cd634c53edec2425dfae..16bb88084bb977f900e91dcbce154e0e6ef447c8 100644 (file)
@@ -100,7 +100,6 @@ enum i40e_debug_mask {
  */
 enum i40e_mac_type {
        I40E_MAC_UNKNOWN = 0,
-       I40E_MAC_X710,
        I40E_MAC_XL710,
        I40E_MAC_VF,
        I40E_MAC_X722,
@@ -159,6 +158,7 @@ struct i40e_link_status {
        enum i40e_aq_link_speed link_speed;
        u8 link_info;
        u8 an_info;
+       u8 fec_info;
        u8 ext_info;
        u8 loopback;
        /* is Link Status Event notification to SW enabled */
@@ -443,6 +443,7 @@ struct i40e_bus_info {
        u16 func;
        u16 device;
        u16 lan_id;
+       u16 bus_id;
 };
 
 /* Flow control (FC) parameters */
index fc374f833aa9a930f2d119c306dd2547209884af..d38a2b2aea2b20d2b0bbf9700c90578ba9fc0197 100644 (file)
@@ -81,6 +81,7 @@ enum i40e_virtchnl_ops {
        I40E_VIRTCHNL_OP_GET_STATS = 15,
        I40E_VIRTCHNL_OP_FCOE = 16,
        I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+       I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
        I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
        I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
        I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
index fffe4cf2c20b302df75a830877719dc915254687..00c42d80327668ef3cc8c614570c44ae58738791 100644 (file)
@@ -195,6 +195,7 @@ struct i40evf_adapter {
        u64 hw_csum_rx_error;
        u32 rx_desc_count;
        int num_msix_vectors;
+       u32 client_pending;
        struct msix_entry *msix_entries;
 
        u32 flags;
index c0fc533618005de2092ada53f236d58e2b9f2c23..920c1cb06a92fa8e29696deb863f73e79a82da12 100644 (file)
@@ -38,7 +38,7 @@ static const char i40evf_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 25
+#define DRV_VERSION_BUILD 27
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD) \
@@ -59,7 +59,6 @@ static const struct pci_device_id i40evf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
-       {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF_HV), 0},
        /* required last entry */
        {0, }
 };
@@ -2727,6 +2726,7 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw->subsystem_device_id = pdev->subsystem_device;
        hw->bus.device = PCI_SLOT(pdev->devfn);
        hw->bus.func = PCI_FUNC(pdev->devfn);
+       hw->bus.bus_id = pdev->bus->number;
 
        /* set up the locks for the AQ, do this only once in probe
         * and destroy them only once in remove
index 2059a8e889082531eae906db2c9c1f6b75b6a32c..bee58af390e1c633006ddc38efddf256a307161a 100644 (file)
@@ -999,6 +999,10 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                if (v_opcode != adapter->current_op)
                        return;
                break;
+       case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+               adapter->client_pending &=
+                               ~(BIT(I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
+               break;
        case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
                struct i40e_virtchnl_rss_hena *vrh =
                        (struct i40e_virtchnl_rss_hena *)msg;
index a61447fd778eb579ef0b31d1a0d4c33a9e5386ee..ee443985581fe2be3c60bb0bc23f7cfb40ac652e 100644 (file)
@@ -245,6 +245,17 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
        hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
                        E1000_STATUS_FUNC_SHIFT;
 
+       /* Make sure the PHY is in a good state. Several people have reported
+        * firmware leaving the PHY's page select register set to something
+        * other than the default of zero, which causes the PHY ID read to
+        * access something other than the intended register.
+        */
+       ret_val = hw->phy.ops.reset(hw);
+       if (ret_val) {
+               hw_dbg("Error resetting the PHY.\n");
+               goto out;
+       }
+
        /* Set phy->phy_addr and phy->id. */
        ret_val = igb_get_phy_id_82575(hw);
        if (ret_val)
index 8aa798737d4d392f4e240948755daa0fe5decca2..07d48f2e33699e45ea385e148dd4771a4ac04e1f 100644 (file)
@@ -699,9 +699,9 @@ static s32 igb_update_flash_i210(struct e1000_hw *hw)
 
        ret_val = igb_pool_flash_update_done_i210(hw);
        if (ret_val)
-               hw_dbg("Flash update complete\n");
-       else
                hw_dbg("Flash update time out\n");
+       else
+               hw_dbg("Flash update complete\n");
 
 out:
        return ret_val;
index 5010e2232c504dda296b7d19cc5c5f9931f08202..5eff82678f0ba41f7e171f2101b856a594b2264b 100644 (file)
@@ -792,15 +792,13 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
         * control setting, then the variable hw->fc will
         * be initialized based on a value in the EEPROM.
         */
-       if (hw->mac.type == e1000_i350) {
+       if (hw->mac.type == e1000_i350)
                lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
-               ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
-                                          + lan_offset, 1, &nvm_data);
-        } else {
-               ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
-                                          1, &nvm_data);
-        }
+       else
+               lan_offset = 0;
 
+       ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset,
+                                  1, &nvm_data);
        if (ret_val) {
                hw_dbg("NVM Read Error\n");
                goto out;
@@ -808,8 +806,7 @@ static s32 igb_set_default_fc(struct e1000_hw *hw)
 
        if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
                hw->fc.requested_mode = e1000_fc_none;
-       else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
-                NVM_WORD0F_ASM_DIR)
+       else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
                hw->fc.requested_mode = e1000_fc_tx_pause;
        else
                hw->fc.requested_mode = e1000_fc_full;
index 5b54254aed4f35e4529b7c33df8442ef32d8b0dc..2788a5409023ef41ad9ba62620acbc398b96d490 100644 (file)
@@ -77,6 +77,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw)
        s32 ret_val = 0;
        u16 phy_id;
 
+       /* ensure PHY page selection to fix misconfigured i210 */
+       if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+               phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0);
+
        ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
        if (ret_val)
                goto out;
index d84afdd83e539878a9f056d027b0633a1f0e7fe0..58adbf234e07058b0705d847849243fca695609f 100644 (file)
 #define E1000_VT_CTL   0x0581C  /* VMDq Control - RW */
 #define E1000_WUC      0x05800  /* Wakeup Control - RW */
 #define E1000_WUFC     0x05808  /* Wakeup Filter Control - RW */
-#define E1000_WUS      0x05810  /* Wakeup Status - RO */
+#define E1000_WUS      0x05810  /* Wakeup Status - R/W1C */
 #define E1000_MANC     0x05820  /* Management Control - RW */
 #define E1000_IPAV     0x05838  /* IP Address Valid - RW */
 #define E1000_WUPL     0x05900  /* Wakeup Packet Length - RW */
index 1515abaa5ac9cab53ef4aab0ee05104c5bd2a61f..be456bae816906e24338006a8b3597b539f86959 100644 (file)
@@ -137,8 +137,8 @@ static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
-                                         struct rtnl_link_stats64 *stats);
+static void igb_get_stats64(struct net_device *dev,
+                           struct rtnl_link_stats64 *stats);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
 static void igb_set_uta(struct igb_adapter *adapter, bool set);
@@ -383,9 +383,9 @@ static void igb_dump(struct igb_adapter *adapter)
        /* Print netdevice Info */
        if (netdev) {
                dev_info(&adapter->pdev->dev, "Net device Info\n");
-               pr_info("Device Name     state            trans_start      last_rx\n");
-               pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
-                       netdev->state, dev_trans_start(netdev), netdev->last_rx);
+               pr_info("Device Name     state            trans_start\n");
+               pr_info("%-15s %016lX %016lX\n", netdev->name,
+                       netdev->state, dev_trans_start(netdev));
        }
 
        /* Print Registers */
@@ -3275,7 +3275,9 @@ static int __igb_close(struct net_device *netdev, bool suspending)
 
 int igb_close(struct net_device *netdev)
 {
-       return __igb_close(netdev, false);
+       if (netif_device_present(netdev))
+               return __igb_close(netdev, false);
+       return 0;
 }
 
 /**
@@ -3394,7 +3396,7 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
             tdba & 0x00000000ffffffffULL);
        wr32(E1000_TDBAH(reg_idx), tdba >> 32);
 
-       ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
+       ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
        wr32(E1000_TDH(reg_idx), 0);
        writel(0, ring->tail);
 
@@ -3733,7 +3735,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
             ring->count * sizeof(union e1000_adv_rx_desc));
 
        /* initialize head and tail */
-       ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
+       ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
        wr32(E1000_RDH(reg_idx), 0);
        writel(0, ring->tail);
 
@@ -5402,8 +5404,8 @@ static void igb_reset_task(struct work_struct *work)
  *  @netdev: network interface device structure
  *  @stats: rtnl_link_stats64 pointer
  **/
-static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
-                                               struct rtnl_link_stats64 *stats)
+static void igb_get_stats64(struct net_device *netdev,
+                           struct rtnl_link_stats64 *stats)
 {
        struct igb_adapter *adapter = netdev_priv(netdev);
 
@@ -5411,8 +5413,6 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
        igb_update_stats(adapter, &adapter->stats64);
        memcpy(stats, &adapter->stats64, sizeof(*stats));
        spin_unlock(&adapter->stats64_lock);
-
-       return stats;
 }
 
 /**
@@ -7564,6 +7564,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
        int retval = 0;
 #endif
 
+       rtnl_lock();
        netif_device_detach(netdev);
 
        if (netif_running(netdev))
@@ -7572,6 +7573,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
        igb_ptp_suspend(adapter);
 
        igb_clear_interrupt_scheme(adapter);
+       rtnl_unlock();
 
 #ifdef CONFIG_PM
        retval = pci_save_state(pdev);
@@ -7690,16 +7692,15 @@ static int igb_resume(struct device *dev)
 
        wr32(E1000_WUS, ~0);
 
-       if (netdev->flags & IFF_UP) {
-               rtnl_lock();
+       rtnl_lock();
+       if (!err && netif_running(netdev))
                err = __igb_open(netdev, true);
-               rtnl_unlock();
-               if (err)
-                       return err;
-       }
 
-       netif_device_attach(netdev);
-       return 0;
+       if (!err)
+               netif_device_attach(netdev);
+       rtnl_unlock();
+
+       return err;
 }
 
 static int igb_runtime_idle(struct device *dev)
@@ -7898,6 +7899,11 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
                pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_enable_wake(pdev, PCI_D3cold, 0);
 
+               /* In case of a PCI error, the adapter loses its HW address,
+                * so we should re-assign it here.
+                */
+               hw->hw_addr = adapter->io_addr;
+
                igb_reset(adapter);
                wr32(E1000_WUS, ~0);
                result = PCI_ERS_RESULT_RECOVERED;
index 5826b1ddedcfe1560de83fd5b24a6e36a4ced323..fbd220d137b34ed78e059ab4c1473d1129ef43a1 100644 (file)
@@ -1817,7 +1817,7 @@ ixgb_clean(struct napi_struct *napi, int budget)
 
        /* If budget not fully consumed, exit the polling mode */
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                if (!test_bit(__IXGB_DOWN, &adapter->flags))
                        ixgb_irq_enable(adapter);
        }
index ef81c3d8c2952fa305390232bf9e93a901f402f2..a2cc43d28888012dbed53714c35ab110e6c3a9e3 100644 (file)
@@ -55,9 +55,6 @@
 
 #include <net/busy_poll.h>
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#define BP_EXTENDED_STATS
-#endif
 /* common prefix used by pr_<> macros */
 #undef pr_fmt
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #define IXGBE_RXBUFFER_4K    4096
 #define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */
 
+#define IXGBE_SKB_PAD          (NET_SKB_PAD + NET_IP_ALIGN)
+#if (PAGE_SIZE < 8192)
+#define IXGBE_MAX_FRAME_BUILD_SKB \
+       (SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD)
+#else
+#define IXGBE_MAX_FRAME_BUILD_SKB IXGBE_RXBUFFER_2K
+#endif
+
 /*
  * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
  * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IXGBE_RX_BUFFER_WRITE  16      /* Must be power of 2 */
 
+#define IXGBE_RX_DMA_ATTR \
+       (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
 enum ixgbe_tx_flags {
        /* cmd_type flags */
        IXGBE_TX_FLAGS_HW_VLAN  = 0x01,
@@ -159,6 +167,7 @@ enum ixgbevf_xcast_modes {
        IXGBEVF_XCAST_MODE_NONE = 0,
        IXGBEVF_XCAST_MODE_MULTI,
        IXGBEVF_XCAST_MODE_ALLMULTI,
+       IXGBEVF_XCAST_MODE_PROMISC,
 };
 
 struct vf_macvlans {
@@ -194,17 +203,17 @@ struct ixgbe_rx_buffer {
        struct sk_buff *skb;
        dma_addr_t dma;
        struct page *page;
-       unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+       __u32 page_offset;
+#else
+       __u16 page_offset;
+#endif
+       __u16 pagecnt_bias;
 };
 
 struct ixgbe_queue_stats {
        u64 packets;
        u64 bytes;
-#ifdef BP_EXTENDED_STATS
-       u64 yields;
-       u64 misses;
-       u64 cleaned;
-#endif  /* BP_EXTENDED_STATS */
 };
 
 struct ixgbe_tx_queue_stats {
@@ -225,15 +234,20 @@ struct ixgbe_rx_queue_stats {
 #define IXGBE_TS_HDR_LEN 8
 
 enum ixgbe_ring_state_t {
+       __IXGBE_RX_3K_BUFFER,
+       __IXGBE_RX_BUILD_SKB_ENABLED,
+       __IXGBE_RX_RSC_ENABLED,
+       __IXGBE_RX_CSUM_UDP_ZERO_ERR,
+       __IXGBE_RX_FCOE,
        __IXGBE_TX_FDIR_INIT_DONE,
        __IXGBE_TX_XPS_INIT_DONE,
        __IXGBE_TX_DETECT_HANG,
        __IXGBE_HANG_CHECK_ARMED,
-       __IXGBE_RX_RSC_ENABLED,
-       __IXGBE_RX_CSUM_UDP_ZERO_ERR,
-       __IXGBE_RX_FCOE,
 };
 
+#define ring_uses_build_skb(ring) \
+       test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)
+
 struct ixgbe_fwd_adapter {
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        struct net_device *netdev;
@@ -343,19 +357,20 @@ struct ixgbe_ring_feature {
  */
 static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
 {
-#ifdef IXGBE_FCOE
-       if (test_bit(__IXGBE_RX_FCOE, &ring->state))
-               return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
-                                           IXGBE_RXBUFFER_3K;
+       if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+               return IXGBE_RXBUFFER_3K;
+#if (PAGE_SIZE < 8192)
+       if (ring_uses_build_skb(ring))
+               return IXGBE_MAX_FRAME_BUILD_SKB;
 #endif
        return IXGBE_RXBUFFER_2K;
 }
 
 static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
 {
-#ifdef IXGBE_FCOE
-       if (test_bit(__IXGBE_RX_FCOE, &ring->state))
-               return (PAGE_SIZE < 8192) ? 1 : 0;
+#if (PAGE_SIZE < 8192)
+       if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+               return 1;
 #endif
        return 0;
 }
@@ -398,127 +413,10 @@ struct ixgbe_q_vector {
        struct rcu_head rcu;    /* to avoid race with update stats on free */
        char name[IFNAMSIZ + 9];
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       atomic_t state;
-#endif  /* CONFIG_NET_RX_BUSY_POLL */
-
        /* for dynamic allocation of rings associated with this q_vector */
        struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
 };
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-enum ixgbe_qv_state_t {
-       IXGBE_QV_STATE_IDLE = 0,
-       IXGBE_QV_STATE_NAPI,
-       IXGBE_QV_STATE_POLL,
-       IXGBE_QV_STATE_DISABLE
-};
-
-static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
-{
-       /* reset state to idle */
-       atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
-}
-
-/* called from the device poll routine to get ownership of a q_vector */
-static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
-{
-       int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
-                               IXGBE_QV_STATE_NAPI);
-#ifdef BP_EXTENDED_STATS
-       if (rc != IXGBE_QV_STATE_IDLE)
-               q_vector->tx.ring->stats.yields++;
-#endif
-
-       return rc == IXGBE_QV_STATE_IDLE;
-}
-
-/* returns true is someone tried to get the qv while napi had it */
-static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
-{
-       WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI);
-
-       /* flush any outstanding Rx frames */
-       if (q_vector->napi.gro_list)
-               napi_gro_flush(&q_vector->napi, false);
-
-       /* reset state to idle */
-       atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
-}
-
-/* called from ixgbe_low_latency_poll() */
-static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
-{
-       int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
-                               IXGBE_QV_STATE_POLL);
-#ifdef BP_EXTENDED_STATS
-       if (rc != IXGBE_QV_STATE_IDLE)
-               q_vector->rx.ring->stats.yields++;
-#endif
-       return rc == IXGBE_QV_STATE_IDLE;
-}
-
-/* returns true if someone tried to get the qv while it was locked */
-static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
-{
-       WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL);
-
-       /* reset state to idle */
-       atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
-}
-
-/* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
-{
-       return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL;
-}
-
-/* false if QV is currently owned */
-static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
-{
-       int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
-                               IXGBE_QV_STATE_DISABLE);
-
-       return rc == IXGBE_QV_STATE_IDLE;
-}
-
-#else /* CONFIG_NET_RX_BUSY_POLL */
-static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
-{
-}
-
-static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
-{
-       return true;
-}
-
-static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
-{
-       return false;
-}
-
-static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
-{
-       return false;
-}
-
-static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
-{
-       return false;
-}
-
-static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
-{
-       return false;
-}
-
-static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
-{
-       return true;
-}
-
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 #ifdef CONFIG_IXGBE_HWMON
 
 #define IXGBE_HWMON_TYPE_LOC           0
@@ -661,6 +559,9 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG2_PHY_INTERRUPT              BIT(11)
 #define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED       BIT(12)
 #define IXGBE_FLAG2_VLAN_PROMISC               BIT(13)
+#define IXGBE_FLAG2_EEE_CAPABLE                        BIT(14)
+#define IXGBE_FLAG2_EEE_ENABLED                        BIT(15)
+#define IXGBE_FLAG2_RX_LEGACY                  BIT(16)
 
        /* Tx fast path data */
        int num_tx_queues;
@@ -862,6 +763,7 @@ enum ixgbe_boards {
        board_X550,
        board_X550EM_x,
        board_x550em_a,
+       board_x550em_a_fw,
 };
 
 extern const struct ixgbe_info ixgbe_82598_info;
@@ -870,8 +772,9 @@ extern const struct ixgbe_info ixgbe_X540_info;
 extern const struct ixgbe_info ixgbe_X550_info;
 extern const struct ixgbe_info ixgbe_X550EM_x_info;
 extern const struct ixgbe_info ixgbe_x550em_a_info;
+extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
 #ifdef CONFIG_IXGBE_DCB
-extern const struct dcbnl_rtnl_ops dcbnl_ops;
+extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
 #endif
 
 extern char ixgbe_driver_name[];
index 805ab319e578ef16fc90a046518b54086bd966ce..523f9d05a810f175582e5b1474f4d398bfa0aea2 100644 (file)
@@ -139,8 +139,6 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
        case ixgbe_phy_tn:
                phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
                phy->ops.check_link = &ixgbe_check_phy_link_tnx;
-               phy->ops.get_firmware_version =
-                            &ixgbe_get_phy_firmware_version_tnx;
                break;
        case ixgbe_phy_nl:
                phy->ops.reset = &ixgbe_reset_phy_nl;
index e00aaeb9182740f84c1a6a1ba48732f38f2df5fd..30535e6b68f08c9fc4df2988b0316de5c91e2fcf 100644 (file)
@@ -331,8 +331,6 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
        case ixgbe_phy_tn:
                phy->ops.check_link = &ixgbe_check_phy_link_tnx;
                phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
-               phy->ops.get_firmware_version =
-                            &ixgbe_get_phy_firmware_version_tnx;
                break;
        default:
                break;
index 8832df3eba255c9b99c2110f8d7bcf6f94111071..c38d50c1fcf783fdd83782c3380c14cdb0c4dd9d 100644 (file)
@@ -100,6 +100,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
                case IXGBE_DEV_ID_X550T1:
                case IXGBE_DEV_ID_X550EM_X_10G_T:
                case IXGBE_DEV_ID_X550EM_A_10G_T:
+               case IXGBE_DEV_ID_X550EM_A_1G_T:
+               case IXGBE_DEV_ID_X550EM_A_1G_T_L:
                        supported = true;
                        break;
                default:
@@ -348,7 +350,7 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
        }
        IXGBE_WRITE_FLUSH(hw);
 
-#ifndef CONFIG_SPARC
+#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
        /* Disable relaxed ordering */
        for (i = 0; i < hw->mac.max_tx_queues; i++) {
                u32 regval;
@@ -3382,6 +3384,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
                else
                        *speed = IXGBE_LINK_SPEED_100_FULL;
                break;
+       case IXGBE_LINKS_SPEED_10_X550EM_A:
+               *speed = IXGBE_LINK_SPEED_UNKNOWN;
+               if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
+                   hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
+                       *speed = IXGBE_LINK_SPEED_10_FULL;
+               }
+               break;
        default:
                *speed = IXGBE_LINK_SPEED_UNKNOWN;
        }
@@ -3578,7 +3587,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
 *  Calculates the checksum for some buffer of a specified length.  The
 *  calculated checksum is returned.
  **/
-static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
 {
        u32 i;
        u8 sum = 0;
@@ -3593,43 +3602,29 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
 }
 
 /**
- *  ixgbe_host_interface_command - Issue command to manageability block
+ *  ixgbe_hic_unlocked - Issue command to manageability block unlocked
  *  @hw: pointer to the HW structure
- *  @buffer: contains the command to write and where the return status will
- *           be placed
+ *  @buffer: command to write and where the return status will be placed
  *  @length: length of buffer, must be multiple of 4 bytes
  *  @timeout: time in ms to wait for command completion
- *  @return_data: read and return data from the buffer (true) or not (false)
- *  Needed because FW structures are big endian and decoding of
- *  these fields can be 8 bit or 16 bit based on command. Decoding
- *  is not easily understood without making a table of commands.
- *  So we will leave this up to the caller to read back the data
- *  in these cases.
  *
- *  Communicates with the manageability block.  On success return 0
- *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ *  Communicates with the manageability block. Returns 0 on success,
+ *  IXGBE_ERR_INVALID_ARGUMENT if the buffer is not dword-aligned, or
+ *  IXGBE_ERR_HOST_INTERFACE_COMMAND if the command fails.
+ *
+ *  This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
+ *  by the caller.
  **/
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
-                                u32 length, u32 timeout,
-                                bool return_data)
+s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
+                      u32 timeout)
 {
-       u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
-       u32 hicr, i, bi, fwsts;
-       u16 buf_len, dword_len;
-       union {
-               struct ixgbe_hic_hdr hdr;
-               u32 u32arr[1];
-       } *bp = buffer;
-       s32 status;
+       u32 hicr, i, fwsts;
+       u16 dword_len;
 
        if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
                hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
                return IXGBE_ERR_HOST_INTERFACE_COMMAND;
        }
-       /* Take management host interface semaphore */
-       status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
-       if (status)
-               return status;
 
        /* Set bit 9 of FWSTS clearing FW reset indication */
        fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
@@ -3639,15 +3634,13 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
        hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
        if (!(hicr & IXGBE_HICR_EN)) {
                hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
-               status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
-               goto rel_out;
+               return IXGBE_ERR_HOST_INTERFACE_COMMAND;
        }
 
        /* Calculate length in DWORDs. We must be DWORD aligned */
        if (length % sizeof(u32)) {
                hw_dbg(hw, "Buffer length failure, not aligned to dword");
-               status = IXGBE_ERR_INVALID_ARGUMENT;
-               goto rel_out;
+               return IXGBE_ERR_INVALID_ARGUMENT;
        }
 
        dword_len = length >> 2;
@@ -3657,7 +3650,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
         */
        for (i = 0; i < dword_len; i++)
                IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
-                                     i, cpu_to_le32(bp->u32arr[i]));
+                                     i, cpu_to_le32(buffer[i]));
 
        /* Setting this bit tells the ARC that a new command is pending. */
        IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
@@ -3671,11 +3664,54 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
 
        /* Check command successful completion. */
        if ((timeout && i == timeout) ||
-           !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
-               hw_dbg(hw, "Command has failed with no status valid.\n");
-               status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
-               goto rel_out;
+           !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))
+               return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+       return 0;
+}
+
+/**
+ *  ixgbe_host_interface_command - Issue command to manageability block
+ *  @hw: pointer to the HW structure
+ *  @buffer: contains the command to write and where the return status will
+ *           be placed
+ *  @length: length of buffer, must be multiple of 4 bytes
+ *  @timeout: time in ms to wait for command completion
+ *  @return_data: read and return data from the buffer (true) or not (false)
+ *  Needed because FW structures are big endian and decoding of
+ *  these fields can be 8 bit or 16 bit, depending on the command.
+ *  Decoding is not practical without a table of commands, so the
+ *  caller is left to read back the data in those cases.
+ *
+ *  Communicates with the manageability block. Returns 0 on success,
+ *  the semaphore status if the semaphore cannot be acquired, or
+ *  IXGBE_ERR_HOST_INTERFACE_COMMAND if the command fails.
+ **/
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
+                                u32 length, u32 timeout,
+                                bool return_data)
+{
+       u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+       union {
+               struct ixgbe_hic_hdr hdr;
+               u32 u32arr[1];
+       } *bp = buffer;
+       u16 buf_len, dword_len;
+       s32 status;
+       u32 bi;
+
+       if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+               hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
+               return IXGBE_ERR_HOST_INTERFACE_COMMAND;
        }
+       /* Take management host interface semaphore */
+       status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+       if (status)
+               return status;
+
+       status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
+       if (status)
+               goto rel_out;
 
        if (!return_data)
                goto rel_out;
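
This refactor is a standard lock-split: the entry point keeps ownership of the IXGBE_GSSR_SW_MNG_SM semaphore while the new unlocked worker does the actual register traffic, so code paths that already hold the semaphore (for example, issuing several firmware commands back to back) can call ixgbe_hic_unlocked() directly. A minimal user-space sketch of the shape, with a pthread mutex standing in for the hardware semaphore and hypothetical demo_* names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_sem = PTHREAD_MUTEX_INITIALIZER;

/* Worker: assumes demo_sem is already held by the caller, mirroring the
 * "semaphore is held by the caller" contract of ixgbe_hic_unlocked(). */
static int demo_cmd_unlocked(const unsigned int *buf, unsigned int len)
{
        if (!buf || !len)
                return -1;      /* validate before touching "hardware" */
        /* ... write the command words, ring the doorbell, poll ... */
        return 0;
}

/* Wrapper: acquire, delegate, always release -- the shape of the
 * refactored ixgbe_host_interface_command(). */
static int demo_cmd(const unsigned int *buf, unsigned int len)
{
        int rc;

        pthread_mutex_lock(&demo_sem);
        rc = demo_cmd_unlocked(buf, len);
        pthread_mutex_unlock(&demo_sem);
        return rc;
}

int main(void)
{
        unsigned int cmd[2] = { 0x1, 0x0 };

        printf("rc=%d\n", demo_cmd(cmd, 2));
        return 0;
}
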
@@ -3722,6 +3758,8 @@ rel_out:
  *  @min: driver version minor number
  *  @build: driver version build number
  *  @sub: driver version sub build number
+ *  @len: length of driver_ver string
+ *  @driver_ver: driver string
  *
  *  Sends driver version number to firmware through the manageability
  *  block.  On success return 0
@@ -3729,7 +3767,8 @@ rel_out:
  *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
  **/
 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
-                                u8 build, u8 sub)
+                                u8 build, u8 sub, __always_unused u16 len,
+                                __always_unused const char *driver_ver)
 {
        struct ixgbe_hic_drv_info fw_cmd;
        int i;
index 5b3e3c65927e58b9e4ca602731796d96af0b1769..e083732adf649106ee8ae56d665e77649dba6456 100644 (file)
@@ -111,9 +111,13 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
-                                u8 build, u8 ver);
+                                u8 build, u8 ver, u16 len, const char *str);
+u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
                                 u32 timeout, bool return_data);
+s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 len, u32 timeout);
+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+                         u32 (*data)[FW_PHY_ACT_DATA_COUNT]);
 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
 bool ixgbe_mng_present(struct ixgbe_hw *hw);
 bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
index b8fc3cfec8310b6ac1b83c97810e22b2e64316b8..78c52375acc6a5adf0c5e7570435d3af32418081 100644 (file)
@@ -777,7 +777,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
        return err ? 1 : 0;
 }
 
-const struct dcbnl_rtnl_ops dcbnl_ops = {
+const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops = {
        .ieee_getets    = ixgbe_dcbnl_ieee_getets,
        .ieee_setets    = ixgbe_dcbnl_ieee_setets,
        .ieee_getpfc    = ixgbe_dcbnl_ieee_getpfc,
index fd192bf29b26bae2a30a564aef486052d7c39a95..a7574c7b12af06dbd9e57a8503ea73ec01437c5a 100644 (file)
@@ -151,6 +151,13 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
 };
 #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
 
+static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define IXGBE_PRIV_FLAGS_LEGACY_RX     BIT(0)
+       "legacy-rx",
+};
+
+#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
+
 /* currently supported speeds for 10G */
 #define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
                         SUPPORTED_10000baseKX4_Full | \
@@ -197,15 +204,17 @@ static int ixgbe_get_settings(struct net_device *netdev,
                                   SUPPORTED_1000baseKX_Full :
                                   SUPPORTED_1000baseT_Full;
        if (supported_link & IXGBE_LINK_SPEED_100_FULL)
-               ecmd->supported |= ixgbe_isbackplane(hw->phy.media_type) ?
-                                  SUPPORTED_1000baseKX_Full :
-                                  SUPPORTED_1000baseT_Full;
+               ecmd->supported |= SUPPORTED_100baseT_Full;
+       if (supported_link & IXGBE_LINK_SPEED_10_FULL)
+               ecmd->supported |= SUPPORTED_10baseT_Full;
 
        /* default advertised speed if phy.autoneg_advertised isn't set */
        ecmd->advertising = ecmd->supported;
        /* set the advertised speeds */
        if (hw->phy.autoneg_advertised) {
                ecmd->advertising = 0;
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
+                       ecmd->advertising |= ADVERTISED_10baseT_Full;
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
                        ecmd->advertising |= ADVERTISED_100baseT_Full;
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
@@ -237,6 +246,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
        case ixgbe_phy_tn:
        case ixgbe_phy_aq:
        case ixgbe_phy_x550em_ext_t:
+       case ixgbe_phy_fw:
        case ixgbe_phy_cu_unknown:
                ecmd->supported |= SUPPORTED_TP;
                ecmd->advertising |= ADVERTISED_TP;
@@ -337,6 +347,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
                case IXGBE_LINK_SPEED_10GB_FULL:
                        ethtool_cmd_speed_set(ecmd, SPEED_10000);
                        break;
+               case IXGBE_LINK_SPEED_5GB_FULL:
+                       ethtool_cmd_speed_set(ecmd, SPEED_5000);
+                       break;
                case IXGBE_LINK_SPEED_2_5GB_FULL:
                        ethtool_cmd_speed_set(ecmd, SPEED_2500);
                        break;
@@ -346,6 +359,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
                case IXGBE_LINK_SPEED_100_FULL:
                        ethtool_cmd_speed_set(ecmd, SPEED_100);
                        break;
+               case IXGBE_LINK_SPEED_10_FULL:
+                       ethtool_cmd_speed_set(ecmd, SPEED_10);
+                       break;
                default:
                        break;
                }
@@ -394,6 +410,9 @@ static int ixgbe_set_settings(struct net_device *netdev,
                if (ecmd->advertising & ADVERTISED_100baseT_Full)
                        advertised |= IXGBE_LINK_SPEED_100_FULL;
 
+               if (ecmd->advertising & ADVERTISED_10baseT_Full)
+                       advertised |= IXGBE_LINK_SPEED_10_FULL;
+
                if (old == advertised)
                        return err;
                /* this sets the link speed and restarts auto-neg */
@@ -989,6 +1008,8 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
 
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
+
+       drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
 }
 
 static void ixgbe_get_ringparam(struct net_device *netdev,
@@ -1128,6 +1149,8 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
                return IXGBE_TEST_LEN;
        case ETH_SS_STATS:
                return IXGBE_STATS_LEN;
+       case ETH_SS_PRIV_FLAGS:
+               return IXGBE_PRIV_FLAGS_STR_LEN;
        default:
                return -EOPNOTSUPP;
        }
@@ -1170,12 +1193,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i] = 0;
                        data[i+1] = 0;
                        i += 2;
-#ifdef BP_EXTENDED_STATS
-                       data[i] = 0;
-                       data[i+1] = 0;
-                       data[i+2] = 0;
-                       i += 3;
-#endif
                        continue;
                }
 
@@ -1185,12 +1202,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i+1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
-#ifdef BP_EXTENDED_STATS
-               data[i] = ring->stats.yields;
-               data[i+1] = ring->stats.misses;
-               data[i+2] = ring->stats.cleaned;
-               i += 3;
-#endif
        }
        for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
                ring = adapter->rx_ring[j];
@@ -1198,12 +1209,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i] = 0;
                        data[i+1] = 0;
                        i += 2;
-#ifdef BP_EXTENDED_STATS
-                       data[i] = 0;
-                       data[i+1] = 0;
-                       data[i+2] = 0;
-                       i += 3;
-#endif
                        continue;
                }
 
@@ -1213,12 +1218,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
                        data[i+1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
-#ifdef BP_EXTENDED_STATS
-               data[i] = ring->stats.yields;
-               data[i+1] = ring->stats.misses;
-               data[i+2] = ring->stats.cleaned;
-               i += 3;
-#endif
        }
 
        for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
@@ -1255,28 +1254,12 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
-#ifdef BP_EXTENDED_STATS
-                       sprintf(p, "tx_queue_%u_bp_napi_yield", i);
-                       p += ETH_GSTRING_LEN;
-                       sprintf(p, "tx_queue_%u_bp_misses", i);
-                       p += ETH_GSTRING_LEN;
-                       sprintf(p, "tx_queue_%u_bp_cleaned", i);
-                       p += ETH_GSTRING_LEN;
-#endif /* BP_EXTENDED_STATS */
                }
                for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
                        sprintf(p, "rx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
-#ifdef BP_EXTENDED_STATS
-                       sprintf(p, "rx_queue_%u_bp_poll_yield", i);
-                       p += ETH_GSTRING_LEN;
-                       sprintf(p, "rx_queue_%u_bp_misses", i);
-                       p += ETH_GSTRING_LEN;
-                       sprintf(p, "rx_queue_%u_bp_cleaned", i);
-                       p += ETH_GSTRING_LEN;
-#endif /* BP_EXTENDED_STATS */
                }
                for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
                        sprintf(p, "tx_pb_%u_pxon", i);
@@ -1292,6 +1275,9 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
                }
                /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
                break;
+       case ETH_SS_PRIV_FLAGS:
+               memcpy(data, ixgbe_priv_flags_strings,
+                      IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
        }
 }
 
@@ -1896,7 +1882,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
        tx_ntc = tx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
 
-       while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
+       while (rx_desc->wb.upper.length) {
                /* check Rx buffer */
                rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
 
@@ -1918,7 +1904,16 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
 
                /* unmap buffer on Tx side */
                tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
-               ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+
+               /* Free all the Tx ring sk_buffs */
+               dev_kfree_skb_any(tx_buffer->skb);
+
+               /* unmap skb header data */
+               dma_unmap_single(tx_ring->dev,
+                                dma_unmap_addr(tx_buffer, dma),
+                                dma_unmap_len(tx_buffer, len),
+                                DMA_TO_DEVICE);
+               dma_unmap_len_set(tx_buffer, len, 0);
 
                /* increment Rx/Tx next to clean counters */
                rx_ntc++;
@@ -3173,6 +3168,9 @@ static int ixgbe_get_module_info(struct net_device *dev,
        u8 sff8472_rev, addr_mode;
        bool page_swap = false;
 
+       if (hw->phy.type == ixgbe_phy_fw)
+               return -ENXIO;
+
        /* Check whether we support SFF-8472 or not */
        status = hw->phy.ops.read_i2c_eeprom(hw,
                                             IXGBE_SFF_SFF_8472_COMP,
@@ -3218,6 +3216,9 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
        if (ee->len == 0)
                return -EINVAL;
 
+       if (hw->phy.type == ixgbe_phy_fw)
+               return -ENXIO;
+
        for (i = ee->offset; i < ee->offset + ee->len; i++) {
                /* I2C reads can take long time */
                if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
@@ -3237,6 +3238,167 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
        return 0;
 }
 
+static const struct {
+       ixgbe_link_speed mac_speed;
+       u32 supported;
+} ixgbe_ls_map[] = {
+       { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
+       { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
+       { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
+       { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
+       { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
+};
+
+static const struct {
+       u32 lp_advertised;
+       u32 mac_speed;
+} ixgbe_lp_map[] = {
+       { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
+       { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
+       { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
+       { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
+       { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
+       { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full},
+};
+
+static int
+ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
+{
+       u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+       struct ixgbe_hw *hw = &adapter->hw;
+       s32 rc;
+       u16 i;
+
+       rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
+       if (rc)
+               return rc;
+
+       edata->lp_advertised = 0;
+       for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
+               if (info[0] & ixgbe_lp_map[i].lp_advertised)
+                       edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
+       }
+
+       edata->supported = 0;
+       for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
+               if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
+                       edata->supported |= ixgbe_ls_map[i].supported;
+       }
+
+       edata->advertised = 0;
+       for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
+               if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
+                       edata->advertised |= ixgbe_ls_map[i].supported;
+       }
+
+       edata->eee_enabled = !!edata->advertised;
+       edata->tx_lpi_enabled = edata->eee_enabled;
+       if (edata->advertised & edata->lp_advertised)
+               edata->eee_active = true;
+
+       return 0;
+}
+
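
ixgbe_get_eee_fw() translates between three bit domains (firmware link-partner bits, the PHY's ixgbe_link_speed bits, and ethtool SUPPORTED_* bits) by walking the two small lookup tables above instead of open-coding a chain of tests. The same table-driven translation in isolation; the bit values here are made up for the demo, not the driver's:

#include <stdio.h>

struct bitmap_xlat {
        unsigned int src_bit;   /* bit in the source word */
        unsigned int dst_bit;   /* corresponding bit in the result */
};

static const struct bitmap_xlat demo_map[] = {
        { 1u << 0, 1u << 3 },   /* e.g. FW 100M EEE -> 100baseT full */
        { 1u << 1, 1u << 5 },
        { 1u << 2, 1u << 12 },
};

static unsigned int translate(unsigned int src)
{
        unsigned int dst = 0;
        size_t i;

        for (i = 0; i < sizeof(demo_map) / sizeof(demo_map[0]); i++)
                if (src & demo_map[i].src_bit)
                        dst |= demo_map[i].dst_bit;
        return dst;
}

int main(void)
{
        printf("0x%x\n", translate(0x5));   /* bits 0 and 2 set -> 0x1008 */
        return 0;
}
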
+static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
+               return -EOPNOTSUPP;
+
+       if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
+               return ixgbe_get_eee_fw(adapter, edata);
+
+       return -EOPNOTSUPP;
+}
+
+static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ethtool_eee eee_data;
+       s32 ret_val;
+
+       if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
+               return -EOPNOTSUPP;
+
+       memset(&eee_data, 0, sizeof(struct ethtool_eee));
+
+       ret_val = ixgbe_get_eee(netdev, &eee_data);
+       if (ret_val)
+               return ret_val;
+
+       if (eee_data.eee_enabled && !edata->eee_enabled) {
+               if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
+                       e_err(drv, "Setting EEE tx-lpi is not supported\n");
+                       return -EINVAL;
+               }
+
+               if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
+                       e_err(drv,
+                             "Setting EEE Tx LPI timer is not supported\n");
+                       return -EINVAL;
+               }
+
+               if (eee_data.advertised != edata->advertised) {
+                       e_err(drv,
+                             "Setting EEE advertised speeds is not supported\n");
+                       return -EINVAL;
+               }
+       }
+
+       if (eee_data.eee_enabled != edata->eee_enabled) {
+               if (edata->eee_enabled) {
+                       adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
+                       hw->phy.eee_speeds_advertised =
+                                                  hw->phy.eee_speeds_supported;
+               } else {
+                       adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
+                       hw->phy.eee_speeds_advertised = 0;
+               }
+
+               /* reset link */
+               if (netif_running(netdev))
+                       ixgbe_reinit_locked(adapter);
+               else
+                       ixgbe_reset(adapter);
+       }
+
+       return 0;
+}
+
+static u32 ixgbe_get_priv_flags(struct net_device *netdev)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       u32 priv_flags = 0;
+
+       if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+               priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;
+
+       return priv_flags;
+}
+
+static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       unsigned int flags2 = adapter->flags2;
+
+       flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
+       if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
+               flags2 |= IXGBE_FLAG2_RX_LEGACY;
+
+       if (flags2 != adapter->flags2) {
+               adapter->flags2 = flags2;
+
+               /* reset interface to repopulate queues */
+               if (netif_running(netdev))
+                       ixgbe_reinit_locked(adapter);
+       }
+
+       return 0;
+}
+
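
With the two callbacks above wired into ixgbe_ethtool_ops in the hunk that follows, the "legacy-rx" knob is exposed through ethtool's standard private-flags interface: on a live system it would be read with "ethtool --show-priv-flags <ifname>" and toggled with "ethtool --set-priv-flags <ifname> legacy-rx on" (the interface name depends on the system). Enabling it sets IXGBE_FLAG2_RX_LEGACY and, if the interface is running, reinitializes it so the Rx rings are rebuilt on the old, non-build_skb receive path.
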
 static const struct ethtool_ops ixgbe_ethtool_ops = {
        .get_settings           = ixgbe_get_settings,
        .set_settings           = ixgbe_set_settings,
@@ -3269,8 +3431,12 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
        .get_rxfh_key_size      = ixgbe_get_rxfh_key_size,
        .get_rxfh               = ixgbe_get_rxfh,
        .set_rxfh               = ixgbe_set_rxfh,
+       .get_eee                = ixgbe_get_eee,
+       .set_eee                = ixgbe_set_eee,
        .get_channels           = ixgbe_get_channels,
        .set_channels           = ixgbe_set_channels,
+       .get_priv_flags         = ixgbe_get_priv_flags,
+       .set_priv_flags         = ixgbe_set_priv_flags,
        .get_ts_info            = ixgbe_get_ts_info,
        .get_module_info        = ixgbe_get_module_info,
        .get_module_eeprom      = ixgbe_get_module_eeprom,
index 15ab337fd7ad798846f117a45b321471f9b4b893..1b8be7d813bd992bc8e869f6c458d5197ac44fc4 100644 (file)
@@ -308,6 +308,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
        ixgbe_cache_ring_rss(adapter);
 }
 
+#define IXGBE_RSS_64Q_MASK     0x3F
 #define IXGBE_RSS_16Q_MASK     0xF
 #define IXGBE_RSS_8Q_MASK      0x7
 #define IXGBE_RSS_4Q_MASK      0x3
@@ -604,6 +605,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
  **/
 static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
 {
+       struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_ring_feature *f;
        u16 rss_i;
 
@@ -612,7 +614,11 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
        rss_i = f->limit;
 
        f->indices = rss_i;
-       f->mask = IXGBE_RSS_16Q_MASK;
+
+       if (hw->mac.type < ixgbe_mac_X550)
+               f->mask = IXGBE_RSS_16Q_MASK;
+       else
+               f->mask = IXGBE_RSS_64Q_MASK;
 
        /* disable ATR by default, it will be configured below */
        adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -847,11 +853,6 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
        netif_napi_add(adapter->netdev, &q_vector->napi,
                       ixgbe_poll, 64);
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       /* initialize busy poll */
-       atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE);
-
-#endif
        /* tie q_vector and adapter together */
        adapter->q_vector[v_idx] = q_vector;
        q_vector->adapter = adapter;
index 1e2f39ebd82495495c872fc310c0aec637f3c5fd..060cdce8058f9bd49b1bda7bdef3b1d0fdc4a943 100644 (file)
@@ -72,7 +72,7 @@ char ixgbe_default_device_descr[] =
 static char ixgbe_default_device_descr[] =
                              "Intel(R) 10 Gigabit Network Connection";
 #endif
-#define DRV_VERSION "4.4.0-k"
+#define DRV_VERSION "5.0.0-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
                                "Copyright (c) 1999-2016 Intel Corporation.";
@@ -86,6 +86,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_X550]            = &ixgbe_X550_info,
        [board_X550EM_x]        = &ixgbe_X550EM_x_info,
        [board_x550em_a]        = &ixgbe_x550em_a_info,
+       [board_x550em_a_fw]     = &ixgbe_x550em_a_fw_info,
 };
 
 /* ixgbe_pci_tbl - PCI Device ID Table
@@ -140,6 +141,8 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
        /* required last entry */
        {0, }
 };
@@ -180,6 +183,7 @@ MODULE_VERSION(DRV_VERSION);
 static struct workqueue_struct *ixgbe_wq;
 
 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
+static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
 
 static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
                                          u32 reg, u16 *value)
@@ -607,12 +611,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
        if (netdev) {
                dev_info(&adapter->pdev->dev, "Net device Info\n");
                pr_info("Device Name     state            "
-                       "trans_start      last_rx\n");
-               pr_info("%-15s %016lX %016lX %016lX\n",
+                       "trans_start\n");
+               pr_info("%-15s %016lX %016lX\n",
                        netdev->name,
                        netdev->state,
-                       dev_trans_start(netdev),
-                       netdev->last_rx);
+                       dev_trans_start(netdev));
        }
 
        /* Print Registers */
@@ -942,28 +945,6 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
        }
 }
 
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring,
-                                     struct ixgbe_tx_buffer *tx_buffer)
-{
-       if (tx_buffer->skb) {
-               dev_kfree_skb_any(tx_buffer->skb);
-               if (dma_unmap_len(tx_buffer, len))
-                       dma_unmap_single(ring->dev,
-                                        dma_unmap_addr(tx_buffer, dma),
-                                        dma_unmap_len(tx_buffer, len),
-                                        DMA_TO_DEVICE);
-       } else if (dma_unmap_len(tx_buffer, len)) {
-               dma_unmap_page(ring->dev,
-                              dma_unmap_addr(tx_buffer, dma),
-                              dma_unmap_len(tx_buffer, len),
-                              DMA_TO_DEVICE);
-       }
-       tx_buffer->next_to_watch = NULL;
-       tx_buffer->skb = NULL;
-       dma_unmap_len_set(tx_buffer, len, 0);
-       /* tx_buffer must be completely set up in the transmit path */
-}
-
 static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
@@ -1195,7 +1176,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                                 DMA_TO_DEVICE);
 
                /* clear tx_buffer data */
-               tx_buffer->skb = NULL;
                dma_unmap_len_set(tx_buffer, len, 0);
 
                /* unmap remaining buffers */
@@ -1549,6 +1529,11 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
        }
 }
 
+static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
+{
+       return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
+}
+
 static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
                                    struct ixgbe_rx_buffer *bi)
 {
@@ -1567,8 +1552,10 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
        }
 
        /* map page for use */
-       dma = dma_map_page(rx_ring->dev, page, 0,
-                          ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+       dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+                                ixgbe_rx_pg_size(rx_ring),
+                                DMA_FROM_DEVICE,
+                                IXGBE_RX_DMA_ATTR);
 
        /*
         * if mapping failed free memory back to system since
@@ -1583,7 +1570,8 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
 
        bi->dma = dma;
        bi->page = page;
-       bi->page_offset = 0;
+       bi->page_offset = ixgbe_rx_offset(rx_ring);
+       bi->pagecnt_bias = 1;
 
        return true;
 }
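
Note the switch from dma_map_page() to dma_map_page_attrs() with IXGBE_RX_DMA_ATTR. The flag definition is outside this excerpt, but it plausibly includes DMA_ATTR_SKIP_CPU_SYNC, given that this patch adds explicit dma_sync_single_range_for_device()/..._for_cpu() calls around each buffer's actual use, letting the driver sync only the bytes a frame touched instead of the whole mapping. The buffer also now starts at ixgbe_rx_offset() (headroom on build_skb rings) and carries an initial pagecnt_bias of 1, which the reuse logic below trades against the page refcount.
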
@@ -1598,6 +1586,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
        u16 i = rx_ring->next_to_use;
+       u16 bufsz;
 
        /* nothing to do */
        if (!cleaned_count)
@@ -1607,10 +1596,17 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
        bi = &rx_ring->rx_buffer_info[i];
        i -= rx_ring->count;
 
+       bufsz = ixgbe_rx_bufsz(rx_ring);
+
        do {
                if (!ixgbe_alloc_mapped_page(rx_ring, bi))
                        break;
 
+               /* sync the buffer for use by the device */
+               dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+                                                bi->page_offset, bufsz,
+                                                DMA_FROM_DEVICE);
+
                /*
                 * Refresh the desc even if buffer_addrs didn't change
                 * because each write-back erases this info.
@@ -1626,8 +1622,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
                        i -= rx_ring->count;
                }
 
-               /* clear the status bits for the next_to_use descriptor */
-               rx_desc->wb.upper.status_error = 0;
+               /* clear the length for the next_to_use descriptor */
+               rx_desc->wb.upper.length = 0;
 
                cleaned_count--;
        } while (cleaned_count);
@@ -1717,11 +1713,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
                         struct sk_buff *skb)
 {
-       skb_mark_napi_id(skb, &q_vector->napi);
-       if (ixgbe_qv_busy_polling(q_vector))
-               netif_receive_skb(skb);
-       else
-               napi_gro_receive(&q_vector->napi, skb);
+       napi_gro_receive(&q_vector->napi, skb);
 }
 
 /**
@@ -1833,19 +1825,19 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
 {
        /* if the page was released unmap it, else just sync our portion */
        if (unlikely(IXGBE_CB(skb)->page_released)) {
-               dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
-                              ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
-               IXGBE_CB(skb)->page_released = false;
+               dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
+                                    ixgbe_rx_pg_size(rx_ring),
+                                    DMA_FROM_DEVICE,
+                                    IXGBE_RX_DMA_ATTR);
        } else {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
 
                dma_sync_single_range_for_cpu(rx_ring->dev,
                                              IXGBE_CB(skb)->dma,
                                              frag->page_offset,
-                                             ixgbe_rx_bufsz(rx_ring),
+                                             skb_frag_size(frag),
                                              DMA_FROM_DEVICE);
        }
-       IXGBE_CB(skb)->dma = 0;
 }
 
 /**
@@ -1881,7 +1873,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
        }
 
        /* place header in linear portion of buffer */
-       if (skb_is_nonlinear(skb))
+       if (!skb_headlen(skb))
                ixgbe_pull_tail(rx_ring, skb);
 
 #ifdef IXGBE_FCOE
@@ -1916,14 +1908,14 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
        nta++;
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
-       /* transfer page from old buffer to new buffer */
-       *new_buff = *old_buff;
-
-       /* sync the buffer for use by the device */
-       dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
-                                        new_buff->page_offset,
-                                        ixgbe_rx_bufsz(rx_ring),
-                                        DMA_FROM_DEVICE);
+       /* Transfer page from old buffer to new buffer.
+        * Move each member individually to avoid possible store
+        * forwarding stalls and an unnecessary copy of the skb field.
+        */
+       new_buff->dma           = old_buff->dma;
+       new_buff->page          = old_buff->page;
+       new_buff->page_offset   = old_buff->page_offset;
+       new_buff->pagecnt_bias  = old_buff->pagecnt_bias;
 }
 
 static inline bool ixgbe_page_is_reserved(struct page *page)
@@ -1931,6 +1923,43 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
        return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
+{
+       unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+       struct page *page = rx_buffer->page;
+
+       /* avoid re-using remote pages */
+       if (unlikely(ixgbe_page_is_reserved(page)))
+               return false;
+
+#if (PAGE_SIZE < 8192)
+       /* if we are only owner of page we can reuse it */
+       if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+               return false;
+#else
+       /* The last offset is a bit aggressive in that we assume the
+        * worst case of FCoE being enabled and using a 3K buffer.
+        * However, this should have minimal impact, as the 1K extra is
+        * still less than one buffer in size.
+        */
+#define IXGBE_LAST_OFFSET \
+       (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K)
+       if (rx_buffer->page_offset > IXGBE_LAST_OFFSET)
+               return false;
+#endif
+
+       /* If we have drained the page fragment pool we need to update
+        * the pagecnt_bias and page count so that we fully restock the
+        * number of references the driver holds.
+        */
+       if (unlikely(!pagecnt_bias)) {
+               page_ref_add(page, USHRT_MAX);
+               rx_buffer->pagecnt_bias = USHRT_MAX;
+       }
+
+       return true;
+}
+
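
The reuse test above, together with the pagecnt_bias field, implements batched page reference counting: instead of a page_ref_inc()/put_page() pair per received frame, the driver takes a large batch of references at once (page_ref_add(page, USHRT_MAX)) and pays them back one at a time by decrementing its local bias, so the hot path only touches the atomic page refcount when the batch runs out. A user-space model of the accounting, with plain ints standing in for the atomic page refcount and hypothetical demo_* names:

#include <assert.h>
#include <limits.h>

struct demo_page {
        unsigned int refcount;          /* models page_ref_count() */
};

struct demo_rx_buffer {
        struct demo_page *page;
        unsigned short pagecnt_bias;    /* references the driver still owns */
};

/* one frame consumed: hand one of our references to the stack */
static void demo_rx_frame(struct demo_rx_buffer *buf)
{
        buf->pagecnt_bias--;
}

static int demo_can_reuse(struct demo_rx_buffer *buf)
{
        /* someone other than the driver holds a reference: don't reuse */
        if (buf->page->refcount - buf->pagecnt_bias > 1)
                return 0;

        /* bias exhausted: restock references in one batch */
        if (!buf->pagecnt_bias) {
                buf->page->refcount += USHRT_MAX;
                buf->pagecnt_bias = USHRT_MAX;
        }
        return 1;
}

int main(void)
{
        struct demo_page page = { .refcount = 1 };
        struct demo_rx_buffer buf = { .page = &page, .pagecnt_bias = 1 };

        demo_rx_frame(&buf);                    /* bias 1 -> 0 */
        assert(demo_can_reuse(&buf));           /* restocks the batch */
        assert(buf.pagecnt_bias == USHRT_MAX);
        return 0;
}
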
 /**
  * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
@@ -1946,144 +1975,172 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
  * The function will then update the page offset if necessary and return
  * true if the buffer can be reused by the adapter.
  **/
-static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
+static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
                              struct ixgbe_rx_buffer *rx_buffer,
-                             union ixgbe_adv_rx_desc *rx_desc,
-                             struct sk_buff *skb)
+                             struct sk_buff *skb,
+                             unsigned int size)
 {
-       struct page *page = rx_buffer->page;
-       unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
-       unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+       unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
 #else
-       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
-       unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
-                                  ixgbe_rx_bufsz(rx_ring);
+       unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+                               SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
+                               SKB_DATA_ALIGN(size);
 #endif
-
-       if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
-               unsigned char *va = page_address(page) + rx_buffer->page_offset;
-
-               memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
-               /* page is not reserved, we can reuse buffer as-is */
-               if (likely(!ixgbe_page_is_reserved(page)))
-                       return true;
-
-               /* this page cannot be reused so discard it */
-               __free_pages(page, ixgbe_rx_pg_order(rx_ring));
-               return false;
-       }
-
-       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
                        rx_buffer->page_offset, size, truesize);
-
-       /* avoid re-using remote pages */
-       if (unlikely(ixgbe_page_is_reserved(page)))
-               return false;
-
 #if (PAGE_SIZE < 8192)
-       /* if we are only owner of page we can reuse it */
-       if (unlikely(page_count(page) != 1))
-               return false;
-
-       /* flip page offset to other buffer */
        rx_buffer->page_offset ^= truesize;
 #else
-       /* move offset up to the next cache line */
        rx_buffer->page_offset += truesize;
-
-       if (rx_buffer->page_offset > last_offset)
-               return false;
 #endif
-
-       /* Even if we own the page, we are not allowed to use atomic_set()
-        * This would break get_page_unless_zero() users.
-        */
-       page_ref_inc(page);
-
-       return true;
 }
 
-static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
-                                            union ixgbe_adv_rx_desc *rx_desc)
+static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
+                                                  union ixgbe_adv_rx_desc *rx_desc,
+                                                  struct sk_buff **skb,
+                                                  const unsigned int size)
 {
        struct ixgbe_rx_buffer *rx_buffer;
-       struct sk_buff *skb;
-       struct page *page;
 
        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-       page = rx_buffer->page;
-       prefetchw(page);
+       prefetchw(rx_buffer->page);
+       *skb = rx_buffer->skb;
 
-       skb = rx_buffer->skb;
+       /* Delay unmapping of the first packet. It carries the header
+        * information; HW may still access the header after the writeback.
+        * Only unmap it when EOP is reached.
+        */
+       if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) {
+               if (!*skb)
+                       goto skip_sync;
+       } else {
+               if (*skb)
+                       ixgbe_dma_sync_frag(rx_ring, *skb);
+       }
 
-       if (likely(!skb)) {
-               void *page_addr = page_address(page) +
-                                 rx_buffer->page_offset;
+       /* we are reusing so sync this buffer for CPU use */
+       dma_sync_single_range_for_cpu(rx_ring->dev,
+                                     rx_buffer->dma,
+                                     rx_buffer->page_offset,
+                                     size,
+                                     DMA_FROM_DEVICE);
+skip_sync:
+       rx_buffer->pagecnt_bias--;
 
-               /* prefetch first cache line of first page */
-               prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
-               prefetch(page_addr + L1_CACHE_BYTES);
-#endif
+       return rx_buffer;
+}
 
-               /* allocate a skb to store the frags */
-               skb = napi_alloc_skb(&rx_ring->q_vector->napi,
-                                    IXGBE_RX_HDR_SIZE);
-               if (unlikely(!skb)) {
-                       rx_ring->rx_stats.alloc_rx_buff_failed++;
-                       return NULL;
+static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
+                               struct ixgbe_rx_buffer *rx_buffer,
+                               struct sk_buff *skb)
+{
+       if (ixgbe_can_reuse_rx_page(rx_buffer)) {
+               /* hand second half of page back to the ring */
+               ixgbe_reuse_rx_page(rx_ring, rx_buffer);
+       } else {
+               if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+                       /* the page has been released from the ring */
+                       IXGBE_CB(skb)->page_released = true;
+               } else {
+                       /* we are not reusing the buffer so unmap it */
+                       dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+                                            ixgbe_rx_pg_size(rx_ring),
+                                            DMA_FROM_DEVICE,
+                                            IXGBE_RX_DMA_ATTR);
                }
+               __page_frag_cache_drain(rx_buffer->page,
+                                       rx_buffer->pagecnt_bias);
+       }
 
-               /*
-                * we will be copying header into skb->data in
-                * pskb_may_pull so it is in our interest to prefetch
-                * it now to avoid a possible cache miss
-                */
-               prefetchw(skb->data);
+       /* clear contents of rx_buffer */
+       rx_buffer->page = NULL;
+       rx_buffer->skb = NULL;
+}
 
-               /*
-                * Delay unmapping of the first packet. It carries the
-                * header information, HW may still access the header
-                * after the writeback.  Only unmap it when EOP is
-                * reached
-                */
-               if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)))
-                       goto dma_sync;
+static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
+                                          struct ixgbe_rx_buffer *rx_buffer,
+                                          union ixgbe_adv_rx_desc *rx_desc,
+                                          unsigned int size)
+{
+       void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
+#else
+       unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+       struct sk_buff *skb;
 
-               IXGBE_CB(skb)->dma = rx_buffer->dma;
-       } else {
-               if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
-                       ixgbe_dma_sync_frag(rx_ring, skb);
+       /* prefetch first cache line of first page */
+       prefetch(va);
+#if L1_CACHE_BYTES < 128
+       prefetch(va + L1_CACHE_BYTES);
+#endif
 
-dma_sync:
-               /* we are reusing so sync this buffer for CPU use */
-               dma_sync_single_range_for_cpu(rx_ring->dev,
-                                             rx_buffer->dma,
-                                             rx_buffer->page_offset,
-                                             ixgbe_rx_bufsz(rx_ring),
-                                             DMA_FROM_DEVICE);
+       /* allocate a skb to store the frags */
+       skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE);
+       if (unlikely(!skb))
+               return NULL;
 
-               rx_buffer->skb = NULL;
-       }
+       if (size > IXGBE_RX_HDR_SIZE) {
+               if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+                       IXGBE_CB(skb)->dma = rx_buffer->dma;
 
-       /* pull page into skb */
-       if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
-               /* hand second half of page back to the ring */
-               ixgbe_reuse_rx_page(rx_ring, rx_buffer);
-       } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
-               /* the page has been released from the ring */
-               IXGBE_CB(skb)->page_released = true;
+               skb_add_rx_frag(skb, 0, rx_buffer->page,
+                               rx_buffer->page_offset,
+                               size, truesize);
+#if (PAGE_SIZE < 8192)
+               rx_buffer->page_offset ^= truesize;
+#else
+               rx_buffer->page_offset += truesize;
+#endif
        } else {
-               /* we are not reusing the buffer so unmap it */
-               dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-                              ixgbe_rx_pg_size(rx_ring),
-                              DMA_FROM_DEVICE);
+               memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+               rx_buffer->pagecnt_bias++;
        }
 
-       /* clear contents of buffer_info */
-       rx_buffer->page = NULL;
+       return skb;
+}
+
+static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
+                                      struct ixgbe_rx_buffer *rx_buffer,
+                                      union ixgbe_adv_rx_desc *rx_desc,
+                                      unsigned int size)
+{
+       void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
+#else
+       unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+                               SKB_DATA_ALIGN(IXGBE_SKB_PAD + size);
+#endif
+       struct sk_buff *skb;
+
+       /* prefetch first cache line of first page */
+       prefetch(va);
+#if L1_CACHE_BYTES < 128
+       prefetch(va + L1_CACHE_BYTES);
+#endif
+
+       /* build an skb around the page buffer */
+       skb = build_skb(va - IXGBE_SKB_PAD, truesize);
+       if (unlikely(!skb))
+               return NULL;
+
+       /* update pointers within the skb to store the data */
+       skb_reserve(skb, IXGBE_SKB_PAD);
+       __skb_put(skb, size);
+
+       /* record DMA address if this is the start of a chain of buffers */
+       if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))
+               IXGBE_CB(skb)->dma = rx_buffer->dma;
+
+       /* update buffer offset */
+#if (PAGE_SIZE < 8192)
+       rx_buffer->page_offset ^= truesize;
+#else
+       rx_buffer->page_offset += truesize;
+#endif
 
        return skb;
 }
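
ixgbe_build_skb() is the zero-copy counterpart of ixgbe_construct_skb(): rather than allocating an skb and copying or frag-attaching the payload, it lays the skb bookkeeping directly over the already-mapped buffer. That only works because every buffer on a build_skb ring was set up with IXGBE_SKB_PAD of headroom in front of the data (see ixgbe_rx_offset(), added earlier in this patch) and room for skb_shared_info behind it, which is what the truesize arithmetic above accounts for. A sketch of that layout arithmetic with illustrative sizes standing in for the real macros:

#include <stdio.h>

/* Sketch of the buffer layout build_skb relies on (sizes illustrative):
 *
 *   [ headroom | packet data .............. | skb_shared_info ]
 *   ^buffer     ^va = buffer + headroom
 *
 * truesize must cover everything the skb will be charged for.
 */
#define DEMO_PAD        64      /* stands in for IXGBE_SKB_PAD */
#define DEMO_SHINFO     320     /* stands in for aligned skb_shared_info */
#define DEMO_ALIGN(x)   (((x) + 63) & ~63UL)   /* cacheline-style align */

int main(void)
{
        unsigned long size = 1500;      /* received frame length */
        unsigned long truesize = DEMO_SHINFO + DEMO_ALIGN(DEMO_PAD + size);

        printf("frame %lu -> truesize %lu\n", size, truesize);
        return 0;
}
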
@@ -2115,7 +2172,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
        while (likely(total_rx_packets < budget)) {
                union ixgbe_adv_rx_desc *rx_desc;
+               struct ixgbe_rx_buffer *rx_buffer;
                struct sk_buff *skb;
+               unsigned int size;
 
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
@@ -2124,8 +2183,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                }
 
                rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
-
-               if (!rx_desc->wb.upper.status_error)
+               size = le16_to_cpu(rx_desc->wb.upper.length);
+               if (!size)
                        break;
 
                /* This memory barrier is needed to keep us from reading
@@ -2134,13 +2193,26 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 */
                dma_rmb();
 
+               rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
+
                /* retrieve a buffer from the ring */
-               skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc);
+               if (skb)
+                       ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size);
+               else if (ring_uses_build_skb(rx_ring))
+                       skb = ixgbe_build_skb(rx_ring, rx_buffer,
+                                             rx_desc, size);
+               else
+                       skb = ixgbe_construct_skb(rx_ring, rx_buffer,
+                                                 rx_desc, size);
 
                /* exit if we failed to retrieve a buffer */
-               if (!skb)
+               if (!skb) {
+                       rx_ring->rx_stats.alloc_rx_buff_failed++;
+                       rx_buffer->pagecnt_bias++;
                        break;
+               }
 
+               ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
                cleaned_count++;
 
                /* place incomplete frames back on ring for completion */
@@ -2198,40 +2270,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        return total_rx_packets;
 }
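
The size-then-dma_rmb() sequence above is a load-acquire pattern: the
non-zero length written back by the device is the "descriptor done" flag,
and no other descriptor field may be read until it has been observed. A
minimal userspace analog using C11 atomics (names are illustrative, not
driver code):

#include <stdatomic.h>
#include <stdio.h>

struct desc {
	unsigned int payload;		/* written by the "device" first */
	_Atomic unsigned int length;	/* non-zero means writeback complete */
};

static void device_writeback(struct desc *d, unsigned int data,
			     unsigned int len)
{
	d->payload = data;
	/* release pairs with the driver's acquire, like the DMA barrier */
	atomic_store_explicit(&d->length, len, memory_order_release);
}

static int driver_poll(struct desc *d)
{
	unsigned int len = atomic_load_explicit(&d->length,
						memory_order_acquire);
	if (!len)
		return -1;		/* descriptor not done yet */
	return (int)d->payload;		/* safe: ordered after the length read */
}

int main(void)
{
	struct desc d = { 0 };

	device_writeback(&d, 42, 64);
	printf("payload=%d\n", driver_poll(&d));
	return 0;
}
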
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-/* must be called with local_bh_disable()d */
-static int ixgbe_low_latency_recv(struct napi_struct *napi)
-{
-       struct ixgbe_q_vector *q_vector =
-                       container_of(napi, struct ixgbe_q_vector, napi);
-       struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct ixgbe_ring  *ring;
-       int found = 0;
-
-       if (test_bit(__IXGBE_DOWN, &adapter->state))
-               return LL_FLUSH_FAILED;
-
-       if (!ixgbe_qv_lock_poll(q_vector))
-               return LL_FLUSH_BUSY;
-
-       ixgbe_for_each_ring(ring, q_vector->rx) {
-               found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef BP_EXTENDED_STATS
-               if (found)
-                       ring->stats.cleaned += found;
-               else
-                       ring->stats.misses++;
-#endif
-               if (found)
-                       break;
-       }
-
-       ixgbe_qv_unlock_poll(q_vector);
-
-       return found;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 /**
  * ixgbe_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -2447,6 +2485,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr = adapter->interrupt_event;
+       s32 rc;
 
        if (test_bit(__IXGBE_DOWN, &adapter->state))
                return;
@@ -2485,6 +2524,12 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
                        return;
 
                break;
+       case IXGBE_DEV_ID_X550EM_A_1G_T:
+       case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+               rc = hw->phy.ops.check_overtemp(hw);
+               if (rc != IXGBE_ERR_OVERTEMP)
+                       return;
+               break;
        default:
                if (adapter->hw.mac.type >= ixgbe_mac_X540)
                        return;
@@ -2531,6 +2576,18 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
                        return;
                }
                return;
+       case ixgbe_mac_x550em_a:
+               if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
+                       adapter->interrupt_event = eicr;
+                       adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
+                       ixgbe_service_event_schedule(adapter);
+                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
+                                       IXGBE_EICR_GPI_SDP0_X550EM_a);
+                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
+                                       IXGBE_EICR_GPI_SDP0_X550EM_a);
+               }
+               return;
+       case ixgbe_mac_X550:
        case ixgbe_mac_X540:
                if (!(eicr & IXGBE_EICR_TS))
                        return;
@@ -2856,8 +2913,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                        clean_complete = false;
        }
 
-       /* Exit if we are called by netpoll or busy polling is active */
-       if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
+       /* Exit if we are called by netpoll */
+       if (budget <= 0)
                return budget;
 
        /* attempt to distribute budget to each queue fairly, but don't allow
@@ -2876,7 +2933,6 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                        clean_complete = false;
        }
 
-       ixgbe_qv_unlock_napi(q_vector);
        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
                return budget;
@@ -3214,6 +3270,10 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 
        clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
 
+       /* reinitialize tx_buffer_info */
+       memset(ring->tx_buffer_info, 0,
+              sizeof(struct ixgbe_tx_buffer) * ring->count);
+
        /* enable queue */
        IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);
 
@@ -3384,7 +3444,10 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
        srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
 
        /* configure the packet buffer length */
-       srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+       if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state))
+               srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+       else
+               srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 
        /* configure descriptor type */
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
@@ -3685,6 +3748,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *ring)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       union ixgbe_adv_rx_desc *rx_desc;
        u64 rdba = ring->dma;
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;
@@ -3717,8 +3781,27 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
                 */
                rxdctl &= ~0x3FFFFF;
                rxdctl |=  0x080420;
+#if (PAGE_SIZE < 8192)
+       } else {
+               rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
+                           IXGBE_RXDCTL_RLPML_EN);
+
+               /* Limit the maximum frame size so we don't overrun the skb */
+               if (ring_uses_build_skb(ring) &&
+                   !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+                       rxdctl |= IXGBE_MAX_FRAME_BUILD_SKB |
+                                 IXGBE_RXDCTL_RLPML_EN;
+#endif
        }
 
+       /* initialize rx_buffer_info */
+       memset(ring->rx_buffer_info, 0,
+              sizeof(struct ixgbe_rx_buffer) * ring->count);
+
+       /* initialize Rx descriptor 0 */
+       rx_desc = IXGBE_RX_DESC(ring, 0);
+       rx_desc->wb.upper.length = 0;
+
        /* enable receive descriptor ring */
        rxdctl |= IXGBE_RXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
@@ -3855,10 +3938,30 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
         */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                rx_ring = adapter->rx_ring[i];
+
+               clear_ring_rsc_enabled(rx_ring);
+               clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+               clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
                if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
                        set_ring_rsc_enabled(rx_ring);
-               else
-                       clear_ring_rsc_enabled(rx_ring);
+
+               if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
+                       set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+
+               clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+               if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+                       continue;
+
+               set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
+
+#if (PAGE_SIZE < 8192)
+               if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+                       set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+
+               if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))
+                       set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+#endif
        }
 }
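
The per-ring flag selection above reduces to a small decision tree: FCoE
always forces the 3K buffer, the legacy flag opts out of build_skb entirely,
and on PAGE_SIZE < 8192 systems RSC or frames larger than a standard
Ethernet frame (ETH_FRAME_LEN + ETH_FCS_LEN) also force 3K. A compact,
purely illustrative sketch of that logic:

#include <stdbool.h>
#include <stdio.h>

static void pick_rx_mode(bool fcoe, bool legacy, bool rsc,
			 unsigned int max_frame,
			 bool *use_3k, bool *build_skb)
{
	*use_3k = fcoe;			/* FCoE needs the larger buffer */
	*build_skb = false;
	if (legacy)
		return;			/* IXGBE_FLAG2_RX_LEGACY path */
	*build_skb = true;
	if (rsc || max_frame > 1514 + 4)
		*use_3k = true;		/* PAGE_SIZE < 8192 case */
}

int main(void)
{
	bool use_3k, build_skb;

	pick_rx_mode(false, false, false, 9000, &use_3k, &build_skb);
	printf("use_3k=%d build_skb=%d\n", use_3k, build_skb);
	return 0;
}
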
 
@@ -4559,23 +4662,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 {
        int q_idx;
 
-       for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
-               ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
+       for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
                napi_enable(&adapter->q_vector[q_idx]->napi);
-       }
 }
 
 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 {
        int q_idx;
 
-       for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
+       for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
                napi_disable(&adapter->q_vector[q_idx]->napi);
-               while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
-                       pr_info("QV %d locked\n", q_idx);
-                       usleep_range(1000, 20000);
-               }
-       }
 }
 
 static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
@@ -4879,45 +4975,47 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
  **/
 static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 {
-       struct device *dev = rx_ring->dev;
-       unsigned long size;
-       u16 i;
-
-       /* ring already cleared, nothing to do */
-       if (!rx_ring->rx_buffer_info)
-               return;
+       u16 i = rx_ring->next_to_clean;
+       struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
 
        /* Free all the Rx ring sk_buffs */
-       for (i = 0; i < rx_ring->count; i++) {
-               struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
-
+       while (i != rx_ring->next_to_alloc) {
                if (rx_buffer->skb) {
                        struct sk_buff *skb = rx_buffer->skb;
                        if (IXGBE_CB(skb)->page_released)
-                               dma_unmap_page(dev,
-                                              IXGBE_CB(skb)->dma,
-                                              ixgbe_rx_bufsz(rx_ring),
-                                              DMA_FROM_DEVICE);
+                               dma_unmap_page_attrs(rx_ring->dev,
+                                                    IXGBE_CB(skb)->dma,
+                                                    ixgbe_rx_pg_size(rx_ring),
+                                                    DMA_FROM_DEVICE,
+                                                    IXGBE_RX_DMA_ATTR);
                        dev_kfree_skb(skb);
-                       rx_buffer->skb = NULL;
                }
 
-               if (!rx_buffer->page)
-                       continue;
+               /* Invalidate cache lines that may have been written to by
+                * the device so that we avoid corrupting memory.
+                */
+               dma_sync_single_range_for_cpu(rx_ring->dev,
+                                             rx_buffer->dma,
+                                             rx_buffer->page_offset,
+                                             ixgbe_rx_bufsz(rx_ring),
+                                             DMA_FROM_DEVICE);
 
-               dma_unmap_page(dev, rx_buffer->dma,
-                              ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
-               __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
+               /* free resources associated with mapping */
+               dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+                                    ixgbe_rx_pg_size(rx_ring),
+                                    DMA_FROM_DEVICE,
+                                    IXGBE_RX_DMA_ATTR);
+               __page_frag_cache_drain(rx_buffer->page,
+                                       rx_buffer->pagecnt_bias);
 
-               rx_buffer->page = NULL;
+               i++;
+               rx_buffer++;
+               if (i == rx_ring->count) {
+                       i = 0;
+                       rx_buffer = rx_ring->rx_buffer_info;
+               }
        }
 
-       size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
-       memset(rx_ring->rx_buffer_info, 0, size);
-
-       /* Zero out the descriptor ring */
-       memset(rx_ring->desc, 0, rx_ring->size);
-
        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
@@ -5294,6 +5392,8 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
 
        while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
                usleep_range(1000, 2000);
+       if (adapter->hw.phy.type == ixgbe_phy_fw)
+               ixgbe_watchdog_link_is_down(adapter);
        ixgbe_down(adapter);
        /*
         * If SR-IOV enabled then wait a bit before bringing the adapter
@@ -5384,28 +5484,57 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
 {
-       struct ixgbe_tx_buffer *tx_buffer_info;
-       unsigned long size;
-       u16 i;
+       u16 i = tx_ring->next_to_clean;
+       struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
 
-       /* ring already cleared, nothing to do */
-       if (!tx_ring->tx_buffer_info)
-               return;
+       while (i != tx_ring->next_to_use) {
+               union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
 
-       /* Free all the Tx ring sk_buffs */
-       for (i = 0; i < tx_ring->count; i++) {
-               tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-       }
+               /* Free all the Tx ring sk_buffs */
+               dev_kfree_skb_any(tx_buffer->skb);
 
-       netdev_tx_reset_queue(txring_txq(tx_ring));
+               /* unmap skb header data */
+               dma_unmap_single(tx_ring->dev,
+                                dma_unmap_addr(tx_buffer, dma),
+                                dma_unmap_len(tx_buffer, len),
+                                DMA_TO_DEVICE);
 
-       size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
-       memset(tx_ring->tx_buffer_info, 0, size);
+               /* check for eop_desc to determine the end of the packet */
+               eop_desc = tx_buffer->next_to_watch;
+               tx_desc = IXGBE_TX_DESC(tx_ring, i);
+
+               /* unmap remaining buffers */
+               while (tx_desc != eop_desc) {
+                       tx_buffer++;
+                       tx_desc++;
+                       i++;
+                       if (unlikely(i == tx_ring->count)) {
+                               i = 0;
+                               tx_buffer = tx_ring->tx_buffer_info;
+                               tx_desc = IXGBE_TX_DESC(tx_ring, 0);
+                       }
+
+                       /* unmap any remaining paged data */
+                       if (dma_unmap_len(tx_buffer, len))
+                               dma_unmap_page(tx_ring->dev,
+                                              dma_unmap_addr(tx_buffer, dma),
+                                              dma_unmap_len(tx_buffer, len),
+                                              DMA_TO_DEVICE);
+               }
 
-       /* Zero out the descriptor ring */
-       memset(tx_ring->desc, 0, tx_ring->size);
+               /* move us one more past the eop_desc for start of next pkt */
+               tx_buffer++;
+               i++;
+               if (unlikely(i == tx_ring->count)) {
+                       i = 0;
+                       tx_buffer = tx_ring->tx_buffer_info;
+               }
+       }
+
+       /* reset BQL for queue */
+       netdev_tx_reset_queue(txring_txq(tx_ring));
 
+       /* reset next_to_use and next_to_clean */
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
 }
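
Both ring cleanups above use the same circular walk: the index and the
element pointer advance together and wrap at ring->count, so only the live
span between next_to_clean and next_to_use is visited instead of the whole
ring. A standalone sketch with illustrative values:

#include <stdio.h>

#define RING_COUNT 8

int main(void)
{
	int buf[RING_COUNT] = { 0 };
	unsigned int i = 6;		/* next_to_clean */
	unsigned int next_to_use = 2;	/* producer wrapped past the end */
	int *entry = &buf[i];

	while (i != next_to_use) {
		*entry = 0;		/* "release" this ring entry */
		entry++;
		i++;
		if (i == RING_COUNT) {	/* wrap both index and pointer */
			i = 0;
			entry = buf;
		}
	}
	printf("cleaned up to index %u\n", i);
	return 0;
}
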
@@ -5553,6 +5682,31 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        ixgbe_clean_all_rx_rings(adapter);
 }
 
+/**
+ * ixgbe_set_eee_capable - helper function to determine EEE support on X550
+ * @adapter: board private structure
+ */
+static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_X550EM_A_1G_T:
+       case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+               if (!hw->phy.eee_speeds_supported)
+                       break;
+               adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
+               if (!hw->phy.eee_speeds_advertised)
+                       break;
+               adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
+               break;
+       default:
+               adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
+               adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
+               break;
+       }
+}
+
 /**
  * ixgbe_tx_timeout - Respond to a Tx Hang
  * @netdev: network interface device structure
@@ -5717,6 +5871,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
                break;
        case ixgbe_mac_x550em_a:
                adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
+               switch (hw->device_id) {
+               case IXGBE_DEV_ID_X550EM_A_1G_T:
+               case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+                       adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+                       break;
+               default:
+                       break;
+               }
        /* fall through */
        case ixgbe_mac_X550EM_x:
 #ifdef CONFIG_IXGBE_DCB
@@ -5730,6 +5892,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 #endif /* IXGBE_FCOE */
        /* Fall Through */
        case ixgbe_mac_X550:
+               if (hw->mac.type == ixgbe_mac_X550)
+                       adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
 #ifdef CONFIG_IXGBE_DCA
                adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
 #endif
@@ -5816,9 +5980,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
        if (tx_ring->q_vector)
                ring_node = tx_ring->q_vector->numa_node;
 
-       tx_ring->tx_buffer_info = vzalloc_node(size, ring_node);
+       tx_ring->tx_buffer_info = vmalloc_node(size, ring_node);
        if (!tx_ring->tx_buffer_info)
-               tx_ring->tx_buffer_info = vzalloc(size);
+               tx_ring->tx_buffer_info = vmalloc(size);
        if (!tx_ring->tx_buffer_info)
                goto err;
 
@@ -5900,9 +6064,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
        if (rx_ring->q_vector)
                ring_node = rx_ring->q_vector->numa_node;
 
-       rx_ring->rx_buffer_info = vzalloc_node(size, ring_node);
+       rx_ring->rx_buffer_info = vmalloc_node(size, ring_node);
        if (!rx_ring->rx_buffer_info)
-               rx_ring->rx_buffer_info = vzalloc(size);
+               rx_ring->rx_buffer_info = vmalloc(size);
        if (!rx_ring->rx_buffer_info)
                goto err;
 
@@ -6200,7 +6364,8 @@ int ixgbe_close(struct net_device *netdev)
 
        ixgbe_ptp_stop(adapter);
 
-       ixgbe_close_suspend(adapter);
+       if (netif_device_present(netdev))
+               ixgbe_close_suspend(adapter);
 
        ixgbe_fdir_filter_exit(adapter);
 
@@ -6245,14 +6410,12 @@ static int ixgbe_resume(struct pci_dev *pdev)
        if (!err && netif_running(netdev))
                err = ixgbe_open(netdev);
 
-       rtnl_unlock();
 
-       if (err)
-               return err;
-
-       netif_device_attach(netdev);
+       if (!err)
+               netif_device_attach(netdev);
+       rtnl_unlock();
 
-       return 0;
+       return err;
 }
 #endif /* CONFIG_PM */
 
@@ -6267,14 +6430,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
        int retval = 0;
 #endif
 
+       rtnl_lock();
        netif_device_detach(netdev);
 
-       rtnl_lock();
        if (netif_running(netdev))
                ixgbe_close_suspend(adapter);
-       rtnl_unlock();
 
        ixgbe_clear_interrupt_scheme(adapter);
+       rtnl_unlock();
 
 #ifdef CONFIG_PM
        retval = pci_save_state(pdev);
@@ -6808,6 +6971,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
        case IXGBE_LINK_SPEED_100_FULL:
                speed_str = "100 Mbps";
                break;
+       case IXGBE_LINK_SPEED_10_FULL:
+               speed_str = "10 Mbps";
+               break;
        default:
                speed_str = "unknown speed";
                break;
@@ -7615,18 +7781,32 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
        return;
 dma_error:
        dev_err(tx_ring->dev, "TX DMA map failed\n");
+       tx_buffer = &tx_ring->tx_buffer_info[i];
 
        /* clear dma mappings for failed tx_buffer_info map */
-       for (;;) {
+       while (tx_buffer != first) {
+               if (dma_unmap_len(tx_buffer, len))
+                       dma_unmap_page(tx_ring->dev,
+                                      dma_unmap_addr(tx_buffer, dma),
+                                      dma_unmap_len(tx_buffer, len),
+                                      DMA_TO_DEVICE);
+               dma_unmap_len_set(tx_buffer, len, 0);
+
+               if (i-- == 0)
+                       i += tx_ring->count;
                tx_buffer = &tx_ring->tx_buffer_info[i];
-               ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
-               if (tx_buffer == first)
-                       break;
-               if (i == 0)
-                       i = tx_ring->count;
-               i--;
        }
 
+       if (dma_unmap_len(tx_buffer, len))
+               dma_unmap_single(tx_ring->dev,
+                                dma_unmap_addr(tx_buffer, dma),
+                                dma_unmap_len(tx_buffer, len),
+                                DMA_TO_DEVICE);
+       dma_unmap_len_set(tx_buffer, len, 0);
+
+       dev_kfree_skb_any(first->skb);
+       first->skb = NULL;
+
        tx_ring->next_to_use = i;
 }
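
The unwind above walks backward through the ring; "if (i-- == 0)
i += tx_ring->count;" folds the decrement and the wrap test into a single
expression. Since i is a u16, decrementing zero wraps around and the
correction lands on count - 1. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned short i = 2;		/* mirrors the driver's u16 index */
	const unsigned short count = 8;	/* ring size */
	int step;

	for (step = 0; step < 5; step++) {
		if (i-- == 0)
			i += count;	/* 0 wraps to count - 1 */
		printf("i=%u\n", (unsigned int)i);	/* prints 1 0 7 6 5 */
	}
	return 0;
}
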
 
@@ -8111,8 +8291,9 @@ static void ixgbe_netpoll(struct net_device *netdev)
 }
 
 #endif
-static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
-                                                  struct rtnl_link_stats64 *stats)
+
+static void ixgbe_get_stats64(struct net_device *netdev,
+                             struct rtnl_link_stats64 *stats)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        int i;
@@ -8150,13 +8331,13 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
                }
        }
        rcu_read_unlock();
+
        /* following stats updated by ixgbe_watchdog_task() */
        stats->multicast        = netdev->stats.multicast;
        stats->rx_errors        = netdev->stats.rx_errors;
        stats->rx_length_errors = netdev->stats.rx_length_errors;
        stats->rx_crc_errors    = netdev->stats.rx_crc_errors;
        stats->rx_missed_errors = netdev->stats.rx_missed_errors;
-       return stats;
 }
 
 #ifdef CONFIG_IXGBE_DCB
@@ -9290,9 +9471,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgbe_netpoll,
 #endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       .ndo_busy_poll          = ixgbe_low_latency_recv,
-#endif
 #ifdef IXGBE_FCOE
        .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
        .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
@@ -9596,6 +9774,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw->phy.reset_if_overtemp = true;
        err = hw->mac.ops.reset_hw(hw);
        hw->phy.reset_if_overtemp = false;
+       ixgbe_set_eee_capable(adapter);
        if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
                err = 0;
        } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
@@ -9673,7 +9852,7 @@ skip_sriov:
 
 #ifdef CONFIG_IXGBE_DCB
        if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
-               netdev->dcbnl_ops = &dcbnl_ops;
+               netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
 #endif
 
 #ifdef IXGBE_FCOE
@@ -9833,8 +10012,9 @@ skip_sriov:
         * since os does not support feature
         */
        if (hw->mac.ops.set_fw_drv_ver)
-               hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
-                                          0xFF);
+               hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
+                                          sizeof(ixgbe_driver_version) - 1,
+                                          ixgbe_driver_version);
 
        /* add san mac addr to netdev */
        ixgbe_add_sanmac_netdev(netdev);
@@ -10082,7 +10262,7 @@ skip_bad_vf_detection:
        }
 
        if (netif_running(netdev))
-               ixgbe_down(adapter);
+               ixgbe_close_suspend(adapter);
 
        if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
                pci_disable_device(pdev);
@@ -10152,10 +10332,12 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
        }
 
 #endif
+       rtnl_lock();
        if (netif_running(netdev))
-               ixgbe_up(adapter);
+               ixgbe_open(netdev);
 
        netif_device_attach(netdev);
+       rtnl_unlock();
 }
 
 static const struct pci_error_handlers ixgbe_err_handler = {
index 01c2667c0f922321e808c7dba698916ee727a50b..811cb4f64a5bd60f102886204a1070d3b40298f3 100644 (file)
@@ -74,6 +74,7 @@ enum ixgbe_pfvf_api_rev {
        ixgbe_mbox_api_20,      /* API version 2.0, solaris Phase1 VF driver */
        ixgbe_mbox_api_11,      /* API version 1.1, linux/freebsd VF driver */
        ixgbe_mbox_api_12,      /* API version 1.2, linux/freebsd VF driver */
+       ixgbe_mbox_api_13,      /* API version 1.3, linux/freebsd VF driver */
        /* This value should always be last */
        ixgbe_mbox_api_unknown, /* indicates that API version is not known */
 };
index 3b8362085f57b15dba5b9ff5e8c61bf41bec3713..e55b2602f37166d7eea6442fa3bc5797e97d470c 100644 (file)
@@ -113,7 +113,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr,
                                        u16 reg, u16 *val, bool lock)
 {
        u32 swfw_mask = hw->phy.phy_semaphore_mask;
-       int max_retry = 10;
+       int max_retry = 3;
        int retry = 0;
        u8 csum_byte;
        u8 high_bits;
@@ -452,10 +452,27 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
         */
        for (i = 0; i < 30; i++) {
                msleep(100);
-               hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &ctrl);
-               if (!(ctrl & MDIO_CTRL1_RESET)) {
-                       udelay(2);
-                       break;
+               if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
+                       status = hw->phy.ops.read_reg(hw,
+                                                 IXGBE_MDIO_TX_VENDOR_ALARMS_3,
+                                                 MDIO_MMD_PMAPMD, &ctrl);
+                       if (status)
+                               return status;
+
+                       if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
+                               udelay(2);
+                               break;
+                       }
+               } else {
+                       status = hw->phy.ops.read_reg(hw, MDIO_CTRL1,
+                                                     MDIO_MMD_PHYXS, &ctrl);
+                       if (status)
+                               return status;
+
+                       if (!(ctrl & MDIO_CTRL1_RESET)) {
+                               udelay(2);
+                               break;
+                       }
                }
        }
 
@@ -751,9 +768,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
                                       ixgbe_link_speed speed,
                                       bool autoneg_wait_to_complete)
 {
-
-       /*
-        * Clear autoneg_advertised and set new values based on input link
+       /* Clear autoneg_advertised and set new values based on input link
         * speed.
         */
        hw->phy.autoneg_advertised = 0;
@@ -761,12 +776,21 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
        if (speed & IXGBE_LINK_SPEED_10GB_FULL)
                hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
 
+       if (speed & IXGBE_LINK_SPEED_5GB_FULL)
+               hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
+
+       if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
+               hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
        if (speed & IXGBE_LINK_SPEED_1GB_FULL)
                hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 
        if (speed & IXGBE_LINK_SPEED_100_FULL)
                hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
 
+       if (speed & IXGBE_LINK_SPEED_10_FULL)
+               hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
+
        /* Setup link based on the new speed settings */
        hw->phy.ops.setup_link(hw);
 
@@ -959,40 +983,6 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
        return 0;
 }
 
-/**
- *  ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
- *  @hw: pointer to hardware structure
- *  @firmware_version: pointer to the PHY Firmware Version
- **/
-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
-                                      u16 *firmware_version)
-{
-       s32 status;
-
-       status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
-                                     MDIO_MMD_VEND1,
-                                     firmware_version);
-
-       return status;
-}
-
-/**
- *  ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
- *  @hw: pointer to hardware structure
- *  @firmware_version: pointer to the PHY Firmware Version
- **/
-s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
-                                          u16 *firmware_version)
-{
-       s32 status;
-
-       status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
-                                     MDIO_MMD_VEND1,
-                                     firmware_version);
-
-       return status;
-}
-
 /**
  *  ixgbe_reset_phy_nl - Performs a PHY reset
  *  @hw: pointer to hardware structure
@@ -1738,6 +1728,8 @@ static s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset,
        u32 swfw_mask = hw->phy.phy_semaphore_mask;
        bool nack = true;
 
+       if (hw->mac.type >= ixgbe_mac_X550)
+               max_retry = 3;
        if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr))
                max_retry = IXGBE_SFP_DETECT_RETRIES;
 
index ecf05f838fc52a19bdaee01456a6e948af075b5a..5aa2c3cf7aecfa59de40a0eafd1f8e6b2a557bc7 100644 (file)
@@ -168,10 +168,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
                             ixgbe_link_speed *speed,
                             bool *link_up);
 s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
-s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
-                                      u16 *firmware_version);
-s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
-                                          u16 *firmware_version);
 
 s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
 s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
index 1efb404431e95bb97752935b3061aca852133820..ef0635e0918c239f066362f5c33eec5beb38a17d 100644 (file)
@@ -858,14 +858,14 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
                tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
-               adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
-                                   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+               adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+                                  IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
                tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
-               adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
-                                   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+               adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+                                  IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -879,8 +879,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
                tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
                is_l2 = true;
                config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
-               adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
-                                   IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+               adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+                                  IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_ALL:
index 7e5d9850e4b264b08a3ce43b0e589734f8cb9afe..044cb44747cfe1909f1d5bbc399f1d7dabe80409 100644 (file)
@@ -512,6 +512,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
                switch (adapter->vfinfo[vf].vf_api) {
                case ixgbe_mbox_api_11:
                case ixgbe_mbox_api_12:
+               case ixgbe_mbox_api_13:
                        /*
                         * Version 1.1 supports jumbo frames on VFs if PF has
                         * jumbo frames enabled which means legacy VFs are
@@ -934,7 +935,8 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
                    IXGBE_VT_MSGINFO_SHIFT;
        int err;
 
-       if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
+       if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
+           index > 0) {
                e_warn(drv,
                       "VF %d requested MACVLAN filter but is administratively denied\n",
                       vf);
@@ -978,6 +980,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
        case ixgbe_mbox_api_10:
        case ixgbe_mbox_api_11:
        case ixgbe_mbox_api_12:
+       case ixgbe_mbox_api_13:
                adapter->vfinfo[vf].vf_api = api;
                return 0;
        default:
@@ -1002,6 +1005,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
        case ixgbe_mbox_api_20:
        case ixgbe_mbox_api_11:
        case ixgbe_mbox_api_12:
+       case ixgbe_mbox_api_13:
                break;
        default:
                return -1;
@@ -1041,8 +1045,13 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
                return -EPERM;
 
        /* verify the PF is supporting the correct API */
-       if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12)
+       switch (adapter->vfinfo[vf].vf_api) {
+       case ixgbe_mbox_api_13:
+       case ixgbe_mbox_api_12:
+               break;
+       default:
                return -EOPNOTSUPP;
+       }
 
        /* This mailbox command is supported (required) only for 82599 and x540
         * VFs which support up to 4 RSS queues. Therefore we will compress the
@@ -1068,8 +1077,13 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter,
                return -EPERM;
 
        /* verify the PF is supporting the correct API */
-       if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12)
+       switch (adapter->vfinfo[vf].vf_api) {
+       case ixgbe_mbox_api_13:
+       case ixgbe_mbox_api_12:
+               break;
+       default:
                return -EOPNOTSUPP;
+       }
 
        memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key));
 
@@ -1081,11 +1095,16 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
 {
        struct ixgbe_hw *hw = &adapter->hw;
        int xcast_mode = msgbuf[1];
-       u32 vmolr, disable, enable;
+       u32 vmolr, fctrl, disable, enable;
 
        /* verify the PF is supporting the correct APIs */
        switch (adapter->vfinfo[vf].vf_api) {
        case ixgbe_mbox_api_12:
+               /* promiscuous mode was introduced in mbox API version 1.3 */
+               if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
+                       return -EOPNOTSUPP;
+               /* fall through */
+       case ixgbe_mbox_api_13:
                break;
        default:
                return -EOPNOTSUPP;
@@ -1101,17 +1120,34 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
 
        switch (xcast_mode) {
        case IXGBEVF_XCAST_MODE_NONE:
-               disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
+               disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
+                         IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
                enable = 0;
                break;
        case IXGBEVF_XCAST_MODE_MULTI:
-               disable = IXGBE_VMOLR_MPE;
+               disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
                enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
                break;
        case IXGBEVF_XCAST_MODE_ALLMULTI:
-               disable = 0;
+               disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
                enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
                break;
+       case IXGBEVF_XCAST_MODE_PROMISC:
+               if (hw->mac.type <= ixgbe_mac_82599EB)
+                       return -EOPNOTSUPP;
+
+               fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+               if (!(fctrl & IXGBE_FCTRL_UPE)) {
+                       /* VF promisc requires PF in promisc */
+                       e_warn(drv,
+                              "Enabling VF promisc requires PF in promisc\n");
+                       return -EPERM;
+               }
+
+               disable = 0;
+               enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
+                        IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
+               break;
        default:
                return -EOPNOTSUPP;
        }
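
Each xcast mode above is expressed as a disable/enable mask pair applied
read-modify-write to VMOLR: first clear every bit the mode forbids, then set
the bits it requires, so a mode switch never leaves stale bits behind. A
minimal sketch; UPE, VPE and ROMPE match the defines added in this diff,
while the BAM value is an assumption for illustration:

#include <stdio.h>

#define VMOLR_UPE   0x00400000u	/* unicast promiscuous */
#define VMOLR_VPE   0x00800000u	/* VLAN promiscuous */
#define VMOLR_ROMPE 0x02000000u	/* accept packets in MTA table */
#define VMOLR_BAM   0x08000000u	/* assumed: accept broadcast */

int main(void)
{
	/* ring was in promiscuous mode; switch to XCAST_MODE_MULTI */
	unsigned int vmolr = VMOLR_UPE | VMOLR_VPE | VMOLR_ROMPE;
	unsigned int disable = VMOLR_UPE | VMOLR_VPE;
	unsigned int enable = VMOLR_BAM | VMOLR_ROMPE;

	vmolr &= ~disable;
	vmolr |= enable;
	printf("vmolr=0x%08x\n", vmolr);
	return 0;
}
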
index cf21273db20197a13157051704180a7107fe36dc..1d07f2ead914ccd361ed5084693e968114dd1514 100644 (file)
@@ -92,6 +92,8 @@
 #define IXGBE_DEV_ID_X550EM_A_SGMII_L  0x15C7
 #define IXGBE_DEV_ID_X550EM_A_10G_T    0x15C8
 #define IXGBE_DEV_ID_X550EM_A_SFP      0x15CE
+#define IXGBE_DEV_ID_X550EM_A_1G_T     0x15E4
+#define IXGBE_DEV_ID_X550EM_A_1G_T_L   0x15E5
 
 /* VF Device IDs */
 #define IXGBE_DEV_ID_82599_VF          0x10ED
@@ -1499,6 +1501,8 @@ enum {
 #define IXGBE_VT_CTL_POOL_MASK  (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
 
 /* VMOLR bitmasks */
+#define IXGBE_VMOLR_UPE                0x00400000 /* unicast promiscuous */
+#define IXGBE_VMOLR_VPE                0x00800000 /* VLAN promiscuous */
 #define IXGBE_VMOLR_AUPE        0x01000000 /* accept untagged packets */
 #define IXGBE_VMOLR_ROMPE       0x02000000 /* accept packets in MTA tbl */
 #define IXGBE_VMOLR_ROPE        0x04000000 /* accept packets in UC tbl */
@@ -1914,6 +1918,7 @@ enum {
 #define IXGBE_LINKS_SPEED_10G_82599 0x30000000
 #define IXGBE_LINKS_SPEED_1G_82599  0x20000000
 #define IXGBE_LINKS_SPEED_100_82599 0x10000000
+#define IXGBE_LINKS_SPEED_10_X550EM_A 0
 #define IXGBE_LINK_UP_TIME      90 /* 9.0 Seconds */
 #define IXGBE_AUTO_NEG_TIME     45 /* 4.5 Seconds */
 
@@ -2619,6 +2624,7 @@ enum ixgbe_fdir_pballoc_type {
 #define FW_CEM_UNUSED_VER              0x0
 #define FW_CEM_MAX_RETRIES             3
 #define FW_CEM_RESP_STATUS_SUCCESS     0x1
+#define FW_CEM_DRIVER_VERSION_SIZE     39 /* +9 would send 48 bytes to fw */
 #define FW_READ_SHADOW_RAM_CMD         0x31
 #define FW_READ_SHADOW_RAM_LEN         0x6
 #define FW_WRITE_SHADOW_RAM_CMD                0x33
@@ -2644,6 +2650,59 @@ enum ixgbe_fdir_pballoc_type {
 #define FW_INT_PHY_REQ_LEN             10
 #define FW_INT_PHY_REQ_READ            0
 #define FW_INT_PHY_REQ_WRITE           1
+#define FW_PHY_ACT_REQ_CMD             5
+#define FW_PHY_ACT_DATA_COUNT          4
+#define FW_PHY_ACT_REQ_LEN             (4 + 4 * FW_PHY_ACT_DATA_COUNT)
+#define FW_PHY_ACT_INIT_PHY            1
+#define FW_PHY_ACT_SETUP_LINK          2
+#define FW_PHY_ACT_LINK_SPEED_10       BIT(0)
+#define FW_PHY_ACT_LINK_SPEED_100      BIT(1)
+#define FW_PHY_ACT_LINK_SPEED_1G       BIT(2)
+#define FW_PHY_ACT_LINK_SPEED_2_5G     BIT(3)
+#define FW_PHY_ACT_LINK_SPEED_5G       BIT(4)
+#define FW_PHY_ACT_LINK_SPEED_10G      BIT(5)
+#define FW_PHY_ACT_LINK_SPEED_20G      BIT(6)
+#define FW_PHY_ACT_LINK_SPEED_25G      BIT(7)
+#define FW_PHY_ACT_LINK_SPEED_40G      BIT(8)
+#define FW_PHY_ACT_LINK_SPEED_50G      BIT(9)
+#define FW_PHY_ACT_LINK_SPEED_100G     BIT(10)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3 << \
+                                         FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u
+#define FW_PHY_ACT_SETUP_LINK_LP       BIT(18)
+#define FW_PHY_ACT_SETUP_LINK_HP       BIT(19)
+#define FW_PHY_ACT_SETUP_LINK_EEE      BIT(20)
+#define FW_PHY_ACT_SETUP_LINK_AN       BIT(22)
+#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN BIT(0)
+#define FW_PHY_ACT_GET_LINK_INFO       3
+#define FW_PHY_ACT_GET_LINK_INFO_EEE   BIT(19)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_TX BIT(20)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_RX BIT(21)
+#define FW_PHY_ACT_GET_LINK_INFO_POWER BIT(22)
+#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE   BIT(24)
+#define FW_PHY_ACT_GET_LINK_INFO_TEMP  BIT(25)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX      BIT(28)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX      BIT(29)
+#define FW_PHY_ACT_FORCE_LINK_DOWN     4
+#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF BIT(0)
+#define FW_PHY_ACT_PHY_SW_RESET                5
+#define FW_PHY_ACT_PHY_HW_RESET                6
+#define FW_PHY_ACT_GET_PHY_INFO                7
+#define FW_PHY_ACT_UD_2                        0x1002
+#define FW_PHY_ACT_UD_2_10G_KR_EEE     BIT(6)
+#define FW_PHY_ACT_UD_2_10G_KX4_EEE    BIT(5)
+#define FW_PHY_ACT_UD_2_1G_KX_EEE      BIT(4)
+#define FW_PHY_ACT_UD_2_10G_T_EEE      BIT(3)
+#define FW_PHY_ACT_UD_2_1G_T_EEE       BIT(2)
+#define FW_PHY_ACT_UD_2_100M_TX_EEE    BIT(1)
+#define FW_PHY_ACT_RETRIES             50
+#define FW_PHY_INFO_SPEED_MASK         0xFFFu
+#define FW_PHY_INFO_ID_HI_MASK         0xFFFF0000u
+#define FW_PHY_INFO_ID_LO_MASK         0x0000FFFFu
 
 /* Host Interface Command Structures */
 struct ixgbe_hic_hdr {
@@ -2686,6 +2745,16 @@ struct ixgbe_hic_drv_info {
        u16 pad2; /* end spacing to ensure length is mult. of dword2 */
 };
 
+struct ixgbe_hic_drv_info2 {
+       struct ixgbe_hic_hdr hdr;
+       u8 port_num;
+       u8 ver_sub;
+       u8 ver_build;
+       u8 ver_min;
+       u8 ver_maj;
+       char driver_string[FW_CEM_DRIVER_VERSION_SIZE];
+};
+
 /* These need to be dword aligned */
 struct ixgbe_hic_read_shadow_ram {
        union ixgbe_hic_hdr2 hdr;
@@ -2734,6 +2803,19 @@ struct ixgbe_hic_internal_phy_resp {
        __be32 read_data;
 };
 
+struct ixgbe_hic_phy_activity_req {
+       struct ixgbe_hic_hdr hdr;
+       u8 port_number;
+       u8 pad;
+       __le16 activity_id;
+       __be32 data[FW_PHY_ACT_DATA_COUNT];
+};
+
+struct ixgbe_hic_phy_activity_resp {
+       struct ixgbe_hic_hdr hdr;
+       __be32 data[FW_PHY_ACT_DATA_COUNT];
+};
+
 /* Transmit Descriptor - Advanced */
 union ixgbe_adv_tx_desc {
        struct {
@@ -2849,6 +2931,7 @@ typedef u32 ixgbe_autoneg_advertised;
 /* Link speed */
 typedef u32 ixgbe_link_speed;
 #define IXGBE_LINK_SPEED_UNKNOWN       0
+#define IXGBE_LINK_SPEED_10_FULL       0x0002
 #define IXGBE_LINK_SPEED_100_FULL      0x0008
 #define IXGBE_LINK_SPEED_1GB_FULL      0x0020
 #define IXGBE_LINK_SPEED_2_5GB_FULL    0x0400
@@ -3064,6 +3147,7 @@ enum ixgbe_phy_type {
        ixgbe_phy_qsfp_unknown,
        ixgbe_phy_sfp_unsupported,
        ixgbe_phy_sgmii,
+       ixgbe_phy_fw,
        ixgbe_phy_generic
 };
 
@@ -3362,7 +3446,8 @@ struct ixgbe_mac_operations {
        void (*fc_autoneg)(struct ixgbe_hw *);
 
        /* Manageability interface */
-       s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
+       s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
+                             const char *);
        s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
        s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
        void (*disable_rx)(struct ixgbe_hw *hw);
@@ -3392,7 +3477,6 @@ struct ixgbe_phy_operations {
        s32 (*setup_internal_link)(struct ixgbe_hw *);
        s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
        s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
-       s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
        s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
        s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
        s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *);
@@ -3478,6 +3562,8 @@ struct ixgbe_phy_info {
        bool                            reset_disable;
        ixgbe_autoneg_advertised        autoneg_advertised;
        ixgbe_link_speed                speeds_supported;
+       ixgbe_link_speed                eee_speeds_supported;
+       ixgbe_link_speed                eee_speeds_advertised;
        enum ixgbe_smart_speed          smart_speed;
        bool                            smart_speed_active;
        bool                            multispeed_fiber;
index e2ff823ee202f36c1edd0cd5815209765976aaf4..84a467a8ed3d5f1aaab77b53dc158d3feba95c98 100644 (file)
@@ -780,8 +780,10 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
        ixgbe_link_speed speed;
        bool link_up;
 
-       /*
-        * Link should be up in order for the blink bit in the LED control
+       if (index > 3)
+               return IXGBE_ERR_PARAM;
+
+       /* Link should be up in order for the blink bit in the LED control
         * register to work. Force link and speed in the MAC if link is down.
         * This will be reversed when we stop the blinking.
         */
@@ -814,6 +816,9 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
        u32 macc_reg;
        u32 ledctl_reg;
 
+       if (index > 3)
+               return IXGBE_ERR_PARAM;
+
        /* Restore the LED to its default value. */
        ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
        ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
@@ -913,7 +918,6 @@ static const struct ixgbe_phy_operations phy_ops_X540 = {
        .write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic,
        .check_overtemp         = &ixgbe_tn_check_overtemp,
        .set_phy_power          = &ixgbe_set_copper_phy_power,
-       .get_firmware_version   = &ixgbe_get_phy_firmware_version_generic,
 };
 
 static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
index 11fb433eb924ef7f1771400db4c6e69c10354c6c..200f847fd8f31e58005ac72f3d90d4e97b11f96d 100644 (file)
@@ -63,6 +63,18 @@ static s32 ixgbe_get_invariants_X550_a(struct ixgbe_hw *hw)
        return 0;
 }
 
+static s32 ixgbe_get_invariants_X550_a_fw(struct ixgbe_hw *hw)
+{
+       struct ixgbe_phy_info *phy = &hw->phy;
+
+       /* Start with X540 invariants, since so similar */
+       ixgbe_get_invariants_X540(hw);
+
+       phy->ops.set_phy_power = NULL;
+
+       return 0;
+}
+
 /** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control
  *  @hw: pointer to hardware structure
  **/
@@ -402,6 +414,204 @@ ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw,
        return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false);
 }
 
+/**
+ * ixgbe_fw_phy_activity - Perform an activity on a PHY
+ * @hw: pointer to hardware structure
+ * @activity: activity to perform
+ * @data: Pointer to 4 32-bit words of data
+ */
+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+                         u32 (*data)[FW_PHY_ACT_DATA_COUNT])
+{
+       union {
+               struct ixgbe_hic_phy_activity_req cmd;
+               struct ixgbe_hic_phy_activity_resp rsp;
+       } hic;
+       u16 retries = FW_PHY_ACT_RETRIES;
+       s32 rc;
+       u32 i;
+
+       do {
+               memset(&hic, 0, sizeof(hic));
+               hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
+               hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
+               hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+               hic.cmd.port_number = hw->bus.lan_id;
+               hic.cmd.activity_id = cpu_to_le16(activity);
+               for (i = 0; i < ARRAY_SIZE(hic.cmd.data); ++i)
+                       hic.cmd.data[i] = cpu_to_be32((*data)[i]);
+
+               rc = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd),
+                                                 IXGBE_HI_COMMAND_TIMEOUT,
+                                                 true);
+               if (rc)
+                       return rc;
+               if (hic.rsp.hdr.cmd_or_resp.ret_status ==
+                   FW_CEM_RESP_STATUS_SUCCESS) {
+                       for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
+                               (*data)[i] = be32_to_cpu(hic.rsp.data[i]);
+                       return 0;
+               }
+               usleep_range(20, 30);
+               --retries;
+       } while (retries > 0);
+
+       return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+}
+
+static const struct {
+       u16 fw_speed;
+       ixgbe_link_speed phy_speed;
+} ixgbe_fw_map[] = {
+       { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
+       { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
+       { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
+       { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
+       { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
+       { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
+};
+
+/**
+ * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
+{
+       u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+       u16 phy_speeds;
+       u16 phy_id_lo;
+       s32 rc;
+       u16 i;
+
+       if (hw->phy.id)
+               return 0;
+
+       rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
+       if (rc)
+               return rc;
+
+       hw->phy.speeds_supported = 0;
+       phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
+       for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) {
+               if (phy_speeds & ixgbe_fw_map[i].fw_speed)
+                       hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
+       }
+
+       hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
+       phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
+       hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
+       hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
+       if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
+               return IXGBE_ERR_PHY_ADDR_INVALID;
+
+       hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+       hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
+                                      IXGBE_LINK_SPEED_1GB_FULL;
+       hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+       return 0;
+}
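
The id assembly above splits the firmware's two info words with masks: the
high word supplies the upper id bits, and the low word carries both the
remaining id bits and the revision, partitioned by IXGBE_PHY_REVISION_MASK.
The ID_HI/ID_LO masks below match this diff; the revision mask value is an
assumption for illustration:

#include <stdint.h>
#include <stdio.h>

#define ID_HI_MASK    0xFFFF0000u	/* FW_PHY_INFO_ID_HI_MASK */
#define ID_LO_MASK    0x0000FFFFu	/* FW_PHY_INFO_ID_LO_MASK */
#define REVISION_MASK 0xFFFFFFF0u	/* assumed IXGBE_PHY_REVISION_MASK */

int main(void)
{
	uint32_t info0 = 0x03A10000u;	/* sample firmware info words */
	uint32_t info1 = 0x0000B2C4u;
	uint32_t phy_id_lo = info1 & ID_LO_MASK;
	uint32_t id = (info0 & ID_HI_MASK) | (phy_id_lo & REVISION_MASK);

	printf("id=0x%08x revision=0x%x\n", (unsigned int)id,
	       (unsigned int)(phy_id_lo & ~REVISION_MASK));
	return 0;
}
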
+
+/**
+ * ixgbe_identify_phy_fw - Get PHY type based on firmware command
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
+{
+       if (hw->bus.lan_id)
+               hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+       else
+               hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+
+       hw->phy.type = ixgbe_phy_fw;
+       hw->phy.ops.read_reg = NULL;
+       hw->phy.ops.write_reg = NULL;
+       return ixgbe_get_phy_id_fw(hw);
+}
+
+/**
+ * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
+{
+       u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
+
+       setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
+       return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
+}
+
+/**
+ * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+{
+       u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
+       s32 rc;
+       u16 i;
+
+       if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+               return 0;
+
+       if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+               hw_err(hw, "rx_pause not valid in strict IEEE mode\n");
+               return IXGBE_ERR_INVALID_LINK_SETTINGS;
+       }
+
+       switch (hw->fc.requested_mode) {
+       case ixgbe_fc_full:
+               setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
+                           FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+               break;
+       case ixgbe_fc_rx_pause:
+               setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
+                           FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+               break;
+       case ixgbe_fc_tx_pause:
+               setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
+                           FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+               break;
+       default:
+               break;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(ixgbe_fw_map); ++i) {
+               if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
+                       setup[0] |= ixgbe_fw_map[i].fw_speed;
+       }
+       setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
+
+       if (hw->phy.eee_speeds_advertised)
+               setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
+
+       rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
+       if (rc)
+               return rc;
+       if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
+               return IXGBE_ERR_OVERTEMP;
+       return 0;
+}
+
+/**
+ * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ */
+static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
+{
+       if (hw->fc.requested_mode == ixgbe_fc_default)
+               hw->fc.requested_mode = ixgbe_fc_full;
+
+       return ixgbe_setup_fw_link(hw);
+}
+
 /** ixgbe_init_eeprom_params_X550 - Initialize EEPROM params
  *  @hw: pointer to hardware structure
  *
@@ -624,41 +834,6 @@ static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
        return status;
 }
 
-/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
- *  command assuming that the semaphore is already obtained.
- *  @hw: pointer to hardware structure
- *  @offset: offset of  word in the EEPROM to read
- *  @data: word read from the EEPROM
- *
- *  Reads a 16 bit word from the EEPROM using the hostif.
- **/
-static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
-                                         u16 *data)
-{
-       s32 status;
-       struct ixgbe_hic_read_shadow_ram buffer;
-
-       buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
-       buffer.hdr.req.buf_lenh = 0;
-       buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
-       buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
-
-       /* convert offset from words to bytes */
-       buffer.address = cpu_to_be32(offset * 2);
-       /* one word */
-       buffer.length = cpu_to_be16(sizeof(u16));
-
-       status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
-                                             IXGBE_HI_COMMAND_TIMEOUT, false);
-       if (status)
-               return status;
-
-       *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
-                                         FW_NVM_DATA_OFFSET);
-
-       return 0;
-}
-
 /** ixgbe_read_ee_hostif_buffer_X550 - Read EEPROM word(s) using hostif
  *  @hw: pointer to hardware structure
  *  @offset: offset of word in the EEPROM to read
@@ -670,6 +845,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
 static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
                                            u16 offset, u16 words, u16 *data)
 {
+       const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
        struct ixgbe_hic_read_shadow_ram buffer;
        u32 current_word = 0;
        u16 words_to_read;
@@ -677,7 +853,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
        u32 i;
 
        /* Take semaphore for the entire operation. */
-       status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+       status = hw->mac.ops.acquire_swfw_sync(hw, mask);
        if (status) {
                hw_dbg(hw, "EEPROM read buffer - semaphore failed\n");
                return status;
@@ -698,10 +874,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
                buffer.address = cpu_to_be32((offset + current_word) * 2);
                buffer.length = cpu_to_be16(words_to_read * 2);
 
-               status = ixgbe_host_interface_command(hw, &buffer,
-                                                     sizeof(buffer),
-                                                     IXGBE_HI_COMMAND_TIMEOUT,
-                                                     false);
+               status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
+                                           IXGBE_HI_COMMAND_TIMEOUT);
                if (status) {
                        hw_dbg(hw, "Host interface command failed\n");
                        goto out;
@@ -725,7 +899,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
        }
 
 out:
-       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+       hw->mac.ops.release_swfw_sync(hw, mask);
        return status;
 }
 
@@ -896,15 +1070,32 @@ static s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw)
  **/
 static s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data)
 {
-       s32 status = 0;
+       const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
+       struct ixgbe_hic_read_shadow_ram buffer;
+       s32 status;
 
-       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
-               status = ixgbe_read_ee_hostif_data_X550(hw, offset, data);
-               hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-       } else {
-               status = IXGBE_ERR_SWFW_SYNC;
+       buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
+       buffer.hdr.req.buf_lenh = 0;
+       buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
+       buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;
+
+       /* convert offset from words to bytes */
+       buffer.address = cpu_to_be32(offset * 2);
+       /* one word */
+       buffer.length = cpu_to_be16(sizeof(u16));
+
+       status = hw->mac.ops.acquire_swfw_sync(hw, mask);
+       if (status)
+               return status;
+
+       status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
+                                   IXGBE_HI_COMMAND_TIMEOUT);
+       if (!status) {
+               *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+                                                 FW_NVM_DATA_OFFSET);
        }
 
+       hw->mac.ops.release_swfw_sync(hw, mask);
        return status;
 }
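
Both EEPROM-read hunks above apply the same refactor: the caller now takes the SW_MNG and EEP semaphores together, once, and drives the command through ixgbe_hic_unlocked() rather than the self-locking ixgbe_host_interface_command(). A minimal sketch of the resulting shape; hic_locked() is a hypothetical name used only for illustration, not a function in this driver:

static s32 hic_locked(struct ixgbe_hw *hw, u32 *buf, u32 len)
{
        /* one semaphore acquisition covers the whole command */
        const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM;
        s32 status;

        status = hw->mac.ops.acquire_swfw_sync(hw, mask);
        if (status)
                return status;

        status = ixgbe_hic_unlocked(hw, buf, len, IXGBE_HI_COMMAND_TIMEOUT);

        hw->mac.ops.release_swfw_sync(hw, mask);
        return status;
}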
 
@@ -1768,6 +1959,125 @@ ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
        return rc;
 }
 
+/**
+ * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to set
+ * @autoneg_wait: true when waiting for autonegotiation to complete
+ */
+static s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+                               bool autoneg_wait)
+{
+       struct ixgbe_mac_info *mac = &hw->mac;
+       u32 lval, sval, flx_val;
+       s32 rc;
+
+       rc = mac->ops.read_iosf_sb_reg(hw,
+                                      IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+                                      IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
+       if (rc)
+               return rc;
+
+       lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+       lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+       lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
+       lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
+       lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+       rc = mac->ops.write_iosf_sb_reg(hw,
+                                       IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+                                       IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+       if (rc)
+               return rc;
+
+       rc = mac->ops.read_iosf_sb_reg(hw,
+                                      IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+                                      IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
+       if (rc)
+               return rc;
+
+       sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
+       sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
+       rc = mac->ops.write_iosf_sb_reg(hw,
+                                       IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+                                       IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
+       if (rc)
+               return rc;
+
+       rc = mac->ops.write_iosf_sb_reg(hw,
+                                       IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+                                       IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+       if (rc)
+               return rc;
+
+       rc = mac->ops.read_iosf_sb_reg(hw,
+                                   IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+                                   IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
+       if (rc)
+               return rc;
+
+       flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+       flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
+       flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+       flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+       flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+
+       rc = mac->ops.write_iosf_sb_reg(hw,
+                                   IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+                                   IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
+       if (rc)
+               return rc;
+
+       ixgbe_restart_an_internal_phy_x550em(hw);
+
+       return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
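
ixgbe_setup_sgmii_fw() above is a chain of read-modify-write cycles on KRM registers over the IOSF sideband. As a reading aid, each step reduces to the sketch below; krm_rmw() is a hypothetical helper for illustration, not part of this patch:

/* Sketch only: read a KRM register via the IOSF sideband, clear and set
 * the given bits, and write it back, propagating any sideband error.
 */
static s32 krm_rmw(struct ixgbe_hw *hw, u32 reg, u32 clear, u32 set)
{
        u32 val;
        s32 rc;

        rc = hw->mac.ops.read_iosf_sb_reg(hw, reg,
                                          IXGBE_SB_IOSF_TARGET_KR_PHY, &val);
        if (rc)
                return rc;

        val &= ~clear;
        val |= set;
        return hw->mac.ops.write_iosf_sb_reg(hw, reg,
                                             IXGBE_SB_IOSF_TARGET_KR_PHY, val);
}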
+/**
+ * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ */
+static void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+       u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+       ixgbe_link_speed speed;
+       bool link_up;
+
+       /* AN should have completed when the cable was plugged in.
+        * Look for reasons to bail out.  Bail out if:
+        * - FC autoneg is disabled, or
+        * - link is not up.
+        */
+       if (hw->fc.disable_fc_autoneg)
+               goto out;
+
+       hw->mac.ops.check_link(hw, &speed, &link_up, false);
+       if (!link_up)
+               goto out;
+
+       /* Check if auto-negotiation has completed */
+       status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
+       if (status || !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
+               status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+               goto out;
+       }
+
+       /* Negotiate the flow control */
+       status = ixgbe_negotiate_fc(hw, info[0], info[0],
+                                   FW_PHY_ACT_GET_LINK_INFO_FC_RX,
+                                   FW_PHY_ACT_GET_LINK_INFO_FC_TX,
+                                   FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
+                                   FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
+
+out:
+       if (!status) {
+               hw->fc.fc_was_autonegged = true;
+       } else {
+               hw->fc.fc_was_autonegged = false;
+               hw->fc.current_mode = hw->fc.requested_mode;
+       }
+}
+
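
ixgbe_negotiate_fc(), defined elsewhere in the driver, resolves the local and link-partner pause advertisements per the IEEE 802.3 clause 37 resolution table. A simplified sketch of that table, assuming one symmetric-pause and one asymmetric-direction ability bit per side:

/* Simplified illustration of clause 37 pause resolution; the driver's
 * ixgbe_negotiate_fc() applies the same table to register bits.
 */
enum fc_result { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_result resolve_fc(bool sym, bool asm_dir,
                                 bool lp_sym, bool lp_asm_dir)
{
        if (sym && lp_sym)
                return FC_FULL;         /* both ends pause both ways */
        if (!sym && asm_dir && lp_sym && lp_asm_dir)
                return FC_TX_PAUSE;     /* we send PAUSE but ignore it */
        if (sym && asm_dir && !lp_sym && lp_asm_dir)
                return FC_RX_PAUSE;     /* we honor PAUSE but never send it */
        return FC_NONE;
}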
 /** ixgbe_init_mac_link_ops_X550em_a - Init mac link function pointers
  *  @hw: pointer to hardware structure
  **/
@@ -1780,6 +2090,17 @@ static void ixgbe_init_mac_link_ops_X550em_a(struct ixgbe_hw *hw)
                mac->ops.setup_fc = NULL;
                mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
                break;
+       case ixgbe_media_type_copper:
+               if (hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T &&
+                   hw->device_id != IXGBE_DEV_ID_X550EM_A_1G_T_L) {
+                       mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
+                       break;
+               }
+               mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
+               mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
+               mac->ops.setup_link = ixgbe_setup_sgmii_fw;
+               mac->ops.check_link = ixgbe_check_mac_link_generic;
+               break;
        case ixgbe_media_type_backplane:
                mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
                mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
@@ -1827,7 +2148,7 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
                mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
                mac->ops.setup_fc = ixgbe_setup_fc_generic;
                mac->ops.check_link = ixgbe_check_link_t_X550em;
-               return;
+               break;
        case ixgbe_media_type_backplane:
                if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
                    hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
@@ -1870,6 +2191,12 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
                                              ixgbe_link_speed *speed,
                                              bool *autoneg)
 {
+       if (hw->phy.type == ixgbe_phy_fw) {
+               *autoneg = true;
+               *speed = hw->phy.speeds_supported;
+               return 0;
+       }
+
        /* SFP */
        if (hw->phy.media_type == ixgbe_media_type_fiber) {
                /* CS4227 SFP must not enable auto-negotiation */
@@ -2108,8 +2435,6 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
                return status;
 
        reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
-       reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ |
-                    IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC);
        reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR |
                     IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX);
 
@@ -2189,12 +2514,11 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
 /**
  * ixgbe_setup_kr_x550em - Configure the KR PHY
  * @hw: pointer to hardware structure
- *
- * Configures the integrated KR PHY for X550EM_x.
  **/
 static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
 {
-       if (hw->mac.type != ixgbe_mac_X550EM_x)
+       /* leave link alone for 2.5G */
+       if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
                return 0;
 
        return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
@@ -2356,6 +2680,62 @@ static s32 ixgbe_led_off_t_x550em(struct ixgbe_hw *hw, u32 led_idx)
        return 0;
 }
 
+/**
+ *  ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
+ *  @hw: pointer to the HW structure
+ *  @maj: driver version major number
+ *  @min: driver version minor number
+ *  @build: driver version build number
+ *  @sub: driver version sub build number
+ *  @len: length of driver_ver string
+ *  @driver_ver: driver string
+ *
+ *  Sends the driver version number to firmware through the manageability
+ *  block.  Returns 0 on success, IXGBE_ERR_SWFW_SYNC if the semaphore
+ *  cannot be acquired, or IXGBE_ERR_HOST_INTERFACE_COMMAND if the command
+ *  fails.
+ **/
+static s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+                                    u8 build, u8 sub, u16 len,
+                                    const char *driver_ver)
+{
+       struct ixgbe_hic_drv_info2 fw_cmd;
+       s32 ret_val;
+       int i;
+
+       if (!len || !driver_ver || (len > sizeof(fw_cmd.driver_string)))
+               return IXGBE_ERR_INVALID_ARGUMENT;
+
+       fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+       fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
+       fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+       fw_cmd.port_num = (u8)hw->bus.func;
+       fw_cmd.ver_maj = maj;
+       fw_cmd.ver_min = min;
+       fw_cmd.ver_build = build;
+       fw_cmd.ver_sub = sub;
+       fw_cmd.hdr.checksum = 0;
+       memcpy(fw_cmd.driver_string, driver_ver, len);
+       fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+                             (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+
+       for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+               ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+                                                      sizeof(fw_cmd),
+                                                      IXGBE_HI_COMMAND_TIMEOUT,
+                                                      true);
+               if (ret_val)
+                       continue;
+
+               if (fw_cmd.hdr.cmd_or_resp.ret_status !=
+                   FW_CEM_RESP_STATUS_SUCCESS)
+                       return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+               return 0;
+       }
+
+       return ret_val;
+}
+
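
The checksum above follows the driver's manageability convention: ixgbe_calculate_checksum() (the existing helper in the driver's common code) returns the two's complement of the byte sum, so every byte of a valid message, checksum included, sums to zero. A sketch of that convention:

/* Sketch of the checksum convention assumed above: pick the checksum byte
 * so that all bytes of the message sum to zero modulo 256.
 */
static u8 calc_checksum(const u8 *buf, u32 len)
{
        u8 sum = 0;
        u32 i;

        for (i = 0; i < len; i++)
                sum += buf[i];

        return (u8)(0 - sum);   /* verifier re-sums and expects zero */
}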
 /** ixgbe_get_lcd_x550em - Determine lowest common denominator
  *  @hw: pointer to hardware structure
  *  @lcd_speed: pointer to lowest common link speed
@@ -2654,6 +3034,50 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
        return status;
 }
 
+/**
+ * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
+{
+       u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
+       s32 rc;
+
+       if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+               return 0;
+
+       rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
+       if (rc)
+               return rc;
+       memset(store, 0, sizeof(store));
+
+       rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
+       if (rc)
+               return rc;
+
+       return ixgbe_setup_fw_link(hw);
+}
+
+/**
+ * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
+{
+       u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
+       s32 rc;
+
+       rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
+       if (rc)
+               return rc;
+
+       if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
+               ixgbe_shutdown_fw_phy(hw);
+               return IXGBE_ERR_OVERTEMP;
+       }
+       return 0;
+}
+
 /**
  * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
  * @hw: pointer to hardware structure
@@ -2740,6 +3164,10 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
                phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
                phy->ops.reset = ixgbe_reset_phy_t_X550em;
                break;
+       case ixgbe_phy_fw:
+               phy->ops.setup_link = ixgbe_setup_fw_link;
+               phy->ops.reset = ixgbe_reset_phy_fw;
+               break;
        default:
                break;
        }
@@ -2777,6 +3205,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
        case IXGBE_DEV_ID_X550EM_X_1G_T:
        case IXGBE_DEV_ID_X550EM_X_10G_T:
        case IXGBE_DEV_ID_X550EM_A_10G_T:
+       case IXGBE_DEV_ID_X550EM_A_1G_T:
+       case IXGBE_DEV_ID_X550EM_A_1G_T_L:
                media_type = ixgbe_media_type_copper;
                break;
        default:
@@ -2844,6 +3274,13 @@ static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
                hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
                IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
                break;
+       case IXGBE_DEV_ID_X550EM_A_1G_T:
+       case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+               /* Select fast MDIO clock speed for these devices */
+               hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+               hlreg0 |= IXGBE_HLREG0_MDCSPD;
+               IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+               break;
        default:
                break;
        }
@@ -3275,7 +3712,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
        .clear_vfta                     = &ixgbe_clear_vfta_generic, \
        .set_vfta                       = &ixgbe_set_vfta_generic, \
        .fc_enable                      = &ixgbe_fc_enable_generic, \
-       .set_fw_drv_ver                 = &ixgbe_set_fw_drv_ver_generic, \
+       .set_fw_drv_ver                 = &ixgbe_set_fw_drv_ver_x550, \
        .init_uta_tables                = &ixgbe_init_uta_tables_generic, \
        .set_mac_anti_spoofing          = &ixgbe_set_mac_anti_spoofing, \
        .set_vlan_anti_spoofing         = &ixgbe_set_vlan_anti_spoofing, \
@@ -3355,6 +3792,27 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = {
        .write_iosf_sb_reg      = ixgbe_write_iosf_sb_reg_x550a,
 };
 
+static struct ixgbe_mac_operations mac_ops_x550em_a_fw = {
+       X550_COMMON_MAC
+       .led_on                 = ixgbe_led_on_generic,
+       .led_off                = ixgbe_led_off_generic,
+       .init_led_link_act      = ixgbe_init_led_link_act_generic,
+       .reset_hw               = ixgbe_reset_hw_X550em,
+       .get_media_type         = ixgbe_get_media_type_X550em,
+       .get_san_mac_addr       = NULL,
+       .get_wwn_prefix         = NULL,
+       .setup_link             = NULL, /* defined later */
+       .get_link_capabilities  = ixgbe_get_link_capabilities_X550em,
+       .get_bus_info           = ixgbe_get_bus_info_X550em,
+       .setup_sfp              = ixgbe_setup_sfp_modules_X550em,
+       .acquire_swfw_sync      = ixgbe_acquire_swfw_sync_x550em_a,
+       .release_swfw_sync      = ixgbe_release_swfw_sync_x550em_a,
+       .setup_fc               = ixgbe_setup_fc_x550em,
+       .fc_autoneg             = ixgbe_fc_autoneg,
+       .read_iosf_sb_reg       = ixgbe_read_iosf_sb_reg_x550a,
+       .write_iosf_sb_reg      = ixgbe_write_iosf_sb_reg_x550a,
+};
+
 #define X550_COMMON_EEP \
        .read                   = &ixgbe_read_ee_hostif_X550, \
        .read_buffer            = &ixgbe_read_ee_hostif_buffer_X550, \
@@ -3384,12 +3842,11 @@ static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
        .read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_generic, \
        .write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic, \
        .setup_link             = &ixgbe_setup_phy_link_generic, \
-       .set_phy_power          = NULL, \
-       .check_overtemp         = &ixgbe_tn_check_overtemp, \
-       .get_firmware_version   = &ixgbe_get_phy_firmware_version_generic,
+       .set_phy_power          = NULL,
 
 static const struct ixgbe_phy_operations phy_ops_X550 = {
        X550_COMMON_PHY
+       .check_overtemp         = &ixgbe_tn_check_overtemp,
        .init                   = NULL,
        .identify               = &ixgbe_identify_phy_generic,
        .read_reg               = &ixgbe_read_phy_reg_generic,
@@ -3398,6 +3855,7 @@ static const struct ixgbe_phy_operations phy_ops_X550 = {
 
 static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
        X550_COMMON_PHY
+       .check_overtemp         = &ixgbe_tn_check_overtemp,
        .init                   = &ixgbe_init_phy_ops_X550em,
        .identify               = &ixgbe_identify_phy_x550em,
        .read_reg               = &ixgbe_read_phy_reg_generic,
@@ -3406,6 +3864,7 @@ static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
 
 static const struct ixgbe_phy_operations phy_ops_x550em_a = {
        X550_COMMON_PHY
+       .check_overtemp         = &ixgbe_tn_check_overtemp,
        .init                   = &ixgbe_init_phy_ops_X550em,
        .identify               = &ixgbe_identify_phy_x550em,
        .read_reg               = &ixgbe_read_phy_reg_x550a,
@@ -3414,6 +3873,17 @@ static const struct ixgbe_phy_operations phy_ops_x550em_a = {
        .write_reg_mdi          = &ixgbe_write_phy_reg_mdi,
 };
 
+static const struct ixgbe_phy_operations phy_ops_x550em_a_fw = {
+       X550_COMMON_PHY
+       .check_overtemp         = ixgbe_check_overtemp_fw,
+       .init                   = ixgbe_init_phy_ops_X550em,
+       .identify               = ixgbe_identify_phy_fw,
+       .read_reg               = NULL,
+       .write_reg              = NULL,
+       .read_reg_mdi           = NULL,
+       .write_reg_mdi          = NULL,
+};
+
 static const struct ixgbe_link_operations link_ops_x550em_x = {
        .read_link              = &ixgbe_read_i2c_combined_generic,
        .read_link_unlocked     = &ixgbe_read_i2c_combined_generic_unlocked,
@@ -3463,3 +3933,13 @@ const struct ixgbe_info ixgbe_x550em_a_info = {
        .mbx_ops                = &mbx_ops_generic,
        .mvals                  = ixgbe_mvals_x550em_a,
 };
+
+const struct ixgbe_info ixgbe_x550em_a_fw_info = {
+       .mac                    = ixgbe_mac_x550em_a,
+       .get_invariants         = ixgbe_get_invariants_X550_a_fw,
+       .mac_ops                = &mac_ops_x550em_a_fw,
+       .eeprom_ops             = &eeprom_ops_X550EM_x,
+       .phy_ops                = &phy_ops_x550em_a_fw,
+       .mbx_ops                = &mbx_ops_generic,
+       .mvals                  = ixgbe_mvals_x550em_a,
+};
index 508e72c5f1c21b44d9176020993542dd6dfb4b6b..1f6c0ecd50bbbcf8e20fa9f883712003b055648f 100644 (file)
@@ -432,11 +432,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
                if (!ring) {
                        data[i++] = 0;
                        data[i++] = 0;
-#ifdef BP_EXTENDED_STATS
-                       data[i++] = 0;
-                       data[i++] = 0;
-                       data[i++] = 0;
-#endif
                        continue;
                }
 
@@ -446,12 +441,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
                        data[i + 1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
-#ifdef BP_EXTENDED_STATS
-               data[i] = ring->stats.yields;
-               data[i + 1] = ring->stats.misses;
-               data[i + 2] = ring->stats.cleaned;
-               i += 3;
-#endif
        }
 
        /* populate Rx queue data */
@@ -460,11 +449,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
                if (!ring) {
                        data[i++] = 0;
                        data[i++] = 0;
-#ifdef BP_EXTENDED_STATS
-                       data[i++] = 0;
-                       data[i++] = 0;
-                       data[i++] = 0;
-#endif
                        continue;
                }
 
@@ -474,12 +458,6 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
                        data[i + 1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
-#ifdef BP_EXTENDED_STATS
-               data[i] = ring->stats.yields;
-               data[i + 1] = ring->stats.misses;
-               data[i + 2] = ring->stats.cleaned;
-               i += 3;
-#endif
        }
 }
 
@@ -507,28 +485,12 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
-#ifdef BP_EXTENDED_STATS
-                       sprintf(p, "tx_queue_%u_bp_napi_yield", i);
-                       p += ETH_GSTRING_LEN;
-                       sprintf(p, "tx_queue_%u_bp_misses", i);
-                       p += ETH_GSTRING_LEN;
-                       sprintf(p, "tx_queue_%u_bp_cleaned", i);
-                       p += ETH_GSTRING_LEN;
-#endif /* BP_EXTENDED_STATS */
                }
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        sprintf(p, "rx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
-#ifdef BP_EXTENDED_STATS
-                       sprintf(p, "rx_queue_%u_bp_poll_yield", i);
-                       p += ETH_GSTRING_LEN;
-                       sprintf(p, "rx_queue_%u_bp_misses", i);
-                       p += ETH_GSTRING_LEN;
-                       sprintf(p, "rx_queue_%u_bp_cleaned", i);
-                       p += ETH_GSTRING_LEN;
-#endif /* BP_EXTENDED_STATS */
                }
                break;
        }
index 5639fbe294d0bbb1ae4fd46828d1c790353edf5d..a8cbc2dda0dd65456c082247693a009b20504d9b 100644 (file)
 
 #include "vf.h"
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#include <net/busy_poll.h>
-#define BP_EXTENDED_STATS
-#endif
-
 #define IXGBE_MAX_TXD_PWR      14
 #define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR)
 
@@ -73,11 +68,6 @@ struct ixgbevf_rx_buffer {
 struct ixgbevf_stats {
        u64 packets;
        u64 bytes;
-#ifdef BP_EXTENDED_STATS
-       u64 yields;
-       u64 misses;
-       u64 cleaned;
-#endif
 };
 
 struct ixgbevf_tx_queue_stats {
@@ -217,109 +207,6 @@ struct ixgbevf_q_vector {
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 };
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
-{
-       spin_lock_init(&q_vector->lock);
-       q_vector->state = IXGBEVF_QV_STATE_IDLE;
-}
-
-/* called from the device poll routine to get ownership of a q_vector */
-static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
-{
-       int rc = true;
-
-       spin_lock_bh(&q_vector->lock);
-       if (q_vector->state & IXGBEVF_QV_LOCKED) {
-               WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
-               q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
-               rc = false;
-#ifdef BP_EXTENDED_STATS
-               q_vector->tx.ring->stats.yields++;
-#endif
-       } else {
-               /* we don't care if someone yielded */
-               q_vector->state = IXGBEVF_QV_STATE_NAPI;
-       }
-       spin_unlock_bh(&q_vector->lock);
-       return rc;
-}
-
-/* returns true is someone tried to get the qv while napi had it */
-static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
-{
-       int rc = false;
-
-       spin_lock_bh(&q_vector->lock);
-       WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
-                                  IXGBEVF_QV_STATE_NAPI_YIELD));
-
-       if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
-               rc = true;
-       /* reset state to idle, unless QV is disabled */
-       q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
-       spin_unlock_bh(&q_vector->lock);
-       return rc;
-}
-
-/* called from ixgbevf_low_latency_poll() */
-static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
-{
-       int rc = true;
-
-       spin_lock_bh(&q_vector->lock);
-       if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
-               q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
-               rc = false;
-#ifdef BP_EXTENDED_STATS
-               q_vector->rx.ring->stats.yields++;
-#endif
-       } else {
-               /* preserve yield marks */
-               q_vector->state |= IXGBEVF_QV_STATE_POLL;
-       }
-       spin_unlock_bh(&q_vector->lock);
-       return rc;
-}
-
-/* returns true if someone tried to get the qv while it was locked */
-static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
-{
-       int rc = false;
-
-       spin_lock_bh(&q_vector->lock);
-       WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));
-
-       if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
-               rc = true;
-       /* reset state to idle, unless QV is disabled */
-       q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
-       spin_unlock_bh(&q_vector->lock);
-       return rc;
-}
-
-/* true if a socket is polling, even if it did not get the lock */
-static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
-{
-       WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
-       return q_vector->state & IXGBEVF_QV_USER_PEND;
-}
-
-/* false if QV is currently owned */
-static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
-{
-       int rc = true;
-
-       spin_lock_bh(&q_vector->lock);
-       if (q_vector->state & IXGBEVF_QV_OWNED)
-               rc = false;
-       q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
-       spin_unlock_bh(&q_vector->lock);
-       return rc;
-}
-
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 /* microsecond values for various ITR rates shifted by 2 to fit itr register
  * with the first 3 bits reserved 0
  */
@@ -464,6 +351,7 @@ enum ixgbevf_xcast_modes {
        IXGBEVF_XCAST_MODE_NONE = 0,
        IXGBEVF_XCAST_MODE_MULTI,
        IXGBEVF_XCAST_MODE_ALLMULTI,
+       IXGBEVF_XCAST_MODE_PROMISC,
 };
 
 extern const struct ixgbevf_info ixgbevf_82599_vf_info;
index 6d4bef5803f2931fd02b9197c4e9231e957ae316..80bab261a0ec778f09ee8437f5e9618c4c8a8372 100644 (file)
@@ -457,16 +457,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
                           struct sk_buff *skb)
 {
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       skb_mark_napi_id(skb, &q_vector->napi);
-
-       if (ixgbevf_qv_busy_polling(q_vector)) {
-               netif_receive_skb(skb);
-               /* exit early if we busy polled */
-               return;
-       }
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
        napi_gro_receive(&q_vector->napi, skb);
 }
 
@@ -1031,10 +1021,6 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
 
        if (budget <= 0)
                return budget;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       if (!ixgbevf_qv_lock_napi(q_vector))
-               return budget;
-#endif
 
        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling
@@ -1052,10 +1038,6 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
                        clean_complete = false;
        }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       ixgbevf_qv_unlock_napi(q_vector);
-#endif
-
        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
                return budget;
@@ -1090,40 +1072,6 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-/* must be called with local_bh_disable()d */
-static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
-{
-       struct ixgbevf_q_vector *q_vector =
-                       container_of(napi, struct ixgbevf_q_vector, napi);
-       struct ixgbevf_adapter *adapter = q_vector->adapter;
-       struct ixgbevf_ring  *ring;
-       int found = 0;
-
-       if (test_bit(__IXGBEVF_DOWN, &adapter->state))
-               return LL_FLUSH_FAILED;
-
-       if (!ixgbevf_qv_lock_poll(q_vector))
-               return LL_FLUSH_BUSY;
-
-       ixgbevf_for_each_ring(ring, q_vector->rx) {
-               found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
-#ifdef BP_EXTENDED_STATS
-               if (found)
-                       ring->stats.cleaned += found;
-               else
-                       ring->stats.misses++;
-#endif
-               if (found)
-                       break;
-       }
-
-       ixgbevf_qv_unlock_poll(q_vector);
-
-       return found;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 /**
  * ixgbevf_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
@@ -1930,6 +1878,16 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
                     (flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
                     IXGBEVF_XCAST_MODE_MULTI : IXGBEVF_XCAST_MODE_NONE;
 
+       /* request the most inclusive mode we need */
+       if (flags & IFF_PROMISC)
+               xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
+       else if (flags & IFF_ALLMULTI)
+               xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
+       else if (flags & (IFF_BROADCAST | IFF_MULTICAST))
+               xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
+       else
+               xcast_mode = IXGBEVF_XCAST_MODE_NONE;
+
        spin_lock_bh(&adapter->mbx_lock);
 
        hw->mac.ops.update_xcast_mode(hw, xcast_mode);
@@ -1950,9 +1908,6 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
 
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
-#ifdef CONFIG_NET_RX_BUSY_POLL
-               ixgbevf_qv_init_lock(adapter->q_vector[q_idx]);
-#endif
                napi_enable(&q_vector->napi);
        }
 }
@@ -1966,12 +1921,6 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = adapter->q_vector[q_idx];
                napi_disable(&q_vector->napi);
-#ifdef CONFIG_NET_RX_BUSY_POLL
-               while (!ixgbevf_qv_disable(adapter->q_vector[q_idx])) {
-                       pr_info("QV %d locked\n", q_idx);
-                       usleep_range(1000, 20000);
-               }
-#endif /* CONFIG_NET_RX_BUSY_POLL */
        }
 }
 
@@ -2071,7 +2020,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
 static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       int api[] = { ixgbe_mbox_api_12,
+       int api[] = { ixgbe_mbox_api_13,
+                     ixgbe_mbox_api_12,
                      ixgbe_mbox_api_11,
                      ixgbe_mbox_api_10,
                      ixgbe_mbox_api_unknown };
@@ -2373,6 +2323,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
                switch (hw->api_version) {
                case ixgbe_mbox_api_11:
                case ixgbe_mbox_api_12:
+               case ixgbe_mbox_api_13:
                        adapter->num_rx_queues = rss;
                        adapter->num_tx_queues = rss;
                default:
@@ -3227,6 +3178,21 @@ err_setup_reset:
        return err;
 }
 
+/**
+ * ixgbevf_close_suspend - actions necessary to both suspend and close flows
+ * @adapter: the private adapter struct
+ *
+ * This function should contain the necessary work common to both suspending
+ * and closing of the device.
+ */
+static void ixgbevf_close_suspend(struct ixgbevf_adapter *adapter)
+{
+       ixgbevf_down(adapter);
+       ixgbevf_free_irq(adapter);
+       ixgbevf_free_all_tx_resources(adapter);
+       ixgbevf_free_all_rx_resources(adapter);
+}
+
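
This helper is invoked from three paths added later in this diff; the guards differ because close runs under different conditions than suspend or PCI error handling. A summary of the call sites as they appear in the hunks below:

/* Call sites of ixgbevf_close_suspend() introduced by this patch:
 *
 *   ixgbevf_close():              if (netif_device_present(netdev))
 *   ixgbevf_suspend():            if (netif_running(netdev))
 *   ixgbevf_io_error_detected():  if (netif_running(netdev))
 *
 * so the shared teardown runs at most once whichever path fires first.
 */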
 /**
  * ixgbevf_close - Disables a network interface
  * @netdev: network interface device structure
@@ -3242,11 +3208,8 @@ int ixgbevf_close(struct net_device *netdev)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
-       ixgbevf_down(adapter);
-       ixgbevf_free_irq(adapter);
-
-       ixgbevf_free_all_tx_resources(adapter);
-       ixgbevf_free_all_rx_resources(adapter);
+       if (netif_device_present(netdev))
+               ixgbevf_close_suspend(adapter);
 
        return 0;
 }
@@ -3268,6 +3231,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
         * match packet buffer alignment. Unfortunately, the
         * hardware is not flexible enough to do this dynamically.
         */
+       rtnl_lock();
+
        if (netif_running(dev))
                ixgbevf_close(dev);
 
@@ -3276,6 +3241,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
 
        if (netif_running(dev))
                ixgbevf_open(dev);
+
+       rtnl_unlock();
 }
 
 static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
@@ -3796,17 +3763,14 @@ static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
        int retval = 0;
 #endif
 
+       rtnl_lock();
        netif_device_detach(netdev);
 
-       if (netif_running(netdev)) {
-               rtnl_lock();
-               ixgbevf_down(adapter);
-               ixgbevf_free_irq(adapter);
-               ixgbevf_free_all_tx_resources(adapter);
-               ixgbevf_free_all_rx_resources(adapter);
-               ixgbevf_clear_interrupt_scheme(adapter);
-               rtnl_unlock();
-       }
+       if (netif_running(netdev))
+               ixgbevf_close_suspend(adapter);
+
+       ixgbevf_clear_interrupt_scheme(adapter);
+       rtnl_unlock();
 
 #ifdef CONFIG_PM
        retval = pci_save_state(pdev);
@@ -3838,6 +3802,8 @@ static int ixgbevf_resume(struct pci_dev *pdev)
                dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
                return err;
        }
+
+       adapter->hw.hw_addr = adapter->io_addr;
        smp_mb__before_atomic();
        clear_bit(__IXGBEVF_DISABLED, &adapter->state);
        pci_set_master(pdev);
@@ -3869,8 +3835,8 @@ static void ixgbevf_shutdown(struct pci_dev *pdev)
        ixgbevf_suspend(pdev, PMSG_SUSPEND);
 }
 
-static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
-                                               struct rtnl_link_stats64 *stats)
+static void ixgbevf_get_stats(struct net_device *netdev,
+                             struct rtnl_link_stats64 *stats)
 {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        unsigned int start;
@@ -3903,8 +3869,6 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
                stats->tx_bytes += bytes;
                stats->tx_packets += packets;
        }
-
-       return stats;
 }
 
 #define IXGBEVF_MAX_MAC_HDR_LEN                127
@@ -3953,9 +3917,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
        .ndo_tx_timeout         = ixgbevf_tx_timeout,
        .ndo_vlan_rx_add_vid    = ixgbevf_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgbevf_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       .ndo_busy_poll          = ixgbevf_busy_poll_recv,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgbevf_netpoll,
 #endif
@@ -4102,6 +4063,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        switch (adapter->hw.api_version) {
        case ixgbe_mbox_api_11:
        case ixgbe_mbox_api_12:
+       case ixgbe_mbox_api_13:
                netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE -
                                  (ETH_HLEN + ETH_FCS_LEN);
                break;
@@ -4244,7 +4206,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
        }
 
        if (netif_running(netdev))
-               ixgbevf_down(adapter);
+               ixgbevf_close_suspend(adapter);
 
        if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state))
                pci_disable_device(pdev);
@@ -4272,6 +4234,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
+       adapter->hw.hw_addr = adapter->io_addr;
        smp_mb__before_atomic();
        clear_bit(__IXGBEVF_DISABLED, &adapter->state);
        pci_set_master(pdev);
@@ -4292,12 +4255,13 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
 static void ixgbevf_io_resume(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
+       rtnl_lock();
        if (netif_running(netdev))
-               ixgbevf_up(adapter);
+               ixgbevf_open(netdev);
 
        netif_device_attach(netdev);
+       rtnl_unlock();
 }
 
 /* PCI Error Recovery (ERS) */
index 340cdd469455ef646f38b25bd0ecde62788976ce..bc0442acae787ff2c1c67d24be9cf8577afb4371 100644 (file)
@@ -84,6 +84,7 @@ enum ixgbe_pfvf_api_rev {
        ixgbe_mbox_api_20,      /* API version 2.0, solaris Phase1 VF driver */
        ixgbe_mbox_api_11,      /* API version 1.1, linux/freebsd VF driver */
        ixgbe_mbox_api_12,      /* API version 1.2, linux/freebsd VF driver */
+       ixgbe_mbox_api_13,      /* API version 1.3, linux/freebsd VF driver */
        /* This value should always be last */
        ixgbe_mbox_api_unknown, /* indicates that API version is not known */
 };
index d46ba1dabcb7beb3769cdcc65466d9cc06d9ed24..8a5db9d7219d14d9845215e6fa7e76036350d5db 100644 (file)
@@ -330,9 +330,14 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
         * Thus return an error if API doesn't support RETA querying or querying
         * is not supported for this device type.
         */
-       if (hw->api_version != ixgbe_mbox_api_12 ||
-           hw->mac.type >= ixgbe_mac_X550_vf)
+       switch (hw->api_version) {
+       case ixgbe_mbox_api_13:
+       case ixgbe_mbox_api_12:
+               if (hw->mac.type >= ixgbe_mac_X550_vf)
+                       break;
+       default:
                return -EOPNOTSUPP;
+       }
 
        msgbuf[0] = IXGBE_VF_GET_RETA;
 
@@ -391,9 +396,14 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
         * Thus return an error if API doesn't support RSS Random Key retrieval
         * or if the operation is not supported for this device type.
         */
-       if (hw->api_version != ixgbe_mbox_api_12 ||
-           hw->mac.type >= ixgbe_mac_X550_vf)
+       switch (hw->api_version) {
+       case ixgbe_mbox_api_13:
+       case ixgbe_mbox_api_12:
+               if (hw->mac.type >= ixgbe_mac_X550_vf)
+                       break;
+       default:
                return -EOPNOTSUPP;
+       }
 
        msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
        err = hw->mbx.ops.write_posted(hw, msgbuf, 1);
@@ -545,6 +555,11 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
 
        switch (hw->api_version) {
        case ixgbe_mbox_api_12:
+               /* promiscuous mode was introduced in API version 1.3 */
+               if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
+                       return -EOPNOTSUPP;
+               /* Fall through */
+       case ixgbe_mbox_api_13:
                break;
        default:
                return -EOPNOTSUPP;
@@ -884,6 +899,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
        switch (hw->api_version) {
        case ixgbe_mbox_api_11:
        case ixgbe_mbox_api_12:
+       case ixgbe_mbox_api_13:
                break;
        default:
                return 0;
index f9fcab54783c45b0ff49b3d3edc5077cf18401fa..f580b49e6b678a20e6679ba3d945ee5cc99f9975 100644 (file)
@@ -1879,7 +1879,7 @@ jme_open(struct net_device *netdev)
 
        jme_phy_on(jme);
        if (test_bit(JME_FLAG_SSET, &jme->flags))
-               jme_set_settings(netdev, &jme->old_ecmd);
+               jme_set_link_ksettings(netdev, &jme->old_cmd);
        else
                jme_reset_phy_processor(jme);
        jme_phy_calibration(jme);
@@ -2374,7 +2374,7 @@ jme_tx_timeout(struct net_device *netdev)
        jme->phylink = 0;
        jme_reset_phy_processor(jme);
        if (test_bit(JME_FLAG_SSET, &jme->flags))
-               jme_set_settings(netdev, &jme->old_ecmd);
+               jme_set_link_ksettings(netdev, &jme->old_cmd);
 
        /*
         * Force to Reset the link again
@@ -2648,27 +2648,27 @@ jme_set_wol(struct net_device *netdev,
 }
 
 static int
-jme_get_settings(struct net_device *netdev,
-                    struct ethtool_cmd *ecmd)
+jme_get_link_ksettings(struct net_device *netdev,
+                      struct ethtool_link_ksettings *cmd)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc;
 
        spin_lock_bh(&jme->phy_lock);
-       rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
+       rc = mii_ethtool_get_link_ksettings(&jme->mii_if, cmd);
        spin_unlock_bh(&jme->phy_lock);
        return rc;
 }
 
 static int
-jme_set_settings(struct net_device *netdev,
-                    struct ethtool_cmd *ecmd)
+jme_set_link_ksettings(struct net_device *netdev,
+                      const struct ethtool_link_ksettings *cmd)
 {
        struct jme_adapter *jme = netdev_priv(netdev);
        int rc, fdc = 0;
 
-       if (ethtool_cmd_speed(ecmd) == SPEED_1000
-           && ecmd->autoneg != AUTONEG_ENABLE)
+       if (cmd->base.speed == SPEED_1000 &&
+           cmd->base.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
 
        /*
@@ -2676,18 +2676,18 @@ jme_set_settings(struct net_device *netdev,
         * Hardware would not generate link change interrupt.
         */
        if (jme->mii_if.force_media &&
-       ecmd->autoneg != AUTONEG_ENABLE &&
-       (jme->mii_if.full_duplex != ecmd->duplex))
+           cmd->base.autoneg != AUTONEG_ENABLE &&
+           (jme->mii_if.full_duplex != cmd->base.duplex))
                fdc = 1;
 
        spin_lock_bh(&jme->phy_lock);
-       rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
+       rc = mii_ethtool_set_link_ksettings(&jme->mii_if, cmd);
        spin_unlock_bh(&jme->phy_lock);
 
        if (!rc) {
                if (fdc)
                        jme_reset_link(jme);
-               jme->old_ecmd = *ecmd;
+               jme->old_cmd = *cmd;
                set_bit(JME_FLAG_SSET, &jme->flags);
        }
 
@@ -2716,7 +2716,7 @@ jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
        if (!rc && (cmd == SIOCSMIIREG)) {
                if (duplex_chg)
                        jme_reset_link(jme);
-               jme_get_settings(netdev, &jme->old_ecmd);
+               jme_get_link_ksettings(netdev, &jme->old_cmd);
                set_bit(JME_FLAG_SSET, &jme->flags);
        }
 
@@ -2915,8 +2915,6 @@ static const struct ethtool_ops jme_ethtool_ops = {
        .set_pauseparam         = jme_set_pauseparam,
        .get_wol                = jme_get_wol,
        .set_wol                = jme_set_wol,
-       .get_settings           = jme_get_settings,
-       .set_settings           = jme_set_settings,
        .get_link               = jme_get_link,
        .get_msglevel           = jme_get_msglevel,
        .set_msglevel           = jme_set_msglevel,
@@ -2924,6 +2922,8 @@ static const struct ethtool_ops jme_ethtool_ops = {
        .get_eeprom_len         = jme_get_eeprom_len,
        .get_eeprom             = jme_get_eeprom,
        .set_eeprom             = jme_set_eeprom,
+       .get_link_ksettings     = jme_get_link_ksettings,
+       .set_link_ksettings     = jme_set_link_ksettings,
 };
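
The jme hunks follow the standard ethtool migration from the legacy get/set_settings pair to link_ksettings. The field mapping assumed throughout this conversion:

/* Legacy ethtool_cmd -> ethtool_link_ksettings mapping (illustrative):
 *
 *   ethtool_cmd_speed(ecmd)   ->  cmd->base.speed
 *   ecmd->duplex              ->  cmd->base.duplex
 *   ecmd->autoneg             ->  cmd->base.autoneg
 *   mii_ethtool_gset()        ->  mii_ethtool_get_link_ksettings()
 *   mii_ethtool_sset()        ->  mii_ethtool_set_link_ksettings()
 */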
 
 static int
@@ -3306,7 +3306,7 @@ jme_resume(struct device *dev)
        jme_clear_pm_disable_wol(jme);
        jme_phy_on(jme);
        if (test_bit(JME_FLAG_SSET, &jme->flags))
-               jme_set_settings(netdev, &jme->old_ecmd);
+               jme_set_link_ksettings(netdev, &jme->old_cmd);
        else
                jme_reset_phy_processor(jme);
        jme_phy_calibration(jme);
index 58cd67c0c8e42527504719cc7c2c39bf6ee34de0..89535c019f04547319d5eaeb452c342dc8790f52 100644 (file)
@@ -447,7 +447,7 @@ struct jme_adapter {
        u8                      chip_sub_rev;
        u8                      pcirev;
        u32                     msg_enable;
-       struct ethtool_cmd      old_ecmd;
+       struct ethtool_link_ksettings old_cmd;
        unsigned int            old_mtu;
        struct dynpcc_info      dpi;
        atomic_t                intr_sem;
@@ -1270,8 +1270,8 @@ static inline int new_phy_power_ctrl(u8 chip_main_rev)
 /*
  * Function prototypes
  */
-static int jme_set_settings(struct net_device *netdev,
-                               struct ethtool_cmd *ecmd);
+static int jme_set_link_ksettings(struct net_device *netdev,
+                                 const struct ethtool_link_ksettings *cmd);
 static void jme_set_unicastaddr(struct net_device *netdev);
 static void jme_set_multi(struct net_device *netdev);
 
index 8037426ec50fa4337cd4e0cc40dabb71082fb487..9fae98caf83a378da959eb0cd70211f3c5d93f7b 100644 (file)
@@ -464,7 +464,7 @@ static int korina_poll(struct napi_struct *napi, int budget)
 
        work_done = korina_rx(dev, budget);
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                writel(readl(&lp->rx_dma_regs->dmasm) &
                        ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
@@ -695,25 +695,27 @@ static void netdev_get_drvinfo(struct net_device *dev,
        strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
 }
 
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+                                    struct ethtool_link_ksettings *cmd)
 {
        struct korina_private *lp = netdev_priv(dev);
        int rc;
 
        spin_lock_irq(&lp->lock);
-       rc = mii_ethtool_gset(&lp->mii_if, cmd);
+       rc = mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
        spin_unlock_irq(&lp->lock);
 
        return rc;
 }
 
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_set_link_ksettings(struct net_device *dev,
+                                    const struct ethtool_link_ksettings *cmd)
 {
        struct korina_private *lp = netdev_priv(dev);
        int rc;
 
        spin_lock_irq(&lp->lock);
-       rc = mii_ethtool_sset(&lp->mii_if, cmd);
+       rc = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
        spin_unlock_irq(&lp->lock);
        korina_set_carrier(&lp->mii_if);
 
@@ -729,9 +731,9 @@ static u32 netdev_get_link(struct net_device *dev)
 
 static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo            = netdev_get_drvinfo,
-       .get_settings           = netdev_get_settings,
-       .set_settings           = netdev_set_settings,
        .get_link               = netdev_get_link,
+       .get_link_ksettings     = netdev_get_link_ksettings,
+       .set_link_ksettings     = netdev_set_link_ksettings,
 };
 
 static int korina_alloc_ring(struct net_device *dev)
index faea52da8dae9ea39469116bca09b513bc7ec90a..afc81006944059837b5cbbdaaaa26d9dc87d82a0 100644 (file)
@@ -156,24 +156,21 @@ ltq_etop_poll_rx(struct napi_struct *napi, int budget)
 {
        struct ltq_etop_chan *ch = container_of(napi,
                                struct ltq_etop_chan, napi);
-       int rx = 0;
-       int complete = 0;
+       int work_done = 0;
 
-       while ((rx < budget) && !complete) {
+       while (work_done < budget) {
                struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
 
-               if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
-                       ltq_etop_hw_receive(ch);
-                       rx++;
-               } else {
-                       complete = 1;
-               }
+               if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
+                       break;
+               ltq_etop_hw_receive(ch);
+               work_done++;
        }
-       if (complete || !rx) {
-               napi_complete(&ch->napi);
+       if (work_done < budget) {
+               napi_complete_done(&ch->napi, work_done);
                ltq_dma_ack_irq(&ch->dma);
        }
-       return rx;
+       return work_done;
 }
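
Several drivers in this merge move from napi_complete() to napi_complete_done(), which reports the work actually done back to the core so it can apply gro_flush_timeout and busy-poll heuristics. A minimal poll function in the converted style; all example_* names are placeholders, not from any driver here:

static int example_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        /* process up to `budget` packets */
        while (work_done < budget) {
                if (!example_rx_one(napi))      /* hypothetical RX helper */
                        break;
                work_done++;
        }

        /* only stop polling if the budget was not exhausted */
        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                example_enable_rx_irq(napi);    /* hypothetical IRQ unmask */
        }

        return work_done;
}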
 
 static int
index 1fa7c03edec2fd0aa91f93dd86d7705f4a273a4b..25642dee49d328f0ab963d03d06e507bdbfeddbc 100644 (file)
@@ -1504,9 +1504,7 @@ mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
        int err;
        u32 supported, advertising;
 
-       err = phy_read_status(dev->phydev);
-       if (err == 0)
-               err = phy_ethtool_ksettings_get(dev->phydev, cmd);
+       err = phy_ethtool_ksettings_get(dev->phydev, cmd);
 
        /*
         * The MAC does not support 1000baseT_Half.
@@ -2319,7 +2317,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
        if (work_done < budget) {
                if (mp->oom)
                        mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                wrlp(mp, INT_MASK, mp->int_mask);
        }
 
index e05e22705cf76a6e0a21dddd7d31238e8f4c736a..61dd4462411c03511d6121d75ca65dc36c7f688f 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
 #include <linux/phy.h>
+#include <linux/phy_fixed.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
 #include <net/hwbm.h>
 #define      MVNETA_TXQ_SENT_THRESH_MASK(coal)   ((coal) << 16)
 #define MVNETA_TXQ_UPDATE_REG(q)                 (0x3c60 + ((q) << 2))
 #define      MVNETA_TXQ_DEC_SENT_SHIFT           16
+#define      MVNETA_TXQ_DEC_SENT_MASK            0xff
 #define MVNETA_TXQ_STATUS_REG(q)                 (0x3c40 + ((q) << 2))
 #define      MVNETA_TXQ_SENT_DESC_SHIFT          16
 #define      MVNETA_TXQ_SENT_DESC_MASK           0x3fff0000
@@ -525,6 +527,7 @@ struct mvneta_tx_queue {
         * descriptor ring
         */
        int count;
+       int pending;
        int tx_stop_threshold;
        int tx_wake_threshold;
 
@@ -652,7 +655,7 @@ static void mvneta_mib_counters_clear(struct mvneta_port *pp)
 }
 
 /* Get System Network Statistics */
-static struct rtnl_link_stats64 *
+static void
 mvneta_get_stats64(struct net_device *dev,
                   struct rtnl_link_stats64 *stats)
 {
@@ -686,8 +689,6 @@ mvneta_get_stats64(struct net_device *dev,
        stats->rx_dropped       = dev->stats.rx_dropped;
 
        stats->tx_dropped       = dev->stats.tx_dropped;
-
-       return stats;
 }
 
 /* Rx descriptors helper methods */
@@ -820,8 +821,9 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
        /* Only 255 descriptors can be added at once; assume the caller
         * processes TX descriptors in quanta of less than 256
         */
-       val = pend_desc;
+       val = pend_desc + txq->pending;
        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+       txq->pending = 0;
 }
 
 /* Get pointer to next TX descriptor to be processed (send) by HW */
@@ -1758,8 +1760,10 @@ static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
 
 /* Free tx queue skbuffs */
 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
-                                struct mvneta_tx_queue *txq, int num)
+                                struct mvneta_tx_queue *txq, int num,
+                                struct netdev_queue *nq)
 {
+       unsigned int bytes_compl = 0, pkts_compl = 0;
        int i;
 
        for (i = 0; i < num; i++) {
@@ -1767,6 +1771,11 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
                        txq->txq_get_index;
                struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
 
+               if (skb) {
+                       bytes_compl += skb->len;
+                       pkts_compl++;
+               }
+
                mvneta_txq_inc_get(txq);
 
                if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
@@ -1777,6 +1786,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
                        continue;
                dev_kfree_skb_any(skb);
        }
+
+       netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
 }
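
mvneta gains byte queue limits (BQL) here: bytes and packets reported at transmit time must be matched at completion time, and the estimator must be reset when a ring is torn down. The three pairing points, using the names from this patch:

/* BQL pairing introduced in this patch (sketch of the contract):
 *
 *   xmit path:       netdev_tx_sent_queue(nq, len);
 *   completion path: netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
 *   queue teardown:  netdev_tx_reset_queue(nq);
 *
 * Unbalanced accounting stalls the queue or defeats the limit estimator.
 */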
 
 /* Handle end of transmission */
@@ -1790,7 +1801,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
        if (!tx_done)
                return;
 
-       mvneta_txq_bufs_free(pp, txq, tx_done);
+       mvneta_txq_bufs_free(pp, txq, tx_done, nq);
 
        txq->count -= tx_done;
 
@@ -2400,12 +2411,18 @@ out:
                struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
                struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
 
-               txq->count += frags;
-               mvneta_txq_pend_desc_add(pp, txq, frags);
+               netdev_tx_sent_queue(nq, len);
 
+               txq->count += frags;
                if (txq->count >= txq->tx_stop_threshold)
                        netif_tx_stop_queue(nq);
 
+               if (!skb->xmit_more || netif_xmit_stopped(nq) ||
+                   txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
+                       mvneta_txq_pend_desc_add(pp, txq, frags);
+               else
+                       txq->pending += frags;
+
                u64_stats_update_begin(&stats->syncp);
                stats->tx_packets++;
                stats->tx_bytes  += len;
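
The hunk above also adds xmit_more batching: the doorbell write is deferred while the stack promises more frames, and flushed when the batch ends, the queue is stopped, or the pending count would overflow the 8-bit SENT field (MVNETA_TXQ_DEC_SENT_MASK). A generic sketch of the rule, with hypothetical ring_kick(), KICK_MAX, and struct example_ring standing in for the hardware doorbell, its field width, and the driver's ring state:

static void tx_kick_or_defer(struct example_ring *r, struct sk_buff *skb,
                             struct netdev_queue *nq, int descs)
{
        if (!skb->xmit_more || netif_xmit_stopped(nq) ||
            r->pending + descs > KICK_MAX) {
                ring_kick(r, r->pending + descs);       /* one MMIO write */
                r->pending = 0;
        } else {
                r->pending += descs;                    /* defer the doorbell */
        }
}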
@@ -2424,9 +2441,10 @@ static void mvneta_txq_done_force(struct mvneta_port *pp,
                                  struct mvneta_tx_queue *txq)
 
 {
+       struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
        int tx_done = txq->count;
 
-       mvneta_txq_bufs_free(pp, txq, tx_done);
+       mvneta_txq_bufs_free(pp, txq, tx_done, nq);
 
        /* reset txq */
        txq->count = 0;
@@ -2750,11 +2768,9 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
                        rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
        }
 
-       budget -= rx_done;
-
-       if (budget > 0) {
+       if (rx_done < budget) {
                cause_rx_tx = 0;
-               napi_complete(napi);
+               napi_complete_done(napi, rx_done);
 
                if (pp->neta_armada3700) {
                        unsigned long flags;
@@ -2952,6 +2968,8 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 static void mvneta_txq_deinit(struct mvneta_port *pp,
                              struct mvneta_tx_queue *txq)
 {
+       struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
+
        kfree(txq->tx_skb);
 
        if (txq->tso_hdrs)
@@ -2963,6 +2981,8 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
                                  txq->size * MVNETA_DESC_ALIGNED_SIZE,
                                  txq->descs, txq->descs_phys);
 
+       netdev_tx_reset_queue(nq);
+
        txq->descs             = NULL;
        txq->last_desc         = 0;
        txq->next_desc_to_proc = 0;
@@ -3908,6 +3928,25 @@ static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
        return 0;
 }
 
+static void mvneta_ethtool_get_wol(struct net_device *dev,
+                                  struct ethtool_wolinfo *wol)
+{
+       wol->supported = 0;
+       wol->wolopts = 0;
+
+       if (dev->phydev)
+               phy_ethtool_get_wol(dev->phydev, wol);
+}
+
+static int mvneta_ethtool_set_wol(struct net_device *dev,
+                                 struct ethtool_wolinfo *wol)
+{
+       if (!dev->phydev)
+               return -EOPNOTSUPP;
+
+       return phy_ethtool_set_wol(dev->phydev, wol);
+}
+
 static const struct net_device_ops mvneta_netdev_ops = {
        .ndo_open            = mvneta_open,
        .ndo_stop            = mvneta_stop,
@@ -3920,7 +3959,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
        .ndo_do_ioctl        = mvneta_ioctl,
 };
 
-const struct ethtool_ops mvneta_eth_tool_ops = {
+static const struct ethtool_ops mvneta_eth_tool_ops = {
        .nway_reset     = phy_ethtool_nway_reset,
        .get_link       = ethtool_op_get_link,
        .set_coalesce   = mvneta_ethtool_set_coalesce,
@@ -3937,6 +3976,8 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
        .set_rxfh       = mvneta_ethtool_set_rxfh,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = mvneta_ethtool_set_link_ksettings,
+       .get_wol        = mvneta_ethtool_get_wol,
+       .set_wol        = mvneta_ethtool_set_wol,
 };
 
 /* Initialize hw */
index 4fe430ceb194519ba87c2360479e27c7ceb0ebda..c2fd7c36f9278842104b6c8a0dcee2147bae5835 100644 (file)
@@ -5405,7 +5405,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
 
        if (budget > 0) {
                cause_rx = 0;
-               napi_complete(napi);
+               napi_complete_done(napi, rx_done);
 
                mvpp2_interrupts_enable(port);
        }
@@ -5739,7 +5739,7 @@ error:
        return err;
 }
 
-static struct rtnl_link_stats64 *
+static void
 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct mvpp2_port *port = netdev_priv(dev);
@@ -5771,8 +5771,6 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
        stats->rx_errors        = dev->stats.rx_errors;
        stats->rx_dropped       = dev->stats.rx_dropped;
        stats->tx_dropped       = dev->stats.tx_dropped;
-
-       return stats;
 }
 
 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
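[Editor's note] This is the first of several hunks in the merge (sky2, mtk and mlx4_en follow the same pattern) adapting drivers to the new void signature of ndo_get_stats64: callers always supply the storage and never used the returned pointer, so the return value was dropped from the hook. The new shape, with a placeholder name:

    static void foo_get_stats64(struct net_device *dev,
                                struct rtnl_link_stats64 *stats)
    {
            /* fill *stats in place; nothing is returned any more */
            netdev_stats_to_stats64(stats, &dev->stats);
    }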
index 3af2814ada235c124fd49f1c72f7d0a5c4afbe99..28cb36d9e50a24a798705ded2f8eb981c9f9ac63 100644 (file)
@@ -274,8 +274,6 @@ enum hash_table_entry {
        HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
 };
 
-static int pxa168_get_link_ksettings(struct net_device *dev,
-                                    struct ethtool_link_ksettings *cmd);
 static int pxa168_init_hw(struct pxa168_eth_private *pep);
 static int pxa168_init_phy(struct net_device *dev);
 static void eth_port_reset(struct net_device *dev);
@@ -987,10 +985,6 @@ static int pxa168_init_phy(struct net_device *dev)
        if (err)
                return err;
 
-       err = pxa168_get_link_ksettings(dev, &cmd);
-       if (err)
-               return err;
-
        cmd.base.phy_address = pep->phy_addr;
        cmd.base.speed = pep->phy_speed;
        cmd.base.duplex = pep->phy_duplex;
@@ -1261,7 +1255,7 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget)
        }
        work_done = rxq_process(dev, budget);
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                wrl(pep, INT_MASK, ALL_INTS);
        }
 
@@ -1370,18 +1364,6 @@ static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
        return -EOPNOTSUPP;
 }
 
-static int pxa168_get_link_ksettings(struct net_device *dev,
-                                    struct ethtool_link_ksettings *cmd)
-{
-       int err;
-
-       err = phy_read_status(dev->phydev);
-       if (err == 0)
-               err = phy_ethtool_ksettings_get(dev->phydev, cmd);
-
-       return err;
-}
-
 static void pxa168_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
 {
@@ -1396,7 +1378,7 @@ static const struct ethtool_ops pxa168_ethtool_ops = {
        .nway_reset     = phy_ethtool_nway_reset,
        .get_link       = ethtool_op_get_link,
        .get_ts_info    = ethtool_op_get_ts_info,
-       .get_link_ksettings = pxa168_get_link_ksettings,
+       .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
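[Editor's note] pxa168's private ksettings wrapper duplicated functionality phylib already exports, so the driver now points its ethtool_ops directly at the generic helpers. Any driver whose link state lives entirely in an attached PHY can delegate both directions this way; a minimal sketch:

    static const struct ethtool_ops foo_ethtool_ops = {
            .get_link               = ethtool_op_get_link,
            .nway_reset             = phy_ethtool_nway_reset,
            /* both require dev->phydev to be attached */
            .get_link_ksettings     = phy_ethtool_get_link_ksettings,
            .set_link_ksettings     = phy_ethtool_set_link_ksettings,
    };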
 
index 9146a514fb33c7993ec1018347a44fc606a283e8..edb95271a4f2f784d740b3201364a5e45de15bcb 100644 (file)
@@ -300,65 +300,76 @@ static u32 skge_supported_modes(const struct skge_hw *hw)
        return supported;
 }
 
-static int skge_get_settings(struct net_device *dev,
-                            struct ethtool_cmd *ecmd)
+static int skge_get_link_ksettings(struct net_device *dev,
+                                  struct ethtool_link_ksettings *cmd)
 {
        struct skge_port *skge = netdev_priv(dev);
        struct skge_hw *hw = skge->hw;
+       u32 supported, advertising;
 
-       ecmd->transceiver = XCVR_INTERNAL;
-       ecmd->supported = skge_supported_modes(hw);
+       supported = skge_supported_modes(hw);
 
        if (hw->copper) {
-               ecmd->port = PORT_TP;
-               ecmd->phy_address = hw->phy_addr;
+               cmd->base.port = PORT_TP;
+               cmd->base.phy_address = hw->phy_addr;
        } else
-               ecmd->port = PORT_FIBRE;
+               cmd->base.port = PORT_FIBRE;
+
+       advertising = skge->advertising;
+       cmd->base.autoneg = skge->autoneg;
+       cmd->base.speed = skge->speed;
+       cmd->base.duplex = skge->duplex;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
 
-       ecmd->advertising = skge->advertising;
-       ecmd->autoneg = skge->autoneg;
-       ethtool_cmd_speed_set(ecmd, skge->speed);
-       ecmd->duplex = skge->duplex;
        return 0;
 }
 
-static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int skge_set_link_ksettings(struct net_device *dev,
+                                  const struct ethtool_link_ksettings *cmd)
 {
        struct skge_port *skge = netdev_priv(dev);
        const struct skge_hw *hw = skge->hw;
        u32 supported = skge_supported_modes(hw);
        int err = 0;
+       u32 advertising;
+
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
 
-       if (ecmd->autoneg == AUTONEG_ENABLE) {
-               ecmd->advertising = supported;
+       if (cmd->base.autoneg == AUTONEG_ENABLE) {
+               advertising = supported;
                skge->duplex = -1;
                skge->speed = -1;
        } else {
                u32 setting;
-               u32 speed = ethtool_cmd_speed(ecmd);
+               u32 speed = cmd->base.speed;
 
                switch (speed) {
                case SPEED_1000:
-                       if (ecmd->duplex == DUPLEX_FULL)
+                       if (cmd->base.duplex == DUPLEX_FULL)
                                setting = SUPPORTED_1000baseT_Full;
-                       else if (ecmd->duplex == DUPLEX_HALF)
+                       else if (cmd->base.duplex == DUPLEX_HALF)
                                setting = SUPPORTED_1000baseT_Half;
                        else
                                return -EINVAL;
                        break;
                case SPEED_100:
-                       if (ecmd->duplex == DUPLEX_FULL)
+                       if (cmd->base.duplex == DUPLEX_FULL)
                                setting = SUPPORTED_100baseT_Full;
-                       else if (ecmd->duplex == DUPLEX_HALF)
+                       else if (cmd->base.duplex == DUPLEX_HALF)
                                setting = SUPPORTED_100baseT_Half;
                        else
                                return -EINVAL;
                        break;
 
                case SPEED_10:
-                       if (ecmd->duplex == DUPLEX_FULL)
+                       if (cmd->base.duplex == DUPLEX_FULL)
                                setting = SUPPORTED_10baseT_Full;
-                       else if (ecmd->duplex == DUPLEX_HALF)
+                       else if (cmd->base.duplex == DUPLEX_HALF)
                                setting = SUPPORTED_10baseT_Half;
                        else
                                return -EINVAL;
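[Editor's note] skge (and sky2 below) keeps its internal state as legacy u32 SUPPORTED_*/ADVERTISED_* masks and converts at the ethtool boundary rather than reworking the whole driver around link-mode bitmaps. The two conversion helpers in isolation, where cmd is the struct ethtool_link_ksettings argument:

    u32 supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg;
    u32 advertising;

    /* get path: legacy u32 mask -> link-mode bitmap */
    ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                            supported);
    /* set path: link-mode bitmap -> legacy u32 mask; returns false if
     * the bitmap held modes that do not fit in 32 bits */
    ethtool_convert_link_mode_to_legacy_u32(&advertising,
                                            cmd->link_modes.advertising);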
@@ -371,11 +382,11 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
                        return -EINVAL;
 
                skge->speed = speed;
-               skge->duplex = ecmd->duplex;
+               skge->duplex = cmd->base.duplex;
        }
 
-       skge->autoneg = ecmd->autoneg;
-       skge->advertising = ecmd->advertising;
+       skge->autoneg = cmd->base.autoneg;
+       skge->advertising = advertising;
 
        if (netif_running(dev)) {
                skge_down(dev);
@@ -875,8 +886,6 @@ static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
 }
 
 static const struct ethtool_ops skge_ethtool_ops = {
-       .get_settings   = skge_get_settings,
-       .set_settings   = skge_set_settings,
        .get_drvinfo    = skge_get_drvinfo,
        .get_regs_len   = skge_get_regs_len,
        .get_regs       = skge_get_regs,
@@ -899,6 +908,8 @@ static const struct ethtool_ops skge_ethtool_ops = {
        .set_phys_id    = skge_set_phys_id,
        .get_sset_count = skge_get_sset_count,
        .get_ethtool_stats = skge_get_ethtool_stats,
+       .get_link_ksettings = skge_get_link_ksettings,
+       .set_link_ksettings = skge_set_link_ksettings,
 };
 
 /*
@@ -3190,7 +3201,7 @@ static void skge_tx_done(struct net_device *dev)
        }
 }
 
-static int skge_poll(struct napi_struct *napi, int to_do)
+static int skge_poll(struct napi_struct *napi, int budget)
 {
        struct skge_port *skge = container_of(napi, struct skge_port, napi);
        struct net_device *dev = skge->netdev;
@@ -3203,7 +3214,7 @@ static int skge_poll(struct napi_struct *napi, int to_do)
 
        skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
 
-       for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
+       for (e = ring->to_clean; prefetch(e->next), work_done < budget; e = e->next) {
                struct skge_rx_desc *rd = e->desc;
                struct sk_buff *skb;
                u32 control;
@@ -3225,12 +3236,10 @@ static int skge_poll(struct napi_struct *napi, int to_do)
        wmb();
        skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
 
-       if (work_done < to_do) {
+       if (work_done < budget && napi_complete_done(napi, work_done)) {
                unsigned long flags;
 
-               napi_gro_flush(napi, false);
                spin_lock_irqsave(&hw->hw_lock, flags);
-               __napi_complete(napi);
                hw->intr_mask |= napimask[skge->port];
                skge_write32(hw, B0_IMSK, hw->intr_mask);
                skge_read32(hw, B0_IMSK);
index b60ad0e56a9f1105ce2f93c9f8abb8c31790736c..2b2cc3f3ca1084ef7aebcb486a1eedc4985f3fa4 100644 (file)
@@ -2666,7 +2666,7 @@ static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
        sky2->rx_stats.bytes += bytes;
        u64_stats_update_end(&sky2->rx_stats.syncp);
 
-       dev->last_rx = jiffies;
+       sky2->last_rx = jiffies;
        sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
 }
 
@@ -2953,7 +2953,7 @@ static int sky2_rx_hung(struct net_device *dev)
        u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));
 
        /* If idle and MAC or PCI is stuck */
-       if (sky2->check.last == dev->last_rx &&
+       if (sky2->check.last == sky2->last_rx &&
            ((mac_rp == sky2->check.mac_rp &&
              mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
             /* Check if the PCI RX hang */
@@ -2965,7 +2965,7 @@ static int sky2_rx_hung(struct net_device *dev)
                              fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
                return 1;
        } else {
-               sky2->check.last = dev->last_rx;
+               sky2->check.last = sky2->last_rx;
                sky2->check.mac_rp = mac_rp;
                sky2->check.mac_lev = mac_lev;
                sky2->check.fifo_rp = fifo_rp;
@@ -3589,47 +3589,59 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw)
                        | SUPPORTED_1000baseT_Full;
 }
 
-static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int sky2_get_link_ksettings(struct net_device *dev,
+                                  struct ethtool_link_ksettings *cmd)
 {
        struct sky2_port *sky2 = netdev_priv(dev);
        struct sky2_hw *hw = sky2->hw;
+       u32 supported, advertising;
 
-       ecmd->transceiver = XCVR_INTERNAL;
-       ecmd->supported = sky2_supported_modes(hw);
-       ecmd->phy_address = PHY_ADDR_MARV;
+       supported = sky2_supported_modes(hw);
+       cmd->base.phy_address = PHY_ADDR_MARV;
        if (sky2_is_copper(hw)) {
-               ecmd->port = PORT_TP;
-               ethtool_cmd_speed_set(ecmd, sky2->speed);
-               ecmd->supported |=  SUPPORTED_Autoneg | SUPPORTED_TP;
+               cmd->base.port = PORT_TP;
+               cmd->base.speed = sky2->speed;
+               supported |=  SUPPORTED_Autoneg | SUPPORTED_TP;
        } else {
-               ethtool_cmd_speed_set(ecmd, SPEED_1000);
-               ecmd->port = PORT_FIBRE;
-               ecmd->supported |=  SUPPORTED_Autoneg | SUPPORTED_FIBRE;
+               cmd->base.speed = SPEED_1000;
+               cmd->base.port = PORT_FIBRE;
+               supported |=  SUPPORTED_Autoneg | SUPPORTED_FIBRE;
        }
 
-       ecmd->advertising = sky2->advertising;
-       ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED)
+       advertising = sky2->advertising;
+       cmd->base.autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED)
                ? AUTONEG_ENABLE : AUTONEG_DISABLE;
-       ecmd->duplex = sky2->duplex;
+       cmd->base.duplex = sky2->duplex;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
+
        return 0;
 }
 
-static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int sky2_set_link_ksettings(struct net_device *dev,
+                                  const struct ethtool_link_ksettings *cmd)
 {
        struct sky2_port *sky2 = netdev_priv(dev);
        const struct sky2_hw *hw = sky2->hw;
        u32 supported = sky2_supported_modes(hw);
+       u32 new_advertising;
 
-       if (ecmd->autoneg == AUTONEG_ENABLE) {
-               if (ecmd->advertising & ~supported)
+       ethtool_convert_link_mode_to_legacy_u32(&new_advertising,
+                                               cmd->link_modes.advertising);
+
+       if (cmd->base.autoneg == AUTONEG_ENABLE) {
+               if (new_advertising & ~supported)
                        return -EINVAL;
 
                if (sky2_is_copper(hw))
-                       sky2->advertising = ecmd->advertising |
+                       sky2->advertising = new_advertising |
                                            ADVERTISED_TP |
                                            ADVERTISED_Autoneg;
                else
-                       sky2->advertising = ecmd->advertising |
+                       sky2->advertising = new_advertising |
                                            ADVERTISED_FIBRE |
                                            ADVERTISED_Autoneg;
 
@@ -3638,30 +3650,30 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
                sky2->speed = -1;
        } else {
                u32 setting;
-               u32 speed = ethtool_cmd_speed(ecmd);
+               u32 speed = cmd->base.speed;
 
                switch (speed) {
                case SPEED_1000:
-                       if (ecmd->duplex == DUPLEX_FULL)
+                       if (cmd->base.duplex == DUPLEX_FULL)
                                setting = SUPPORTED_1000baseT_Full;
-                       else if (ecmd->duplex == DUPLEX_HALF)
+                       else if (cmd->base.duplex == DUPLEX_HALF)
                                setting = SUPPORTED_1000baseT_Half;
                        else
                                return -EINVAL;
                        break;
                case SPEED_100:
-                       if (ecmd->duplex == DUPLEX_FULL)
+                       if (cmd->base.duplex == DUPLEX_FULL)
                                setting = SUPPORTED_100baseT_Full;
-                       else if (ecmd->duplex == DUPLEX_HALF)
+                       else if (cmd->base.duplex == DUPLEX_HALF)
                                setting = SUPPORTED_100baseT_Half;
                        else
                                return -EINVAL;
                        break;
 
                case SPEED_10:
-                       if (ecmd->duplex == DUPLEX_FULL)
+                       if (cmd->base.duplex == DUPLEX_FULL)
                                setting = SUPPORTED_10baseT_Full;
-                       else if (ecmd->duplex == DUPLEX_HALF)
+                       else if (cmd->base.duplex == DUPLEX_HALF)
                                setting = SUPPORTED_10baseT_Half;
                        else
                                return -EINVAL;
@@ -3674,7 +3686,7 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
                        return -EINVAL;
 
                sky2->speed = speed;
-               sky2->duplex = ecmd->duplex;
+               sky2->duplex = cmd->base.duplex;
                sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
        }
 
@@ -3888,8 +3900,8 @@ static void sky2_set_multicast(struct net_device *dev)
        gma_write16(hw, port, GM_RX_CTRL, reg);
 }
 
-static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
-                                               struct rtnl_link_stats64 *stats)
+static void sky2_get_stats(struct net_device *dev,
+                          struct rtnl_link_stats64 *stats)
 {
        struct sky2_port *sky2 = netdev_priv(dev);
        struct sky2_hw *hw = sky2->hw;
@@ -3929,8 +3941,6 @@ static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
        stats->rx_dropped = dev->stats.rx_dropped;
        stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
        stats->tx_fifo_errors = dev->stats.tx_fifo_errors;
-
-       return stats;
 }
 
 /* Can have one global because blinking is controlled by
@@ -4407,8 +4417,6 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
 }
 
 static const struct ethtool_ops sky2_ethtool_ops = {
-       .get_settings   = sky2_get_settings,
-       .set_settings   = sky2_set_settings,
        .get_drvinfo    = sky2_get_drvinfo,
        .get_wol        = sky2_get_wol,
        .set_wol        = sky2_set_wol,
@@ -4431,6 +4439,8 @@ static const struct ethtool_ops sky2_ethtool_ops = {
        .set_phys_id    = sky2_set_phys_id,
        .get_sset_count = sky2_get_sset_count,
        .get_ethtool_stats = sky2_get_ethtool_stats,
+       .get_link_ksettings = sky2_get_link_ksettings,
+       .set_link_ksettings = sky2_set_link_ksettings,
 };
 
 #ifdef CONFIG_SKY2_DEBUG
index ec6dcd80152bdd46550b0aaf0865d00a01261a93..0fe1607968428db0b759dfb0c1be1c82afa1382d 100644 (file)
@@ -2247,6 +2247,7 @@ struct sky2_port {
        u16                  rx_data_size;
        u16                  rx_nfrags;
 
+       unsigned long        last_rx;
        struct {
                unsigned long last;
                u32     mac_rp;
index 1c29c86f8709f16bc75087d023858185b37c2faf..9e757684816d48b903f62cdac2d6a1123e6c3305 100644 (file)
@@ -462,8 +462,8 @@ static void mtk_stats_update(struct mtk_eth *eth)
        }
 }
 
-static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
-                                       struct rtnl_link_stats64 *storage)
+static void mtk_get_stats64(struct net_device *dev,
+                           struct rtnl_link_stats64 *storage)
 {
        struct mtk_mac *mac = netdev_priv(dev);
        struct mtk_hw_stats *hw_stats = mac->hw_stats;
@@ -494,8 +494,6 @@ static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev,
        storage->tx_errors = dev->stats.tx_errors;
        storage->rx_dropped = dev->stats.rx_dropped;
        storage->tx_dropped = dev->stats.tx_dropped;
-
-       return storage;
 }
 
 static inline int mtk_max_frag_size(int mtu)
index 6b8635378f1fcb2aae4e8ac390bcd09d552c2256..fa6d2354a0e910ee160863e3cbe21a512d77bf03 100644 (file)
@@ -81,8 +81,9 @@ void mlx4_cq_tasklet_cb(unsigned long data)
 
 static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
 {
-       unsigned long flags;
        struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
+       unsigned long flags;
+       bool kick;
 
        spin_lock_irqsave(&tasklet_ctx->lock, flags);
        /* When migrating CQs between EQs will be implemented, please note
@@ -92,7 +93,10 @@ static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
         */
        if (list_empty_careful(&cq->tasklet_ctx.list)) {
                atomic_inc(&cq->refcount);
+               kick = list_empty(&tasklet_ctx->list);
                list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
+               if (kick)
+                       tasklet_schedule(&tasklet_ctx->task);
        }
        spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
 }
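[Editor's note] With this change mlx4_add_cq_to_tasklet() schedules the completion tasklet itself, exactly on the list's empty-to-non-empty transition and under the lock that protects the list; a later hunk in this merge removes the EQ-side heuristic (if (!(cqn & ~0xffffff)) tasklet_schedule(...)) that used to do it. Schedule-on-first-insertion is a common way to get one wakeup per batch:

    /* ctx/item types are illustrative, not mlx4's */
    static void foo_queue_work(struct foo_ctx *ctx, struct foo_item *item)
    {
            unsigned long flags;
            bool kick;

            spin_lock_irqsave(&ctx->lock, flags);
            kick = list_empty(&ctx->list);  /* empty -> non-empty? */
            list_add_tail(&item->list, &ctx->list);
            if (kick)
                    tasklet_schedule(&ctx->task);   /* one schedule per batch */
            spin_unlock_irqrestore(&ctx->lock, flags);
    }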
index 504461a464c581bf77b5cca127680f2622221cde..e7b81a305469e64b97f68bc0e2bcb064b78f08fe 100644 (file)
@@ -62,12 +62,13 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
                            struct skb_shared_hwtstamps *hwts,
                            u64 timestamp)
 {
-       unsigned long flags;
+       unsigned int seq;
        u64 nsec;
 
-       read_lock_irqsave(&mdev->clock_lock, flags);
-       nsec = timecounter_cyc2time(&mdev->clock, timestamp);
-       read_unlock_irqrestore(&mdev->clock_lock, flags);
+       do {
+               seq = read_seqbegin(&mdev->clock_lock);
+               nsec = timecounter_cyc2time(&mdev->clock, timestamp);
+       } while (read_seqretry(&mdev->clock_lock, seq));
 
        memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
        hwts->hwtstamp = ns_to_ktime(nsec);
@@ -95,9 +96,9 @@ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
        unsigned long flags;
 
        if (timeout) {
-               write_lock_irqsave(&mdev->clock_lock, flags);
+               write_seqlock_irqsave(&mdev->clock_lock, flags);
                timecounter_read(&mdev->clock);
-               write_unlock_irqrestore(&mdev->clock_lock, flags);
+               write_sequnlock_irqrestore(&mdev->clock_lock, flags);
                mdev->last_overflow_check = jiffies;
        }
 }
@@ -128,10 +129,10 @@ static int mlx4_en_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
        adj *= delta;
        diff = div_u64(adj, 1000000000ULL);
 
-       write_lock_irqsave(&mdev->clock_lock, flags);
+       write_seqlock_irqsave(&mdev->clock_lock, flags);
        timecounter_read(&mdev->clock);
        mdev->cycles.mult = neg_adj ? mult - diff : mult + diff;
-       write_unlock_irqrestore(&mdev->clock_lock, flags);
+       write_sequnlock_irqrestore(&mdev->clock_lock, flags);
 
        return 0;
 }
@@ -149,9 +150,9 @@ static int mlx4_en_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
                                                ptp_clock_info);
        unsigned long flags;
 
-       write_lock_irqsave(&mdev->clock_lock, flags);
+       write_seqlock_irqsave(&mdev->clock_lock, flags);
        timecounter_adjtime(&mdev->clock, delta);
-       write_unlock_irqrestore(&mdev->clock_lock, flags);
+       write_sequnlock_irqrestore(&mdev->clock_lock, flags);
 
        return 0;
 }
@@ -172,9 +173,9 @@ static int mlx4_en_phc_gettime(struct ptp_clock_info *ptp,
        unsigned long flags;
        u64 ns;
 
-       write_lock_irqsave(&mdev->clock_lock, flags);
+       write_seqlock_irqsave(&mdev->clock_lock, flags);
        ns = timecounter_read(&mdev->clock);
-       write_unlock_irqrestore(&mdev->clock_lock, flags);
+       write_sequnlock_irqrestore(&mdev->clock_lock, flags);
 
        *ts = ns_to_timespec64(ns);
 
@@ -198,9 +199,9 @@ static int mlx4_en_phc_settime(struct ptp_clock_info *ptp,
        unsigned long flags;
 
        /* reset the timecounter */
-       write_lock_irqsave(&mdev->clock_lock, flags);
+       write_seqlock_irqsave(&mdev->clock_lock, flags);
        timecounter_init(&mdev->clock, &mdev->cycles, ns);
-       write_unlock_irqrestore(&mdev->clock_lock, flags);
+       write_sequnlock_irqrestore(&mdev->clock_lock, flags);
 
        return 0;
 }
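[Editor's note] Converting clock_lock from an rwlock to a seqlock lets the hot timestamping path in mlx4_en_fill_hwtstamps() read the timecounter without taking a lock or disabling interrupts: readers simply retry if a writer raced with them, while the infrequent PTP adjustment paths keep full exclusion. The reader/writer pairing in general form (foo_lock must be seqlock_init()ed first):

    static seqlock_t foo_lock;
    static u64 foo_value;

    static u64 foo_read(void)   /* lock-free; retries on writer race */
    {
            unsigned int seq;
            u64 v;

            do {
                    seq = read_seqbegin(&foo_lock);
                    v = foo_value;
            } while (read_seqretry(&foo_lock, seq));
            return v;
    }

    static void foo_write(u64 v)        /* writers still serialize */
    {
            unsigned long flags;

            write_seqlock_irqsave(&foo_lock, flags);
            foo_value = v;
            write_sequnlock_irqrestore(&foo_lock, flags);
    }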
@@ -266,7 +267,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
        if (mdev->ptp_clock)
                return;
 
-       rwlock_init(&mdev->clock_lock);
+       seqlock_init(&mdev->clock_lock);
 
        memset(&mdev->cycles, 0, sizeof(mdev->cycles));
        mdev->cycles.read = mlx4_en_read_clock;
@@ -276,10 +277,10 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
                clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
        mdev->nominal_c_mult = mdev->cycles.mult;
 
-       write_lock_irqsave(&mdev->clock_lock, flags);
+       write_seqlock_irqsave(&mdev->clock_lock, flags);
        timecounter_init(&mdev->clock, &mdev->cycles,
                         ktime_to_ns(ktime_get_real()));
-       write_unlock_irqrestore(&mdev->clock_lock, flags);
+       write_sequnlock_irqrestore(&mdev->clock_lock, flags);
 
        /* Calculate period in seconds to call the overflow watchdog - to make
         * sure counter is checked at least once every wrap around.
index 9aa4226919542f6496fedce45a09e09685433efe..c4d714fcc7dae759998a49a1f90f9ab1ee9bdda3 100644 (file)
@@ -902,6 +902,7 @@ mlx4_en_set_link_ksettings(struct net_device *dev,
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_ptys_reg ptys_reg;
        __be32 proto_admin;
+       u8 cur_autoneg;
        int ret;
 
        u32 ptys_adv = ethtool2ptys_link_modes(
@@ -931,10 +932,21 @@ mlx4_en_set_link_ksettings(struct net_device *dev,
                return 0;
        }
 
-       proto_admin = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
-               cpu_to_be32(ptys_adv) :
-               speed_set_ptys_admin(priv, speed,
-                                    ptys_reg.eth_proto_cap);
+       cur_autoneg = ptys_reg.flags & MLX4_PTYS_AN_DISABLE_ADMIN ?
+                               AUTONEG_DISABLE : AUTONEG_ENABLE;
+
+       if (link_ksettings->base.autoneg == AUTONEG_DISABLE) {
+               proto_admin = speed_set_ptys_admin(priv, speed,
+                                                  ptys_reg.eth_proto_cap);
+               if ((be32_to_cpu(proto_admin) &
+                    (MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII) |
+                     MLX4_PROT_MASK(MLX4_1000BASE_KX))) &&
+                   (ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP))
+                       ptys_reg.flags |= MLX4_PTYS_AN_DISABLE_ADMIN;
+       } else {
+               proto_admin = cpu_to_be32(ptys_adv);
+               ptys_reg.flags &= ~MLX4_PTYS_AN_DISABLE_ADMIN;
+       }
 
        proto_admin &= ptys_reg.eth_proto_cap;
        if (!proto_admin) {
@@ -942,7 +954,9 @@ mlx4_en_set_link_ksettings(struct net_device *dev,
                return -EINVAL; /* nothing to change due to bad input */
        }
 
-       if (proto_admin == ptys_reg.eth_proto_admin)
+       if ((proto_admin == ptys_reg.eth_proto_admin) &&
+           ((ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP) &&
+            (link_ksettings->base.autoneg == cur_autoneg)))
                return 0; /* Nothing to change */
 
        en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
@@ -1788,7 +1802,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
        netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
-       if (dev->num_tc)
+       if (netdev_get_num_tc(dev))
                mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
 
        en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]);
@@ -1980,7 +1994,7 @@ static int mlx4_en_get_module_info(struct net_device *dev,
                modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
                break;
        default:
-               return -ENOSYS;
+               return -EINVAL;
        }
 
        return 0;
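[Editor's note] The -ENOSYS returns replaced here and in the fw.c, main.c and resource_tracker.c hunks below follow the kernel convention (enforced by checkpatch) that ENOSYS means "invalid system call number" and nothing else: a device lacking a feature reports -EOPNOTSUPP, and malformed input reports -EINVAL. A capability gate would be written as, for example (FOO_CAP_FEATURE is a hypothetical flag):

    if (!(dev->caps.flags & FOO_CAP_FEATURE))
            return -EOPNOTSUPP;     /* feature absent, not a missing syscall */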
index 3b4961a8e8e44d6987ebd23f9239e747c7fc6cd5..748e9f65c386b6e49ce8112cda6f47af6047cba8 100644 (file)
@@ -1321,7 +1321,7 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 }
 
 
-static struct rtnl_link_stats64 *
+static void
 mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -1330,8 +1330,6 @@ mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
        mlx4_en_fold_software_stats(dev);
        netdev_stats_to_stats64(stats, &dev->stats);
        spin_unlock_bh(&priv->stats_lock);
-
-       return stats;
 }
 
 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
@@ -1697,6 +1695,14 @@ int mlx4_en_start_port(struct net_device *dev)
                       priv->port, err);
                goto tx_err;
        }
+
+       err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu);
+       if (err) {
+               en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n",
+                      dev->mtu, priv->port, err);
+               goto tx_err;
+       }
+
        /* Set default qp number */
        err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
        if (err) {
index 040da4b16b1c65ee3448598550ee6b5726ccdb76..930f961fee42140cb7c03b1cf4a843911657b02d 100644 (file)
@@ -35,7 +35,6 @@
 #define _MLX4_EN_PORT_H_
 
 
-#define SET_PORT_GEN_ALL_VALID 0x7
 #define SET_PORT_PROMISC_SHIFT 31
 #define SET_PORT_MC_PROMISC_SHIFT      30
 
index cc003fdf0ed929a981b1403f6a7d0099825fec4b..d85e6446f9d99e38c75b97d7fba29bd057e0a16f 100644 (file)
@@ -33,6 +33,7 @@
 
 #include <net/busy_poll.h>
 #include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 #include <linux/mlx4/cq.h>
 #include <linux/slab.h>
 #include <linux/mlx4/qp.h>
@@ -709,7 +710,8 @@ static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
        do {
                if (mlx4_en_prepare_rx_desc(priv, ring,
                                            ring->prod & ring->size_mask,
-                                           GFP_ATOMIC | __GFP_COLD))
+                                           GFP_ATOMIC | __GFP_COLD |
+                                           __GFP_MEMALLOC))
                        break;
                ring->prod++;
        } while (--missing);
@@ -928,10 +930,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                                                        length, cq->ring,
                                                        &doorbell_pending)))
                                        goto consumed;
+                               trace_xdp_exception(dev, xdp_prog, act);
                                goto xdp_drop_no_cnt; /* Drop on xmit failure */
                        default:
                                bpf_warn_invalid_xdp_action(act);
                        case XDP_ABORTED:
+                               trace_xdp_exception(dev, xdp_prog, act);
                        case XDP_DROP:
                                ring->xdp_drop++;
 xdp_drop_no_cnt:
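[Editor's note] trace_xdp_exception(), declared in the newly included <linux/bpf_trace.h>, gives tooling one tracepoint for every abnormal XDP outcome: it fires here on XDP_TX transmit failure, on XDP_ABORTED and on unknown verdicts, but deliberately not on an ordinary XDP_DROP. The verdict switch in schematic form (foo_* helpers are placeholders):

    switch (act) {
    case XDP_PASS:
            break;                          /* continue up the stack */
    case XDP_TX:
            if (likely(!foo_xmit_xdp(dev, &xdp)))   /* hypothetical helper */
                    break;
            trace_xdp_exception(dev, xdp_prog, act);
            goto drop;                      /* drop on xmit failure */
    default:
            bpf_warn_invalid_xdp_action(act);
            /* fall through */
    case XDP_ABORTED:
            trace_xdp_exception(dev, xdp_prog, act);
            /* fall through */
    case XDP_DROP:
    drop:
            foo_drop(&xdp);                 /* count and recycle the page */
    }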
index 5886ad78058f2dc02afcf7ae8851572138fc372a..3ed42199d3f1275f77560e92a430c0dde181e95a 100644 (file)
@@ -710,7 +710,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
        u16 rings_p_up = priv->num_tx_rings_p_up;
        u8 up = 0;
 
-       if (dev->num_tc)
+       if (netdev_get_num_tc(dev))
                return skb_tx_hash(dev, skb);
 
        if (skb_vlan_tag_present(skb))
index 0509996957d9664b612358dd805359f4bc67b8dc..39232b6a974f4b4b961d3b0b8634f04e6b9d0caa 100644 (file)
@@ -494,7 +494,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_eqe *eqe;
-       int cqn = -1;
+       int cqn;
        int eqes_found = 0;
        int set_ci = 0;
        int port;
@@ -840,13 +840,6 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
 
        eq_set_ci(eq, 1);
 
-       /* cqn is 24bit wide but is initialized such that its higher bits
-        * are ones too. Thus, if we got any event, cqn's high bits should be off
-        * and we need to schedule the tasklet.
-        */
-       if (!(cqn & ~0xffffff))
-               tasklet_schedule(&eq->tasklet_ctx.task);
-
        return eqes_found;
 }
 
index 84bab9f0732ea239bce5adaac7eb52d0298cc751..3fe885ce1902d2ca1221029438892d1bcb53f7ab 100644 (file)
@@ -672,7 +672,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
        MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
        func_cap->physical_port = field;
        if (func_cap->physical_port != gen_or_port) {
-               err = -ENOSYS;
+               err = -EINVAL;
                goto out;
        }
 
@@ -1875,7 +1875,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
        *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
 
        *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
-               (ilog2(cache_line_size()) - 4) << 5;
+               ((ilog2(cache_line_size()) - 4) << 5) | (1 << 4);
 
 #if defined(__LITTLE_ENDIAN)
        *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
@@ -2983,7 +2983,7 @@ static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
                return PTR_ERR(mailbox);
        context = mailbox->buf;
 
-       context->v_ignore_fcs |=  SET_PORT_GEN_PHV_VALID;
+       context->flags2 |=  SET_PORT_GEN_PHV_VALID;
        if (phv_bit)
                context->phv_en |=  SET_PORT_GEN_PHV_EN;
 
index bffa6f345f2f40e35ebab8e546237da4fbe6b6a8..15ef787e71ba1072d2f9693392ae5e0269e06066 100644 (file)
@@ -838,7 +838,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
         */
        if (hca_param.global_caps) {
                mlx4_err(dev, "Unknown hca global capabilities\n");
-               return -ENOSYS;
+               return -EINVAL;
        }
 
        mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
@@ -896,7 +896,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
            PF_CONTEXT_BEHAVIOUR_MASK) {
                mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
                         func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK);
-               return -ENOSYS;
+               return -EINVAL;
        }
 
        dev->caps.num_ports             = func_cap.num_ports;
@@ -3492,7 +3492,7 @@ slave_start:
        mlx4_enable_msi_x(dev);
        if ((mlx4_is_mfunc(dev)) &&
            !(dev->flags & MLX4_FLAG_MSI_X)) {
-               err = -ENOSYS;
+               err = -EOPNOTSUPP;
                mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
                goto err_free_eq;
        }
index 086920b615af7180e891893ffd00928c0bd0238f..b4f1bc56cc68ff48b1beb7f31ce50124818ac1f5 100644 (file)
@@ -487,6 +487,7 @@ struct mlx4_slave_state {
        bool vst_qinq_supported;
        u8 function;
        dma_addr_t vhcr_dma;
+       u16 user_mtu[MLX4_MAX_PORTS + 1];
        u16 mtu[MLX4_MAX_PORTS + 1];
        __be32 ib_cap_mask[MLX4_MAX_PORTS + 1];
        struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
@@ -590,6 +591,7 @@ struct mlx4_mfunc_master_ctx {
        struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
        int                     init_port_ref[MLX4_MAX_PORTS + 1];
        u16                     max_mtu[MLX4_MAX_PORTS + 1];
+       u16                     max_user_mtu[MLX4_MAX_PORTS + 1];
        u8                      pptx;
        u8                      pprx;
        int                     disable_mcast_ref[MLX4_MAX_PORTS + 1];
@@ -774,7 +776,9 @@ struct mlx4_vlan_table {
        int                     max;
 };
 
-#define SET_PORT_GEN_ALL_VALID         0x7
+#define SET_PORT_GEN_ALL_VALID (MLX4_FLAG_V_MTU_MASK   | \
+                                MLX4_FLAG_V_PPRX_MASK  | \
+                                MLX4_FLAG_V_PPTX_MASK)
 #define SET_PORT_PROMISC_SHIFT         31
 #define SET_PORT_MC_PROMISC_SHIFT      30
 
@@ -787,7 +791,7 @@ enum {
 
 struct mlx4_set_port_general_context {
        u16 reserved1;
-       u8 v_ignore_fcs;
+       u8 flags2;
        u8 flags;
        union {
                u8 ignore_fcs;
@@ -803,7 +807,8 @@ struct mlx4_set_port_general_context {
        u16 reserved4;
        u32 reserved5;
        u8 phv_en;
-       u8 reserved6[3];
+       u8 reserved6[5];
+       __be16 user_mtu;
 };
 
 struct mlx4_set_port_rqp_calc_context {
index cec59bc264c9ac197048fd7c98bcd5cf25de0efd..d8ca6d1794ef60d7f57a3cb3724de6fa26cedf3c 100644 (file)
@@ -424,9 +424,9 @@ struct mlx4_en_dev {
        u32                     priv_pdn;
        spinlock_t              uar_lock;
        u8                      mac_removed[MLX4_MAX_PORTS + 1];
-       rwlock_t                clock_lock;
        u32                     nominal_c_mult;
        struct cyclecounter     cycles;
+       seqlock_t               clock_lock;
        struct timecounter      clock;
        unsigned long           last_overflow_check;
        unsigned long           overflow_period;
index b656dd5772e5b9ae3412d11dc8791c49fb10a78f..4e36e287d60567920866a2027a748f1c46fa110d 100644 (file)
 #define MLX4_STATS_ERROR_COUNTERS_MASK         0x1ffc30ULL
 #define MLX4_STATS_PORT_COUNTERS_MASK          0x1fe00000ULL
 
-#define MLX4_FLAG_V_IGNORE_FCS_MASK            0x2
+#define MLX4_FLAG2_V_IGNORE_FCS_MASK           BIT(1)
+#define MLX4_FLAG2_V_USER_MTU_MASK             BIT(5)
+#define MLX4_FLAG_V_MTU_MASK                   BIT(0)
+#define MLX4_FLAG_V_PPRX_MASK                  BIT(1)
+#define MLX4_FLAG_V_PPTX_MASK                  BIT(2)
 #define MLX4_IGNORE_FCS_MASK                   0x1
 #define MLX4_TC_MAX_NUMBER                     8
 
@@ -1239,13 +1243,96 @@ void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
        return;
 }
 
+static void
+mlx4_en_set_port_mtu(struct mlx4_dev *dev, int slave, int port,
+                    struct mlx4_set_port_general_context *gen_context)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+       struct mlx4_slave_state *slave_st = &master->slave_state[slave];
+       u16 mtu, prev_mtu;
+
+       /* Mtu is configured as the max MTU among all
+        * the functions on the port.
+        */
+       mtu = be16_to_cpu(gen_context->mtu);
+       mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
+                   ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+       prev_mtu = slave_st->mtu[port];
+       slave_st->mtu[port] = mtu;
+       if (mtu > master->max_mtu[port])
+               master->max_mtu[port] = mtu;
+       if (mtu < prev_mtu && prev_mtu == master->max_mtu[port]) {
+               int i;
+
+               slave_st->mtu[port] = mtu;
+               master->max_mtu[port] = mtu;
+               for (i = 0; i < dev->num_slaves; i++)
+                       master->max_mtu[port] =
+                               max_t(u16, master->max_mtu[port],
+                                     master->slave_state[i].mtu[port]);
+       }
+       gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+}
+
+static void
+mlx4_en_set_port_user_mtu(struct mlx4_dev *dev, int slave, int port,
+                         struct mlx4_set_port_general_context *gen_context)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+       struct mlx4_slave_state *slave_st = &master->slave_state[slave];
+       u16 user_mtu, prev_user_mtu;
+
+       /* User Mtu is configured as the max USER_MTU among all
+        * the functions on the port.
+        */
+       user_mtu = be16_to_cpu(gen_context->user_mtu);
+       user_mtu = min_t(int, user_mtu, dev->caps.eth_mtu_cap[port]);
+       prev_user_mtu = slave_st->user_mtu[port];
+       slave_st->user_mtu[port] = user_mtu;
+       if (user_mtu > master->max_user_mtu[port])
+               master->max_user_mtu[port] = user_mtu;
+       if (user_mtu < prev_user_mtu &&
+           prev_user_mtu == master->max_user_mtu[port]) {
+               int i;
+
+               slave_st->user_mtu[port] = user_mtu;
+               master->max_user_mtu[port] = user_mtu;
+               for (i = 0; i < dev->num_slaves; i++)
+                       master->max_user_mtu[port] =
+                               max_t(u16, master->max_user_mtu[port],
+                                     master->slave_state[i].user_mtu[port]);
+       }
+       gen_context->user_mtu = cpu_to_be16(master->max_user_mtu[port]);
+}
+
+static void
+mlx4_en_set_port_global_pause(struct mlx4_dev *dev, int slave,
+                             struct mlx4_set_port_general_context *gen_context)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+
+       /* Slave cannot change Global Pause configuration */
+       if (slave != mlx4_master_func_num(dev) &&
+           (gen_context->pptx != master->pptx ||
+            gen_context->pprx != master->pprx)) {
+               gen_context->pptx = master->pptx;
+               gen_context->pprx = master->pprx;
+               mlx4_warn(dev, "denying Global Pause change for slave:%d\n",
+                         slave);
+       } else {
+               master->pptx = gen_context->pptx;
+               master->pprx = gen_context->pprx;
+       }
+}
+
 static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
                                u8 op_mod, struct mlx4_cmd_mailbox *inbox)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_port_info *port_info;
-       struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
-       struct mlx4_slave_state *slave_st = &master->slave_state[slave];
        struct mlx4_set_port_rqp_calc_context *qpn_context;
        struct mlx4_set_port_general_context *gen_context;
        struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
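[Editor's note] The MTU, USER_MTU and global-pause logic that used to sit inline in mlx4_common_set_port() moves into the three helpers above, each invoked only when the matching validity flag is set in gen_context (see the dispatch hunk below). The per-port maximum those helpers maintain uses the standard trick for tracking a max under updates: growth is O(1), but when the previous maximum shrinks, the value is recomputed by rescanning every contributor. In isolation:

    /* max-tracking across n contributors; cf. the MTU helpers above */
    static void foo_update_max(u16 *vals, int n, u16 *cur_max, int idx, u16 v)
    {
            u16 prev = vals[idx];

            vals[idx] = v;
            if (v > *cur_max) {                     /* cheap growth path */
                    *cur_max = v;
            } else if (v < prev && prev == *cur_max) {
                    int i;                          /* old max shrank: rescan */

                    *cur_max = 0;
                    for (i = 0; i < n; i++)
                            *cur_max = max_t(u16, *cur_max, vals[i]);
            }
    }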
@@ -1256,7 +1343,6 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
        int base;
        u32 in_modifier;
        u32 promisc;
-       u16 mtu, prev_mtu;
        int err;
        int i, j;
        int offset;
@@ -1269,7 +1355,9 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
        is_eth = op_mod;
        port_info = &priv->port[port];
 
-       /* Slaves cannot perform SET_PORT operations except changing MTU */
+       /* Slaves cannot perform SET_PORT operations,
+        * except for changing MTU and USER_MTU.
+        */
        if (is_eth) {
                if (slave != dev->caps.function &&
                    in_modifier != MLX4_SET_PORT_GENERAL &&
@@ -1297,40 +1385,20 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
                        break;
                case MLX4_SET_PORT_GENERAL:
                        gen_context = inbox->buf;
-                       /* Mtu is configured as the max MTU among all the
-                        * the functions on the port. */
-                       mtu = be16_to_cpu(gen_context->mtu);
-                       mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
-                                   ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
-                       prev_mtu = slave_st->mtu[port];
-                       slave_st->mtu[port] = mtu;
-                       if (mtu > master->max_mtu[port])
-                               master->max_mtu[port] = mtu;
-                       if (mtu < prev_mtu && prev_mtu ==
-                                               master->max_mtu[port]) {
-                               slave_st->mtu[port] = mtu;
-                               master->max_mtu[port] = mtu;
-                               for (i = 0; i < dev->num_slaves; i++) {
-                                       master->max_mtu[port] =
-                                       max(master->max_mtu[port],
-                                           master->slave_state[i].mtu[port]);
-                               }
-                       }
 
-                       gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
-                       /* Slave cannot change Global Pause configuration */
-                       if (slave != mlx4_master_func_num(dev) &&
-                           ((gen_context->pptx != master->pptx) ||
-                            (gen_context->pprx != master->pprx))) {
-                               gen_context->pptx = master->pptx;
-                               gen_context->pprx = master->pprx;
-                               mlx4_warn(dev,
-                                         "denying Global Pause change for slave:%d\n",
-                                         slave);
-                       } else {
-                               master->pptx = gen_context->pptx;
-                               master->pprx = gen_context->pprx;
-                       }
+                       if (gen_context->flags & MLX4_FLAG_V_MTU_MASK)
+                               mlx4_en_set_port_mtu(dev, slave, port,
+                                                    gen_context);
+
+                       if (gen_context->flags2 & MLX4_FLAG2_V_USER_MTU_MASK)
+                               mlx4_en_set_port_user_mtu(dev, slave, port,
+                                                         gen_context);
+
+                       if (gen_context->flags &
+                           (MLX4_FLAG_V_PPRX_MASK | MLX4_FLAG_V_PPTX_MASK))
+                               mlx4_en_set_port_global_pause(dev, slave,
+                                                             gen_context);
+
                        break;
                case MLX4_SET_PORT_GID_TABLE:
                        /* change to MULTIPLE entries: number of guest's gids
@@ -1608,6 +1676,30 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 }
 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
 
+int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu)
+{
+       struct mlx4_cmd_mailbox *mailbox;
+       struct mlx4_set_port_general_context *context;
+       u32 in_mod;
+       int err;
+
+       mailbox = mlx4_alloc_cmd_mailbox(dev);
+       if (IS_ERR(mailbox))
+               return PTR_ERR(mailbox);
+       context = mailbox->buf;
+       context->flags2 |= MLX4_FLAG2_V_USER_MTU_MASK;
+       context->user_mtu = cpu_to_be16(user_mtu);
+
+       in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+       err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+                      MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+                      MLX4_CMD_WRAPPED);
+
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_user_mtu);
+
 int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
 {
        struct mlx4_cmd_mailbox *mailbox;
@@ -1619,7 +1711,7 @@ int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        context = mailbox->buf;
-       context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK;
+       context->flags2 |= MLX4_FLAG2_V_IGNORE_FCS_MASK;
        if (ignore_fcs_value)
                context->ignore_fcs |= MLX4_IGNORE_FCS_MASK;
        else
index 1822382212eed5d77fb290598fbe2d0834480aa9..6fe9f76ae656b6b11056154eafcc34e83e844666 100644 (file)
@@ -77,6 +77,7 @@ struct res_common {
        int                     from_state;
        int                     to_state;
        int                     removing;
+       const char              *func_name;
 };
 
 enum {
@@ -236,8 +237,8 @@ static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
        struct rb_node *node = root->rb_node;
 
        while (node) {
-               struct res_common *res = container_of(node, struct res_common,
-                                                     node);
+               struct res_common *res = rb_entry(node, struct res_common,
+                                                 node);
 
                if (res_id < res->res_id)
                        node = node->rb_left;
@@ -255,8 +256,8 @@ static int res_tracker_insert(struct rb_root *root, struct res_common *res)
 
        /* Figure out where to put new node */
        while (*new) {
-               struct res_common *this = container_of(*new, struct res_common,
-                                                      node);
+               struct res_common *this = rb_entry(*new, struct res_common,
+                                                  node);
 
                parent = *new;
                if (res->res_id < this->res_id)
@@ -837,6 +838,36 @@ static int mpt_mask(struct mlx4_dev *dev)
        return dev->caps.num_mpts - 1;
 }
 
+static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
+{
+       switch (t) {
+       case RES_QP:
+               return "QP";
+       case RES_CQ:
+               return "CQ";
+       case RES_SRQ:
+               return "SRQ";
+       case RES_XRCD:
+               return "XRCD";
+       case RES_MPT:
+               return "MPT";
+       case RES_MTT:
+               return "MTT";
+       case RES_MAC:
+               return "MAC";
+       case RES_VLAN:
+               return "VLAN";
+       case RES_COUNTER:
+               return "COUNTER";
+       case RES_FS_RULE:
+               return "FS_RULE";
+       case RES_EQ:
+               return "EQ";
+       default:
+               return "INVALID RESOURCE";
+       }
+}
+
 static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
 {
@@ -846,9 +877,9 @@ static void *find_res(struct mlx4_dev *dev, u64 res_id,
                                  res_id);
 }
 
-static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
-                  enum mlx4_resource type,
-                  void *res)
+static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
+                   enum mlx4_resource type,
+                   void *res, const char *func_name)
 {
        struct res_common *r;
        int err = 0;
@@ -861,6 +892,10 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
        }
 
        if (r->state == RES_ANY_BUSY) {
+               mlx4_warn(dev,
+                         "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
+                         func_name, slave, res_id, mlx4_resource_type_to_str(type),
+                         r->func_name);
                err = -EBUSY;
                goto exit;
        }
@@ -872,6 +907,7 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
 
        r->from_state = r->state;
        r->state = RES_ANY_BUSY;
+       r->func_name = func_name;
 
        if (res)
                *((struct res_common **)res) = r;
@@ -881,6 +917,9 @@ exit:
        return err;
 }
 
+#define get_res(dev, slave, res_id, type, res) \
+       _get_res((dev), (slave), (res_id), (type), (res), __func__)
+
 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
@@ -911,8 +950,10 @@ static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
 
        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
-       if (r)
+       if (r) {
                r->state = r->from_state;
+               r->func_name = "";
+       }
        spin_unlock_irq(mlx4_tlock(dev));
 }
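[Editor's note] This resource-tracker change records which function holds a busy resource so the -EBUSY warning can name both contenders: get_res() becomes a macro that captures __func__ at each call site and hands it to _get_res(), and put_res() clears the owner again. Capturing the caller via a wrapper macro is a common kernel debugging idiom; a minimal sketch:

    /* foo_* is illustrative; records the holder for diagnostics */
    static int _foo_get(struct foo_res *r, const char *func_name)
    {
            if (r->busy) {
                    pr_warn("%s: resource already taken by %s\n",
                            func_name, r->func_name);
                    return -EBUSY;
            }
            r->busy = true;
            r->func_name = func_name;
            return 0;
    }

    #define foo_get(r)  _foo_get((r), __func__)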
 
@@ -1396,7 +1437,7 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
-               return -ENOSYS;
+               return -EOPNOTSUPP;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
index 32d4af9b594d6950b5f91ee64a1445cb81ebe182..336d4738b807f863d14edb594a8b94292eb8cbf2 100644 (file)
@@ -179,6 +179,8 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
                mlx5_core_dbg(dev, "failed adding CP 0x%x to debug file system\n",
                              cq->cqn);
 
+       cq->uar = dev->priv.uar;
+
        return 0;
 
 err_cmd:
index a9dbc28f6b97ab0357240c229ec7fec8c5b01363..a62f4b6a21a50ff4fc85d18dd41dce49f7bf2cd2 100644 (file)
@@ -71,6 +71,16 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
        if (dev_ctx->context) {
                spin_lock_irq(&priv->ctx_lock);
                list_add_tail(&dev_ctx->list, &priv->ctx_list);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+               if (dev_ctx->intf->pfault) {
+                       if (priv->pfault) {
+                               mlx5_core_err(dev, "multiple page fault handlers not supported");
+                       } else {
+                               priv->pfault_ctx = dev_ctx->context;
+                               priv->pfault = dev_ctx->intf->pfault;
+                       }
+               }
+#endif
                spin_unlock_irq(&priv->ctx_lock);
        } else {
                kfree(dev_ctx);
@@ -97,6 +107,15 @@ void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
        if (!dev_ctx)
                return;
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       spin_lock_irq(&priv->ctx_lock);
+       if (priv->pfault == dev_ctx->intf->pfault)
+               priv->pfault = NULL;
+       spin_unlock_irq(&priv->ctx_lock);
+
+       synchronize_srcu(&priv->pfault_srcu);
+#endif
+
        spin_lock_irq(&priv->ctx_lock);
        list_del(&dev_ctx->list);
        spin_unlock_irq(&priv->ctx_lock);
@@ -329,6 +348,20 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
        spin_unlock_irqrestore(&priv->ctx_lock, flags);
 }
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+void mlx5_core_page_fault(struct mlx5_core_dev *dev,
+                         struct mlx5_pagefault *pfault)
+{
+       struct mlx5_priv *priv = &dev->priv;
+       int srcu_idx;
+
+       srcu_idx = srcu_read_lock(&priv->pfault_srcu);
+       if (priv->pfault)
+               priv->pfault(dev, priv->pfault_ctx, pfault);
+       srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
+}
+#endif
+
 void mlx5_dev_list_lock(void)
 {
        mutex_lock(&mlx5_intf_mutex);
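[Editor's note] mlx5 accepts at most one page-fault handler (the mlx5_ib interface) and dispatches to it under SRCU: the pointer is published under ctx_lock in mlx5_add_device(), mlx5_core_page_fault() reads it inside srcu_read_lock() so the handler may sleep, and mlx5_remove_device() clears the pointer and then synchronize_srcu()s so no invocation can outlive the interface context. The sleepable-RCU shape of this, with illustrative names:

    static void foo_dispatch(struct foo_dev *dev, struct foo_event *ev)
    {
            int idx = srcu_read_lock(&dev->srcu);

            if (dev->handler)               /* handler may sleep here */
                    dev->handler(dev, dev->ctx, ev);
            srcu_read_unlock(&dev->srcu, idx);
    }

    static void foo_unregister(struct foo_dev *dev)
    {
            dev->handler = NULL;            /* unpublish */
            synchronize_srcu(&dev->srcu);   /* wait for in-flight readers */
    }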
index d5ecb8f53fd43684f185d590c8dc5553a4f25ab4..95ca03c0d9f54b543e3527fd7b80cebdb8d76d73 100644 (file)
@@ -51,6 +51,9 @@
 
 #define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
 
+#define MLX5E_HW2SW_MTU(hwmtu) ((hwmtu) - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+#define MLX5E_SW2HW_MTU(swmtu) ((swmtu) + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+
 #define MLX5E_MAX_NUM_TC       8
 
 #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
 
 #define MLX5_RX_HEADROOM NET_SKB_PAD
 
-#define MLX5_MPWRQ_LOG_STRIDE_SIZE             6  /* >= 6, HW restriction */
-#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS        8  /* >= 6, HW restriction */
+#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
+       (6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
+#define MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, req) \
+       max_t(u32, MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), req)
+#define MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev)       MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 6)
+#define MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) MLX5_MPWRQ_LOG_STRIDE_SZ(mdev, 8)
+
 #define MLX5_MPWRQ_LOG_WQE_SZ                  18
 #define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
                                    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
 
 #define MLX5E_LOG_INDIR_RQT_SIZE       0x7
 #define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
+#define MLX5E_MIN_NUM_CHANNELS         0x1
 #define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
 #define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
 #define MLX5E_TX_CQ_POLL_BUDGET        128
 #define MLX5E_XDP_IHS_DS_COUNT \
        DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS)
 #define MLX5E_XDP_TX_DS_COUNT \
-       (MLX5E_XDP_IHS_DS_COUNT + \
-        (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
+       ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
 #define MLX5E_XDP_TX_WQEBBS \
        DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS)
 
@@ -259,6 +267,7 @@ struct mlx5e_tstamp {
        struct mlx5_core_dev      *mdev;
        struct ptp_clock          *ptp;
        struct ptp_clock_info      ptp_info;
+       u8                        *pps_pin_caps;
 };
 
 enum {
@@ -369,6 +378,7 @@ struct mlx5e_rq {
 
        unsigned long          state;
        int                    ix;
+       u16                    rx_headroom;
 
        struct mlx5e_rx_am     am; /* Adaptive Moderation */
        struct bpf_prog       *xdp_prog;
@@ -479,7 +489,7 @@ struct mlx5e_sq {
 
        /* control path */
        struct mlx5_wq_ctrl        wq_ctrl;
-       struct mlx5_uar            uar;
+       struct mlx5_sq_bfreg       bfreg;
        struct mlx5e_channel      *channel;
        int                        tc;
        u32                        rate_limit;
@@ -568,8 +578,9 @@ struct mlx5e_vlan_table {
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID];
        struct mlx5_flow_handle *untagged_rule;
-       struct mlx5_flow_handle *any_vlan_rule;
-       bool            filter_disabled;
+       struct mlx5_flow_handle *any_cvlan_rule;
+       struct mlx5_flow_handle *any_svlan_rule;
+       bool                    filter_disabled;
 };
 
 struct mlx5e_l2_table {
@@ -777,9 +788,11 @@ void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
                        struct skb_shared_hwtstamps *hwts);
 void mlx5e_timestamp_init(struct mlx5e_priv *priv);
 void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
+void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
+                            struct ptp_clock_event *event);
 int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
 int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
-void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val);
+void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
                          u16 vid);
@@ -807,7 +820,7 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
 static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
                                      struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
 {
-       u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
+       u16 ofst = sq->bf_offset;
 
        /* ensure wqe is visible to device before updating doorbell record */
        dma_wmb();
@@ -833,7 +846,7 @@ static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
        struct mlx5_core_cq *mcq;
 
        mcq = &cq->mcq;
-       mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
+       mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
 }
 
 static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
@@ -841,12 +854,6 @@ static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
        return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
 }
 
-static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
-{
-       return min_t(int, mdev->priv.eq_table.num_comp_vectors,
-                    MLX5E_MAX_NUM_CHANNELS);
-}
-
 extern const struct ethtool_ops mlx5e_ethtool_ops;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
index 746a92c1364435da17275527f8a2c06f6f48624b..37e66eef6fb5ea62576e0a8b012b04e6ba579d56 100644
@@ -37,6 +37,22 @@ enum {
        MLX5E_CYCLES_SHIFT      = 23
 };
 
+enum {
+       MLX5E_PIN_MODE_IN               = 0x0,
+       MLX5E_PIN_MODE_OUT              = 0x1,
+};
+
+enum {
+       MLX5E_OUT_PATTERN_PULSE         = 0x0,
+       MLX5E_OUT_PATTERN_PERIODIC      = 0x1,
+};
+
+enum {
+       MLX5E_EVENT_MODE_DISABLE        = 0x0,
+       MLX5E_EVENT_MODE_REPETETIVE     = 0x1,
+       MLX5E_EVENT_MODE_ONCE_TILL_ARM  = 0x2,
+};
+
 void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
                        struct skb_shared_hwtstamps *hwts)
 {
@@ -90,11 +106,12 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
                return -ERANGE;
        }
 
+       mutex_lock(&priv->state_lock);
        /* RX HW timestamp */
        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                /* Reset CQE compression to Admin default */
-               mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_def);
+               mlx5e_modify_rx_cqe_compression_locked(priv, priv->params.rx_cqe_compress_def);
                break;
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_SOME:
@@ -112,14 +129,16 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                /* Disable CQE compression */
                netdev_warn(dev, "Disabling cqe compression");
-               mlx5e_modify_rx_cqe_compression(priv, false);
+               mlx5e_modify_rx_cqe_compression_locked(priv, false);
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        default:
+               mutex_unlock(&priv->state_lock);
                return -ERANGE;
        }
 
        memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config));
+       mutex_unlock(&priv->state_lock);
 
        return copy_to_user(ifr->ifr_data, &config,
                            sizeof(config)) ? -EFAULT : 0;
@@ -189,6 +208,18 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
        int neg_adj = 0;
        struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
                                                  ptp_info);
+       struct mlx5e_priv *priv =
+               container_of(tstamp, struct mlx5e_priv, tstamp);
+
+       if (MLX5_CAP_GEN(priv->mdev, pps_modify)) {
+               u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+
+               /* For future use: add a loop to find all 1PPS out pins */
+               MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
+               MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF);
+
+               mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+       }
 
        if (delta < 0) {
                neg_adj = 1;
@@ -208,6 +239,124 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
        return 0;
 }
 
+static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
+                                struct ptp_clock_request *rq,
+                                int on)
+{
+       struct mlx5e_tstamp *tstamp =
+               container_of(ptp, struct mlx5e_tstamp, ptp_info);
+       struct mlx5e_priv *priv =
+               container_of(tstamp, struct mlx5e_priv, tstamp);
+       u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+       u8 pattern = 0;
+       int pin = -1;
+       int err = 0;
+
+       if (!MLX5_CAP_GEN(priv->mdev, pps) ||
+           !MLX5_CAP_GEN(priv->mdev, pps_modify))
+               return -EOPNOTSUPP;
+
+       if (rq->extts.index >= tstamp->ptp_info.n_pins)
+               return -EINVAL;
+
+       if (on) {
+               pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
+               if (pin < 0)
+                       return -EBUSY;
+       }
+
+       if (rq->extts.flags & PTP_FALLING_EDGE)
+               pattern = 1;
+
+       MLX5_SET(mtpps_reg, in, pin, pin);
+       MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN);
+       MLX5_SET(mtpps_reg, in, pattern, pattern);
+       MLX5_SET(mtpps_reg, in, enable, on);
+
+       err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+       if (err)
+               return err;
+
+       return mlx5_set_mtppse(priv->mdev, pin, 0,
+                              MLX5E_EVENT_MODE_REPETETIVE & on);
+}
+
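The final mlx5_set_mtppse() call folds the enable flag into the event mode: since on is 0 or 1, MLX5E_EVENT_MODE_REPETETIVE & on evaluates to the repetitive mode (0x1) when arming the pin and to MLX5E_EVENT_MODE_DISABLE (0x0) when tearing it down. A quick check:

#include <stdio.h>

int main(void)
{
        enum { EVENT_MODE_DISABLE = 0x0, EVENT_MODE_REPETETIVE = 0x1 };
        int on;

        for (on = 0; on <= 1; on++)
                printf("on=%d -> event mode 0x%x\n",
                       on, EVENT_MODE_REPETETIVE & on);
        return 0;
}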
+static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
+                                 struct ptp_clock_request *rq,
+                                 int on)
+{
+       struct mlx5e_tstamp *tstamp =
+               container_of(ptp, struct mlx5e_tstamp, ptp_info);
+       struct mlx5e_priv *priv =
+               container_of(tstamp, struct mlx5e_priv, tstamp);
+       u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+       u64 nsec_now, nsec_delta, time_stamp;
+       u64 cycles_now, cycles_delta;
+       struct timespec64 ts;
+       unsigned long flags;
+       int pin = -1;
+       s64 ns;
+
+       if (!MLX5_CAP_GEN(priv->mdev, pps_modify))
+               return -EOPNOTSUPP;
+
+       if (rq->perout.index >= tstamp->ptp_info.n_pins)
+               return -EINVAL;
+
+       if (on) {
+               pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT,
+                                  rq->perout.index);
+               if (pin < 0)
+                       return -EBUSY;
+       }
+
+       ts.tv_sec = rq->perout.period.sec;
+       ts.tv_nsec = rq->perout.period.nsec;
+       ns = timespec64_to_ns(&ts);
+       if (on)
+               if ((ns >> 1) != 500000000LL)
+                       return -EINVAL;
+       ts.tv_sec = rq->perout.start.sec;
+       ts.tv_nsec = rq->perout.start.nsec;
+       ns = timespec64_to_ns(&ts);
+       cycles_now = mlx5_read_internal_timer(tstamp->mdev);
+       write_lock_irqsave(&tstamp->lock, flags);
+       nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
+       nsec_delta = ns - nsec_now;
+       cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
+                                tstamp->cycles.mult);
+       write_unlock_irqrestore(&tstamp->lock, flags);
+       time_stamp = cycles_now + cycles_delta;
+       MLX5_SET(mtpps_reg, in, pin, pin);
+       MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
+       MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC);
+       MLX5_SET(mtpps_reg, in, enable, on);
+       MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
+
+       return mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+}
+
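Two details of the periodic-output path are easy to miss. The (ns >> 1) != 500000000LL test accepts only a one-second period (to within the nanosecond lost by the shift), so the pin effectively supports 1 Hz output only. And the requested start time is converted from wall-clock nanoseconds to free-running device cycles by inverting the timecounter relation ns = (cycles * mult) >> shift. A standalone sketch of that conversion; the mult/shift values are made up for illustration, not read from mlx5 hardware:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t mult = 1 << 23, shift = 23;    /* pretend 1 cycle == 1 ns */
        uint64_t nsec_now = 5000000000ULL;      /* current time, ns */
        uint64_t cycles_now = 5000000000ULL;    /* current counter value */
        uint64_t ns = 6000000000ULL;            /* requested start, ns */

        uint64_t nsec_delta = ns - nsec_now;
        /* inverse direction, as in mlx5e_perout_configure() */
        uint64_t cycles_delta = (nsec_delta << shift) / mult;
        uint64_t time_stamp = cycles_now + cycles_delta;

        printf("program MTPPS time_stamp = %llu cycles\n",
               (unsigned long long)time_stamp);
        return 0;
}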
+static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
+                           struct ptp_clock_request *rq,
+                           int on)
+{
+       switch (rq->type) {
+       case PTP_CLK_REQ_EXTTS:
+               return mlx5e_extts_configure(ptp, rq, on);
+       case PTP_CLK_REQ_PEROUT:
+               return mlx5e_perout_configure(ptp, rq, on);
+       default:
+               return -EOPNOTSUPP;
+       }
+       return 0;
+}
+
+static int mlx5e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+                           enum ptp_pin_function func, unsigned int chan)
+{
+       return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
+}
+
 static const struct ptp_clock_info mlx5e_ptp_clock_info = {
        .owner          = THIS_MODULE,
        .max_adj        = 100000000,
@@ -221,6 +370,7 @@ static const struct ptp_clock_info mlx5e_ptp_clock_info = {
        .gettime64      = mlx5e_ptp_gettime,
        .settime64      = mlx5e_ptp_settime,
        .enable         = NULL,
+       .verify         = NULL,
 };
 
 static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
@@ -229,6 +379,62 @@ static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
        tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
 }
 
+static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
+{
+       int i;
+
+       tstamp->ptp_info.pin_config =
+               kzalloc(sizeof(*tstamp->ptp_info.pin_config) *
+                              tstamp->ptp_info.n_pins, GFP_KERNEL);
+       if (!tstamp->ptp_info.pin_config)
+               return -ENOMEM;
+       tstamp->ptp_info.enable = mlx5e_ptp_enable;
+       tstamp->ptp_info.verify = mlx5e_ptp_verify;
+
+       for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
+               snprintf(tstamp->ptp_info.pin_config[i].name,
+                        sizeof(tstamp->ptp_info.pin_config[i].name),
+                        "mlx5_pps%d", i);
+               tstamp->ptp_info.pin_config[i].index = i;
+               tstamp->ptp_info.pin_config[i].func = PTP_PF_NONE;
+               tstamp->ptp_info.pin_config[i].chan = i;
+       }
+
+       return 0;
+}
+
+static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
+                              struct mlx5e_tstamp *tstamp)
+{
+       u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+
+       mlx5_query_mtpps(priv->mdev, out, sizeof(out));
+
+       tstamp->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
+                                          cap_number_of_pps_pins);
+       tstamp->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
+                                            cap_max_num_of_pps_in_pins);
+       tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
+                                             cap_max_num_of_pps_out_pins);
+
+       tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
+       tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
+       tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
+       tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
+       tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
+       tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
+       tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
+       tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
+}
+
+void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
+                            struct ptp_clock_event *event)
+{
+       struct mlx5e_tstamp *tstamp = &priv->tstamp;
+
+       ptp_clock_event(tstamp->ptp, event);
+}
+
 void mlx5e_timestamp_init(struct mlx5e_priv *priv)
 {
        struct mlx5e_tstamp *tstamp = &priv->tstamp;
@@ -272,6 +478,18 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
        tstamp->ptp_info = mlx5e_ptp_clock_info;
        snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
 
+       /* Initialize 1PPS data structures */
+#define MAX_PIN_NUM    8
+       tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL);
+       if (tstamp->pps_pin_caps) {
+               if (MLX5_CAP_GEN(priv->mdev, pps))
+                       mlx5e_get_pps_caps(priv, tstamp);
+               if (tstamp->ptp_info.n_pins)
+                       mlx5e_init_pin_config(tstamp);
+       } else {
+               mlx5_core_warn(priv->mdev, "1PPS initialization failed\n");
+       }
+
        tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
                                         &priv->mdev->pdev->dev);
        if (IS_ERR(tstamp->ptp)) {
@@ -293,5 +511,8 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
                priv->tstamp.ptp = NULL;
        }
 
+       kfree(tstamp->pps_pin_caps);
+       kfree(tstamp->ptp_info.pin_config);
+
        cancel_delayed_work_sync(&tstamp->overflow_work);
 }
index f175518ff07aa9163e6e84022a4e9ae4c4c9f9da..bd898d8deda0ce0c4d6dca7f1ac26722eacf96c4 100644
@@ -89,16 +89,10 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
        struct mlx5e_resources *res = &mdev->mlx5e_res;
        int err;
 
-       err = mlx5_alloc_map_uar(mdev, &res->cq_uar, false);
-       if (err) {
-               mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
-               return err;
-       }
-
        err = mlx5_core_alloc_pd(mdev, &res->pdn);
        if (err) {
                mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
-               goto err_unmap_free_uar;
+               return err;
        }
 
        err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn);
@@ -121,9 +115,6 @@ err_dealloc_transport_domain:
        mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
 err_dealloc_pd:
        mlx5_core_dealloc_pd(mdev, res->pdn);
-err_unmap_free_uar:
-       mlx5_unmap_free_uar(mdev, &res->cq_uar);
-
        return err;
 }
 
@@ -134,7 +125,6 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
        mlx5_core_destroy_mkey(mdev, &res->mkey);
        mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
        mlx5_core_dealloc_pd(mdev, res->pdn);
-       mlx5_unmap_free_uar(mdev, &res->cq_uar);
 }
 
 int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
index bb67863aa361168a8566349ef356d9a991d411be..cc80522b585429c56c0836e6d1a035573cc146e7 100644
@@ -170,7 +170,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
        case ETH_SS_STATS:
                return NUM_SW_COUNTERS +
                       MLX5E_NUM_Q_CNTRS(priv) +
-                      NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
+                      NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) +
+                      NUM_PCIE_COUNTERS(priv) +
                       MLX5E_NUM_RQ_STATS(priv) +
                       MLX5E_NUM_SQ_STATS(priv) +
                       MLX5E_NUM_PFC_COUNTERS(priv) +
@@ -218,6 +219,14 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       pport_2819_stats_desc[i].format);
 
+       for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
+               strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                      pport_phy_statistical_stats_desc[i].format);
+
+       for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
+               strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                      pcie_perf_stats_desc[i].format);
+
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
                        sprintf(data + (idx++) * ETH_GSTRING_LEN,
@@ -330,6 +339,14 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
                data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
                                                  pport_2819_stats_desc, i);
 
+       for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
+               data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
+                                                 pport_phy_statistical_stats_desc, i);
+
+       for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
+               data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
+                                                 pcie_perf_stats_desc, i);
+
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
                        data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
@@ -535,7 +552,7 @@ static void mlx5e_get_channels(struct net_device *dev,
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
 
-       ch->max_combined   = mlx5e_get_max_num_channels(priv->mdev);
+       ch->max_combined   = priv->profile->max_nch(priv->mdev);
        ch->combined_count = priv->params.num_channels;
 }
 
@@ -1459,8 +1476,6 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
-       int err = 0;
-       bool reset;
 
        if (!MLX5_CAP_GEN(mdev, cqe_compression))
                return -EOPNOTSUPP;
@@ -1470,17 +1485,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
                return -EINVAL;
        }
 
-       reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
-
-       if (reset)
-               mlx5e_close_locked(netdev);
-
-       MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, enable);
+       mlx5e_modify_rx_cqe_compression_locked(priv, enable);
        priv->params.rx_cqe_compress_def = enable;
 
-       if (reset)
-               err = mlx5e_open_locked(netdev);
-       return err;
+       return 0;
 }
 
 static int mlx5e_handle_pflag(struct net_device *netdev,
index a0e5a69402b30a349b196eaa72ce1a413b5479b2..f2762e45c8ae2aadd5366ea467a5ce3b4edb3d7e 100644
@@ -150,7 +150,8 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
 
 enum mlx5e_vlan_rule_type {
        MLX5E_VLAN_RULE_TYPE_UNTAGGED,
-       MLX5E_VLAN_RULE_TYPE_ANY_VID,
+       MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
+       MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
        MLX5E_VLAN_RULE_TYPE_MATCH_VID,
 };
 
@@ -172,19 +173,31 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
        dest.ft = priv->fs.l2.ft.t;
 
        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
+
 
        switch (rule_type) {
        case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
                rule_p = &priv->fs.vlan.untagged_rule;
+               MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                                outer_headers.cvlan_tag);
                break;
-       case MLX5E_VLAN_RULE_TYPE_ANY_VID:
-               rule_p = &priv->fs.vlan.any_vlan_rule;
-               MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
+       case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
+               rule_p = &priv->fs.vlan.any_cvlan_rule;
+               MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                                outer_headers.cvlan_tag);
+               MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
+               break;
+       case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
+               rule_p = &priv->fs.vlan.any_svlan_rule;
+               MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                                outer_headers.svlan_tag);
+               MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
                break;
        default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
                rule_p = &priv->fs.vlan.active_vlans_rule[vid];
-               MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
+               MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                                outer_headers.cvlan_tag);
+               MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.first_vid);
                MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
@@ -235,10 +248,16 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
                        priv->fs.vlan.untagged_rule = NULL;
                }
                break;
-       case MLX5E_VLAN_RULE_TYPE_ANY_VID:
-               if (priv->fs.vlan.any_vlan_rule) {
-                       mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule);
-                       priv->fs.vlan.any_vlan_rule = NULL;
+       case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
+               if (priv->fs.vlan.any_cvlan_rule) {
+                       mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
+                       priv->fs.vlan.any_cvlan_rule = NULL;
+               }
+               break;
+       case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
+               if (priv->fs.vlan.any_svlan_rule) {
+                       mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
+                       priv->fs.vlan.any_svlan_rule = NULL;
                }
                break;
        case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
@@ -252,6 +271,23 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
        }
 }
 
+static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
+{
+       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
+}
+
+static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
+{
+       int err;
+
+       err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+       if (err)
+               return err;
+
+       return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
+}
+
 void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
 {
        if (!priv->fs.vlan.filter_disabled)
@@ -260,7 +296,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
        priv->fs.vlan.filter_disabled = false;
        if (priv->netdev->flags & IFF_PROMISC)
                return;
-       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+       mlx5e_del_any_vid_rules(priv);
 }
 
 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
@@ -271,7 +307,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
        priv->fs.vlan.filter_disabled = true;
        if (priv->netdev->flags & IFF_PROMISC)
                return;
-       mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+       mlx5e_add_any_vid_rules(priv);
 }
 
 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -308,7 +344,7 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
 
        if (priv->fs.vlan.filter_disabled &&
            !(priv->netdev->flags & IFF_PROMISC))
-               mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+               mlx5e_add_any_vid_rules(priv);
 }
 
 static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
@@ -323,7 +359,7 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
 
        if (priv->fs.vlan.filter_disabled &&
            !(priv->netdev->flags & IFF_PROMISC))
-               mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+               mlx5e_del_any_vid_rules(priv);
 }
 
 #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
@@ -503,8 +539,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
        if (enable_promisc) {
                mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
                if (!priv->fs.vlan.filter_disabled)
-                       mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
-                                           0);
+                       mlx5e_add_any_vid_rules(priv);
        }
        if (enable_allmulti)
                mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
@@ -519,8 +554,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
                mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
        if (disable_promisc) {
                if (!priv->fs.vlan.filter_disabled)
-                       mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
-                                           0);
+                       mlx5e_del_any_vid_rules(priv);
                mlx5e_del_l2_flow_rule(priv, &ea->promisc);
        }
 
@@ -976,11 +1010,13 @@ err_destroy_flow_table:
        return err;
 }
 
-#define MLX5E_NUM_VLAN_GROUPS  2
+#define MLX5E_NUM_VLAN_GROUPS  3
 #define MLX5E_VLAN_GROUP0_SIZE BIT(12)
 #define MLX5E_VLAN_GROUP1_SIZE BIT(1)
+#define MLX5E_VLAN_GROUP2_SIZE BIT(0)
 #define MLX5E_VLAN_TABLE_SIZE  (MLX5E_VLAN_GROUP0_SIZE +\
-                                MLX5E_VLAN_GROUP1_SIZE)
+                                MLX5E_VLAN_GROUP1_SIZE +\
+                                MLX5E_VLAN_GROUP2_SIZE)
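The steering table is partitioned by match criteria: group 0 holds up to 4096 exact c-tag VID rules, group 1 the two c-tag-presence rules (the untagged and any-cvlan rules share the cvlan_tag criterion), and the new group 2 a single any-svlan rule, so the table grows from 4098 to 4099 entries. A trivial check of the arithmetic:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
        unsigned int g0 = BIT(12);      /* per-VID c-tag rules   */
        unsigned int g1 = BIT(1);       /* untagged + any c-tag  */
        unsigned int g2 = BIT(0);       /* any s-tag (new group) */

        printf("vlan table size: %u entries\n", g0 + g1 + g2); /* 4099 */
        return 0;
}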
 
 static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
                                            int inlen)
@@ -991,7 +1027,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
 
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP0_SIZE;
@@ -1003,7 +1039,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
 
        memset(in, 0, inlen);
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
        MLX5_SET_CFG(in, start_flow_index, ix);
        ix += MLX5E_VLAN_GROUP1_SIZE;
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1012,6 +1048,17 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
                goto err_destroy_groups;
        ft->num_groups++;
 
+       memset(in, 0, inlen);
+       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
+       MLX5_SET_CFG(in, start_flow_index, ix);
+       ix += MLX5E_VLAN_GROUP2_SIZE;
+       MLX5_SET_CFG(in, end_flow_index, ix - 1);
+       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+       if (IS_ERR(ft->g[ft->num_groups]))
+               goto err_destroy_groups;
+       ft->num_groups++;
+
        return 0;
 
 err_destroy_groups:
index f33f72d0237c1bafc702f4066dab31ab22963a47..d55fff0ba388f746809ac601fc3863e94309fc12 100644
@@ -237,9 +237,9 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
        if ((fs->flow_type & FLOW_EXT) &&
            (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
-                        vlan_tag, 1);
+                        cvlan_tag, 1);
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
-                        vlan_tag, 1);
+                        cvlan_tag, 1);
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
                         first_vid, 0xfff);
                MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
index f14ca3385fdd683b12f434e289cc8e264040c1ed..3cce6281e075d8a8dc13ac18b563bd2c8b981c0e 100644
@@ -31,6 +31,7 @@
  */
 
 #include <net/tc_act/tc_gact.h>
+#include <linux/crash_dump.h>
 #include <net/pkt_cls.h>
 #include <linux/mlx5/fs.h>
 #include <net/vxlan.h>
@@ -83,16 +84,20 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
        priv->params.rq_wq_type = rq_type;
        switch (priv->params.rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+               priv->params.log_rq_size = is_kdump_kernel() ?
+                       MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
+                       MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
                priv->params.mpwqe_log_stride_sz =
                        MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
-                       MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
-                       MLX5_MPWRQ_LOG_STRIDE_SIZE;
+                       MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(priv->mdev) :
+                       MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(priv->mdev);
                priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
                        priv->params.mpwqe_log_stride_sz;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
-               priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+               priv->params.log_rq_size = is_kdump_kernel() ?
+                       MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
+                       MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
        }
        priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
                                               BIT(priv->params.log_rq_size));
@@ -268,6 +273,12 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 
+       if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
+               out = pstats->phy_statistical_counters;
+               MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+               mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+       }
+
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_prio_counters[prio];
@@ -291,11 +302,34 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
                                      &qcnt->rx_out_of_buffer);
 }
 
+static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
+{
+       struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
+       void *out;
+       u32 *in;
+
+       if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
+               return;
+
+       in = mlx5_vzalloc(sz);
+       if (!in)
+               return;
+
+       out = pcie_stats->pcie_perf_counters;
+       MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
+       mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
+
+       kvfree(in);
+}
+
 void mlx5e_update_stats(struct mlx5e_priv *priv)
 {
-       mlx5e_update_q_counter(priv);
-       mlx5e_update_vport_counters(priv);
+       mlx5e_update_pcie_counters(priv);
        mlx5e_update_pport_counters(priv);
+       mlx5e_update_vport_counters(priv);
+       mlx5e_update_q_counter(priv);
        mlx5e_update_sw_counters(priv);
 }
 
@@ -317,6 +351,8 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                              enum mlx5_dev_event event, unsigned long param)
 {
        struct mlx5e_priv *priv = vpriv;
+       struct ptp_clock_event ptp_event;
+       struct mlx5_eqe *eqe = NULL;
 
        if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
                return;
@@ -326,7 +362,15 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
        case MLX5_DEV_EVENT_PORT_DOWN:
                queue_work(priv->wq, &priv->update_carrier_work);
                break;
-
+       case MLX5_DEV_EVENT_PPS:
+               eqe = (struct mlx5_eqe *)param;
+               ptp_event.type = PTP_CLOCK_EXTTS;
+               ptp_event.index = eqe->data.pps.pin;
+               ptp_event.timestamp =
+                       timecounter_cyc2time(&priv->tstamp.clock,
+                                            be64_to_cpu(eqe->data.pps.time_stamp));
+               mlx5e_pps_event_handler(vpriv, &ptp_event);
+               break;
        default:
                break;
        }
@@ -343,9 +387,6 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
        synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
 }
 
-#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-
 static inline int mlx5e_get_wqe_mtt_sz(void)
 {
        /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
@@ -372,7 +413,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
        cseg->imm       = rq->mkey_be;
 
        ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
-       ucseg->klm_octowords =
+       ucseg->xlt_octowords =
                cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
        ucseg->bsf_octowords =
                cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
@@ -534,9 +575,13 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                goto err_rq_wq_destroy;
        }
 
-       rq->buff.map_dir = DMA_FROM_DEVICE;
-       if (rq->xdp_prog)
+       if (rq->xdp_prog) {
                rq->buff.map_dir = DMA_BIDIRECTIONAL;
+               rq->rx_headroom = XDP_PACKET_HEADROOM;
+       } else {
+               rq->buff.map_dir = DMA_FROM_DEVICE;
+               rq->rx_headroom = MLX5_RX_HEADROOM;
+       }
 
        switch (priv->params.rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -586,7 +631,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                byte_count = rq->buff.wqe_sz;
 
                /* calc the required page order */
-               frag_sz = MLX5_RX_HEADROOM +
+               frag_sz = rq->rx_headroom +
                          byte_count /* packet data */ +
                          SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                frag_sz = SKB_DATA_ALIGN(frag_sz);
@@ -967,10 +1012,11 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
        sq->channel   = c;
        sq->tc        = tc;
 
-       err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
+       err = mlx5_alloc_bfreg(mdev, &sq->bfreg, MLX5_CAP_GEN(mdev, bf), false);
        if (err)
                return err;
 
+       sq->uar_map = sq->bfreg.map;
        param->wq.db_numa_node = cpu_to_node(c->cpu);
 
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
@@ -979,17 +1025,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
                goto err_unmap_free_uar;
 
        sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
-       if (sq->uar.bf_map) {
+       if (sq->bfreg.wc)
                set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
-               sq->uar_map = sq->uar.bf_map;
-       } else {
-               sq->uar_map = sq->uar.map;
-       }
+
        sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
        sq->max_inline  = param->max_inline;
-       sq->min_inline_mode =
-               MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT ?
-               param->min_inline_mode : 0;
+       sq->min_inline_mode = param->min_inline_mode;
 
        err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
        if (err)
@@ -1012,7 +1053,7 @@ err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);
 
 err_unmap_free_uar:
-       mlx5_unmap_free_uar(mdev, &sq->uar);
+       mlx5_free_bfreg(mdev, &sq->bfreg);
 
        return err;
 }
@@ -1024,7 +1065,7 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
 
        mlx5e_free_sq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
-       mlx5_unmap_free_uar(priv->mdev, &sq->uar);
+       mlx5_free_bfreg(priv->mdev, &sq->bfreg);
 }
 
 static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
@@ -1053,12 +1094,15 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
        MLX5_SET(sqc,  sqc, tis_num_0, param->type == MLX5E_SQ_ICO ?
                                       0 : priv->tisn[sq->tc]);
        MLX5_SET(sqc,  sqc, cqn,                sq->cq.mcq.cqn);
-       MLX5_SET(sqc,  sqc, min_wqe_inline_mode, sq->min_inline_mode);
+
+       if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+               MLX5_SET(sqc,  sqc, min_wqe_inline_mode, sq->min_inline_mode);
+
        MLX5_SET(sqc,  sqc, state,              MLX5_SQC_STATE_RST);
        MLX5_SET(sqc,  sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
 
        MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
-       MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
+       MLX5_SET(wq,   wq, uar_page,      sq->bfreg.index);
        MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
                                          MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);
@@ -1216,7 +1260,6 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
        mcq->irqn       = irqn;
-       mcq->uar        = &mdev->mlx5e_res.cq_uar;
 
        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
@@ -1265,7 +1308,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 
        MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
        MLX5_SET(cqc,   cqc, c_eqn,         eqn);
-       MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
+       MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
        MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
                                            MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
@@ -1472,6 +1515,14 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
        return err;
 }
 
+static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
+{
+       return is_kdump_kernel() ?
+               MLX5E_MIN_NUM_CHANNELS :
+               min_t(int, mdev->priv.eq_table.num_comp_vectors,
+                     MLX5E_MAX_NUM_CHANNELS);
+}
+
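mlx5e_get_max_num_channels() moves here from the header and gains a kdump special case: a crash kernel gets the single-channel minimum to keep its memory footprint small, while a normal boot gets one channel per completion vector, capped by the RQT-derived maximum (MLX5E_INDIR_RQT_SIZE >> 1 = 64 with the values defined earlier in this diff). An illustrative stand-in:

#include <stdio.h>

#define MIN_NUM_CHANNELS        1       /* MLX5E_MIN_NUM_CHANNELS */
#define MAX_NUM_CHANNELS        64      /* MLX5E_INDIR_RQT_SIZE >> 1 */

static int max_num_channels(int num_comp_vectors, int kdump)
{
        if (kdump)
                return MIN_NUM_CHANNELS;
        return num_comp_vectors < MAX_NUM_CHANNELS ?
               num_comp_vectors : MAX_NUM_CHANNELS;
}

int main(void)
{
        printf("%d %d %d\n",
               max_num_channels(8, 0),          /* 8  */
               max_num_channels(200, 0),        /* 64 */
               max_num_channels(8, 1));         /* 1  */
        return 0;
}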
 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                              struct mlx5e_channel_param *cparam,
                              struct mlx5e_channel **cp)
@@ -1677,7 +1728,7 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
 {
        void *cqc = param->cqc;
 
-       MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index);
+       MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
 }
 
 static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
@@ -1756,8 +1807,7 @@ static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
        MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
 
        param->max_inline = priv->params.tx_max_inline;
-       /* FOR XDP SQs will support only L2 inline mode */
-       param->min_inline_mode = MLX5_INLINE_MODE_NONE;
+       param->min_inline_mode = priv->params.tx_min_inline_mode;
        param->type = MLX5E_SQ_XDP;
 }
 
@@ -2393,7 +2443,6 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
        mcq->irqn       = irqn;
-       mcq->uar        = &mdev->mlx5e_res.cq_uar;
 
        cq->priv = priv;
 
@@ -2686,7 +2735,7 @@ mqprio:
        return mlx5e_setup_tc(dev, tc->tc);
 }
 
-static struct rtnl_link_stats64 *
+static void
 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
@@ -2729,7 +2778,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
        stats->multicast =
                VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
 
-       return stats;
 }
 
 static void mlx5e_set_rx_mode(struct net_device *dev)
@@ -2987,11 +3035,8 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
 
-       if (min_tx_rate)
-               return -EOPNOTSUPP;
-
        return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
-                                          max_tx_rate);
+                                          max_tx_rate, min_tx_rate);
 }
 
 static int mlx5_vport_link2ifla(u8 esw_link)
@@ -3159,11 +3204,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
        bool reset, was_opened;
        int i;
 
-       if (prog && prog->xdp_adjust_head) {
-               netdev_err(netdev, "Does not support bpf_xdp_adjust_head()\n");
-               return -EOPNOTSUPP;
-       }
-
        mutex_lock(&priv->state_lock);
 
        if ((netdev->features & NETIF_F_LRO) && prog) {
@@ -3432,22 +3472,6 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
                        MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
 }
 
-static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
-                                  u8 *min_inline_mode)
-{
-       switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
-       case MLX5_CAP_INLINE_MODE_L2:
-               *min_inline_mode = MLX5_INLINE_MODE_L2;
-               break;
-       case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
-               mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
-               break;
-       case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
-               *min_inline_mode = MLX5_INLINE_MODE_NONE;
-               break;
-       }
-}
-
 u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
 {
        int i;
@@ -3481,7 +3505,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
        priv->params.lro_timeout =
                mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
 
-       priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+       priv->params.log_sq_size = is_kdump_kernel() ?
+               MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
+               MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
 
        /* set CQE compression */
        priv->params.rx_cqe_compress_def = false;
@@ -3507,7 +3533,11 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
        priv->params.tx_cq_moderation.pkts =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
        priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
-       mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
+       mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
+       if (priv->params.tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
+           !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
+               priv->params.tx_min_inline_mode = MLX5_INLINE_MODE_L2;
+
        priv->params.num_tc                = 1;
        priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
 
index 850378893b259c860e15f1a324fe88037b0335ad..2c864574a9d5faeaa3b329f3bc0ab0d4e0cc7b55 100644
@@ -374,13 +374,12 @@ int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
        return -EINVAL;
 }
 
-static struct rtnl_link_stats64 *
+static void
 mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
 
        memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
-       return stats;
 }
 
 static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
index 06d5e6fecb0a5ecf255c9d6319ffb285e1a660df..b039b87742a64422454ea105a8073b6451977253 100644
@@ -33,6 +33,7 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
+#include <linux/bpf_trace.h>
 #include <net/busy_poll.h>
 #include "en.h"
 #include "en_tc.h"
@@ -155,17 +156,15 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
        return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
 }
 
-void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
+void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val)
 {
        bool was_opened;
 
        if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
                return;
 
-       mutex_lock(&priv->state_lock);
-
        if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val)
-               goto unlock;
+               return;
 
        was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
        if (was_opened)
@@ -176,8 +175,6 @@ void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
        if (was_opened)
                mlx5e_open_locked(priv->netdev);
 
-unlock:
-       mutex_unlock(&priv->state_lock);
 }
 
 #define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
@@ -267,7 +264,7 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
        if (unlikely(mlx5e_page_alloc_mapped(rq, di)))
                return -ENOMEM;
 
-       wqe->data.addr = cpu_to_be64(di->addr + MLX5_RX_HEADROOM);
+       wqe->data.addr = cpu_to_be64(di->addr + rq->rx_headroom);
        return 0;
 }
 
@@ -647,10 +644,9 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
        mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
 }
 
-static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
+static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
                                        struct mlx5e_dma_info *di,
-                                       unsigned int data_offset,
-                                       int len)
+                                       const struct xdp_buff *xdp)
 {
        struct mlx5e_sq          *sq   = &rq->channel->xdp_sq;
        struct mlx5_wq_cyc       *wq   = &sq->wq;
@@ -661,10 +657,18 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
        struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
        struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
        struct mlx5_wqe_data_seg *dseg;
+       u8 ds_cnt = MLX5E_XDP_TX_DS_COUNT;
+
+       ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
+       dma_addr_t dma_addr  = di->addr + data_offset;
+       unsigned int dma_len = xdp->data_end - xdp->data;
 
-       dma_addr_t dma_addr  = di->addr + data_offset + MLX5E_XDP_MIN_INLINE;
-       unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE;
-       void *data           = page_address(di->page) + data_offset;
+       if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||
+                    MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) {
+               rq->stats.xdp_drop++;
+               mlx5e_page_release(rq, di, true);
+               return false;
+       }
 
        if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
                if (sq->db.xdp.doorbell) {
@@ -674,7 +678,7 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
                }
                rq->stats.xdp_tx_full++;
                mlx5e_page_release(rq, di, true);
-               return;
+               return false;
        }
 
        dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
@@ -682,11 +686,17 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 
        memset(wqe, 0, sizeof(*wqe));
 
-       /* copy the inline part */
-       memcpy(eseg->inline_hdr_start, data, MLX5E_XDP_MIN_INLINE);
-       eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+       dseg = (struct mlx5_wqe_data_seg *)eseg + 1;
+       /* copy the inline part if required */
+       if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
+               memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE);
+               eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+               dma_len  -= MLX5E_XDP_MIN_INLINE;
+               dma_addr += MLX5E_XDP_MIN_INLINE;
 
-       dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1);
+               ds_cnt   += MLX5E_XDP_IHS_DS_COUNT;
+               dseg++;
+       }
 
        /* write the dma part */
        dseg->addr       = cpu_to_be64(dma_addr);
@@ -694,7 +704,7 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
        dseg->lkey       = sq->mkey_be;
 
        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
-       cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | MLX5E_XDP_TX_DS_COUNT);
+       cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
 
        sq->db.xdp.di[pi] = *di;
        wi->opcode     = MLX5_OPCODE_SEND;
@@ -703,32 +713,39 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
 
        sq->db.xdp.doorbell = true;
        rq->stats.xdp_tx++;
+       return true;
 }
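mlx5e_xmit_xdp_frame() now returns a success flag, validates the frame length against both the minimum inline size and the MTU, and builds the inline header only when the SQ's minimum inline mode requires it; in that case the copied bytes are skipped in the DMA gather entry and the descriptor-segment count grows. A hedged sketch of that bookkeeping; the segment counts and the 18-byte inline length are illustrative stand-ins for values derived from the mlx5 WQE layout, not authoritative:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define XDP_MIN_INLINE  18      /* assumed MLX5E_XDP_MIN_INLINE */

int main(void)
{
        bool inline_hdr = true; /* sq->min_inline_mode != MLX5_INLINE_MODE_NONE */
        unsigned int ds_cnt = 3;        /* assumed: ctrl + eth + 1 gather seg */
        uint64_t dma_addr = 0x1000;     /* di->addr + data_offset */
        unsigned int dma_len = 128;     /* xdp->data_end - xdp->data */

        if (inline_hdr) {
                /* the first bytes travel inside the WQE itself, so the DMA
                 * pointer skips them and inline segments are counted */
                dma_addr += XDP_MIN_INLINE;
                dma_len  -= XDP_MIN_INLINE;
                ds_cnt   += 1;          /* assumed MLX5E_XDP_IHS_DS_COUNT */
        }
        printf("ds_cnt=%u dma_addr=0x%llx dma_len=%u\n", ds_cnt,
               (unsigned long long)dma_addr, dma_len);
        return 0;
}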
 
 /* returns true if packet was consumed by xdp */
-static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
-                                   const struct bpf_prog *prog,
-                                   struct mlx5e_dma_info *di,
-                                   void *data, u16 len)
+static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
+                                  struct mlx5e_dma_info *di,
+                                  void *va, u16 *rx_headroom, u32 *len)
 {
+       const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
        struct xdp_buff xdp;
        u32 act;
 
        if (!prog)
                return false;
 
-       xdp.data = data;
-       xdp.data_end = xdp.data + len;
+       xdp.data = va + *rx_headroom;
+       xdp.data_end = xdp.data + *len;
+       xdp.data_hard_start = va;
+
        act = bpf_prog_run_xdp(prog, &xdp);
        switch (act) {
        case XDP_PASS:
+               *rx_headroom = xdp.data - xdp.data_hard_start;
+               *len = xdp.data_end - xdp.data;
                return false;
        case XDP_TX:
-               mlx5e_xmit_xdp_frame(rq, di, MLX5_RX_HEADROOM, len);
+               if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
+                       trace_xdp_exception(rq->netdev, prog, act);
                return true;
        default:
                bpf_warn_invalid_xdp_action(act);
        case XDP_ABORTED:
+               trace_xdp_exception(rq->netdev, prog, act);
        case XDP_DROP:
                rq->stats.xdp_drop++;
                mlx5e_page_release(rq, di, true);
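The reworked mlx5e_xdp_handle() fills in data_hard_start, which is what lets XDP programs move xdp->data with bpf_xdp_adjust_head(); the earlier hunk that drops the xdp_adjust_head rejection from mlx5e_xdp_set() depends on this. On XDP_PASS the driver re-derives the headroom and length from the possibly moved pointers. A standalone illustration; struct xdp_buf below is a local stand-in for the kernel's struct xdp_buff:

#include <stdio.h>
#include <stdint.h>

struct xdp_buf {
        void *data;
        void *data_end;
        void *data_hard_start;
};

int main(void)
{
        uint8_t page[512];
        unsigned int rx_headroom = 64;  /* XDP_PACKET_HEADROOM is 256 */
        unsigned int len = 100;
        struct xdp_buf xdp;

        xdp.data_hard_start = page;
        xdp.data = page + rx_headroom;
        xdp.data_end = (uint8_t *)xdp.data + len;

        /* pretend the program pulled 14 bytes of headroom to push a header */
        xdp.data = (uint8_t *)xdp.data - 14;

        /* XDP_PASS path in mlx5e_xdp_handle() */
        rx_headroom = (uint8_t *)xdp.data - (uint8_t *)xdp.data_hard_start;
        len = (uint8_t *)xdp.data_end - (uint8_t *)xdp.data;
        printf("new headroom=%u new len=%u\n", rx_headroom, len); /* 50, 114 */
        return 0;
}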
@@ -743,15 +760,16 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
        struct mlx5e_dma_info *di;
        struct sk_buff *skb;
        void *va, *data;
+       u16 rx_headroom = rq->rx_headroom;
        bool consumed;
 
        di             = &rq->dma_info[wqe_counter];
        va             = page_address(di->page);
-       data           = va + MLX5_RX_HEADROOM;
+       data           = va + rx_headroom;
 
        dma_sync_single_range_for_cpu(rq->pdev,
                                      di->addr,
-                                     MLX5_RX_HEADROOM,
+                                     rx_headroom,
                                      rq->buff.wqe_sz,
                                      DMA_FROM_DEVICE);
        prefetch(data);
@@ -763,8 +781,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
        }
 
        rcu_read_lock();
-       consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data,
-                                   cqe_bcnt);
+       consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt);
        rcu_read_unlock();
        if (consumed)
                return NULL; /* page/packet was consumed by XDP */
@@ -780,7 +797,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
        page_ref_inc(di->page);
        mlx5e_page_release(rq, di, true);
 
-       skb_reserve(skb, MLX5_RX_HEADROOM);
+       skb_reserve(skb, rx_headroom);
        skb_put(skb, cqe_bcnt);
 
        return skb;
index ba5db1dd23a97a7d378ec3ea8e36fa099d0a462a..53e4992d6511f578c5bb31e993afb0986789a979 100644
@@ -39,7 +39,7 @@
 #define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
        (*(u32 *)((char *)ptr + dsc[i].offset))
 #define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
-       be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
+       be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
 
 #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
 #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
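The MLX5E_READ_CTR32_BE change is a genuine width fix for the new 32-bit PCIe counters: the old macro dereferenced 32 bits but byte-swapped them as a 64-bit value, shifting the counter into the high bytes. A demonstration on a little-endian host, with gcc/clang byte-swap builtins standing in for the kernel helpers:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        uint8_t raw[4] = { 0x00, 0x00, 0x00, 0x2a };    /* counter 42, BE */
        uint32_t v32;

        memcpy(&v32, raw, sizeof(v32));                 /* 0x2a000000 on LE */
        printf("be32_to_cpu: %u\n", __builtin_bswap32(v32));    /* 42 */
        printf("be64_to_cpu: %llu\n", (unsigned long long)
               __builtin_bswap64((uint64_t)v32));       /* 42 << 32: garbage */
        return 0;
}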
@@ -201,6 +201,12 @@ static const struct counter_desc vport_stats_desc[] = {
 #define PPORT_2819_GET(pstats, c) \
        MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
                   counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+#define PPORT_PHY_STATISTICAL_OFF(c) \
+       MLX5_BYTE_OFF(ppcnt_reg, \
+                     counter_set.phys_layer_statistical_cntrs.c##_high)
+#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
+       MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
+                  counter_set.phys_layer_statistical_cntrs.c##_high)
 #define PPORT_PER_PRIO_OFF(c) \
        MLX5_BYTE_OFF(ppcnt_reg, \
                      counter_set.eth_per_prio_grp_data_layout.c##_high)
@@ -215,6 +221,7 @@ struct mlx5e_pport_stats {
        __be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
        __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
        __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+       __be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
 };
 
 static const struct counter_desc pport_802_3_stats_desc[] = {
@@ -260,6 +267,11 @@ static const struct counter_desc pport_2819_stats_desc[] = {
        { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
 };
 
+static const struct counter_desc pport_phy_statistical_stats_desc[] = {
+       { "rx_symbol_errors_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
+       { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
+};
+
 static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
        { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
        { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
@@ -276,6 +288,21 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
        { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
 };
 
+#define PCIE_PERF_OFF(c) \
+       MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
+#define PCIE_PERF_GET(pcie_stats, c) \
+       MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
+                counter_set.pcie_perf_cntrs_grp_data_layout.c)
+
+struct mlx5e_pcie_stats {
+       __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
+};
+
+static const struct counter_desc pcie_perf_stats_desc[] = {
+       { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
+       { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
+};
+
 struct mlx5e_rq_stats {
        u64 packets;
        u64 bytes;
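
    [Note on the hunks above] Each counter_desc entry pairs an ethtool string
    with a byte offset that MLX5_BYTE_OFF()/MLX5_GET64() derive from the
    firmware register layout, so new counter groups only need a table plus a
    capability gate. A hedged sketch of how such a table is typically walked
    when filling stats (struct and function names are illustrative; 32-bit
    groups such as the PCIe counters use the 32-bit reader shown earlier):

        struct ctr_desc_sketch {
                const char *name;   /* ethtool string */
                size_t offset;      /* byte offset into the register dump */
        };

        /* Emit one u64 per descriptor from a big-endian register dump. */
        static void fill_stats(const void *regs,
                               const struct ctr_desc_sketch *desc,
                               int n, u64 *out)
        {
                int i;

                for (i = 0; i < n; i++)
                        out[i] = be64_to_cpu(*(const __be64 *)
                                             ((const char *)regs + desc[i].offset));
        }
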
@@ -360,15 +387,23 @@ static const struct counter_desc sq_stats_desc[] = {
 #define NUM_PPORT_802_3_COUNTERS       ARRAY_SIZE(pport_802_3_stats_desc)
 #define NUM_PPORT_2863_COUNTERS                ARRAY_SIZE(pport_2863_stats_desc)
 #define NUM_PPORT_2819_COUNTERS                ARRAY_SIZE(pport_2819_stats_desc)
+#define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \
+       (ARRAY_SIZE(pport_phy_statistical_stats_desc) * \
+        MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+#define NUM_PCIE_PERF_COUNTERS(priv) \
+       (ARRAY_SIZE(pcie_perf_stats_desc) * \
+        MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
 #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
        ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
 #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
        ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
-#define NUM_PPORT_COUNTERS             (NUM_PPORT_802_3_COUNTERS + \
+#define NUM_PPORT_COUNTERS(priv)       (NUM_PPORT_802_3_COUNTERS + \
                                         NUM_PPORT_2863_COUNTERS  + \
                                         NUM_PPORT_2819_COUNTERS  + \
+                                        NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \
                                         NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
                                         NUM_PPORT_PRIO)
+#define NUM_PCIE_COUNTERS(priv)                NUM_PCIE_PERF_COUNTERS(priv)
 #define NUM_RQ_STATS                   ARRAY_SIZE(rq_stats_desc)
 #define NUM_SQ_STATS                   ARRAY_SIZE(sq_stats_desc)
 
@@ -378,6 +413,7 @@ struct mlx5e_stats {
        struct mlx5e_vport_stats vport;
        struct mlx5e_pport_stats pport;
        struct rtnl_link_stats64 vf_vport;
+       struct mlx5e_pcie_stats pcie;
 };
 
 static const struct counter_desc mlx5e_pme_status_desc[] = {
index 2ebbe80d8126521cd324090344ecc3865a16657b..44406a5ec15d96a6ca45d30b609864f8cccb07e1 100644 (file)
@@ -298,6 +298,32 @@ vxlan_match_offload_err:
 
                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+       } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+               struct flow_dissector_key_ipv6_addrs *key =
+                       skb_flow_dissector_target(f->dissector,
+                                                 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
+                                                 f->key);
+               struct flow_dissector_key_ipv6_addrs *mask =
+                       skb_flow_dissector_target(f->dissector,
+                                                 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
+                                                 f->mask);
+
+               memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+                                   src_ipv4_src_ipv6.ipv6_layout.ipv6),
+                      &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+               memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                                   src_ipv4_src_ipv6.ipv6_layout.ipv6),
+                      &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+
+               memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+                                   dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+                      &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+               memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                                   dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+                      &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
        }
 
        /* Enforce DMAC when offloading incoming tunneled flows.
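
    [Note on the hunk above] The new IPv6 branch follows the standard
    cls_flower pattern: for every dissector key the classifier supplies a
    key/mask pair, and the driver copies mask bytes into the match criteria
    (headers_c) and value bytes into the match value (headers_v). A minimal
    sketch of the fetch step; skb_flow_dissector_target() is the real helper,
    the wrapper itself is hypothetical:

        /* Fetch the key/mask blobs for one dissector key id, as the
         * FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS branch above does inline.
         */
        static void get_key_mask(struct tc_cls_flower_offload *f,
                                 enum flow_dissector_key_id id,
                                 void **key, void **mask)
        {
                *key  = skb_flow_dissector_target(f->dissector, id, f->key);
                *mask = skb_flow_dissector_target(f->dissector, id, f->mask);
        }
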
@@ -358,12 +384,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                                                  f->key);
                switch (key->addr_type) {
                case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+               case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
                        if (parse_tunnel_attr(priv, spec, f))
                                return -EOPNOTSUPP;
                        break;
-               case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
-                       netdev_warn(priv->netdev,
-                                   "IPv6 tunnel decap offload isn't supported\n");
                default:
                        return -EOPNOTSUPP;
                }
@@ -460,8 +484,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->mask);
                if (mask->vlan_id || mask->vlan_priority) {
-                       MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
-                       MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+                       MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
 
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
@@ -644,15 +668,15 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        return 0;
 }
 
-static inline int cmp_encap_info(struct mlx5_encap_info *a,
-                                struct mlx5_encap_info *b)
+static inline int cmp_encap_info(struct ip_tunnel_key *a,
+                                struct ip_tunnel_key *b)
 {
        return memcmp(a, b, sizeof(*a));
 }
 
-static inline int hash_encap_info(struct mlx5_encap_info *info)
+static inline int hash_encap_info(struct ip_tunnel_key *key)
 {
-       return jhash(info, sizeof(*info), 0);
+       return jhash(key, sizeof(*key), 0);
 }
 
 static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
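
    [Note on the hunk above] Rekeying the encap table on the stack's
    struct ip_tunnel_key (instead of the removed mlx5_encap_info) lets IPv4 and
    IPv6 tunnels share one hash table. Byte-wise memcmp()/jhash() over the
    struct is only sound because the tunnel metadata code initializes the key
    as a whole, leaving no uninitialized padding; a sketch of the lookup
    predicate under that assumption:

        /* Deduplicate encap entries by comparing the full tunnel key. */
        static bool same_tunnel(const struct ip_tunnel_key *a,
                                const struct ip_tunnel_key *b)
        {
                return memcmp(a, b, sizeof(*a)) == 0;
        }
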
@@ -660,13 +684,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                   struct net_device **out_dev,
                                   struct flowi4 *fl4,
                                   struct neighbour **out_n,
-                                  __be32 *saddr,
                                   int *out_ttl)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct rtable *rt;
        struct neighbour *n = NULL;
-       int ttl;
 
 #if IS_ENABLED(CONFIG_INET)
        int ret;
@@ -684,16 +706,54 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
        else
                *out_dev = rt->dst.dev;
 
-       ttl = ip4_dst_hoplimit(&rt->dst);
+       *out_ttl = ip4_dst_hoplimit(&rt->dst);
        n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
        ip_rt_put(rt);
        if (!n)
                return -ENOMEM;
 
        *out_n = n;
-       *saddr = fl4->saddr;
-       *out_ttl = ttl;
+       return 0;
+}
+
+static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
+                                  struct net_device *mirred_dev,
+                                  struct net_device **out_dev,
+                                  struct flowi6 *fl6,
+                                  struct neighbour **out_n,
+                                  int *out_ttl)
+{
+       struct neighbour *n = NULL;
+       struct dst_entry *dst;
+
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       int ret;
+
+       dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
+       ret = dst->error;
+       if (ret) {
+               dst_release(dst);
+               return ret;
+       }
+
+       *out_ttl = ip6_dst_hoplimit(dst);
 
+       /* if the egress device isn't on the same HW e-switch, we use the uplink */
+       if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
+               *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+       else
+               *out_dev = dst->dev;
+#else
+       return -EOPNOTSUPP;
+#endif
+
+       n = dst_neigh_lookup(dst, &fl6->daddr);
+       dst_release(dst);
+       if (!n)
+               return -ENOMEM;
+
+       *out_n = n;
        return 0;
 }
 
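
    [Note on the hunk above] Like its IPv4 counterpart, the IPv6 lookup
    degrades to -EOPNOTSUPP when the needed stacks are compiled out, so callers
    need no #ifdefs of their own. A condensed sketch of that guard pattern
    (function name hypothetical; ip6_route_output() is the real API):

        static int optional_v6_lookup(struct net *net, struct flowi6 *fl6,
                                      struct dst_entry **dst)
        {
        #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
                *dst = ip6_route_output(net, NULL, fl6);
                return (*dst)->error;
        #else
                return -EOPNOTSUPP; /* IPv6 routing unavailable in this config */
        #endif
        }
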
@@ -733,19 +793,52 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev,
        return encap_size;
 }
 
+static int gen_vxlan_header_ipv6(struct net_device *out_dev,
+                                char buf[],
+                                unsigned char h_dest[ETH_ALEN],
+                                int ttl,
+                                struct in6_addr *daddr,
+                                struct in6_addr *saddr,
+                                __be16 udp_dst_port,
+                                __be32 vx_vni)
+{
+       int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
+       struct ethhdr *eth = (struct ethhdr *)buf;
+       struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
+       struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
+       struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
+
+       memset(buf, 0, encap_size);
+
+       ether_addr_copy(eth->h_dest, h_dest);
+       ether_addr_copy(eth->h_source, out_dev->dev_addr);
+       eth->h_proto = htons(ETH_P_IPV6);
+
+       ip6_flow_hdr(ip6h, 0, 0);
+       /* the HW fills up ipv6 payload len */
+       ip6h->nexthdr     = IPPROTO_UDP;
+       ip6h->hop_limit   = ttl;
+       ip6h->daddr       = *daddr;
+       ip6h->saddr       = *saddr;
+
+       udp->dest = udp_dst_port;
+       vxh->vx_flags = VXLAN_HF_VNI;
+       vxh->vx_vni = vxlan_vni_field(vx_vni);
+
+       return encap_size;
+}
+
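
    [Note on the hunk above] The IPv6 encap header built by
    gen_vxlan_header_ipv6() has a fixed layout, so its size is a compile-time
    constant: ETH_HLEN (14) + sizeof(struct ipv6hdr) (40) + VXLAN_HLEN (8-byte
    UDP + 8-byte VXLAN) = 70 bytes, which must fit the device's
    max_encap_header_size. Expressed as a sketch (macro name illustrative):

        /* Total outer-header bytes pushed in front of the inner frame:
         * 14 (Ethernet) + 40 (IPv6) + 16 (UDP + VXLAN) = 70.
         */
        #define VXLAN6_ENCAP_SIZE \
                (ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN)
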
 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
                                          struct net_device *mirred_dev,
                                          struct mlx5_encap_entry *e,
                                          struct net_device **out_dev)
 {
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+       struct ip_tunnel_key *tun_key = &e->tun_info.key;
+       int encap_size, ttl, err;
        struct neighbour *n = NULL;
        struct flowi4 fl4 = {};
        char *encap_header;
-       int encap_size;
-       __be32 saddr;
-       int ttl;
-       int err;
 
        encap_header = kzalloc(max_encap_size, GFP_KERNEL);
        if (!encap_header)
@@ -754,37 +847,108 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
        switch (e->tunnel_type) {
        case MLX5_HEADER_TYPE_VXLAN:
                fl4.flowi4_proto = IPPROTO_UDP;
-               fl4.fl4_dport = e->tun_info.tp_dst;
+               fl4.fl4_dport = tun_key->tp_dst;
                break;
        default:
                err = -EOPNOTSUPP;
                goto out;
        }
-       fl4.daddr = e->tun_info.daddr;
+       fl4.flowi4_tos = tun_key->tos;
+       fl4.daddr = tun_key->u.ipv4.dst;
+       fl4.saddr = tun_key->u.ipv4.src;
 
        err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
-                                     &fl4, &n, &saddr, &ttl);
+                                     &fl4, &n, &ttl);
        if (err)
                goto out;
 
+       if (!(n->nud_state & NUD_VALID)) {
+               pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
        e->n = n;
        e->out_dev = *out_dev;
 
+       neigh_ha_snapshot(e->h_dest, n, *out_dev);
+
+       switch (e->tunnel_type) {
+       case MLX5_HEADER_TYPE_VXLAN:
+               encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
+                                                  e->h_dest, ttl,
+                                                  fl4.daddr,
+                                                  fl4.saddr, tun_key->tp_dst,
+                                                  tunnel_id_to_key32(tun_key->tun_id));
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
+                              encap_size, encap_header, &e->encap_id);
+out:
+       if (err && n)
+               neigh_release(n);
+       kfree(encap_header);
+       return err;
+}
+
+static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
+                                         struct net_device *mirred_dev,
+                                         struct mlx5_encap_entry *e,
+                                         struct net_device **out_dev)
+{
+       int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+       struct ip_tunnel_key *tun_key = &e->tun_info.key;
+       int encap_size, err, ttl = 0;
+       struct neighbour *n = NULL;
+       struct flowi6 fl6 = {};
+       char *encap_header;
+
+       encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+       if (!encap_header)
+               return -ENOMEM;
+
+       switch (e->tunnel_type) {
+       case MLX5_HEADER_TYPE_VXLAN:
+               fl6.flowi6_proto = IPPROTO_UDP;
+               fl6.fl6_dport = tun_key->tp_dst;
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+       fl6.daddr = tun_key->u.ipv6.dst;
+       fl6.saddr = tun_key->u.ipv6.src;
+
+       err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev,
+                                     &fl6, &n, &ttl);
+       if (err)
+               goto out;
+
        if (!(n->nud_state & NUD_VALID)) {
-               pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
+               pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
                err = -EOPNOTSUPP;
                goto out;
        }
 
+       e->n = n;
+       e->out_dev = *out_dev;
+
        neigh_ha_snapshot(e->h_dest, n, *out_dev);
 
        switch (e->tunnel_type) {
        case MLX5_HEADER_TYPE_VXLAN:
-               encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
+               encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
                                                   e->h_dest, ttl,
-                                                  e->tun_info.daddr,
-                                                  saddr, e->tun_info.tp_dst,
-                                                  e->tun_info.tun_id);
+                                                  &fl6.daddr,
+                                                  &fl6.saddr, tun_key->tp_dst,
+                                                  tunnel_id_to_key32(tun_key->tun_id));
                break;
        default:
                err = -EOPNOTSUPP;
@@ -808,13 +972,11 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        unsigned short family = ip_tunnel_info_af(tun_info);
        struct ip_tunnel_key *key = &tun_info->key;
-       struct mlx5_encap_info info;
        struct mlx5_encap_entry *e;
        struct net_device *out_dev;
+       int tunnel_type, err = -EOPNOTSUPP;
        uintptr_t hash_key;
        bool found = false;
-       int tunnel_type;
-       int err;
 
        /* udp dst port must be set */
        if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
@@ -830,8 +992,6 @@ vxlan_encap_offload_err:
 
        if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
-               info.tp_dst = key->tp_dst;
-               info.tun_id = tunnel_id_to_key32(key->tun_id);
                tunnel_type = MLX5_HEADER_TYPE_VXLAN;
        } else {
                netdev_warn(priv->netdev,
@@ -839,22 +999,11 @@ vxlan_encap_offload_err:
                return -EOPNOTSUPP;
        }
 
-       switch (family) {
-       case AF_INET:
-               info.daddr = key->u.ipv4.dst;
-               break;
-       case AF_INET6:
-               netdev_warn(priv->netdev,
-                           "IPv6 tunnel encap offload isn't supported\n");
-       default:
-               return -EOPNOTSUPP;
-       }
-
-       hash_key = hash_encap_info(&info);
+       hash_key = hash_encap_info(key);
 
        hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
                                   encap_hlist, hash_key) {
-               if (!cmp_encap_info(&e->tun_info, &info)) {
+               if (!cmp_encap_info(&e->tun_info.key, key)) {
                        found = true;
                        break;
                }
@@ -869,11 +1018,15 @@ vxlan_encap_offload_err:
        if (!e)
                return -ENOMEM;
 
-       e->tun_info = info;
+       e->tun_info = *tun_info;
        e->tunnel_type = tunnel_type;
        INIT_LIST_HEAD(&e->flows);
 
-       err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
+       if (family == AF_INET)
+               err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
+       else if (family == AF_INET6)
+               err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev);
+
        if (err)
                goto out_err;
 
index cfb68371c397e0b19b231ba2730d4d54179d0fd5..f193128bac4b8c18504ec1f5905def3baa5c4633 100644 (file)
@@ -154,6 +154,8 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
        int hlen;
 
        switch (mode) {
+       case MLX5_INLINE_MODE_NONE:
+               return 0;
        case MLX5_INLINE_MODE_TCP_UDP:
                hlen = eth_get_headlen(skb->data, skb_headlen(skb));
                if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
@@ -283,21 +285,23 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 
        wi->num_bytes = num_bytes;
 
-       if (skb_vlan_tag_present(skb)) {
-               mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
-                                 &skb_len);
-               ihs += VLAN_HLEN;
-       } else {
-               memcpy(eseg->inline_hdr_start, skb_data, ihs);
-               mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+       ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+       if (ihs) {
+               if (skb_vlan_tag_present(skb)) {
+                       mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
+                       ihs += VLAN_HLEN;
+               } else {
+                       memcpy(eseg->inline_hdr.start, skb_data, ihs);
+                       mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+               }
+               eseg->inline_hdr.sz = cpu_to_be16(ihs);
+               ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
+       } else if (skb_vlan_tag_present(skb)) {
+               eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
+               eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
        }
 
-       eseg->inline_hdr_sz = cpu_to_be16(ihs);
-
-       ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
-       ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
-                              MLX5_SEND_WQE_DS);
-       dseg    = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
+       dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
 
        wi->num_dma = 0;
 
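
    [Note on the hunk above] With MLX5_INLINE_MODE_NONE the inline header size
    (ihs) can now be zero, so the data-segment count starts from the fixed
    control/eth segments and grows only when headers are actually inlined; a
    VLAN tag is then inserted by the hardware via eseg->insert instead of being
    copied. A sketch of the ds_cnt arithmetic, assuming the 2-byte
    inline_hdr.start array and 16-byte MLX5_SEND_WQE_DS units:

        /* Example: ihs = 18 (Ethernet + VLAN inlined) gives
         * DIV_ROUND_UP(18 - 2, 16) = 1 extra 16-byte data segment.
         */
        static u16 tx_ds_count(u16 wqe_sz, u16 ihs)
        {
                u16 ds_cnt = wqe_sz / 16;       /* MLX5_SEND_WQE_DS */

                if (ihs)
                        ds_cnt += DIV_ROUND_UP(ihs - 2 /* inline_hdr.start */, 16);
                return ds_cnt;
        }
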
index 8ffcc8808e50015c93296b38d0c78037a484d398..ea5d8d37a75c465cf022a4c29d89918802f97bbb 100644 (file)
@@ -54,6 +54,7 @@ enum {
        MLX5_NUM_SPARE_EQE      = 0x80,
        MLX5_NUM_ASYNC_EQE      = 0x100,
        MLX5_NUM_CMD_EQE        = 32,
+       MLX5_NUM_PF_DRAIN       = 64,
 };
 
 enum {
@@ -153,6 +154,8 @@ static const char *eqe_type_str(u8 type)
                return "MLX5_EVENT_TYPE_PAGE_REQUEST";
        case MLX5_EVENT_TYPE_PAGE_FAULT:
                return "MLX5_EVENT_TYPE_PAGE_FAULT";
+       case MLX5_EVENT_TYPE_PPS_EVENT:
+               return "MLX5_EVENT_TYPE_PPS_EVENT";
        default:
                return "Unrecognized event";
        }
@@ -188,10 +191,193 @@ static void eq_update_ci(struct mlx5_eq *eq, int arm)
        mb();
 }
 
-static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+static void eqe_pf_action(struct work_struct *work)
+{
+       struct mlx5_pagefault *pfault = container_of(work,
+                                                    struct mlx5_pagefault,
+                                                    work);
+       struct mlx5_eq *eq = pfault->eq;
+
+       mlx5_core_page_fault(eq->dev, pfault);
+       mempool_free(pfault, eq->pf_ctx.pool);
+}
+
+static void eq_pf_process(struct mlx5_eq *eq)
+{
+       struct mlx5_core_dev *dev = eq->dev;
+       struct mlx5_eqe_page_fault *pf_eqe;
+       struct mlx5_pagefault *pfault;
+       struct mlx5_eqe *eqe;
+       int set_ci = 0;
+
+       while ((eqe = next_eqe_sw(eq))) {
+               pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC);
+               if (!pfault) {
+                       schedule_work(&eq->pf_ctx.work);
+                       break;
+               }
+
+               dma_rmb();
+               pf_eqe = &eqe->data.page_fault;
+               pfault->event_subtype = eqe->sub_type;
+               pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
+
+               mlx5_core_dbg(dev,
+                             "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
+                             eqe->sub_type, pfault->bytes_committed);
+
+               switch (eqe->sub_type) {
+               case MLX5_PFAULT_SUBTYPE_RDMA:
+                       /* RDMA based event */
+                       pfault->type =
+                               be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
+                       pfault->token =
+                               be32_to_cpu(pf_eqe->rdma.pftype_token) &
+                               MLX5_24BIT_MASK;
+                       pfault->rdma.r_key =
+                               be32_to_cpu(pf_eqe->rdma.r_key);
+                       pfault->rdma.packet_size =
+                               be16_to_cpu(pf_eqe->rdma.packet_length);
+                       pfault->rdma.rdma_op_len =
+                               be32_to_cpu(pf_eqe->rdma.rdma_op_len);
+                       pfault->rdma.rdma_va =
+                               be64_to_cpu(pf_eqe->rdma.rdma_va);
+                       mlx5_core_dbg(dev,
+                                     "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
+                                     pfault->type, pfault->token,
+                                     pfault->rdma.r_key);
+                       mlx5_core_dbg(dev,
+                                     "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
+                                     pfault->rdma.rdma_op_len,
+                                     pfault->rdma.rdma_va);
+                       break;
+
+               case MLX5_PFAULT_SUBTYPE_WQE:
+                       /* WQE based event */
+                       pfault->type =
+                               be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
+                       pfault->token =
+                               be32_to_cpu(pf_eqe->wqe.token);
+                       pfault->wqe.wq_num =
+                               be32_to_cpu(pf_eqe->wqe.pftype_wq) &
+                               MLX5_24BIT_MASK;
+                       pfault->wqe.wqe_index =
+                               be16_to_cpu(pf_eqe->wqe.wqe_index);
+                       pfault->wqe.packet_size =
+                               be16_to_cpu(pf_eqe->wqe.packet_length);
+                       mlx5_core_dbg(dev,
+                                     "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
+                                     pfault->type, pfault->token,
+                                     pfault->wqe.wq_num,
+                                     pfault->wqe.wqe_index);
+                       break;
+
+               default:
+                       mlx5_core_warn(dev,
+                                      "Unsupported page fault event sub-type: 0x%02hhx\n",
+                                      eqe->sub_type);
+                       /* Unsupported page faults should still be
+                        * resolved by the page fault handler
+                        */
+               }
+
+               pfault->eq = eq;
+               INIT_WORK(&pfault->work, eqe_pf_action);
+               queue_work(eq->pf_ctx.wq, &pfault->work);
+
+               ++eq->cons_index;
+               ++set_ci;
+
+               if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
+                       eq_update_ci(eq, 0);
+                       set_ci = 0;
+               }
+       }
+
+       eq_update_ci(eq, 1);
+}
+
+static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr)
+{
+       struct mlx5_eq *eq = eq_ptr;
+       unsigned long flags;
+
+       if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) {
+               eq_pf_process(eq);
+               spin_unlock_irqrestore(&eq->pf_ctx.lock, flags);
+       } else {
+               schedule_work(&eq->pf_ctx.work);
+       }
+
+       return IRQ_HANDLED;
+}
+
+/* mempool_refill() was proposed but unfortunately wasn't accepted
+ * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
+ * Cheap workaround.
+ */
+static void mempool_refill(mempool_t *pool)
+{
+       while (pool->curr_nr < pool->min_nr)
+               mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
+}
+
+static void eq_pf_action(struct work_struct *work)
+{
+       struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work);
+
+       mempool_refill(eq->pf_ctx.pool);
+
+       spin_lock_irq(&eq->pf_ctx.lock);
+       eq_pf_process(eq);
+       spin_unlock_irq(&eq->pf_ctx.lock);
+}
+
+static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name)
+{
+       spin_lock_init(&pf_ctx->lock);
+       INIT_WORK(&pf_ctx->work, eq_pf_action);
+
+       pf_ctx->wq = alloc_ordered_workqueue(name,
+                                            WQ_MEM_RECLAIM);
+       if (!pf_ctx->wq)
+               return -ENOMEM;
+
+       pf_ctx->pool = mempool_create_kmalloc_pool
+               (MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault));
+       if (!pf_ctx->pool)
+               goto err_wq;
+
+       return 0;
+err_wq:
+       destroy_workqueue(pf_ctx->wq);
+       return -ENOMEM;
+}
+
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
+                               u32 wq_num, u8 type, int error)
+{
+       u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
+       u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = {0};
+
+       MLX5_SET(page_fault_resume_in, in, opcode,
+                MLX5_CMD_OP_PAGE_FAULT_RESUME);
+       MLX5_SET(page_fault_resume_in, in, error, !!error);
+       MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
+       MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
+       MLX5_SET(page_fault_resume_in, in, token, token);
+
+       return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
+#endif
+
+static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
 {
+       struct mlx5_eq *eq = eq_ptr;
+       struct mlx5_core_dev *dev = eq->dev;
        struct mlx5_eqe *eqe;
-       int eqes_found = 0;
        int set_ci = 0;
        u32 cqn = -1;
        u32 rsn;
@@ -276,12 +462,6 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
                        }
                        break;
 
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-               case MLX5_EVENT_TYPE_PAGE_FAULT:
-                       mlx5_eq_pagefault(dev, eqe);
-                       break;
-#endif
-
 #ifdef CONFIG_MLX5_CORE_EN
                case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
                        mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
@@ -292,6 +472,10 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
                        mlx5_port_module_event(dev, eqe);
                        break;
 
+               case MLX5_EVENT_TYPE_PPS_EVENT:
+                       if (dev->event)
+                               dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe);
+                       break;
                default:
                        mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
                                       eqe->type, eq->eqn);
@@ -299,7 +483,6 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
                }
 
                ++eq->cons_index;
-               eqes_found = 1;
                ++set_ci;
 
                /* The HCA will think the queue has overflowed if we
@@ -319,17 +502,6 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
        if (cqn != -1)
                tasklet_schedule(&eq->tasklet_ctx.task);
 
-       return eqes_found;
-}
-
-static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
-{
-       struct mlx5_eq *eq = eq_ptr;
-       struct mlx5_core_dev *dev = eq->dev;
-
-       mlx5_eq_int(dev, eq);
-
-       /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
 }
 
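
    [Note on the hunks above] The page-fault interrupt handler never blocks in
    hard-IRQ context: it drains the EQ only when the lock is immediately
    available and otherwise defers to eq_pf_action(), which retakes the lock
    with spin_lock_irq(). A generic sketch of this trylock-or-defer pattern;
    the context struct and processing function are hypothetical:

        struct demo_ctx {
                spinlock_t lock;
                struct work_struct work;   /* runs the deferred drain */
        };

        static void process_events(struct demo_ctx *ctx);  /* hypothetical */

        static irqreturn_t demo_irq(int irq, void *ctx_ptr)
        {
                struct demo_ctx *ctx = ctx_ptr;
                unsigned long flags;

                if (spin_trylock_irqsave(&ctx->lock, flags)) {
                        process_events(ctx);
                        spin_unlock_irqrestore(&ctx->lock, flags);
                } else {
                        schedule_work(&ctx->work); /* retry outside hard IRQ */
                }
                return IRQ_HANDLED;
        }
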
@@ -345,22 +517,32 @@ static void init_eq_buf(struct mlx5_eq *eq)
 }
 
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
-                      int nent, u64 mask, const char *name, struct mlx5_uar *uar)
+                      int nent, u64 mask, const char *name,
+                      enum mlx5_eq_type type)
 {
        u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
        struct mlx5_priv *priv = &dev->priv;
+       irq_handler_t handler;
        __be64 *pas;
        void *eqc;
        int inlen;
        u32 *in;
        int err;
 
+       eq->type = type;
        eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
        eq->cons_index = 0;
        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
        if (err)
                return err;
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       if (type == MLX5_EQ_TYPE_PF)
+               handler = mlx5_eq_pf_int;
+       else
+#endif
+               handler = mlx5_eq_int;
+
        init_eq_buf(eq);
 
        inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
@@ -380,7 +562,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
 
        eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
        MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
-       MLX5_SET(eqc, eqc, uar_page, uar->index);
+       MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
        MLX5_SET(eqc, eqc, intr, vecidx);
        MLX5_SET(eqc, eqc, log_page_size,
                 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
@@ -395,8 +577,8 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
        eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
        eq->irqn = priv->msix_arr[vecidx].vector;
        eq->dev = dev;
-       eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
-       err = request_irq(eq->irqn, mlx5_msix_handler, 0,
+       eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
+       err = request_irq(eq->irqn, handler, 0,
                          priv->irq_info[vecidx].name, eq);
        if (err)
                goto err_eq;
@@ -405,11 +587,20 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
        if (err)
                goto err_irq;
 
-       INIT_LIST_HEAD(&eq->tasklet_ctx.list);
-       INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
-       spin_lock_init(&eq->tasklet_ctx.lock);
-       tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
-                    (unsigned long)&eq->tasklet_ctx);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       if (type == MLX5_EQ_TYPE_PF) {
+               err = init_pf_ctx(&eq->pf_ctx, name);
+               if (err)
+                       goto err_irq;
+       } else
+#endif
+       {
+               INIT_LIST_HEAD(&eq->tasklet_ctx.list);
+               INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
+               spin_lock_init(&eq->tasklet_ctx.lock);
+               tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
+                            (unsigned long)&eq->tasklet_ctx);
+       }
 
        /* EQs are created in ARMED state
         */
@@ -444,7 +635,16 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                               eq->eqn);
        synchronize_irq(eq->irqn);
-       tasklet_disable(&eq->tasklet_ctx.task);
+
+       if (eq->type == MLX5_EQ_TYPE_COMP) {
+               tasklet_disable(&eq->tasklet_ctx.task);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       } else if (eq->type == MLX5_EQ_TYPE_PF) {
+               cancel_work_sync(&eq->pf_ctx.work);
+               destroy_workqueue(eq->pf_ctx.wq);
+               mempool_destroy(eq->pf_ctx.pool);
+#endif
+       }
        mlx5_buf_free(dev, &eq->buf);
 
        return err;
@@ -479,8 +679,6 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
        u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
        int err;
 
-       if (MLX5_CAP_GEN(dev, pg))
-               async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);
 
        if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
            MLX5_CAP_GEN(dev, vport_group_manager) &&
@@ -492,9 +690,12 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
        else
                mlx5_core_dbg(dev, "port_module_event is not set\n");
 
+       if (MLX5_CAP_GEN(dev, pps))
+               async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
+
        err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
                                 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
-                                "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
+                                "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
        if (err) {
                mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
                return err;
@@ -504,7 +705,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 
        err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
                                 MLX5_NUM_ASYNC_EQE, async_event_mask,
-                                "mlx5_async_eq", &dev->priv.uuari.uars[0]);
+                                "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC);
        if (err) {
                mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
                goto err1;
@@ -514,13 +715,33 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
                                 MLX5_EQ_VEC_PAGES,
                                 /* TODO: sriov max_vf + */ 1,
                                 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
-                                &dev->priv.uuari.uars[0]);
+                                MLX5_EQ_TYPE_ASYNC);
        if (err) {
                mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
                goto err2;
        }
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       if (MLX5_CAP_GEN(dev, pg)) {
+               err = mlx5_create_map_eq(dev, &table->pfault_eq,
+                                        MLX5_EQ_VEC_PFAULT,
+                                        MLX5_NUM_ASYNC_EQE,
+                                        1 << MLX5_EVENT_TYPE_PAGE_FAULT,
+                                        "mlx5_page_fault_eq",
+                                        MLX5_EQ_TYPE_PF);
+               if (err) {
+                       mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
+                                      err);
+                       goto err3;
+               }
+       }
+
        return err;
+err3:
+       mlx5_destroy_unmap_eq(dev, &table->pages_eq);
+#else
+       return err;
+#endif
 
 err2:
        mlx5_destroy_unmap_eq(dev, &table->async_eq);
@@ -536,6 +757,14 @@ int mlx5_stop_eqs(struct mlx5_core_dev *dev)
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;
 
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       if (MLX5_CAP_GEN(dev, pg)) {
+               err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
+               if (err)
+                       return err;
+       }
+#endif
+
        err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
        if (err)
                return err;
index d0c8bf014453ea38736182c03ba7b2d9c5bcd4d7..fcd5bc7e31db5432bb8da33bc335694412e737a9 100644 (file)
@@ -979,7 +979,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-       MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
@@ -1098,7 +1098,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
 
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-       MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
@@ -1115,7 +1115,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-       MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+       MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
 
@@ -1254,7 +1254,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
        }
 
        if (vport->info.vlan || vport->info.qos)
-               MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
+               MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
 
        if (vport->info.spoofchk) {
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
@@ -1335,8 +1335,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
        }
 
        /* Allowed vlan rule */
-       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
-       MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
 
@@ -1415,7 +1415,7 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw)
 }
 
 static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
-                               u32 initial_max_rate)
+                               u32 initial_max_rate, u32 initial_bw_share)
 {
        u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
        struct mlx5_vport *vport = &esw->vports[vport_num];
@@ -1439,6 +1439,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
                 esw->qos.root_tsar_id);
        MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
                 initial_max_rate);
+       MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share);
 
        err = mlx5_create_scheduling_element_cmd(dev,
                                                 SCHEDULING_HIERARCHY_E_SWITCH,
@@ -1473,7 +1474,7 @@ static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
 }
 
 static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
-                               u32 max_rate)
+                               u32 max_rate, u32 bw_share)
 {
        u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
        struct mlx5_vport *vport = &esw->vports[vport_num];
@@ -1497,7 +1498,9 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
                 esw->qos.root_tsar_id);
        MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
                 max_rate);
+       MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share);
        bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+       bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
 
        err = mlx5_modify_scheduling_element_cmd(dev,
                                                 SCHEDULING_HIERARCHY_E_SWITCH,
@@ -1563,7 +1566,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
        esw_apply_vport_conf(esw, vport);
 
        /* Attach vport to the eswitch rate limiter */
-       if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate))
+       if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate,
+                                vport->qos.bw_share))
                esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
 
        /* Sync with current vport context */
@@ -1952,6 +1956,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
        ivi->qos = evport->info.qos;
        ivi->spoofchk = evport->info.spoofchk;
        ivi->trusted = evport->info.trusted;
+       ivi->min_tx_rate = evport->info.min_rate;
        ivi->max_tx_rate = evport->info.max_rate;
        mutex_unlock(&esw->state_lock);
 
@@ -2046,23 +2051,103 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
        return 0;
 }
 
-int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw,
-                               int vport, u32 max_rate)
+static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
 {
+       u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
        struct mlx5_vport *evport;
+       u32 max_guarantee = 0;
+       int i;
+
+       for (i = 0; i < esw->total_vports; i++) {
+               evport = &esw->vports[i];
+               if (!evport->enabled || evport->info.min_rate < max_guarantee)
+                       continue;
+               max_guarantee = evport->info.min_rate;
+       }
+
+       return max_t(u32, max_guarantee / fw_max_bw_share, 1);
+}
+
+static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
+{
+       u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+       struct mlx5_vport *evport;
+       u32 vport_max_rate;
+       u32 vport_min_rate;
+       u32 bw_share;
+       int err;
+       int i;
+
+       for (i = 0; i < esw->total_vports; i++) {
+               evport = &esw->vports[i];
+               if (!evport->enabled)
+                       continue;
+               vport_min_rate = evport->info.min_rate;
+               vport_max_rate = evport->info.max_rate;
+               bw_share = MLX5_MIN_BW_SHARE;
+
+               if (vport_min_rate)
+                       bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
+                                                        divider,
+                                                        fw_max_bw_share);
+
+               if (bw_share == evport->qos.bw_share)
+                       continue;
+
+               err = esw_vport_qos_config(esw, i, vport_max_rate,
+                                          bw_share);
+               if (!err)
+                       evport->qos.bw_share = bw_share;
+               else
+                       return err;
+       }
+
+       return 0;
+}
+
+int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
+                               u32 max_rate, u32 min_rate)
+{
+       u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+       bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
+                                       fw_max_bw_share >= MLX5_MIN_BW_SHARE;
+       bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
+       struct mlx5_vport *evport;
+       u32 previous_min_rate;
+       u32 divider;
        int err = 0;
 
        if (!ESW_ALLOWED(esw))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport))
                return -EINVAL;
+       if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
+               return -EOPNOTSUPP;
 
        mutex_lock(&esw->state_lock);
        evport = &esw->vports[vport];
-       err = esw_vport_qos_config(esw, vport, max_rate);
+
+       if (min_rate == evport->info.min_rate)
+               goto set_max_rate;
+
+       previous_min_rate = evport->info.min_rate;
+       evport->info.min_rate = min_rate;
+       divider = calculate_vports_min_rate_divider(esw);
+       err = normalize_vports_min_rate(esw, divider);
+       if (err) {
+               evport->info.min_rate = previous_min_rate;
+               goto unlock;
+       }
+
+set_max_rate:
+       if (max_rate == evport->info.max_rate)
+               goto unlock;
+
+       err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share);
        if (!err)
                evport->info.max_rate = max_rate;
 
+unlock:
        mutex_unlock(&esw->state_lock);
        return err;
 }
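
    [Note on the hunk above] The min-rate normalization maps every vport's
    guarantee onto the firmware's bounded bw_share scale: the divider is picked
    so the largest guarantee lands exactly on max_tsar_bw_share, and every
    share is clamped to at least MLX5_MIN_BW_SHARE (1). A worked sketch with
    illustrative numbers:

        /* fw_max_bw_share = 100, min rates = {1000, 250} (Mb/s):
         * divider = max(1000 / 100, 1) = 10
         * shares  = clamp(1000 / 10) = 100, clamp(250 / 10) = 25,
         * so the 4:1 guarantee ratio survives within the firmware range.
         */
        static u32 rate_to_bw_share(u32 rate, u32 divider, u32 fw_max)
        {
                return min_t(u32, max_t(u32, rate / divider, 1), fw_max);
        }
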
index 8661dd3f542c4cda5d875720eeb8c1042d5621da..5b78883d565413ec59a00ecba4ddb483e4eecd3f 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_link.h>
 #include <net/devlink.h>
+#include <net/ip_tunnels.h>
 #include <linux/mlx5/device.h>
 
 #define MLX5_MAX_UC_PER_VPORT(dev) \
 	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))
 
 #define FDB_UPLINK_VPORT 0xffff
 
+#define MLX5_MIN_BW_SHARE 1
+
+#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
+       min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)
+
 /* L2 -mac address based- hash helpers */
 struct l2addr_node {
        struct hlist_node hlist;
@@ -115,6 +121,7 @@ struct mlx5_vport_info {
        u8                      qos;
        u64                     node_guid;
        int                     link_state;
+       u32                     min_rate;
        u32                     max_rate;
        bool                    spoofchk;
        bool                    trusted;
@@ -137,6 +144,7 @@ struct mlx5_vport {
        struct {
                bool            enabled;
                u32             esw_tsar_ix;
+               u32             bw_share;
        } qos;
 
        bool                    enabled;
@@ -248,8 +256,8 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
                                    int vport, bool spoofchk);
 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
                                 int vport_num, bool setting);
-int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw,
-                               int vport, u32 max_rate);
+int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
+                               u32 max_rate, u32 min_rate);
 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
                                  int vport, struct ifla_vf_info *ivi);
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
@@ -274,18 +282,12 @@ enum {
 #define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP  0x40
 #define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
 
-struct mlx5_encap_info {
-       __be32 daddr;
-       __be32 tun_id;
-       __be16 tp_dst;
-};
-
 struct mlx5_encap_entry {
        struct hlist_node encap_hlist;
        struct list_head flows;
        u32 encap_id;
        struct neighbour *n;
-       struct mlx5_encap_info tun_info;
+       struct ip_tunnel_info tun_info;
        unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
 
        struct net_device *out_dev;
index 595f7c7383b399440aedec593ae0fb0c37bb6748..4f5b0d47d5f38237129a7c90a1240b8615615d32 100644 (file)
@@ -402,19 +402,18 @@ out:
 }
 
 #define MAX_PF_SQ 256
-#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
 #define ESW_OFFLOADS_NUM_GROUPS  4
 
 static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
 {
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       int table_size, ix, esw_size, err = 0;
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *fdb = NULL;
        struct mlx5_flow_group *g;
        u32 *flow_group_in;
        void *match_criteria;
-       int table_size, ix, err = 0;
        u32 flags = 0;
 
        flow_group_in = mlx5_vzalloc(inlen);
@@ -428,15 +427,19 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
                goto ns_err;
        }
 
-       esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
-                 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+       esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
+                 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
+                 MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS);
+
+       esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS,
+                        1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
 
        if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
            MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
                flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
 
        fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
-                                                 ESW_OFFLOADS_NUM_ENTRIES,
+                                                 esw_size,
                                                  ESW_OFFLOADS_NUM_GROUPS, 0,
                                                  flags);
        if (IS_ERR(fdb)) {
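
    [Note on the hunk above] Replacing the fixed 8K fast-path FDB with a
    computed size ties the table to what the device can actually count: the
    smaller of max_flow_counter * ESW_OFFLOADS_NUM_GROUPS and
    2^log_max_ft_size. A sketch of that sizing, with illustrative numbers
    (4096 counters * 4 groups = 16384 entries, capped by the table limit):

        static int offloads_fdb_size(int max_flow_counter, int num_groups,
                                     int log_max_ft_size)
        {
                return min_t(int, max_flow_counter * num_groups,
                             1 << log_max_ft_size);
        }
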
index b53fc85a2375778ddd02ac07d21d88b56c49e432..b64a781c7e855fd1d38cb7303d26a27073626435 100644 (file)
@@ -473,10 +473,13 @@ int mlx5_encap_alloc(struct mlx5_core_dev *dev,
        int err;
        u32 *in;
 
-       if (size > MLX5_CAP_ESW(dev, max_encap_header_size))
+       if (size > max_encap_size) {
+               mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
+                              size, max_encap_size);
                return -EINVAL;
+       }
 
-       in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + max_encap_size,
+       in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size,
                     GFP_KERNEL);
        if (!in)
                return -ENOMEM;
index 6346a8f5883bcc911ef422cf572fd1891ddf73c9..ce3d92106386b31d5aa44dae7a347c9d0830c49e 100644 (file)
@@ -1665,7 +1665,7 @@ static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
 
 #define FLOW_TABLE_BIT_SZ 1
 #define GET_FLOW_TABLE_CAP(dev, offset) \
-       ((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) +    \
+       ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) +    \
                        offset / 32)) >>                                        \
          (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
index 5718aada66055c9d439ccac237f287f834ab5a2c..d0bbefa08af78cac0b5b095ea283ab6e50ecd7e4 100644 (file)
@@ -91,6 +91,20 @@ out:
 }
 EXPORT_SYMBOL(mlx5_core_query_vendor_id);
 
+static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
+{
+       return mlx5_query_pcam_reg(dev, dev->caps.pcam,
+                                  MLX5_PCAM_FEATURE_ENHANCED_FEATURES,
+                                  MLX5_PCAM_REGS_5000_TO_507F);
+}
+
+static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
+{
+       return mlx5_query_mcam_reg(dev, dev->caps.mcam,
+                                  MLX5_MCAM_FEATURE_ENHANCED_FEATURES,
+                                  MLX5_MCAM_REGS_FIRST_128);
+}
+
 int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
 {
        int err;
@@ -154,6 +168,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
                        return err;
        }
 
+       if (MLX5_CAP_GEN(dev, pcam_reg))
+               mlx5_get_pcam_reg(dev);
+
+       if (MLX5_CAP_GEN(dev, mcam_reg))
+               mlx5_get_mcam_reg(dev);
+
        return 0;
 }
 
index 5bcf93422ee0b28337040138d026c2ab443642a9..d0515391d33bbc57961311f648ed5ef25f457c28 100644 (file)
@@ -231,21 +231,6 @@ static const char *hsynd_str(u8 synd)
        }
 }
 
-static u16 get_maj(u32 fw)
-{
-       return fw >> 28;
-}
-
-static u16 get_min(u32 fw)
-{
-       return fw >> 16 & 0xfff;
-}
-
-static u16 get_sub(u32 fw)
-{
-       return fw & 0xffff;
-}
-
 static void print_health_info(struct mlx5_core_dev *dev)
 {
        struct mlx5_core_health *health = &dev->priv.health;
@@ -263,13 +248,14 @@ static void print_health_info(struct mlx5_core_dev *dev)
 
        dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
        dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
-       fw = ioread32be(&h->fw_ver);
-       sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw));
+       sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
        dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
        dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
        dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
        dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
        dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
+       fw = ioread32be(&h->fw_ver);
+       dev_err(&dev->pdev->dev, "raw fw_ver 0x%08x\n", fw);
 }
 
 static unsigned long get_next_poll_jiffies(void)
index 3c315eb8d270f6f94ecaea2c8ee4d78ed1244658..c4242a4e81309f0d90a0cae8bdfc09fd39da5649 100644 (file)
@@ -152,6 +152,26 @@ static struct mlx5_profile profile[] = {
                        .size   = 8,
                        .limit  = 4
                },
+               .mr_cache[16]   = {
+                       .size   = 8,
+                       .limit  = 4
+               },
+               .mr_cache[17]   = {
+                       .size   = 8,
+                       .limit  = 4
+               },
+               .mr_cache[18]   = {
+                       .size   = 8,
+                       .limit  = 4
+               },
+               .mr_cache[19]   = {
+                       .size   = 4,
+                       .limit  = 2
+               },
+               .mr_cache[20]   = {
+                       .size   = 4,
+                       .limit  = 2
+               },
        },
 };
 
@@ -398,11 +418,11 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
 
        switch (cap_mode) {
        case HCA_CAP_OPMOD_GET_MAX:
-               memcpy(dev->hca_caps_max[cap_type], hca_caps,
+               memcpy(dev->caps.hca_max[cap_type], hca_caps,
                       MLX5_UN_SZ_BYTES(hca_cap_union));
                break;
        case HCA_CAP_OPMOD_GET_CUR:
-               memcpy(dev->hca_caps_cur[cap_type], hca_caps,
+               memcpy(dev->caps.hca_cur[cap_type], hca_caps,
                       MLX5_UN_SZ_BYTES(hca_cap_union));
                break;
        default:
@@ -493,7 +513,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 
        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
                                   capability);
-       memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
+       memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL],
               MLX5_ST_SZ_BYTES(cmd_hca_cap));
 
        mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
@@ -517,8 +537,18 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        /* disable cmdif checksum */
        MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
 
+       /* If the HCA supports 4K UARs, use them */
+       if (MLX5_CAP_GEN_MAX(dev, uar_4k))
+               MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);
+
        MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
 
+       if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
+               MLX5_SET(cmd_hca_cap,
+                        set_hca_cap,
+                        cache_line_128byte,
+                        cache_line_size() == 128 ? 1 : 0);
+
        err = set_caps(dev, set_ctx, set_sz,
                       MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
 
@@ -739,7 +769,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
                err = mlx5_create_map_eq(dev, eq,
                                         i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-                                        name, &dev->priv.uuari.uars[0]);
+                                        name, MLX5_EQ_TYPE_COMP);
                if (err) {
                        kfree(eq);
                        goto clean;
@@ -899,8 +929,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
                goto out;
        }
 
-       MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
-
        err = mlx5_init_cq_table(dev);
        if (err) {
                dev_err(&pdev->dev, "failed to initialize cq table\n");
@@ -1079,8 +1107,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
                goto err_cleanup_once;
        }
 
-       err = mlx5_alloc_uuars(dev, &priv->uuari);
-       if (err) {
+       dev->priv.uar = mlx5_get_uars_page(dev);
+       if (!dev->priv.uar) {
                dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
                goto err_disable_msix;
        }
@@ -1088,7 +1116,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
        err = mlx5_start_eqs(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
-               goto err_free_uar;
+               goto err_put_uars;
        }
 
        err = alloc_comp_eqs(dev);
@@ -1154,8 +1182,8 @@ err_affinity_hints:
 err_stop_eqs:
        mlx5_stop_eqs(dev);
 
-err_free_uar:
-       mlx5_free_uuars(dev, &priv->uuari);
+err_put_uars:
+       mlx5_put_uars_page(dev, priv->uar);
 
 err_disable_msix:
        mlx5_disable_msix(dev);
@@ -1218,7 +1246,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
        mlx5_irq_clear_affinity_hints(dev);
        free_comp_eqs(dev);
        mlx5_stop_eqs(dev);
-       mlx5_free_uuars(dev, &priv->uuari);
+       mlx5_put_uars_page(dev, priv->uar);
        mlx5_disable_msix(dev);
        if (cleanup)
                mlx5_cleanup_once(dev);
@@ -1284,10 +1312,24 @@ static int init_one(struct pci_dev *pdev,
        spin_lock_init(&priv->ctx_lock);
        mutex_init(&dev->pci_status_mutex);
        mutex_init(&dev->intf_state_mutex);
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       err = init_srcu_struct(&priv->pfault_srcu);
+       if (err) {
+               dev_err(&pdev->dev, "init_srcu_struct failed with error code %d\n",
+                       err);
+               goto clean_dev;
+       }
+#endif
+       mutex_init(&priv->bfregs.reg_head.lock);
+       mutex_init(&priv->bfregs.wc_head.lock);
+       INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
+       INIT_LIST_HEAD(&priv->bfregs.wc_head.list);
+
        err = mlx5_pci_init(dev, priv);
        if (err) {
                dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
-               goto clean_dev;
+               goto clean_srcu;
        }
 
        err = mlx5_health_init(dev);
@@ -1304,9 +1346,7 @@ static int init_one(struct pci_dev *pdev,
                goto clean_health;
        }
 
-       err = request_module_nowait(MLX5_IB_MOD);
-       if (err)
-               pr_info("failed request module on %s\n", MLX5_IB_MOD);
+       request_module_nowait(MLX5_IB_MOD);
 
        err = devlink_register(devlink, &pdev->dev);
        if (err)
@@ -1321,7 +1361,11 @@ clean_health:
        mlx5_health_cleanup(dev);
 close_pci:
        mlx5_pci_close(dev, priv);
+clean_srcu:
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       cleanup_srcu_struct(&priv->pfault_srcu);
 clean_dev:
+#endif
        pci_set_drvdata(pdev, NULL);
        devlink_free(devlink);
 
@@ -1346,6 +1390,9 @@ static void remove_one(struct pci_dev *pdev)
        mlx5_pagealloc_cleanup(dev);
        mlx5_health_cleanup(dev);
        mlx5_pci_close(dev, priv);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       cleanup_srcu_struct(&priv->pfault_srcu);
+#endif
        pci_set_drvdata(pdev, NULL);
        devlink_free(devlink);
 }
index d4a99c9757cbef7bd0a102da9e5277088ef31720..b3dabe6e88366133fd07dab68f059d4f5d7e5e3a 100644 (file)
@@ -86,6 +86,8 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
 void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
                     unsigned long param);
+void mlx5_core_page_fault(struct mlx5_core_dev *dev,
+                         struct mlx5_pagefault *pfault);
 void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
 void mlx5_enter_error_state(struct mlx5_core_dev *dev);
 void mlx5_disable_device(struct mlx5_core_dev *dev);
@@ -111,6 +113,11 @@ u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
 struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
 void mlx5_cq_tasklet_cb(unsigned long data);
 
+int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
+                       u8 access_reg_group);
+int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcam, u8 feature_group,
+                       u8 access_reg_group);
+
 void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
 void mlx5_lag_remove(struct mlx5_core_dev *dev);
 
@@ -136,6 +143,11 @@ void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
 
 bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
 
+int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
+int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
+int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
+int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
+
 void mlx5e_init(void);
 void mlx5e_cleanup(void);
 
index fd12e0a377a567c693c7f174d7762dd6071ff925..141583daf5a279b5f54d0934a527a14cda86f167 100644 (file)
@@ -74,6 +74,30 @@ out:
 }
 EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
 
+int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
+                       u8 access_reg_group)
+{
+       u32 in[MLX5_ST_SZ_DW(pcam_reg)] = {0};
+       int sz = MLX5_ST_SZ_BYTES(pcam_reg);
+
+       MLX5_SET(pcam_reg, in, feature_group, feature_group);
+       MLX5_SET(pcam_reg, in, access_reg_group, access_reg_group);
+
+       return mlx5_core_access_reg(dev, in, sz, pcam, sz, MLX5_REG_PCAM, 0, 0);
+}
+
+int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcam, u8 feature_group,
+                       u8 access_reg_group)
+{
+       u32 in[MLX5_ST_SZ_DW(mcam_reg)] = {0};
+       int sz = MLX5_ST_SZ_BYTES(mcam_reg);
+
+       MLX5_SET(mcam_reg, in, feature_group, feature_group);
+       MLX5_SET(mcam_reg, in, access_reg_group, access_reg_group);
+
+       return mlx5_core_access_reg(dev, in, sz, mcam, sz, MLX5_REG_MCAM, 0, 0);
+}
+
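/* A hedged usage sketch (not part of the commit): callers are expected to
 * gate these queries on the matching capability bit, exactly as
 * mlx5_query_hca_caps() does earlier in this series. "dev" stands for an
 * already-initialized device and the function name is hypothetical.
 */
static int example_cache_port_caps(struct mlx5_core_dev *dev)
{
	if (!MLX5_CAP_GEN(dev, pcam_reg))
		return 0;	/* firmware exposes no PCAM register */

	/* cache the ports capability mask for later feature checks */
	return mlx5_query_pcam_reg(dev, dev->caps.pcam,
				   MLX5_PCAM_FEATURE_ENHANCED_FEATURES,
				   MLX5_PCAM_REGS_5000_TO_507F);
}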
 struct mlx5_reg_pcap {
        u8                      rsvd0;
        u8                      port_num;
@@ -866,3 +890,51 @@ void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
                               module_num, mlx5_pme_status[module_status - 1],
                               mlx5_pme_error[error_type]);
 }
+
+int mlx5_query_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size)
+{
+       u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+
+       return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
+                                   mtpps_size, MLX5_REG_MTPPS, 0, 0);
+}
+
+int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size)
+{
+       u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+
+       return mlx5_core_access_reg(mdev, mtpps, mtpps_size, out,
+                                   sizeof(out), MLX5_REG_MTPPS, 0, 1);
+}
+
+int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode)
+{
+       u32 out[MLX5_ST_SZ_DW(mtppse_reg)] = {0};
+       u32 in[MLX5_ST_SZ_DW(mtppse_reg)] = {0};
+       int err = 0;
+
+       MLX5_SET(mtppse_reg, in, pin, pin);
+
+       err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
+                                  sizeof(out), MLX5_REG_MTPPSE, 0, 0);
+       if (err)
+               return err;
+
+       *arm = MLX5_GET(mtppse_reg, out, event_arm);
+       *mode = MLX5_GET(mtppse_reg, out, event_generation_mode);
+
+       return err;
+}
+
+int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode)
+{
+       u32 out[MLX5_ST_SZ_DW(mtppse_reg)] = {0};
+       u32 in[MLX5_ST_SZ_DW(mtppse_reg)] = {0};
+
+       MLX5_SET(mtppse_reg, in, pin, pin);
+       MLX5_SET(mtppse_reg, in, event_arm, arm);
+       MLX5_SET(mtppse_reg, in, event_generation_mode, mode);
+
+       return mlx5_core_access_reg(mdev, in, sizeof(in), out,
+                                   sizeof(out), MLX5_REG_MTPPSE, 0, 1);
+}
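/* A hedged usage sketch (not part of the commit): arming an event on one
 * hardware PPS pin and reading the setting back. "mdev", the pin number and
 * the generation mode value are hypothetical; real callers come from the
 * PTP/1PPS code that consumes these accessors.
 */
static int example_arm_pps_pin(struct mlx5_core_dev *mdev, u8 pin)
{
	u8 arm, mode;
	int err;

	err = mlx5_set_mtppse(mdev, pin, 1 /* arm */, 0 /* hypothetical mode */);
	if (err)
		return err;

	return mlx5_query_mtppse(mdev, pin, &arm, &mode);
}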
index d0a4005fe63a425869b71feb3626e998a34b0d39..cbbcef2884be46bf8835e01e3b364b8a82d0b31e 100644 (file)
@@ -143,95 +143,6 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
        mlx5_core_put_rsc(common);
 }
 
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
-{
-       struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault;
-       int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK;
-       struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn);
-       struct mlx5_core_qp *qp =
-               container_of(common, struct mlx5_core_qp, common);
-       struct mlx5_pagefault pfault;
-
-       if (!qp) {
-               mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n",
-                              qpn);
-               return;
-       }
-
-       pfault.event_subtype = eqe->sub_type;
-       pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) &
-               (MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA);
-       pfault.bytes_committed = be32_to_cpu(
-               pf_eqe->bytes_committed);
-
-       mlx5_core_dbg(dev,
-                     "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n",
-                     eqe->sub_type, pfault.flags);
-
-       switch (eqe->sub_type) {
-       case MLX5_PFAULT_SUBTYPE_RDMA:
-               /* RDMA based event */
-               pfault.rdma.r_key =
-                       be32_to_cpu(pf_eqe->rdma.r_key);
-               pfault.rdma.packet_size =
-                       be16_to_cpu(pf_eqe->rdma.packet_length);
-               pfault.rdma.rdma_op_len =
-                       be32_to_cpu(pf_eqe->rdma.rdma_op_len);
-               pfault.rdma.rdma_va =
-                       be64_to_cpu(pf_eqe->rdma.rdma_va);
-               mlx5_core_dbg(dev,
-                             "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n",
-                             qpn, pfault.rdma.r_key);
-               mlx5_core_dbg(dev,
-                             "PAGE_FAULT: rdma_op_len: 0x%08x,\n",
-                             pfault.rdma.rdma_op_len);
-               mlx5_core_dbg(dev,
-                             "PAGE_FAULT: rdma_va: 0x%016llx,\n",
-                             pfault.rdma.rdma_va);
-               mlx5_core_dbg(dev,
-                             "PAGE_FAULT: bytes_committed: 0x%06x\n",
-                             pfault.bytes_committed);
-               break;
-
-       case MLX5_PFAULT_SUBTYPE_WQE:
-               /* WQE based event */
-               pfault.wqe.wqe_index =
-                       be16_to_cpu(pf_eqe->wqe.wqe_index);
-               pfault.wqe.packet_size =
-                       be16_to_cpu(pf_eqe->wqe.packet_length);
-               mlx5_core_dbg(dev,
-                             "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n",
-                             qpn, pfault.wqe.wqe_index);
-               mlx5_core_dbg(dev,
-                             "PAGE_FAULT: bytes_committed: 0x%06x\n",
-                             pfault.bytes_committed);
-               break;
-
-       default:
-               mlx5_core_warn(dev,
-                              "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n",
-                              eqe->sub_type, qpn);
-               /* Unsupported page faults should still be resolved by the
-                * page fault handler
-                */
-       }
-
-       if (qp->pfault_handler) {
-               qp->pfault_handler(qp, &pfault);
-       } else {
-               mlx5_core_err(dev,
-                             "ODP event for QP %08x, without a fault handler in QP\n",
-                             qpn);
-               /* Page fault will remain unresolved. QP will hang until it is
-                * destroyed
-                */
-       }
-
-       mlx5_core_put_rsc(common);
-}
-#endif
-
 static int create_qprqsq_common(struct mlx5_core_dev *dev,
                                struct mlx5_core_qp *qp,
                                int rsc_type)
@@ -506,31 +417,6 @@ int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
 }
 EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
 
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
-                               u8 flags, int error)
-{
-       u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
-       u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = {0};
-
-       MLX5_SET(page_fault_resume_in, in, opcode,
-                MLX5_CMD_OP_PAGE_FAULT_RESUME);
-       MLX5_SET(page_fault_resume_in, in, qpn, qpn);
-
-       if (flags & MLX5_PAGE_FAULT_RESUME_REQUESTOR)
-               MLX5_SET(page_fault_resume_in, in, req_res, 1);
-       if (flags & MLX5_PAGE_FAULT_RESUME_WRITE)
-               MLX5_SET(page_fault_resume_in, in, read_write, 1);
-       if (flags & MLX5_PAGE_FAULT_RESUME_RDMA)
-               MLX5_SET(page_fault_resume_in, in, rdma, 1);
-       if (error)
-               MLX5_SET(page_fault_resume_in, in, error, 1);
-
-       return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
-#endif
-
 int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *rq)
 {
index ab0b896621a0452badce6fcab41dd07d1f9cf7d3..2e6b0f290ddc2cbf3beeb2f5c1fe813378691c69 100644 (file)
 #include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"
 
-enum {
-       NUM_DRIVER_UARS         = 4,
-       NUM_LOW_LAT_UUARS       = 4,
-};
-
 int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
 {
        u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
@@ -67,167 +62,269 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
 }
 EXPORT_SYMBOL(mlx5_cmd_free_uar);
 
-static int need_uuar_lock(int uuarn)
+static int uars_per_sys_page(struct mlx5_core_dev *mdev)
 {
-       int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
-
-       if (uuarn == 0 || tot_uuars - NUM_LOW_LAT_UUARS)
-               return 0;
+       if (MLX5_CAP_GEN(mdev, uar_4k))
+               return MLX5_CAP_GEN(mdev, num_of_uars_per_page);
 
        return 1;
 }
 
-int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
+static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
 {
-       int tot_uuars = NUM_DRIVER_UARS * MLX5_BF_REGS_PER_PAGE;
-       struct mlx5_bf *bf;
-       phys_addr_t addr;
-       int err;
+       u32 system_page_index;
+
+       if (MLX5_CAP_GEN(mdev, uar_4k))
+               system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT);
+       else
+               system_page_index = index;
+
+       return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index;
+}
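/* A minimal, standalone sketch (not part of the commit) of the uar2pfn()
 * index math under uar_4k: with 64KB kernel pages (PAGE_SHIFT 16) and the
 * device's fixed 4KB UAR stride (MLX5_ADAPTER_PAGE_SHIFT 12), sixteen
 * consecutive UAR indices share one system page, so the pfn advances only
 * once per sixteen indices.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int page_shift = 16, adapter_page_shift = 12;
	unsigned int idx;

	for (idx = 0; idx <= 32; idx += 16)
		printf("uar index %2u -> system page index %u\n",
		       idx, idx >> (page_shift - adapter_page_shift));
	return 0;	/* prints pages 0, 1 and 2 */
}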
+
+static void up_rel_func(struct kref *kref)
+{
+       struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);
+
+       list_del(&up->list);
+       if (mlx5_cmd_free_uar(up->mdev, up->index))
+               mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
+       kfree(up->reg_bitmap);
+       kfree(up->fp_bitmap);
+       kfree(up);
+}
+
+static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
+                                             bool map_wc)
+{
+       struct mlx5_uars_page *up;
+       int err = -ENOMEM;
+       phys_addr_t pfn;
+       int bfregs;
        int i;
 
-       uuari->num_uars = NUM_DRIVER_UARS;
-       uuari->num_low_latency_uuars = NUM_LOW_LAT_UUARS;
+       bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
+       up = kzalloc(sizeof(*up), GFP_KERNEL);
+       if (!up)
+               return ERR_PTR(err);
 
-       mutex_init(&uuari->lock);
-       uuari->uars = kcalloc(uuari->num_uars, sizeof(*uuari->uars), GFP_KERNEL);
-       if (!uuari->uars)
-               return -ENOMEM;
+       up->mdev = mdev;
+       up->reg_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL);
+       if (!up->reg_bitmap)
+               goto error1;
 
-       uuari->bfs = kcalloc(tot_uuars, sizeof(*uuari->bfs), GFP_KERNEL);
-       if (!uuari->bfs) {
-               err = -ENOMEM;
-               goto out_uars;
-       }
+       up->fp_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL);
+       if (!up->fp_bitmap)
+               goto error1;
 
-       uuari->bitmap = kcalloc(BITS_TO_LONGS(tot_uuars), sizeof(*uuari->bitmap),
-                               GFP_KERNEL);
-       if (!uuari->bitmap) {
-               err = -ENOMEM;
-               goto out_bfs;
-       }
+       for (i = 0; i < bfregs; i++)
+               if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR)
+                       set_bit(i, up->reg_bitmap);
+               else
+                       set_bit(i, up->fp_bitmap);
 
-       uuari->count = kcalloc(tot_uuars, sizeof(*uuari->count), GFP_KERNEL);
-       if (!uuari->count) {
-               err = -ENOMEM;
-               goto out_bitmap;
-       }
+       up->bfregs = bfregs;
+       up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
+       up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
 
-       for (i = 0; i < uuari->num_uars; i++) {
-               err = mlx5_cmd_alloc_uar(dev, &uuari->uars[i].index);
-               if (err)
-                       goto out_count;
+       err = mlx5_cmd_alloc_uar(mdev, &up->index);
+       if (err) {
+               mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
+               goto error1;
+       }
 
-               addr = dev->iseg_base + ((phys_addr_t)(uuari->uars[i].index) << PAGE_SHIFT);
-               uuari->uars[i].map = ioremap(addr, PAGE_SIZE);
-               if (!uuari->uars[i].map) {
-                       mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+       pfn = uar2pfn(mdev, up->index);
+       if (map_wc) {
+               up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
+               if (!up->map) {
+                       err = -EAGAIN;
+                       goto error2;
+               }
+       } else {
+               up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+               if (!up->map) {
                        err = -ENOMEM;
-                       goto out_count;
+                       goto error2;
                }
-               mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n",
-                             uuari->uars[i].index, uuari->uars[i].map);
-       }
-
-       for (i = 0; i < tot_uuars; i++) {
-               bf = &uuari->bfs[i];
-
-               bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
-               bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
-               bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
-               bf->reg = NULL; /* Add WC support */
-               bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
-                            (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) +
-                            MLX5_BF_OFFSET;
-               bf->need_lock = need_uuar_lock(i);
-               spin_lock_init(&bf->lock);
-               spin_lock_init(&bf->lock32);
-               bf->uuarn = i;
        }
+       kref_init(&up->ref_count);
+       mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
+                     up->index, up->bfregs);
+       return up;
+
+error2:
+       if (mlx5_cmd_free_uar(mdev, up->index))
+               mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
+error1:
+       kfree(up->fp_bitmap);
+       kfree(up->reg_bitmap);
+       kfree(up);
+       return ERR_PTR(err);
+}
 
-       return 0;
-
-out_count:
-       for (i--; i >= 0; i--) {
-               iounmap(uuari->uars[i].map);
-               mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
+{
+       struct mlx5_uars_page *ret;
+
+       mutex_lock(&mdev->priv.bfregs.reg_head.lock);
+       if (list_empty(&mdev->priv.bfregs.reg_head.list)) {
+               ret = alloc_uars_page(mdev, false);
+               if (IS_ERR(ret)) {
+                       ret = NULL;
+                       goto out;
+               }
+               list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
+       } else {
+               ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
+                                      struct mlx5_uars_page, list);
+               kref_get(&ret->ref_count);
        }
-       kfree(uuari->count);
+out:
+       mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
 
-out_bitmap:
-       kfree(uuari->bitmap);
-
-out_bfs:
-       kfree(uuari->bfs);
+       return ret;
+}
+EXPORT_SYMBOL(mlx5_get_uars_page);
 
-out_uars:
-       kfree(uuari->uars);
-       return err;
+void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up)
+{
+       mutex_lock(&mdev->priv.bfregs.reg_head.lock);
+       kref_put(&up->ref_count, up_rel_func);
+       mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
 }
+EXPORT_SYMBOL(mlx5_put_uars_page);
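/* A hedged usage sketch (not part of the commit): the pairing used by
 * mlx5_load_one()/mlx5_unload_one() above -- take a reference on the shared
 * non-write-combining UARs page at load time and drop it at teardown.
 */
static int example_take_uars_page(struct mlx5_core_dev *mdev)
{
	mdev->priv.uar = mlx5_get_uars_page(mdev);
	if (!mdev->priv.uar)
		return -ENOMEM;

	/* ... EQs and friends use mdev->priv.uar ... */

	mlx5_put_uars_page(mdev, mdev->priv.uar);
	return 0;
}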
 
-int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
+static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
 {
-       int i = uuari->num_uars;
+       /* return the offset in bytes from the start of the page to the
+        * blue flame area of the UAR
+        */
+       return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE +
+              (dbi % MLX5_BFREGS_PER_UAR) *
+              (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
+}
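/* A minimal, standalone sketch (not part of the commit) of map_offset() for
 * the first few doorbell indices, assuming four bfregs per 4KB UAR,
 * MLX5_BF_OFFSET of 0x800 and a hypothetical log_bf_reg_size of 9 (512B).
 */
#include <stdio.h>

int main(void)
{
	const unsigned long bfregs_per_uar = 4, adapter_page_size = 4096;
	const unsigned long bf_offset = 0x800, bf_reg_size = 1UL << 9;
	unsigned long dbi;

	for (dbi = 0; dbi < 6; dbi++)
		printf("dbi %lu -> offset 0x%lx\n", dbi,
		       dbi / bfregs_per_uar * adapter_page_size +
		       (dbi % bfregs_per_uar) * bf_reg_size + bf_offset);
	return 0;	/* 0x800, 0xa00, 0xc00, 0xe00, then 0x1800, 0x1a00 */
}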
 
-       for (i--; i >= 0; i--) {
-               iounmap(uuari->uars[i].map);
-               mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
+                      bool map_wc, bool fast_path)
+{
+       struct mlx5_bfreg_data *bfregs;
+       struct mlx5_uars_page *up;
+       struct list_head *head;
+       unsigned long *bitmap;
+       unsigned int *avail;
+       struct mutex *lock;  /* pointer to right mutex */
+       int dbi;
+
+       bfregs = &mdev->priv.bfregs;
+       if (map_wc) {
+               head = &bfregs->wc_head.list;
+               lock = &bfregs->wc_head.lock;
+       } else {
+               head = &bfregs->reg_head.list;
+               lock = &bfregs->reg_head.lock;
        }
-
-       kfree(uuari->count);
-       kfree(uuari->bitmap);
-       kfree(uuari->bfs);
-       kfree(uuari->uars);
+       mutex_lock(lock);
+       if (list_empty(head)) {
+               up = alloc_uars_page(mdev, map_wc);
+               if (IS_ERR(up)) {
+                       mutex_unlock(lock);
+                       return PTR_ERR(up);
+               }
+               list_add(&up->list, head);
+       } else {
+               up = list_entry(head->next, struct mlx5_uars_page, list);
+               kref_get(&up->ref_count);
+       }
+       if (fast_path) {
+               bitmap = up->fp_bitmap;
+               avail = &up->fp_avail;
+       } else {
+               bitmap = up->reg_bitmap;
+               avail = &up->reg_avail;
+       }
+       dbi = find_first_bit(bitmap, up->bfregs);
+       clear_bit(dbi, bitmap);
+       (*avail)--;
+       if (!(*avail))
+               list_del(&up->list);
+
+       bfreg->map = up->map + map_offset(mdev, dbi);
+       bfreg->up = up;
+       bfreg->wc = map_wc;
+       bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR;
+       mutex_unlock(lock);
 
        return 0;
 }
 
-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
-                      bool map_wc)
+int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
+                    bool map_wc, bool fast_path)
 {
-       phys_addr_t pfn;
-       phys_addr_t uar_bar_start;
        int err;
 
-       err = mlx5_cmd_alloc_uar(mdev, &uar->index);
-       if (err) {
-               mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
-               return err;
-       }
+       err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
+       if (!err)
+               return 0;
 
-       uar_bar_start = pci_resource_start(mdev->pdev, 0);
-       pfn           = (uar_bar_start >> PAGE_SHIFT) + uar->index;
+       if (err == -EAGAIN && map_wc)
+               return alloc_bfreg(mdev, bfreg, false, fast_path);
 
-       if (map_wc) {
-               uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
-               if (!uar->bf_map) {
-                       mlx5_core_warn(mdev, "ioremap_wc() failed\n");
-                       uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
-                       if (!uar->map)
-                               goto err_free_uar;
-               }
-       } else {
-               uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
-               if (!uar->map)
-                       goto err_free_uar;
-       }
+       return err;
+}
+EXPORT_SYMBOL(mlx5_alloc_bfreg);
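/* A hedged usage sketch (not part of the commit): allocating a
 * write-combining send-queue bfreg with the automatic non-WC fallback
 * implemented above, then releasing it. "mdev" is hypothetical.
 */
static int example_bfreg_roundtrip(struct mlx5_core_dev *mdev)
{
	struct mlx5_sq_bfreg bfreg;
	int err;

	err = mlx5_alloc_bfreg(mdev, &bfreg, true /* map_wc */,
			       false /* fast_path */);
	if (err)
		return err;

	/* ... ring doorbells through bfreg.map ... */

	mlx5_free_bfreg(mdev, &bfreg);
	return 0;
}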
 
-       return 0;
+static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev,
+                                          struct mlx5_uars_page *up,
+                                          struct mlx5_sq_bfreg *bfreg)
+{
+       unsigned int uar_idx;
+       unsigned int bfreg_idx;
+       unsigned int bf_reg_size;
 
-err_free_uar:
-       mlx5_core_warn(mdev, "ioremap() failed\n");
-       err = -ENOMEM;
-       mlx5_cmd_free_uar(mdev, uar->index);
+       bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);
 
-       return err;
+       uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
+       bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;
+
+       return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx;
 }
-EXPORT_SYMBOL(mlx5_alloc_map_uar);
 
-void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
 {
-       if (uar->map)
-               iounmap(uar->map);
-       else
-               iounmap(uar->bf_map);
-       mlx5_cmd_free_uar(mdev, uar->index);
+       struct mlx5_bfreg_data *bfregs;
+       struct mlx5_uars_page *up;
+       struct mutex *lock; /* pointer to right mutex */
+       unsigned int dbi;
+       bool fp;
+       unsigned int *avail;
+       unsigned long *bitmap;
+       struct list_head *head;
+
+       bfregs = &mdev->priv.bfregs;
+       if (bfreg->wc) {
+               head = &bfregs->wc_head.list;
+               lock = &bfregs->wc_head.lock;
+       } else {
+               head = &bfregs->reg_head.list;
+               lock = &bfregs->reg_head.lock;
+       }
+       up = bfreg->up;
+       dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
+       fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR;
+       if (fp) {
+               avail = &up->fp_avail;
+               bitmap = up->fp_bitmap;
+       } else {
+               avail = &up->reg_avail;
+               bitmap = up->reg_bitmap;
+       }
+       mutex_lock(lock);
+       (*avail)++;
+       set_bit(dbi, bitmap);
+       if (*avail == 1)
+               list_add_tail(&up->list, head);
+
+       kref_put(&up->ref_count, up_rel_func);
+       mutex_unlock(lock);
 }
-EXPORT_SYMBOL(mlx5_unmap_free_uar);
+EXPORT_SYMBOL(mlx5_free_bfreg);
index 7129c30a2ab477d23be1b8b8d34e7190618e0f9f..15c2294dd2b40eb88cae6b2bfa6c27c700d641bc 100644 (file)
@@ -127,6 +127,23 @@ int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
 }
 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
 
+void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
+                          u8 *min_inline_mode)
+{
+       switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
+       case MLX5_CAP_INLINE_MODE_L2:
+               *min_inline_mode = MLX5_INLINE_MODE_L2;
+               break;
+       case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+               mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
+               break;
+       case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+               *min_inline_mode = MLX5_INLINE_MODE_NONE;
+               break;
+       }
+}
+EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
+
 int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
                                     u16 vport, u8 min_inline)
 {
index 16f44b9aa07611cddf35b128d4e75fda20857161..ef23eaedc2ff44c61e9dc9dadee0ba3e515fbefe 100644 (file)
@@ -73,6 +73,8 @@ config MLXSW_SWITCHX2
 config MLXSW_SPECTRUM
        tristate "Mellanox Technologies Spectrum support"
        depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q
+       depends on PSAMPLE || PSAMPLE=n
+       select PARMAN
        default m
        ---help---
          This driver supports Mellanox Technologies Spectrum Ethernet
index fe8dadba15abe7dfd9e706364ad3497abbef7b14..6b6c30deee83ca289ba0bfcb924678efe55e65e7 100644 (file)
@@ -1,5 +1,6 @@
 obj-$(CONFIG_MLXSW_CORE)       += mlxsw_core.o
-mlxsw_core-objs                        := core.o
+mlxsw_core-objs                        := core.o core_acl_flex_keys.o \
+                                  core_acl_flex_actions.o
 mlxsw_core-$(CONFIG_MLXSW_CORE_HWMON) += core_hwmon.o
 mlxsw_core-$(CONFIG_MLXSW_CORE_THERMAL) += core_thermal.o
 obj-$(CONFIG_MLXSW_PCI)                += mlxsw_pci.o
@@ -13,7 +14,8 @@ mlxsw_switchx2-objs           := switchx2.o
 obj-$(CONFIG_MLXSW_SPECTRUM)   += mlxsw_spectrum.o
 mlxsw_spectrum-objs            := spectrum.o spectrum_buffers.o \
                                   spectrum_switchdev.o spectrum_router.o \
-                                  spectrum_kvdl.o
+                                  spectrum_kvdl.o spectrum_acl_tcam.o \
+                                  spectrum_acl.o spectrum_flower.o
 mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB)    += spectrum_dcb.o
 obj-$(CONFIG_MLXSW_MINIMAL)    += mlxsw_minimal.o
 mlxsw_minimal-objs             := minimal.o
index 56e19b0d2f8f272d9833159264b2f139c5cee13b..a1b48421648a3c11e25e7a5c148a25bf07d322c0 100644 (file)
@@ -1132,12 +1132,12 @@ static inline int mlxsw_cmd_sw2hw_eq(struct mlxsw_core *mlxsw_core,
  */
 MLXSW_ITEM32(cmd_mbox, sw2hw_eq, int_msix, 0x00, 24, 1);
 
-/* cmd_mbox_sw2hw_eq_int_oi
+/* cmd_mbox_sw2hw_eq_oi
  * When set, overrun ignore is enabled.
  */
 MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
 
-/* cmd_mbox_sw2hw_eq_int_st
+/* cmd_mbox_sw2hw_eq_st
  * Event delivery state machine
  * 0x0 - FIRED
  * 0x1 - ARMED (Request for Notification)
@@ -1146,19 +1146,19 @@ MLXSW_ITEM32(cmd_mbox, sw2hw_eq, oi, 0x00, 12, 1);
  */
 MLXSW_ITEM32(cmd_mbox, sw2hw_eq, st, 0x00, 8, 2);
 
-/* cmd_mbox_sw2hw_eq_int_log_eq_size
+/* cmd_mbox_sw2hw_eq_log_eq_size
  * Log (base 2) of the EQ size (in entries).
  */
 MLXSW_ITEM32(cmd_mbox, sw2hw_eq, log_eq_size, 0x00, 0, 4);
 
-/* cmd_mbox_sw2hw_eq_int_producer_counter
+/* cmd_mbox_sw2hw_eq_producer_counter
  * Producer Counter. The counter is incremented for each EQE that is written
  * by the HW to the EQ.
  * Maintained by HW (valid for the QUERY_EQ command only)
  */
 MLXSW_ITEM32(cmd_mbox, sw2hw_eq, producer_counter, 0x04, 0, 16);
 
-/* cmd_mbox_sw2hw_eq_int_pa
+/* cmd_mbox_sw2hw_eq_pa
  * Physical Address.
  */
 MLXSW_ITEM64_INDEXED(cmd_mbox, sw2hw_eq, pa, 0x10, 11, 53, 0x08, 0x00, true);
index 57a98849551b31fc2d145b7aea0e5c7936134e2b..a4c07841aaf6254c844eb8d8512687b447928ba8 100644 (file)
@@ -1901,11 +1901,11 @@ int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
 }
 EXPORT_SYMBOL(mlxsw_core_schedule_dw);
 
-int mlxsw_core_schedule_odw(struct delayed_work *dwork, unsigned long delay)
+bool mlxsw_core_schedule_work(struct work_struct *work)
 {
-       return queue_delayed_work(mlxsw_owq, dwork, delay);
+       return queue_work(mlxsw_owq, work);
 }
-EXPORT_SYMBOL(mlxsw_core_schedule_odw);
+EXPORT_SYMBOL(mlxsw_core_schedule_work);
 
 void mlxsw_core_flush_owq(void)
 {
index a7f94fbc898ba866c0be87e21813cc6e39f52cad..cf38cf9027f80a95a4f8a744de7551cb0810bf51 100644 (file)
@@ -207,7 +207,7 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
                                                u8 local_port);
 
 int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
-int mlxsw_core_schedule_odw(struct delayed_work *dwork, unsigned long delay);
+bool mlxsw_core_schedule_work(struct work_struct *work);
 void mlxsw_core_flush_owq(void);
 
 #define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
new file mode 100644 (file)
index 0000000..5f33771
--- /dev/null
@@ -0,0 +1,679 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/rhashtable.h>
+#include <linux/list.h>
+
+#include "item.h"
+#include "core_acl_flex_actions.h"
+
+enum mlxsw_afa_set_type {
+       MLXSW_AFA_SET_TYPE_NEXT,
+       MLXSW_AFA_SET_TYPE_GOTO,
+};
+
+/* afa_set_type
+ * Type of the record at the end of the action set.
+ */
+MLXSW_ITEM32(afa, set, type, 0xA0, 28, 4);
+
+/* afa_set_next_action_set_ptr
+ * A pointer to the next action set in the KVD Centralized database.
+ */
+MLXSW_ITEM32(afa, set, next_action_set_ptr, 0xA4, 0, 24);
+
+/* afa_set_goto_g
+ * group - When set, the binding is of an ACL group. When cleared,
+ * the binding is of an ACL.
+ * Must be set to 1 for Spectrum.
+ */
+MLXSW_ITEM32(afa, set, goto_g, 0xA4, 29, 1);
+
+enum mlxsw_afa_set_goto_binding_cmd {
+       /* continue go the next binding point */
+       MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE,
+       /* jump to the next binding point no return */
+       MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP,
+       /* terminate the acl binding */
+       MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM = 4,
+};
+
+/* afa_set_goto_binding_cmd */
+MLXSW_ITEM32(afa, set, goto_binding_cmd, 0xA4, 24, 3);
+
+/* afa_set_goto_next_binding
+ * ACL/ACL group identifier. If the g bit is set, this field should hold
+ * the acl_group_id, else it should hold the acl_id.
+ */
+MLXSW_ITEM32(afa, set, goto_next_binding, 0xA4, 0, 16);
+
+/* afa_all_action_type
+ * Action Type.
+ */
+MLXSW_ITEM32(afa, all, action_type, 0x00, 24, 6);
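/* A minimal, standalone sketch (not part of the commit) of what an
 * MLXSW_ITEM32(afa, set, type, 0xA0, 28, 4) setter amounts to: writing a
 * 4-bit value at bit offset 28 of the big-endian 32-bit word found at byte
 * offset 0xA0 of the payload. The helper below is illustrative only.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static void set_field(char *payload, unsigned int byte_off,
		      unsigned int shift, unsigned int bits, uint32_t val)
{
	uint32_t w;

	memcpy(&w, payload + byte_off, sizeof(w));
	w = ntohl(w);
	w &= ~(((1u << bits) - 1) << shift);
	w |= (val & ((1u << bits) - 1)) << shift;
	w = htonl(w);
	memcpy(payload + byte_off, &w, sizeof(w));
}

int main(void)
{
	char enc[0xA8] = {0};

	set_field(enc, 0xA0, 28, 4, 1);	/* afa_set_type = GOTO (1) */
	printf("byte 0xA0 = 0x%02x\n", (unsigned char)enc[0xA0]);	/* 0x10 */
	return 0;
}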
+
+struct mlxsw_afa {
+       unsigned int max_acts_per_set;
+       const struct mlxsw_afa_ops *ops;
+       void *ops_priv;
+       struct rhashtable set_ht;
+       struct rhashtable fwd_entry_ht;
+};
+
+#define MLXSW_AFA_SET_LEN 0xA8
+
+struct mlxsw_afa_set_ht_key {
+       char enc_actions[MLXSW_AFA_SET_LEN]; /* Encoded set */
+       bool is_first;
+};
+
+/* Set structure holds one action set record. It contains up to three
+ * actions (the exact number depends on the size of each action). The set
+ * is either put directly into a rule, or stored in the KVD linear area.
+ * To prevent duplicate entries in the KVD linear area, a hashtable is
+ * used to track sets that were previously inserted and may be shared.
+ */
+
+struct mlxsw_afa_set {
+       struct rhash_head ht_node;
+       struct mlxsw_afa_set_ht_key ht_key;
+       u32 kvdl_index;
+       bool shared; /* Inserted in hashtable (doesn't mean that
+                     * kvdl_index is valid).
+                     */
+       unsigned int ref_count;
+       struct mlxsw_afa_set *next; /* Pointer to the next set. */
+       struct mlxsw_afa_set *prev; /* Pointer to the previous set,
+                                    * note that set may have multiple
+                                    * sets from multiple blocks
+                                    * pointing at it. This is only
+                                    * usable until commit.
+                                    */
+};
+
+static const struct rhashtable_params mlxsw_afa_set_ht_params = {
+       .key_len = sizeof(struct mlxsw_afa_set_ht_key),
+       .key_offset = offsetof(struct mlxsw_afa_set, ht_key),
+       .head_offset = offsetof(struct mlxsw_afa_set, ht_node),
+       .automatic_shrinking = true,
+};
+
+struct mlxsw_afa_fwd_entry_ht_key {
+       u8 local_port;
+};
+
+struct mlxsw_afa_fwd_entry {
+       struct rhash_head ht_node;
+       struct mlxsw_afa_fwd_entry_ht_key ht_key;
+       u32 kvdl_index;
+       unsigned int ref_count;
+};
+
+static const struct rhashtable_params mlxsw_afa_fwd_entry_ht_params = {
+       .key_len = sizeof(struct mlxsw_afa_fwd_entry_ht_key),
+       .key_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_key),
+       .head_offset = offsetof(struct mlxsw_afa_fwd_entry, ht_node),
+       .automatic_shrinking = true,
+};
+
+struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
+                                  const struct mlxsw_afa_ops *ops,
+                                  void *ops_priv)
+{
+       struct mlxsw_afa *mlxsw_afa;
+       int err;
+
+       mlxsw_afa = kzalloc(sizeof(*mlxsw_afa), GFP_KERNEL);
+       if (!mlxsw_afa)
+               return ERR_PTR(-ENOMEM);
+       err = rhashtable_init(&mlxsw_afa->set_ht, &mlxsw_afa_set_ht_params);
+       if (err)
+               goto err_set_rhashtable_init;
+       err = rhashtable_init(&mlxsw_afa->fwd_entry_ht,
+                             &mlxsw_afa_fwd_entry_ht_params);
+       if (err)
+               goto err_fwd_entry_rhashtable_init;
+       mlxsw_afa->max_acts_per_set = max_acts_per_set;
+       mlxsw_afa->ops = ops;
+       mlxsw_afa->ops_priv = ops_priv;
+       return mlxsw_afa;
+
+err_fwd_entry_rhashtable_init:
+       rhashtable_destroy(&mlxsw_afa->set_ht);
+err_set_rhashtable_init:
+       kfree(mlxsw_afa);
+       return ERR_PTR(err);
+}
+EXPORT_SYMBOL(mlxsw_afa_create);
+
+void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa)
+{
+       rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
+       rhashtable_destroy(&mlxsw_afa->set_ht);
+       kfree(mlxsw_afa);
+}
+EXPORT_SYMBOL(mlxsw_afa_destroy);
+
+static void mlxsw_afa_set_goto_set(struct mlxsw_afa_set *set,
+                                  enum mlxsw_afa_set_goto_binding_cmd cmd,
+                                  u16 group_id)
+{
+       char *actions = set->ht_key.enc_actions;
+
+       mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_GOTO);
+       mlxsw_afa_set_goto_g_set(actions, true);
+       mlxsw_afa_set_goto_binding_cmd_set(actions, cmd);
+       mlxsw_afa_set_goto_next_binding_set(actions, group_id);
+}
+
+static void mlxsw_afa_set_next_set(struct mlxsw_afa_set *set,
+                                  u32 next_set_kvdl_index)
+{
+       char *actions = set->ht_key.enc_actions;
+
+       mlxsw_afa_set_type_set(actions, MLXSW_AFA_SET_TYPE_NEXT);
+       mlxsw_afa_set_next_action_set_ptr_set(actions, next_set_kvdl_index);
+}
+
+static struct mlxsw_afa_set *mlxsw_afa_set_create(bool is_first)
+{
+       struct mlxsw_afa_set *set;
+
+       set = kzalloc(sizeof(*set), GFP_KERNEL);
+       if (!set)
+               return NULL;
+       /* Initialize the set so it terminates the binding (packets pass) by default */
+       mlxsw_afa_set_goto_set(set, MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
+       set->ht_key.is_first = is_first;
+       set->ref_count = 1;
+       return set;
+}
+
+static void mlxsw_afa_set_destroy(struct mlxsw_afa_set *set)
+{
+       kfree(set);
+}
+
+static int mlxsw_afa_set_share(struct mlxsw_afa *mlxsw_afa,
+                              struct mlxsw_afa_set *set)
+{
+       int err;
+
+       err = rhashtable_insert_fast(&mlxsw_afa->set_ht, &set->ht_node,
+                                    mlxsw_afa_set_ht_params);
+       if (err)
+               return err;
+       err = mlxsw_afa->ops->kvdl_set_add(mlxsw_afa->ops_priv,
+                                          &set->kvdl_index,
+                                          set->ht_key.enc_actions,
+                                          set->ht_key.is_first);
+       if (err)
+               goto err_kvdl_set_add;
+       set->shared = true;
+       set->prev = NULL;
+       return 0;
+
+err_kvdl_set_add:
+       rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
+                              mlxsw_afa_set_ht_params);
+       return err;
+}
+
+static void mlxsw_afa_set_unshare(struct mlxsw_afa *mlxsw_afa,
+                                 struct mlxsw_afa_set *set)
+{
+       mlxsw_afa->ops->kvdl_set_del(mlxsw_afa->ops_priv,
+                                    set->kvdl_index,
+                                    set->ht_key.is_first);
+       rhashtable_remove_fast(&mlxsw_afa->set_ht, &set->ht_node,
+                              mlxsw_afa_set_ht_params);
+       set->shared = false;
+}
+
+static void mlxsw_afa_set_put(struct mlxsw_afa *mlxsw_afa,
+                             struct mlxsw_afa_set *set)
+{
+       if (--set->ref_count)
+               return;
+       if (set->shared)
+               mlxsw_afa_set_unshare(mlxsw_afa, set);
+       mlxsw_afa_set_destroy(set);
+}
+
+static struct mlxsw_afa_set *mlxsw_afa_set_get(struct mlxsw_afa *mlxsw_afa,
+                                              struct mlxsw_afa_set *orig_set)
+{
+       struct mlxsw_afa_set *set;
+       int err;
+
+       /* A hashtable of sets is maintained. If a set with the exact same
+        * encoding already exists, reuse it. Otherwise, share the current
+        * set by making it available to others through the hash table.
+        */
+       set = rhashtable_lookup_fast(&mlxsw_afa->set_ht, &orig_set->ht_key,
+                                    mlxsw_afa_set_ht_params);
+       if (set) {
+               set->ref_count++;
+               mlxsw_afa_set_put(mlxsw_afa, orig_set);
+       } else {
+               set = orig_set;
+               err = mlxsw_afa_set_share(mlxsw_afa, set);
+               if (err)
+                       return ERR_PTR(err);
+       }
+       return set;
+}
+
+/* Block structure holds a list of action sets. One action block
+ * represents one chain of actions executed upon match of a rule.
+ */
+
+struct mlxsw_afa_block {
+       struct mlxsw_afa *afa;
+       bool finished;
+       struct mlxsw_afa_set *first_set;
+       struct mlxsw_afa_set *cur_set;
+       unsigned int cur_act_index; /* In current set. */
+       struct list_head fwd_entry_ref_list;
+};
+
+struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa)
+{
+       struct mlxsw_afa_block *block;
+
+       block = kzalloc(sizeof(*block), GFP_KERNEL);
+       if (!block)
+               return NULL;
+       INIT_LIST_HEAD(&block->fwd_entry_ref_list);
+       block->afa = mlxsw_afa;
+
+       /* At least one action set is always present, so just create it here */
+       block->first_set = mlxsw_afa_set_create(true);
+       if (!block->first_set)
+               goto err_first_set_create;
+       block->cur_set = block->first_set;
+       return block;
+
+err_first_set_create:
+       kfree(block);
+       return NULL;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_create);
+
+static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block);
+
+void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block)
+{
+       struct mlxsw_afa_set *set = block->first_set;
+       struct mlxsw_afa_set *next_set;
+
+       do {
+               next_set = set->next;
+               mlxsw_afa_set_put(block->afa, set);
+               set = next_set;
+       } while (set);
+       mlxsw_afa_fwd_entry_refs_destroy(block);
+       kfree(block);
+}
+EXPORT_SYMBOL(mlxsw_afa_block_destroy);
+
+int mlxsw_afa_block_commit(struct mlxsw_afa_block *block)
+{
+       struct mlxsw_afa_set *set = block->cur_set;
+       struct mlxsw_afa_set *prev_set;
+
+       block->cur_set = NULL;
+       block->finished = true;
+
+       /* Go over all linked sets starting from the last one
+        * and try to find an existing set in the hash table.
+        * If it is not there, assign a KVD linear index
+        * and insert it.
+        */
+       do {
+               prev_set = set->prev;
+               set = mlxsw_afa_set_get(block->afa, set);
+               if (IS_ERR(set))
+                       /* No rollback is needed since the chain is
+                        * in consistent state and mlxsw_afa_block_destroy
+                        * will take care of putting it away.
+                        */
+                       return PTR_ERR(set);
+               if (prev_set) {
+                       prev_set->next = set;
+                       mlxsw_afa_set_next_set(prev_set, set->kvdl_index);
+                       set = prev_set;
+               }
+       } while (prev_set);
+
+       block->first_set = set;
+       return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_commit);
+
+char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block)
+{
+       return block->first_set->ht_key.enc_actions;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_first_set);
+
+u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block)
+{
+       return block->first_set->kvdl_index;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index);
+
+void mlxsw_afa_block_continue(struct mlxsw_afa_block *block)
+{
+       if (WARN_ON(block->finished))
+               return;
+       mlxsw_afa_set_goto_set(block->cur_set,
+                              MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 0);
+       block->finished = true;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_continue);
+
+void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
+{
+       if (WARN_ON(block->finished))
+               return;
+       mlxsw_afa_set_goto_set(block->cur_set,
+                              MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, group_id);
+       block->finished = true;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_jump);
+
+static struct mlxsw_afa_fwd_entry *
+mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
+{
+       struct mlxsw_afa_fwd_entry *fwd_entry;
+       int err;
+
+       fwd_entry = kzalloc(sizeof(*fwd_entry), GFP_KERNEL);
+       if (!fwd_entry)
+               return ERR_PTR(-ENOMEM);
+       fwd_entry->ht_key.local_port = local_port;
+       fwd_entry->ref_count = 1;
+
+       err = rhashtable_insert_fast(&mlxsw_afa->fwd_entry_ht,
+                                    &fwd_entry->ht_node,
+                                    mlxsw_afa_fwd_entry_ht_params);
+       if (err)
+               goto err_rhashtable_insert;
+
+       err = mlxsw_afa->ops->kvdl_fwd_entry_add(mlxsw_afa->ops_priv,
+                                                &fwd_entry->kvdl_index,
+                                                local_port);
+       if (err)
+               goto err_kvdl_fwd_entry_add;
+       return fwd_entry;
+
+err_kvdl_fwd_entry_add:
+       rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
+                              mlxsw_afa_fwd_entry_ht_params);
+err_rhashtable_insert:
+       kfree(fwd_entry);
+       return ERR_PTR(err);
+}
+
+static void mlxsw_afa_fwd_entry_destroy(struct mlxsw_afa *mlxsw_afa,
+                                       struct mlxsw_afa_fwd_entry *fwd_entry)
+{
+       mlxsw_afa->ops->kvdl_fwd_entry_del(mlxsw_afa->ops_priv,
+                                          fwd_entry->kvdl_index);
+       rhashtable_remove_fast(&mlxsw_afa->fwd_entry_ht, &fwd_entry->ht_node,
+                              mlxsw_afa_fwd_entry_ht_params);
+       kfree(fwd_entry);
+}
+
+static struct mlxsw_afa_fwd_entry *
+mlxsw_afa_fwd_entry_get(struct mlxsw_afa *mlxsw_afa, u8 local_port)
+{
+       struct mlxsw_afa_fwd_entry_ht_key ht_key = {0};
+       struct mlxsw_afa_fwd_entry *fwd_entry;
+
+       ht_key.local_port = local_port;
+       fwd_entry = rhashtable_lookup_fast(&mlxsw_afa->fwd_entry_ht, &ht_key,
+                                          mlxsw_afa_fwd_entry_ht_params);
+       if (fwd_entry) {
+               fwd_entry->ref_count++;
+               return fwd_entry;
+       }
+       return mlxsw_afa_fwd_entry_create(mlxsw_afa, local_port);
+}
+
+static void mlxsw_afa_fwd_entry_put(struct mlxsw_afa *mlxsw_afa,
+                                   struct mlxsw_afa_fwd_entry *fwd_entry)
+{
+       if (--fwd_entry->ref_count)
+               return;
+       mlxsw_afa_fwd_entry_destroy(mlxsw_afa, fwd_entry);
+}
+
+struct mlxsw_afa_fwd_entry_ref {
+       struct list_head list;
+       struct mlxsw_afa_fwd_entry *fwd_entry;
+};
+
+static struct mlxsw_afa_fwd_entry_ref *
+mlxsw_afa_fwd_entry_ref_create(struct mlxsw_afa_block *block, u8 local_port)
+{
+       struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
+       struct mlxsw_afa_fwd_entry *fwd_entry;
+       int err;
+
+       fwd_entry_ref = kzalloc(sizeof(*fwd_entry_ref), GFP_KERNEL);
+       if (!fwd_entry_ref)
+               return ERR_PTR(-ENOMEM);
+       fwd_entry = mlxsw_afa_fwd_entry_get(block->afa, local_port);
+       if (IS_ERR(fwd_entry)) {
+               err = PTR_ERR(fwd_entry);
+               goto err_fwd_entry_get;
+       }
+       fwd_entry_ref->fwd_entry = fwd_entry;
+       list_add(&fwd_entry_ref->list, &block->fwd_entry_ref_list);
+       return fwd_entry_ref;
+
+err_fwd_entry_get:
+       kfree(fwd_entry_ref);
+       return ERR_PTR(err);
+}
+
+static void
+mlxsw_afa_fwd_entry_ref_destroy(struct mlxsw_afa_block *block,
+                               struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref)
+{
+       list_del(&fwd_entry_ref->list);
+       mlxsw_afa_fwd_entry_put(block->afa, fwd_entry_ref->fwd_entry);
+       kfree(fwd_entry_ref);
+}
+
+static void mlxsw_afa_fwd_entry_refs_destroy(struct mlxsw_afa_block *block)
+{
+       struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
+       struct mlxsw_afa_fwd_entry_ref *tmp;
+
+       list_for_each_entry_safe(fwd_entry_ref, tmp,
+                                &block->fwd_entry_ref_list, list)
+               mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
+}
+
+#define MLXSW_AFA_ONE_ACTION_LEN 32
+#define MLXSW_AFA_PAYLOAD_OFFSET 4
+
+static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
+                                          u8 action_code, u8 action_size)
+{
+       char *oneact;
+       char *actions;
+
+       if (WARN_ON(block->finished))
+               return NULL;
+       if (block->cur_act_index + action_size >
+           block->afa->max_acts_per_set) {
+               struct mlxsw_afa_set *set;
+
+               /* The appended action won't fit into the current action set,
+                * so create a new set.
+                */
+               set = mlxsw_afa_set_create(false);
+               if (!set)
+                       return NULL;
+               set->prev = block->cur_set;
+               block->cur_act_index = 0;
+               block->cur_set->next = set;
+               block->cur_set = set;
+       }
+
+       actions = block->cur_set->ht_key.enc_actions;
+       oneact = actions + block->cur_act_index * MLXSW_AFA_ONE_ACTION_LEN;
+       block->cur_act_index += action_size;
+       mlxsw_afa_all_action_type_set(oneact, action_code);
+       return oneact + MLXSW_AFA_PAYLOAD_OFFSET;
+}
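/* A minimal, standalone sketch (not part of the commit) of where appended
 * actions land inside the 0xA8-byte encoded set: each record occupies 32
 * bytes with its payload 4 bytes in, and the trailing 8 bytes at 0xA0 hold
 * the goto/next record packed by the setters above.
 */
#include <stdio.h>

int main(void)
{
	const int one_action_len = 32, payload_offset = 4;
	int i;

	for (i = 0; i < 3; i++)
		printf("action %d: record 0x%02x, payload 0x%02x\n",
		       i, i * one_action_len,
		       i * one_action_len + payload_offset);
	return 0;	/* 0x00/0x04, 0x20/0x24, 0x40/0x44 */
}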
+
+/* Trap / Discard Action
+ * ---------------------
+ * The Trap / Discard action enables trapping / mirroring packets to the CPU
+ * as well as discarding packets.
+ * The ACL Trap / Discard separates the forward/discard control from CPU
+ * trap control. In addition, the Trap / Discard action enables activating
+ * SPAN (port mirroring).
+ */
+
+#define MLXSW_AFA_TRAPDISC_CODE 0x03
+#define MLXSW_AFA_TRAPDISC_SIZE 1
+
+enum mlxsw_afa_trapdisc_forward_action {
+       MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3,
+};
+
+/* afa_trapdisc_forward_action
+ * Forward Action.
+ */
+MLXSW_ITEM32(afa, trapdisc, forward_action, 0x00, 0, 4);
+
+static inline void
+mlxsw_afa_trapdisc_pack(char *payload,
+                       enum mlxsw_afa_trapdisc_forward_action forward_action)
+{
+       mlxsw_afa_trapdisc_forward_action_set(payload, forward_action);
+}
+
+int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block)
+{
+       char *act = mlxsw_afa_block_append_action(block,
+                                                 MLXSW_AFA_TRAPDISC_CODE,
+                                                 MLXSW_AFA_TRAPDISC_SIZE);
+
+       if (!act)
+               return -ENOBUFS;
+       mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD);
+       return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_drop);
+
+/* Forwarding Action
+ * -----------------
+ * Forwarding Action can be used to implement Policy Based Switching (PBS)
+ * as well as OpenFlow related "Output" action.
+ */
+
+#define MLXSW_AFA_FORWARD_CODE 0x07
+#define MLXSW_AFA_FORWARD_SIZE 1
+
+enum mlxsw_afa_forward_type {
+       /* PBS, Policy Based Switching */
+       MLXSW_AFA_FORWARD_TYPE_PBS,
+       /* Output, OpenFlow output type */
+       MLXSW_AFA_FORWARD_TYPE_OUTPUT,
+};
+
+/* afa_forward_type */
+MLXSW_ITEM32(afa, forward, type, 0x00, 24, 2);
+
+/* afa_forward_pbs_ptr
+ * A pointer to the PBS entry configured by PPBS register.
+ * Reserved when in_port is set.
+ */
+MLXSW_ITEM32(afa, forward, pbs_ptr, 0x08, 0, 24);
+
+/* afa_forward_in_port
+ * Packet is forwarded back to the ingress port.
+ */
+MLXSW_ITEM32(afa, forward, in_port, 0x0C, 0, 1);
+
+static inline void
+mlxsw_afa_forward_pack(char *payload, enum mlxsw_afa_forward_type type,
+                      u32 pbs_ptr, bool in_port)
+{
+       mlxsw_afa_forward_type_set(payload, type);
+       mlxsw_afa_forward_pbs_ptr_set(payload, pbs_ptr);
+       mlxsw_afa_forward_in_port_set(payload, in_port);
+}
+
+int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
+                              u8 local_port, bool in_port)
+{
+       struct mlxsw_afa_fwd_entry_ref *fwd_entry_ref;
+       u32 kvdl_index;
+       char *act;
+       int err;
+
+       if (in_port)
+               return -EOPNOTSUPP;
+       fwd_entry_ref = mlxsw_afa_fwd_entry_ref_create(block, local_port);
+       if (IS_ERR(fwd_entry_ref))
+               return PTR_ERR(fwd_entry_ref);
+       kvdl_index = fwd_entry_ref->fwd_entry->kvdl_index;
+
+       act = mlxsw_afa_block_append_action(block, MLXSW_AFA_FORWARD_CODE,
+                                           MLXSW_AFA_FORWARD_SIZE);
+       if (!act) {
+               err = -ENOBUFS;
+               goto err_append_action;
+       }
+       mlxsw_afa_forward_pack(act, MLXSW_AFA_FORWARD_TYPE_PBS,
+                              kvdl_index, in_port);
+       return 0;
+
+err_append_action:
+       mlxsw_afa_fwd_entry_ref_destroy(block, fwd_entry_ref);
+       return err;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);
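A hedged snippet of the forward action in use (`block` and `local_port` are illustrative, with `block` assumed built as in the earlier sketch):

/* in_port forwarding is rejected with -EOPNOTSUPP for now; on any
 * internal failure the function releases the PBS entry reference it
 * took, so the caller needs no unwind of its own.
 */
err = mlxsw_afa_block_append_fwd(block, local_port, false);
if (err)
        return err;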
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
new file mode 100644 (file)
index 0000000..43f78dc
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CORE_ACL_FLEX_ACTIONS_H
+#define _MLXSW_CORE_ACL_FLEX_ACTIONS_H
+
+#include <linux/types.h>
+
+struct mlxsw_afa;
+struct mlxsw_afa_block;
+
+struct mlxsw_afa_ops {
+       int (*kvdl_set_add)(void *priv, u32 *p_kvdl_index,
+                           char *enc_actions, bool is_first);
+       void (*kvdl_set_del)(void *priv, u32 kvdl_index, bool is_first);
+       int (*kvdl_fwd_entry_add)(void *priv, u32 *p_kvdl_index, u8 local_port);
+       void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
+};
+
+struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
+                                  const struct mlxsw_afa_ops *ops,
+                                  void *ops_priv);
+void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa);
+struct mlxsw_afa_block *mlxsw_afa_block_create(struct mlxsw_afa *mlxsw_afa);
+void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_commit(struct mlxsw_afa_block *block);
+char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
+u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);
+void mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
+void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
+int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
+int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
+                              u8 local_port, bool in_port);
+
+#endif
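A sketch of how a driver might wire up the ops above (all `my_*` names and the callback bodies are hypothetical placeholders, not part of this patch):

static int my_kvdl_set_add(void *priv, u32 *p_kvdl_index,
                           char *enc_actions, bool is_first)
{
        /* A real implementation would allocate a KVD linear entry and
         * program enc_actions into it, e.g. via the PEFA register.
         */
        *p_kvdl_index = 0;
        return 0;
}

static void my_kvdl_set_del(void *priv, u32 kvdl_index, bool is_first)
{
        /* Free the KVD linear entry allocated in my_kvdl_set_add(). */
}

static int my_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
                                 u8 local_port)
{
        /* Program a PBS entry for local_port, e.g. via the PPBS
         * register, and return its KVD linear index.
         */
        *p_kvdl_index = 0;
        return 0;
}

static void my_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
{
        /* Free the PBS entry. */
}

static const struct mlxsw_afa_ops my_afa_ops = {
        .kvdl_set_add           = my_kvdl_set_add,
        .kvdl_set_del           = my_kvdl_set_del,
        .kvdl_fwd_entry_add     = my_kvdl_fwd_entry_add,
        .kvdl_fwd_entry_del     = my_kvdl_fwd_entry_del,
};

static struct mlxsw_afa *my_afa_init(void *priv)
{
        /* 16 actions per set is illustrative; the real value comes
         * from device capabilities.
         */
        return mlxsw_afa_create(16, &my_afa_ops, priv);
}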
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
new file mode 100644 (file)
index 0000000..b32a009
--- /dev/null
@@ -0,0 +1,475 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+
+#include "item.h"
+#include "core_acl_flex_keys.h"
+
+struct mlxsw_afk {
+       struct list_head key_info_list;
+       unsigned int max_blocks;
+       const struct mlxsw_afk_block *blocks;
+       unsigned int blocks_count;
+};
+
+static bool mlxsw_afk_blocks_check(struct mlxsw_afk *mlxsw_afk)
+{
+       int i;
+       int j;
+
+       for (i = 0; i < mlxsw_afk->blocks_count; i++) {
+               const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i];
+
+               for (j = 0; j < block->instances_count; j++) {
+                       struct mlxsw_afk_element_inst *elinst;
+
+                       elinst = &block->instances[j];
+                       if (elinst->type != elinst->info->type ||
+                           elinst->item.size.bits !=
+                           elinst->info->item.size.bits)
+                               return false;
+               }
+       }
+       return true;
+}
+
+struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
+                                  const struct mlxsw_afk_block *blocks,
+                                  unsigned int blocks_count)
+{
+       struct mlxsw_afk *mlxsw_afk;
+
+       mlxsw_afk = kzalloc(sizeof(*mlxsw_afk), GFP_KERNEL);
+       if (!mlxsw_afk)
+               return NULL;
+       INIT_LIST_HEAD(&mlxsw_afk->key_info_list);
+       mlxsw_afk->max_blocks = max_blocks;
+       mlxsw_afk->blocks = blocks;
+       mlxsw_afk->blocks_count = blocks_count;
+       WARN_ON(!mlxsw_afk_blocks_check(mlxsw_afk));
+       return mlxsw_afk;
+}
+EXPORT_SYMBOL(mlxsw_afk_create);
+
+void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk)
+{
+       WARN_ON(!list_empty(&mlxsw_afk->key_info_list));
+       kfree(mlxsw_afk);
+}
+EXPORT_SYMBOL(mlxsw_afk_destroy);
+
+struct mlxsw_afk_key_info {
+       struct list_head list;
+       unsigned int ref_count;
+       unsigned int blocks_count;
+       int element_to_block[MLXSW_AFK_ELEMENT_MAX]; /* index is element, value
+                                                     * is index inside "blocks"
+                                                     */
+       struct mlxsw_afk_element_usage elusage;
+       const struct mlxsw_afk_block *blocks[0];
+};
+
+static bool
+mlxsw_afk_key_info_elements_eq(struct mlxsw_afk_key_info *key_info,
+                              struct mlxsw_afk_element_usage *elusage)
+{
+       return memcmp(&key_info->elusage, elusage, sizeof(*elusage)) == 0;
+}
+
+static struct mlxsw_afk_key_info *
+mlxsw_afk_key_info_find(struct mlxsw_afk *mlxsw_afk,
+                       struct mlxsw_afk_element_usage *elusage)
+{
+       struct mlxsw_afk_key_info *key_info;
+
+       list_for_each_entry(key_info, &mlxsw_afk->key_info_list, list) {
+               if (mlxsw_afk_key_info_elements_eq(key_info, elusage))
+                       return key_info;
+       }
+       return NULL;
+}
+
+struct mlxsw_afk_picker {
+       struct {
+               DECLARE_BITMAP(element, MLXSW_AFK_ELEMENT_MAX);
+               unsigned int total;
+       } hits[0];
+};
+
+static void mlxsw_afk_picker_count_hits(struct mlxsw_afk *mlxsw_afk,
+                                       struct mlxsw_afk_picker *picker,
+                                       enum mlxsw_afk_element element)
+{
+       int i;
+       int j;
+
+       for (i = 0; i < mlxsw_afk->blocks_count; i++) {
+               const struct mlxsw_afk_block *block = &mlxsw_afk->blocks[i];
+
+               for (j = 0; j < block->instances_count; j++) {
+                       struct mlxsw_afk_element_inst *elinst;
+
+                       elinst = &block->instances[j];
+                       if (elinst->info->element == element) {
+                               __set_bit(element, picker->hits[i].element);
+                               picker->hits[i].total++;
+                       }
+               }
+       }
+}
+
+static void mlxsw_afk_picker_subtract_hits(struct mlxsw_afk *mlxsw_afk,
+                                          struct mlxsw_afk_picker *picker,
+                                          int block_index)
+{
+       DECLARE_BITMAP(hits_element, MLXSW_AFK_ELEMENT_MAX);
+       int i;
+       int j;
+
+       memcpy(&hits_element, &picker->hits[block_index].element,
+              sizeof(hits_element));
+
+       for (i = 0; i < mlxsw_afk->blocks_count; i++) {
+               for_each_set_bit(j, hits_element, MLXSW_AFK_ELEMENT_MAX) {
+                       if (__test_and_clear_bit(j, picker->hits[i].element))
+                               picker->hits[i].total--;
+               }
+       }
+}
+
+static int mlxsw_afk_picker_most_hits_get(struct mlxsw_afk *mlxsw_afk,
+                                         struct mlxsw_afk_picker *picker)
+{
+       int most_index = -EINVAL; /* should never be returned */
+       int most_hits = 0;
+       int i;
+
+       for (i = 0; i < mlxsw_afk->blocks_count; i++) {
+               if (picker->hits[i].total > most_hits) {
+                       most_hits = picker->hits[i].total;
+                       most_index = i;
+               }
+       }
+       return most_index;
+}
+
+static int mlxsw_afk_picker_key_info_add(struct mlxsw_afk *mlxsw_afk,
+                                        struct mlxsw_afk_picker *picker,
+                                        int block_index,
+                                        struct mlxsw_afk_key_info *key_info)
+{
+       enum mlxsw_afk_element element;
+
+       if (key_info->blocks_count == mlxsw_afk->max_blocks)
+               return -EINVAL;
+
+       for_each_set_bit(element, picker->hits[block_index].element,
+                        MLXSW_AFK_ELEMENT_MAX) {
+               key_info->element_to_block[element] = key_info->blocks_count;
+               mlxsw_afk_element_usage_add(&key_info->elusage, element);
+       }
+
+       key_info->blocks[key_info->blocks_count] =
+                                       &mlxsw_afk->blocks[block_index];
+       key_info->blocks_count++;
+       return 0;
+}
+
+static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk,
+                           struct mlxsw_afk_key_info *key_info,
+                           struct mlxsw_afk_element_usage *elusage)
+{
+       struct mlxsw_afk_picker *picker;
+       enum mlxsw_afk_element element;
+       size_t alloc_size;
+       int err;
+
+       alloc_size = sizeof(picker->hits[0]) * mlxsw_afk->blocks_count;
+       picker = kzalloc(alloc_size, GFP_KERNEL);
+       if (!picker)
+               return -ENOMEM;
+
+       /* Since the same elements could be present in multiple blocks,
+        * we must find an optimal block list in order to keep the
+        * block count as low as possible.
+        *
+        * First, we count hits. We go over all available blocks and count
+        * how many of the requested elements are covered by each.
+        *
+        * Then, in a loop, we find the block with the most hits and add
+        * it to the output key_info. We then subtract that block's hits
+        * so the next iteration finds the most suitable block for the
+        * rest of the requested elements.
+        */
+
+       mlxsw_afk_element_usage_for_each(element, elusage)
+               mlxsw_afk_picker_count_hits(mlxsw_afk, picker, element);
+
+       do {
+               int block_index;
+
+               block_index = mlxsw_afk_picker_most_hits_get(mlxsw_afk, picker);
+               if (block_index < 0) {
+                       err = block_index;
+                       goto out;
+               }
+               err = mlxsw_afk_picker_key_info_add(mlxsw_afk, picker,
+                                                   block_index, key_info);
+               if (err)
+                       goto out;
+               mlxsw_afk_picker_subtract_hits(mlxsw_afk, picker, block_index);
+       } while (!mlxsw_afk_key_info_elements_eq(key_info, elusage));
+
+       err = 0;
+out:
+       kfree(picker);
+       return err;
+}
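A hypothetical walk-through of the greedy loop above: suppose the requested elements are {DMAC, ETHERTYPE, IP_PROTO}, block 0 covers {DMAC, ETHERTYPE} and block 1 covers {ETHERTYPE, IP_PROTO}. Counting hits gives both blocks two; block 0 is picked first and its elements are subtracted, leaving block 1 with a single remaining hit (IP_PROTO). The second iteration picks block 1, after which key_info's element usage equals the request and the loop terminates with two blocks.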
+
+static struct mlxsw_afk_key_info *
+mlxsw_afk_key_info_create(struct mlxsw_afk *mlxsw_afk,
+                         struct mlxsw_afk_element_usage *elusage)
+{
+       struct mlxsw_afk_key_info *key_info;
+       size_t alloc_size;
+       int err;
+
+       alloc_size = sizeof(*key_info) +
+                    sizeof(key_info->blocks[0]) * mlxsw_afk->max_blocks;
+       key_info = kzalloc(alloc_size, GFP_KERNEL);
+       if (!key_info)
+               return ERR_PTR(-ENOMEM);
+       err = mlxsw_afk_picker(mlxsw_afk, key_info, elusage);
+       if (err)
+               goto err_picker;
+       list_add(&key_info->list, &mlxsw_afk->key_info_list);
+       key_info->ref_count = 1;
+       return key_info;
+
+err_picker:
+       kfree(key_info);
+       return ERR_PTR(err);
+}
+
+static void mlxsw_afk_key_info_destroy(struct mlxsw_afk_key_info *key_info)
+{
+       list_del(&key_info->list);
+       kfree(key_info);
+}
+
+struct mlxsw_afk_key_info *
+mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk,
+                      struct mlxsw_afk_element_usage *elusage)
+{
+       struct mlxsw_afk_key_info *key_info;
+
+       key_info = mlxsw_afk_key_info_find(mlxsw_afk, elusage);
+       if (key_info) {
+               key_info->ref_count++;
+               return key_info;
+       }
+       return mlxsw_afk_key_info_create(mlxsw_afk, elusage);
+}
+EXPORT_SYMBOL(mlxsw_afk_key_info_get);
+
+void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info)
+{
+       if (--key_info->ref_count)
+               return;
+       mlxsw_afk_key_info_destroy(key_info);
+}
+EXPORT_SYMBOL(mlxsw_afk_key_info_put);
+
+bool mlxsw_afk_key_info_subset(struct mlxsw_afk_key_info *key_info,
+                              struct mlxsw_afk_element_usage *elusage)
+{
+       return mlxsw_afk_element_usage_subset(elusage, &key_info->elusage);
+}
+EXPORT_SYMBOL(mlxsw_afk_key_info_subset);
+
+static const struct mlxsw_afk_element_inst *
+mlxsw_afk_block_elinst_get(const struct mlxsw_afk_block *block,
+                          enum mlxsw_afk_element element)
+{
+       int i;
+
+       for (i = 0; i < block->instances_count; i++) {
+               struct mlxsw_afk_element_inst *elinst;
+
+               elinst = &block->instances[i];
+               if (elinst->info->element == element)
+                       return elinst;
+       }
+       return NULL;
+}
+
+static const struct mlxsw_afk_element_inst *
+mlxsw_afk_key_info_elinst_get(struct mlxsw_afk_key_info *key_info,
+                             enum mlxsw_afk_element element,
+                             int *p_block_index)
+{
+       const struct mlxsw_afk_element_inst *elinst;
+       const struct mlxsw_afk_block *block;
+       int block_index;
+
+       if (WARN_ON(!test_bit(element, key_info->elusage.usage)))
+               return NULL;
+       block_index = key_info->element_to_block[element];
+       block = key_info->blocks[block_index];
+
+       elinst = mlxsw_afk_block_elinst_get(block, element);
+       if (WARN_ON(!elinst))
+               return NULL;
+
+       *p_block_index = block_index;
+       return elinst;
+}
+
+u16
+mlxsw_afk_key_info_block_encoding_get(const struct mlxsw_afk_key_info *key_info,
+                                     int block_index)
+{
+       return key_info->blocks[block_index]->encoding;
+}
+EXPORT_SYMBOL(mlxsw_afk_key_info_block_encoding_get);
+
+unsigned int
+mlxsw_afk_key_info_blocks_count_get(const struct mlxsw_afk_key_info *key_info)
+{
+       return key_info->blocks_count;
+}
+EXPORT_SYMBOL(mlxsw_afk_key_info_blocks_count_get);
+
+void mlxsw_afk_values_add_u32(struct mlxsw_afk_element_values *values,
+                             enum mlxsw_afk_element element,
+                             u32 key_value, u32 mask_value)
+{
+       const struct mlxsw_afk_element_info *elinfo =
+                               &mlxsw_afk_element_infos[element];
+       const struct mlxsw_item *storage_item = &elinfo->item;
+
+       if (!mask_value)
+               return;
+       if (WARN_ON(elinfo->type != MLXSW_AFK_ELEMENT_TYPE_U32))
+               return;
+       __mlxsw_item_set32(values->storage.key, storage_item, 0, key_value);
+       __mlxsw_item_set32(values->storage.mask, storage_item, 0, mask_value);
+       mlxsw_afk_element_usage_add(&values->elusage, element);
+}
+EXPORT_SYMBOL(mlxsw_afk_values_add_u32);
+
+void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
+                             enum mlxsw_afk_element element,
+                             const char *key_value, const char *mask_value,
+                             unsigned int len)
+{
+       const struct mlxsw_afk_element_info *elinfo =
+                               &mlxsw_afk_element_infos[element];
+       const struct mlxsw_item *storage_item = &elinfo->item;
+
+       if (!memchr_inv(mask_value, 0, len)) /* If mask is zero */
+               return;
+       if (WARN_ON(elinfo->type != MLXSW_AFK_ELEMENT_TYPE_BUF) ||
+           WARN_ON(elinfo->item.size.bytes != len))
+               return;
+       __mlxsw_item_memcpy_to(values->storage.key, key_value,
+                              storage_item, 0);
+       __mlxsw_item_memcpy_to(values->storage.mask, mask_value,
+                              storage_item, 0);
+       mlxsw_afk_element_usage_add(&values->elusage, element);
+}
+EXPORT_SYMBOL(mlxsw_afk_values_add_buf);
+
+static void mlxsw_afk_encode_u32(const struct mlxsw_item *storage_item,
+                                const struct mlxsw_item *output_item,
+                                char *storage, char *output_indexed)
+{
+       u32 value;
+
+       value = __mlxsw_item_get32(storage, storage_item, 0);
+       __mlxsw_item_set32(output_indexed, output_item, 0, value);
+}
+
+static void mlxsw_afk_encode_buf(const struct mlxsw_item *storage_item,
+                                const struct mlxsw_item *output_item,
+                                char *storage, char *output_indexed)
+{
+       char *storage_data = __mlxsw_item_data(storage, storage_item, 0);
+       char *output_data = __mlxsw_item_data(output_indexed, output_item, 0);
+       size_t len = output_item->size.bytes;
+
+       memcpy(output_data, storage_data, len);
+}
+
+#define MLXSW_AFK_KEY_BLOCK_SIZE 16
+
+static void mlxsw_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
+                                int block_index, char *storage, char *output)
+{
+       char *output_indexed = output + block_index * MLXSW_AFK_KEY_BLOCK_SIZE;
+       const struct mlxsw_item *storage_item = &elinst->info->item;
+       const struct mlxsw_item *output_item = &elinst->item;
+
+       if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_U32)
+               mlxsw_afk_encode_u32(storage_item, output_item,
+                                    storage, output_indexed);
+       else if (elinst->type == MLXSW_AFK_ELEMENT_TYPE_BUF)
+               mlxsw_afk_encode_buf(storage_item, output_item,
+                                    storage, output_indexed);
+}
+
+void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
+                     struct mlxsw_afk_element_values *values,
+                     char *key, char *mask)
+{
+       const struct mlxsw_afk_element_inst *elinst;
+       enum mlxsw_afk_element element;
+       int block_index;
+
+       mlxsw_afk_element_usage_for_each(element, &values->elusage) {
+               elinst = mlxsw_afk_key_info_elinst_get(key_info, element,
+                                                      &block_index);
+               if (!elinst)
+                       continue;
+               mlxsw_afk_encode_one(elinst, block_index,
+                                    values->storage.key, key);
+               mlxsw_afk_encode_one(elinst, block_index,
+                                    values->storage.mask, mask);
+       }
+}
+EXPORT_SYMBOL(mlxsw_afk_encode);
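A hedged end-to-end sketch of the key API (variable names illustrative; `afk` assumed created via mlxsw_afk_create(), and the buffer length borrowed from the PTCE-V2 definition added later in this patch):

struct mlxsw_afk_element_values values;
struct mlxsw_afk_key_info *key_info;
char key[MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN] = {0};
char mask[MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN] = {0};

memset(&values, 0, sizeof(values));
/* Match destination IPv4 address 192.168.0.1 exactly. */
mlxsw_afk_values_add_u32(&values, MLXSW_AFK_ELEMENT_DST_IP4,
                         0xc0a80001, 0xffffffff);

key_info = mlxsw_afk_key_info_get(afk, &values.elusage);
if (IS_ERR(key_info))
        return PTR_ERR(key_info);
mlxsw_afk_encode(key_info, &values, key, mask);
/* key/mask are now laid out in 16-byte key blocks, ready to be
 * copied into PTCE-V2's flex_key_blocks/mask fields.
 */
mlxsw_afk_key_info_put(key_info);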
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
new file mode 100644 (file)
index 0000000..e4fcba7
--- /dev/null
@@ -0,0 +1,238 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_CORE_ACL_FLEX_KEYS_H
+#define _MLXSW_CORE_ACL_FLEX_KEYS_H
+
+#include <linux/types.h>
+#include <linux/bitmap.h>
+
+#include "item.h"
+
+enum mlxsw_afk_element {
+       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
+       MLXSW_AFK_ELEMENT_DMAC,
+       MLXSW_AFK_ELEMENT_SMAC,
+       MLXSW_AFK_ELEMENT_ETHERTYPE,
+       MLXSW_AFK_ELEMENT_IP_PROTO,
+       MLXSW_AFK_ELEMENT_SRC_IP4,
+       MLXSW_AFK_ELEMENT_DST_IP4,
+       MLXSW_AFK_ELEMENT_SRC_IP6_HI,
+       MLXSW_AFK_ELEMENT_SRC_IP6_LO,
+       MLXSW_AFK_ELEMENT_DST_IP6_HI,
+       MLXSW_AFK_ELEMENT_DST_IP6_LO,
+       MLXSW_AFK_ELEMENT_DST_L4_PORT,
+       MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+       MLXSW_AFK_ELEMENT_MAX,
+};
+
+enum mlxsw_afk_element_type {
+       MLXSW_AFK_ELEMENT_TYPE_U32,
+       MLXSW_AFK_ELEMENT_TYPE_BUF,
+};
+
+struct mlxsw_afk_element_info {
+       enum mlxsw_afk_element element; /* element ID */
+       enum mlxsw_afk_element_type type;
+       struct mlxsw_item item; /* element geometry in internal storage */
+};
+
+#define MLXSW_AFK_ELEMENT_INFO(_type, _element, _offset, _shift, _size)                \
+       [MLXSW_AFK_ELEMENT_##_element] = {                                      \
+               .element = MLXSW_AFK_ELEMENT_##_element,                        \
+               .type = _type,                                                  \
+               .item = {                                                       \
+                       .offset = _offset,                                      \
+                       .shift = _shift,                                        \
+                       .size = {.bits = _size},                                \
+                       .name = #_element,                                      \
+               },                                                              \
+       }
+
+#define MLXSW_AFK_ELEMENT_INFO_U32(_element, _offset, _shift, _size)           \
+       MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_U32,                      \
+                              _element, _offset, _shift, _size)
+
+#define MLXSW_AFK_ELEMENT_INFO_BUF(_element, _offset, _size)                   \
+       MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF,                      \
+                              _element, _offset, 0, _size)
+
+/* For the purpose of the driver, define an internal storage scratchpad
+ * that will be used to store key/mask values. For each defined element,
+ * an internal storage geometry is specified below.
+ */
+static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
+       MLXSW_AFK_ELEMENT_INFO_U32(SRC_SYS_PORT, 0x00, 16, 16),
+       MLXSW_AFK_ELEMENT_INFO_BUF(DMAC, 0x04, 6),
+       MLXSW_AFK_ELEMENT_INFO_BUF(SMAC, 0x0A, 6),
+       MLXSW_AFK_ELEMENT_INFO_U32(ETHERTYPE, 0x00, 0, 16),
+       MLXSW_AFK_ELEMENT_INFO_U32(IP_PROTO, 0x10, 0, 8),
+       MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32),
+       MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32),
+       MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8),
+       MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_LO, 0x20, 8),
+       MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_HI, 0x28, 8),
+       MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP6_LO, 0x30, 8),
+       MLXSW_AFK_ELEMENT_INFO_U32(DST_L4_PORT, 0x14, 0, 16),
+       MLXSW_AFK_ELEMENT_INFO_U32(SRC_L4_PORT, 0x14, 16, 16),
+};
+
+#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x38
+
+struct mlxsw_afk_element_inst { /* element instance in actual block */
+       const struct mlxsw_afk_element_info *info;
+       enum mlxsw_afk_element_type type;
+       struct mlxsw_item item; /* element geometry in block */
+};
+
+#define MLXSW_AFK_ELEMENT_INST(_type, _element, _offset, _shift, _size)                \
+       {                                                                       \
+               .info = &mlxsw_afk_element_infos[MLXSW_AFK_ELEMENT_##_element], \
+               .type = _type,                                                  \
+               .item = {                                                       \
+                       .offset = _offset,                                      \
+                       .shift = _shift,                                        \
+                       .size = {.bits = _size},                                \
+                       .name = #_element,                                      \
+               },                                                              \
+       }
+
+#define MLXSW_AFK_ELEMENT_INST_U32(_element, _offset, _shift, _size)           \
+       MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_U32,                      \
+                              _element, _offset, _shift, _size)
+
+#define MLXSW_AFK_ELEMENT_INST_BUF(_element, _offset, _size)                   \
+       MLXSW_AFK_ELEMENT_INST(MLXSW_AFK_ELEMENT_TYPE_BUF,                      \
+                              _element, _offset, 0, _size)
+
+struct mlxsw_afk_block {
+       u16 encoding; /* block ID */
+       struct mlxsw_afk_element_inst *instances;
+       unsigned int instances_count;
+};
+
+#define MLXSW_AFK_BLOCK(_encoding, _instances)                                 \
+       {                                                                       \
+               .encoding = _encoding,                                          \
+               .instances = _instances,                                        \
+               .instances_count = ARRAY_SIZE(_instances),                      \
+       }
+
+struct mlxsw_afk_element_usage {
+       DECLARE_BITMAP(usage, MLXSW_AFK_ELEMENT_MAX);
+};
+
+#define mlxsw_afk_element_usage_for_each(element, elusage)                     \
+       for_each_set_bit(element, (elusage)->usage, MLXSW_AFK_ELEMENT_MAX)
+
+static inline void
+mlxsw_afk_element_usage_add(struct mlxsw_afk_element_usage *elusage,
+                           enum mlxsw_afk_element element)
+{
+       __set_bit(element, elusage->usage);
+}
+
+static inline void
+mlxsw_afk_element_usage_zero(struct mlxsw_afk_element_usage *elusage)
+{
+       bitmap_zero(elusage->usage, MLXSW_AFK_ELEMENT_MAX);
+}
+
+static inline void
+mlxsw_afk_element_usage_fill(struct mlxsw_afk_element_usage *elusage,
+                            const enum mlxsw_afk_element *elements,
+                            unsigned int elements_count)
+{
+       int i;
+
+       mlxsw_afk_element_usage_zero(elusage);
+       for (i = 0; i < elements_count; i++)
+               mlxsw_afk_element_usage_add(elusage, elements[i]);
+}
+
+static inline bool
+mlxsw_afk_element_usage_subset(struct mlxsw_afk_element_usage *elusage_small,
+                              struct mlxsw_afk_element_usage *elusage_big)
+{
+       int i;
+
+       for (i = 0; i < MLXSW_AFK_ELEMENT_MAX; i++)
+               if (test_bit(i, elusage_small->usage) &&
+                   !test_bit(i, elusage_big->usage))
+                       return false;
+       return true;
+}
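A short usage sketch of the helpers above (the element list is illustrative):

static const enum mlxsw_afk_element my_l2_elements[] = {
        MLXSW_AFK_ELEMENT_SMAC,
        MLXSW_AFK_ELEMENT_DMAC,
        MLXSW_AFK_ELEMENT_ETHERTYPE,
};

struct mlxsw_afk_element_usage elusage;

mlxsw_afk_element_usage_fill(&elusage, my_l2_elements,
                             ARRAY_SIZE(my_l2_elements));
/* An existing key_info can serve this ruleset iff
 * mlxsw_afk_key_info_subset(key_info, &elusage) returns true.
 */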
+
+struct mlxsw_afk;
+
+struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
+                                  const struct mlxsw_afk_block *blocks,
+                                  unsigned int blocks_count);
+void mlxsw_afk_destroy(struct mlxsw_afk *mlxsw_afk);
+
+struct mlxsw_afk_key_info;
+
+struct mlxsw_afk_key_info *
+mlxsw_afk_key_info_get(struct mlxsw_afk *mlxsw_afk,
+                      struct mlxsw_afk_element_usage *elusage);
+void mlxsw_afk_key_info_put(struct mlxsw_afk_key_info *key_info);
+bool mlxsw_afk_key_info_subset(struct mlxsw_afk_key_info *key_info,
+                              struct mlxsw_afk_element_usage *elusage);
+
+u16
+mlxsw_afk_key_info_block_encoding_get(const struct mlxsw_afk_key_info *key_info,
+                                     int block_index);
+unsigned int
+mlxsw_afk_key_info_blocks_count_get(const struct mlxsw_afk_key_info *key_info);
+
+struct mlxsw_afk_element_values {
+       struct mlxsw_afk_element_usage elusage;
+       struct {
+               char key[MLXSW_AFK_ELEMENT_STORAGE_SIZE];
+               char mask[MLXSW_AFK_ELEMENT_STORAGE_SIZE];
+       } storage;
+};
+
+void mlxsw_afk_values_add_u32(struct mlxsw_afk_element_values *values,
+                             enum mlxsw_afk_element element,
+                             u32 key_value, u32 mask_value);
+void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
+                             enum mlxsw_afk_element element,
+                             const char *key_value, const char *mask_value,
+                             unsigned int len);
+void mlxsw_afk_encode(struct mlxsw_afk_key_info *key_info,
+                     struct mlxsw_afk_element_values *values,
+                     char *key, char *mask);
+
+#endif
index e50c8db2602a807aea7c01d4782f690f88cd3629..12c3a44491203824ef4eaa51acdee5b91dfdb194 100644 (file)
@@ -338,7 +338,7 @@ mlxsw_i2c_write(struct device *dev, size_t in_mbox_size, u8 *in_mbox, int num,
                return -EIO;
        }
 
-       return err > 0 ? 0 : err;
+       return 0;
 }
 
 /* Routine executes I2C command. */
index 3c95e3ddd9c25c18ed0daff92518194652e2d2af..28427f0758c7ed3ddb35870aee9e450cc451151a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * drivers/net/ethernet/mellanox/mlxsw/item.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -72,6 +72,40 @@ __mlxsw_item_offset(const struct mlxsw_item *item, unsigned short index,
                typesize);
 }
 
+static inline u8 __mlxsw_item_get8(const char *buf,
+                                  const struct mlxsw_item *item,
+                                  unsigned short index)
+{
+       unsigned int offset = __mlxsw_item_offset(item, index, sizeof(u8));
+       u8 *b = (u8 *) buf;
+       u8 tmp;
+
+       tmp = b[offset];
+       tmp >>= item->shift;
+       tmp &= GENMASK(item->size.bits - 1, 0);
+       if (item->no_real_shift)
+               tmp <<= item->shift;
+       return tmp;
+}
+
+static inline void __mlxsw_item_set8(char *buf, const struct mlxsw_item *item,
+                                    unsigned short index, u8 val)
+{
+       unsigned int offset = __mlxsw_item_offset(item, index,
+                                                 sizeof(u8));
+       u8 *b = (u8 *) buf;
+       u8 mask = GENMASK(item->size.bits - 1, 0) << item->shift;
+       u8 tmp;
+
+       if (!item->no_real_shift)
+               val <<= item->shift;
+       val &= mask;
+       tmp = b[offset];
+       tmp &= ~mask;
+       tmp |= val;
+       b[offset] = tmp;
+}
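As a worked example of the shifting above: for an item with shift = 4 and size.bits = 3, the write mask is GENMASK(2, 0) << 4 = 0x70. __mlxsw_item_set8() with val = 5 stores (5 << 4) & 0x70 = 0x50 into the byte while preserving the bits outside the mask, and __mlxsw_item_get8() recovers (b[offset] >> 4) & GENMASK(2, 0) = 5.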
+
 static inline u16 __mlxsw_item_get16(const char *buf,
                                     const struct mlxsw_item *item,
                                     unsigned short index)
@@ -191,6 +225,14 @@ static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
        memcpy(&buf[offset], src, item->size.bytes);
 }
 
+static inline char *__mlxsw_item_data(char *buf, const struct mlxsw_item *item,
+                                     unsigned short index)
+{
+       unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
+
+       return &buf[offset];
+}
+
 static inline u16
 __mlxsw_item_bit_array_offset(const struct mlxsw_item *item,
                              u16 index, u8 *shift)
@@ -253,6 +295,47 @@ static inline void __mlxsw_item_bit_array_set(char *buf,
  * _iname: item name within the container
  */
 
+#define MLXSW_ITEM8(_type, _cname, _iname, _offset, _shift, _sizebits)         \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .shift = _shift,                                                        \
+       .size = {.bits = _sizebits,},                                           \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline u8 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf)    \
+{                                                                              \
+       return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0);  \
+}                                                                              \
+static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val)\
+{                                                                              \
+       __mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val);    \
+}
+
+#define MLXSW_ITEM8_INDEXED(_type, _cname, _iname, _offset, _shift, _sizebits, \
+                           _step, _instepoffset, _norealshift)                 \
+static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
+       .offset = _offset,                                                      \
+       .step = _step,                                                          \
+       .in_step_offset = _instepoffset,                                        \
+       .shift = _shift,                                                        \
+       .no_real_shift = _norealshift,                                          \
+       .size = {.bits = _sizebits,},                                           \
+       .name = #_type "_" #_cname "_" #_iname,                                 \
+};                                                                             \
+static inline u8                                                               \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
+{                                                                              \
+       return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname),      \
+                                index);                                        \
+}                                                                              \
+static inline void                                                             \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index,     \
+                                         u8 val)                               \
+{                                                                              \
+       __mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname),             \
+                         index, val);                                          \
+}
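A sketch of what the non-indexed variant expands to in use (`foo`/`bar` are hypothetical names, not from this patch):

/* Declares mlxsw_reg_foo_bar_get()/mlxsw_reg_foo_bar_set() accessors
 * for a 3-bit field occupying bits 4:2 of the byte at offset 0x04.
 */
MLXSW_ITEM8(reg, foo, bar, 0x04, 2, 3);

static void example(char *payload)
{
        mlxsw_reg_foo_bar_set(payload, 5);
        /* mlxsw_reg_foo_bar_get(payload) now returns 5 */
}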
+
 #define MLXSW_ITEM16(_type, _cname, _iname, _offset, _shift, _sizebits)                \
 static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                        \
        .offset = _offset,                                                      \
@@ -393,6 +476,11 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src)        \
 {                                                                              \
        __mlxsw_item_memcpy_to(buf, src,                                        \
                               &__ITEM_NAME(_type, _cname, _iname), 0);         \
+}                                                                              \
+static inline char *                                                           \
+mlxsw_##_type##_##_cname##_##_iname##_data(char *buf)                          \
+{                                                                              \
+       return __mlxsw_item_data(buf, &__ITEM_NAME(_type, _cname, _iname), 0);  \
 }
 
 #define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes,     \
@@ -419,6 +507,12 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf,                 \
 {                                                                              \
        __mlxsw_item_memcpy_to(buf, src,                                        \
                               &__ITEM_NAME(_type, _cname, _iname), index);     \
+}                                                                              \
+static inline char *                                                           \
+mlxsw_##_type##_##_cname##_##_iname##_data(char *buf, unsigned short index)    \
+{                                                                              \
+       return __mlxsw_item_data(buf,                                           \
+                                &__ITEM_NAME(_type, _cname, _iname), index);   \
 }
 
 #define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes,       \
index 1357fe04391bbf89b5604af346b411a329cf6d6b..0899e2d310e26269a5c3d025b7afeeb1516bf21e 100644 (file)
@@ -1,9 +1,9 @@
 /*
  * drivers/net/ethernet/mellanox/mlxsw/reg.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
  * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
  * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1757,6 +1757,505 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
        }
 }
 
+/* PPBT - Policy-Engine Port Binding Table
+ * ---------------------------------------
+ * This register is used for configuration of the Port Binding Table.
+ */
+#define MLXSW_REG_PPBT_ID 0x3002
+#define MLXSW_REG_PPBT_LEN 0x14
+
+MLXSW_REG_DEFINE(ppbt, MLXSW_REG_PPBT_ID, MLXSW_REG_PPBT_LEN);
+
+enum mlxsw_reg_pxbt_e {
+       MLXSW_REG_PXBT_E_IACL,
+       MLXSW_REG_PXBT_E_EACL,
+};
+
+/* reg_ppbt_e
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppbt, e, 0x00, 31, 1);
+
+enum mlxsw_reg_pxbt_op {
+       MLXSW_REG_PXBT_OP_BIND,
+       MLXSW_REG_PXBT_OP_UNBIND,
+};
+
+/* reg_ppbt_op
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppbt, op, 0x00, 28, 3);
+
+/* reg_ppbt_local_port
+ * Local port. Not including CPU port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppbt, local_port, 0x00, 16, 8);
+
+/* reg_ppbt_g
+ * group - When set, the binding is of an ACL group. When cleared,
+ * the binding is of an ACL.
+ * Must be set to 1 for Spectrum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppbt, g, 0x10, 31, 1);
+
+/* reg_ppbt_acl_info
+ * ACL/ACL group identifier. If the g bit is set, this field should hold
+ * the acl_group_id, else it should hold the acl_id.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppbt, acl_info, 0x10, 0, 16);
+
+static inline void mlxsw_reg_ppbt_pack(char *payload, enum mlxsw_reg_pxbt_e e,
+                                      enum mlxsw_reg_pxbt_op op,
+                                      u8 local_port, u16 acl_info)
+{
+       MLXSW_REG_ZERO(ppbt, payload);
+       mlxsw_reg_ppbt_e_set(payload, e);
+       mlxsw_reg_ppbt_op_set(payload, op);
+       mlxsw_reg_ppbt_local_port_set(payload, local_port);
+       mlxsw_reg_ppbt_g_set(payload, true);
+       mlxsw_reg_ppbt_acl_info_set(payload, acl_info);
+}
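A hedged sketch of the register in use (mlxsw_reg_write() and the `mlxsw_core` handle are the usual mlxsw register transport, assumed available here; port and group numbers illustrative):

char ppbt_pl[MLXSW_REG_PPBT_LEN];
int err;

/* Bind ACL group 1 to the ingress ACL point of local port 7. */
mlxsw_reg_ppbt_pack(ppbt_pl, MLXSW_REG_PXBT_E_IACL,
                    MLXSW_REG_PXBT_OP_BIND, 7, 1);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(ppbt), ppbt_pl);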
+
+/* PACL - Policy-Engine ACL Register
+ * ---------------------------------
+ * This register is used for configuration of the ACL.
+ */
+#define MLXSW_REG_PACL_ID 0x3004
+#define MLXSW_REG_PACL_LEN 0x70
+
+MLXSW_REG_DEFINE(pacl, MLXSW_REG_PACL_ID, MLXSW_REG_PACL_LEN);
+
+/* reg_pacl_v
+ * Valid. Setting the v bit makes the ACL valid. It should not be cleared
+ * while the ACL is bound to either a port, a VLAN or an ACL rule.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pacl, v, 0x00, 24, 1);
+
+/* reg_pacl_acl_id
+ * An identifier representing the ACL (managed by software)
+ * Range 0 .. cap_max_acl_regions - 1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pacl, acl_id, 0x08, 0, 16);
+
+#define MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN 16
+
+/* reg_pacl_tcam_region_info
+ * Opaque object that represents a TCAM region.
+ * Obtained through PTAR register.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, pacl, tcam_region_info, 0x30,
+              MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+static inline void mlxsw_reg_pacl_pack(char *payload, u16 acl_id,
+                                      bool valid, const char *tcam_region_info)
+{
+       MLXSW_REG_ZERO(pacl, payload);
+       mlxsw_reg_pacl_acl_id_set(payload, acl_id);
+       mlxsw_reg_pacl_v_set(payload, valid);
+       mlxsw_reg_pacl_tcam_region_info_memcpy_to(payload, tcam_region_info);
+}
+
+/* PAGT - Policy-Engine ACL Group Table
+ * ------------------------------------
+ * This register is used for configuration of the ACL Group Table.
+ */
+#define MLXSW_REG_PAGT_ID 0x3005
+#define MLXSW_REG_PAGT_BASE_LEN 0x30
+#define MLXSW_REG_PAGT_ACL_LEN 4
+#define MLXSW_REG_PAGT_ACL_MAX_NUM 16
+#define MLXSW_REG_PAGT_LEN (MLXSW_REG_PAGT_BASE_LEN + \
+               MLXSW_REG_PAGT_ACL_MAX_NUM * MLXSW_REG_PAGT_ACL_LEN)
+
+MLXSW_REG_DEFINE(pagt, MLXSW_REG_PAGT_ID, MLXSW_REG_PAGT_LEN);
+
+/* reg_pagt_size
+ * Number of ACLs in the group.
+ * Size 0 invalidates a group.
+ * Range 0 .. cap_max_acl_group_size (hard coded to 16 for now)
+ * The total number of ACLs in all groups must be lower than or equal
+ * to cap_max_acl_tot_groups.
+ * Note: a group which is bound must not be invalidated.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pagt, size, 0x00, 0, 8);
+
+/* reg_pagt_acl_group_id
+ * An identifier (numbered from 0..cap_max_acl_groups-1) representing
+ * the ACL Group identifier (managed by software).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pagt, acl_group_id, 0x08, 0, 16);
+
+/* reg_pagt_acl_id
+ * ACL identifier
+ * Access: RW
+ */
+MLXSW_ITEM32_INDEXED(reg, pagt, acl_id, 0x30, 0, 16, 0x04, 0x00, false);
+
+static inline void mlxsw_reg_pagt_pack(char *payload, u16 acl_group_id)
+{
+       MLXSW_REG_ZERO(pagt, payload);
+       mlxsw_reg_pagt_acl_group_id_set(payload, acl_group_id);
+}
+
+static inline void mlxsw_reg_pagt_acl_id_pack(char *payload, int index,
+                                             u16 acl_id)
+{
+       u8 size = mlxsw_reg_pagt_size_get(payload);
+
+       if (index >= size)
+               mlxsw_reg_pagt_size_set(payload, index + 1);
+       mlxsw_reg_pagt_acl_id_set(payload, index, acl_id);
+}
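A short sketch of composing a group with the helpers above (IDs illustrative; mlxsw_reg_write() assumed available as usual). Note how mlxsw_reg_pagt_acl_id_pack() grows the size field as entries are added:

char pagt_pl[MLXSW_REG_PAGT_LEN];

mlxsw_reg_pagt_pack(pagt_pl, 1);                /* acl_group_id = 1 */
mlxsw_reg_pagt_acl_id_pack(pagt_pl, 0, 10);     /* size becomes 1 */
mlxsw_reg_pagt_acl_id_pack(pagt_pl, 1, 11);     /* size becomes 2 */
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pagt), pagt_pl);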
+
+/* PTAR - Policy-Engine TCAM Allocation Register
+ * ---------------------------------------------
+ * This register is used for allocation of regions in the TCAM.
+ * Note: Query method is not supported on this register.
+ */
+#define MLXSW_REG_PTAR_ID 0x3006
+#define MLXSW_REG_PTAR_BASE_LEN 0x20
+#define MLXSW_REG_PTAR_KEY_ID_LEN 1
+#define MLXSW_REG_PTAR_KEY_ID_MAX_NUM 16
+#define MLXSW_REG_PTAR_LEN (MLXSW_REG_PTAR_BASE_LEN + \
+               MLXSW_REG_PTAR_KEY_ID_MAX_NUM * MLXSW_REG_PTAR_KEY_ID_LEN)
+
+MLXSW_REG_DEFINE(ptar, MLXSW_REG_PTAR_ID, MLXSW_REG_PTAR_LEN);
+
+enum mlxsw_reg_ptar_op {
+       /* allocate a TCAM region */
+       MLXSW_REG_PTAR_OP_ALLOC,
+       /* resize a TCAM region */
+       MLXSW_REG_PTAR_OP_RESIZE,
+       /* deallocate TCAM region */
+       MLXSW_REG_PTAR_OP_FREE,
+       /* test allocation */
+       MLXSW_REG_PTAR_OP_TEST,
+};
+
+/* reg_ptar_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ptar, op, 0x00, 28, 4);
+
+/* reg_ptar_action_set_type
+ * Type of action set to be used on this region.
+ * For Spectrum, this is always type 2 - "flexible"
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptar, action_set_type, 0x00, 16, 8);
+
+/* reg_ptar_key_type
+ * TCAM key type for the region.
+ * For Spectrum, this is always type 0x50 - "FLEX_KEY"
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptar, key_type, 0x00, 0, 8);
+
+/* reg_ptar_region_size
+ * TCAM region size. When allocating/resizing this is the requested size,
+ * the response is the actual size. Note that actual size may be
+ * larger than requested.
+ * Allowed range 1 .. cap_max_rules-1
+ * Reserved during op deallocate.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ptar, region_size, 0x04, 0, 16);
+
+/* reg_ptar_region_id
+ * Region identifier
+ * Range 0 .. cap_max_regions-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptar, region_id, 0x08, 0, 16);
+
+/* reg_ptar_tcam_region_info
+ * Opaque object that represents the TCAM region.
+ * Returned when allocating a region.
+ * Provided by software for ACL generation and region deallocation and resize.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ptar, tcam_region_info, 0x10,
+              MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+/* reg_ptar_flexible_key_id
+ * Identifier of the Flexible Key.
+ * Only valid if key_type == "FLEX_KEY"
+ * The key size will be rounded up to one of the following values:
+ * 9B, 18B, 36B, 54B.
+ * This field is reserved during the resize operation.
+ * Access: WO
+ */
+MLXSW_ITEM8_INDEXED(reg, ptar, flexible_key_id, 0x20, 0, 8,
+                   MLXSW_REG_PTAR_KEY_ID_LEN, 0x00, false);
+
+static inline void mlxsw_reg_ptar_pack(char *payload, enum mlxsw_reg_ptar_op op,
+                                      u16 region_size, u16 region_id,
+                                      const char *tcam_region_info)
+{
+       MLXSW_REG_ZERO(ptar, payload);
+       mlxsw_reg_ptar_op_set(payload, op);
+       mlxsw_reg_ptar_action_set_type_set(payload, 2); /* "flexible" */
+       mlxsw_reg_ptar_key_type_set(payload, 0x50); /* "FLEX_KEY" */
+       mlxsw_reg_ptar_region_size_set(payload, region_size);
+       mlxsw_reg_ptar_region_id_set(payload, region_id);
+       mlxsw_reg_ptar_tcam_region_info_memcpy_to(payload, tcam_region_info);
+}
+
+static inline void mlxsw_reg_ptar_key_id_pack(char *payload, int index,
+                                             u16 key_id)
+{
+       mlxsw_reg_ptar_flexible_key_id_set(payload, index, key_id);
+}
+
+static inline void mlxsw_reg_ptar_unpack(char *payload, char *tcam_region_info)
+{
+       mlxsw_reg_ptar_tcam_region_info_memcpy_from(payload, tcam_region_info);
+}
+
+/* PPBS - Policy-Engine Policy Based Switching Register
+ * ----------------------------------------------------
+ * This register retrieves and sets Policy Based Switching Table entries.
+ */
+#define MLXSW_REG_PPBS_ID 0x300C
+#define MLXSW_REG_PPBS_LEN 0x14
+
+MLXSW_REG_DEFINE(ppbs, MLXSW_REG_PPBS_ID, MLXSW_REG_PPBS_LEN);
+
+/* reg_ppbs_pbs_ptr
+ * Index into the PBS table.
+ * For Spectrum, the index points to the KVD Linear.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ppbs, pbs_ptr, 0x08, 0, 24);
+
+/* reg_ppbs_system_port
+ * Unique port identifier for the final destination of the packet.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ppbs, system_port, 0x10, 0, 16);
+
+static inline void mlxsw_reg_ppbs_pack(char *payload, u32 pbs_ptr,
+                                      u16 system_port)
+{
+       MLXSW_REG_ZERO(ppbs, payload);
+       mlxsw_reg_ppbs_pbs_ptr_set(payload, pbs_ptr);
+       mlxsw_reg_ppbs_system_port_set(payload, system_port);
+}
+
+/* PRCR - Policy-Engine Rules Copy Register
+ * ----------------------------------------
+ * This register is used for accessing rules within a TCAM region.
+ */
+#define MLXSW_REG_PRCR_ID 0x300D
+#define MLXSW_REG_PRCR_LEN 0x40
+
+MLXSW_REG_DEFINE(prcr, MLXSW_REG_PRCR_ID, MLXSW_REG_PRCR_LEN);
+
+enum mlxsw_reg_prcr_op {
+       /* Move rules. Moves the rules from "tcam_region_info" starting
+        * at offset "offset" to "dest_tcam_region_info"
+        * at offset "dest_offset."
+        */
+       MLXSW_REG_PRCR_OP_MOVE,
+       /* Copy rules. Copies the rules from "tcam_region_info" starting
+        * at offset "offset" to "dest_tcam_region_info"
+        * at offset "dest_offset."
+        */
+       MLXSW_REG_PRCR_OP_COPY,
+};
+
+/* reg_prcr_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, prcr, op, 0x00, 28, 4);
+
+/* reg_prcr_offset
+ * Offset within the source region to copy/move from.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, prcr, offset, 0x00, 0, 16);
+
+/* reg_prcr_size
+ * The number of rules to copy/move.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, prcr, size, 0x04, 0, 16);
+
+/* reg_prcr_tcam_region_info
+ * Opaque object that represents the source TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, prcr, tcam_region_info, 0x10,
+              MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+/* reg_prcr_dest_offset
+ * Offset within the destination region to copy/move to.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, prcr, dest_offset, 0x20, 0, 16);
+
+/* reg_prcr_dest_tcam_region_info
+ * Opaque object that represents the destination TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, prcr, dest_tcam_region_info, 0x30,
+              MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+static inline void mlxsw_reg_prcr_pack(char *payload, enum mlxsw_reg_prcr_op op,
+                                      const char *src_tcam_region_info,
+                                      u16 src_offset,
+                                      const char *dest_tcam_region_info,
+                                      u16 dest_offset, u16 size)
+{
+       MLXSW_REG_ZERO(prcr, payload);
+       mlxsw_reg_prcr_op_set(payload, op);
+       mlxsw_reg_prcr_offset_set(payload, src_offset);
+       mlxsw_reg_prcr_size_set(payload, size);
+       mlxsw_reg_prcr_tcam_region_info_memcpy_to(payload,
+                                                 src_tcam_region_info);
+       mlxsw_reg_prcr_dest_offset_set(payload, dest_offset);
+       mlxsw_reg_prcr_dest_tcam_region_info_memcpy_to(payload,
+                                                      dest_tcam_region_info);
+}
+
+/* PEFA - Policy-Engine Extended Flexible Action Register
+ * ------------------------------------------------------
+ * This register is used for accessing an extended flexible action entry
+ * in the central KVD Linear Database.
+ */
+#define MLXSW_REG_PEFA_ID 0x300F
+#define MLXSW_REG_PEFA_LEN 0xB0
+
+MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN);
+
+/* reg_pefa_index
+ * Index in the KVD Linear Centralized Database.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24);
+
+#define MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN 0xA8
+
+/* reg_pefa_flex_action_set
+ * Action set to perform when the rule is matched.
+ * Must be zero-padded if the action set is shorter than the field.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08,
+              MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN);
+
+static inline void mlxsw_reg_pefa_pack(char *payload, u32 index,
+                                      const char *flex_action_set)
+{
+       MLXSW_REG_ZERO(pefa, payload);
+       mlxsw_reg_pefa_index_set(payload, index);
+       mlxsw_reg_pefa_flex_action_set_memcpy_to(payload, flex_action_set);
+}
+
+/* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2
+ * -----------------------------------------------------
+ * This register is used for accessing rules within a TCAM region.
+ * It is a new version of PTCE that supports a wider key,
+ * mask and action within a TCAM region. This register is not supported
+ * by SwitchX and SwitchX-2.
+ */
+#define MLXSW_REG_PTCE2_ID 0x3017
+#define MLXSW_REG_PTCE2_LEN 0x1D8
+
+MLXSW_REG_DEFINE(ptce2, MLXSW_REG_PTCE2_ID, MLXSW_REG_PTCE2_LEN);
+
+/* reg_ptce2_v
+ * Valid.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ptce2, v, 0x00, 31, 1);
+
+/* reg_ptce2_a
+ * Activity. Set if a packet lookup has hit on the specific entry.
+ * To clear the "a" bit, use "clear activity" op or "clear on read" op.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ptce2, a, 0x00, 30, 1);
+
+enum mlxsw_reg_ptce2_op {
+       /* Read operation. */
+       MLXSW_REG_PTCE2_OP_QUERY_READ = 0,
+       /* Clear on read operation. Used to read the entry
+        * and clear the Activity bit.
+        */
+       MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ = 1,
+       /* Write operation. Used to write a new entry to the table.
+        * All R/W fields are relevant for the new entry. The Activity bit
+        * is set for new entries. Note: a write with v = 0 deletes the entry.
+        */
+       MLXSW_REG_PTCE2_OP_WRITE_WRITE = 0,
+       /* Update action. Only action set will be updated. */
+       MLXSW_REG_PTCE2_OP_WRITE_UPDATE = 1,
+       /* Clear activity. The "a" bit is cleared for the entry. */
+       MLXSW_REG_PTCE2_OP_WRITE_CLEAR_ACTIVITY = 2,
+};
+
+/* reg_ptce2_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ptce2, op, 0x00, 20, 3);
+
+/* reg_ptce2_offset
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ptce2, offset, 0x00, 0, 16);
+
+/* reg_ptce2_tcam_region_info
+ * Opaque object that represents the TCAM region.
+ * Access: Index
+ */
+MLXSW_ITEM_BUF(reg, ptce2, tcam_region_info, 0x10,
+              MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN);
+
+#define MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN 96
+
+/* reg_ptce2_flex_key_blocks
+ * ACL Key.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ptce2, flex_key_blocks, 0x20,
+              MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN);
+
+/* reg_ptce2_mask
+ * Mask - has the same size as the key. A bit that is set directs the
+ * TCAM to compare the corresponding bit in the key. A bit that is clear
+ * directs the TCAM to ignore the corresponding bit in the key.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80,
+              MLXSW_REG_PTCE2_FLEX_KEY_BLOCKS_LEN);
+
+/* reg_ptce2_flex_action_set
+ * ACL action set.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0,
+              MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN);
+
+static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid,
+                                       enum mlxsw_reg_ptce2_op op,
+                                       const char *tcam_region_info,
+                                       u16 offset)
+{
+       MLXSW_REG_ZERO(ptce2, payload);
+       mlxsw_reg_ptce2_v_set(payload, valid);
+       mlxsw_reg_ptce2_op_set(payload, op);
+       mlxsw_reg_ptce2_offset_set(payload, offset);
+       mlxsw_reg_ptce2_tcam_region_info_memcpy_to(payload, tcam_region_info);
+}
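
A usage sketch, not part of this commit: mlxsw_reg_ptce2_pack() fills only the header fields, so writing a complete rule copies the key, mask and action set in through the *_memcpy_to() setters that MLXSW_ITEM_BUF generates (the same pattern mlxsw_reg_prcr_pack() uses above). The function name and its buffer arguments are hypothetical.

static int mlxsw_sp_ptce2_entry_write(struct mlxsw_sp *mlxsw_sp,
                                      const char *tcam_region_info, u16 offset,
                                      const char *key, const char *mask,
                                      const char *flex_action_set)
{
        char ptce2_pl[MLXSW_REG_PTCE2_LEN];

        /* Header: valid entry, full write, region and offset. */
        mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
                             tcam_region_info, offset);
        /* Body: key, mask and first action set live in the same payload. */
        mlxsw_reg_ptce2_flex_key_blocks_memcpy_to(ptce2_pl, key);
        mlxsw_reg_ptce2_mask_memcpy_to(ptce2_pl, mask);
        mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, flex_action_set);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}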
+
 /* QPCR - QoS Policer Configuration Register
  * -----------------------------------------
  * The QPCR register is used to create policers - that limit
@@ -3154,7 +3653,7 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
  * Configures the properties for forwarding to CPU.
  */
 #define MLXSW_REG_HTGT_ID 0x7002
-#define MLXSW_REG_HTGT_LEN 0x100
+#define MLXSW_REG_HTGT_LEN 0x20
 
 MLXSW_REG_DEFINE(htgt, MLXSW_REG_HTGT_ID, MLXSW_REG_HTGT_LEN);
 
@@ -4965,6 +5464,46 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port,
                                           MLXSW_REG_MLCR_DURATION_MAX : 0);
 }
 
+/* MPSC - Monitoring Packet Sampling Configuration Register
+ * --------------------------------------------------------
+ * The MPSC register is used to configure the packet sampling mechanism.
+ */
+#define MLXSW_REG_MPSC_ID 0x9080
+#define MLXSW_REG_MPSC_LEN 0x1C
+
+MLXSW_REG_DEFINE(mpsc, MLXSW_REG_MPSC_ID, MLXSW_REG_MPSC_LEN);
+
+/* reg_mpsc_local_port
+ * Local port number
+ * Not supported for CPU port
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mpsc, local_port, 0x00, 16, 8);
+
+/* reg_mpsc_e
+ * Enable sampling on port local_port
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpsc, e, 0x04, 30, 1);
+
+#define MLXSW_REG_MPSC_RATE_MAX 3500000000UL
+
+/* reg_mpsc_rate
+ * Sampling rate = 1 out of rate packets (with randomization around
+ * the point). Valid values are: 1 to MLXSW_REG_MPSC_RATE_MAX
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpsc, rate, 0x08, 0, 32);
+
+static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e,
+                                      u32 rate)
+{
+       MLXSW_REG_ZERO(mpsc, payload);
+       mlxsw_reg_mpsc_local_port_set(payload, local_port);
+       mlxsw_reg_mpsc_e_set(payload, e);
+       mlxsw_reg_mpsc_rate_set(payload, rate);
+}
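
A usage sketch, not part of this commit; the commit's actual caller is mlxsw_sp_port_sample_set() in spectrum.c below, and disabling passes e = false with a dummy rate of 1. The hypothetical wrapper here only adds the bounds check that the matchall sample offload performs before enabling:

static int mlxsw_sp_port_sampling_enable(struct mlxsw_sp_port *mlxsw_sp_port,
                                         u32 rate)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char mpsc_pl[MLXSW_REG_MPSC_LEN];

        /* Hardware samples 1 out of "rate" packets; 0 is invalid and
         * values above MLXSW_REG_MPSC_RATE_MAX are not supported.
         */
        if (rate < 1 || rate > MLXSW_REG_MPSC_RATE_MAX)
                return -EINVAL;
        mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, true, rate);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}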
+
 /* SBPR - Shared Buffer Pools Register
  * -----------------------------------
  * The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -5394,6 +5933,14 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
        MLXSW_REG(svpe),
        MLXSW_REG(sfmr),
        MLXSW_REG(spvmlr),
+       MLXSW_REG(ppbt),
+       MLXSW_REG(pacl),
+       MLXSW_REG(pagt),
+       MLXSW_REG(ptar),
+       MLXSW_REG(ppbs),
+       MLXSW_REG(prcr),
+       MLXSW_REG(pefa),
+       MLXSW_REG(ptce2),
        MLXSW_REG(qpcr),
        MLXSW_REG(qtct),
        MLXSW_REG(qeec),
@@ -5429,6 +5976,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
        MLXSW_REG(mpat),
        MLXSW_REG(mpar),
        MLXSW_REG(mlcr),
+       MLXSW_REG(mpsc),
        MLXSW_REG(sbpr),
        MLXSW_REG(sbcm),
        MLXSW_REG(sbpm),
index 3c2171dbdba4e0a9abc53ecc7cdc80530e5ffc32..bce8c2e006302db45ce4eaedf8b3a368ec2660c4 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * drivers/net/ethernet/mellanox/mlxsw/resources.h
- * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017 Jiri Pirko <jiri@mellanox.com>
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -48,6 +48,14 @@ enum mlxsw_res_id {
        MLXSW_RES_ID_MAX_LAG,
        MLXSW_RES_ID_MAX_LAG_MEMBERS,
        MLXSW_RES_ID_MAX_BUFFER_SIZE,
+       MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS,
+       MLXSW_RES_ID_ACL_MAX_TCAM_RULES,
+       MLXSW_RES_ID_ACL_MAX_REGIONS,
+       MLXSW_RES_ID_ACL_MAX_GROUPS,
+       MLXSW_RES_ID_ACL_MAX_GROUP_SIZE,
+       MLXSW_RES_ID_ACL_FLEX_KEYS,
+       MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE,
+       MLXSW_RES_ID_ACL_ACTIONS_PER_SET,
        MLXSW_RES_ID_MAX_CPU_POLICERS,
        MLXSW_RES_ID_MAX_VRS,
        MLXSW_RES_ID_MAX_RIFS,
@@ -72,6 +80,14 @@ static u16 mlxsw_res_ids[] = {
        [MLXSW_RES_ID_MAX_LAG] = 0x2520,
        [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
        [MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802,        /* Bytes */
+       [MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901,
+       [MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902,
+       [MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903,
+       [MLXSW_RES_ID_ACL_MAX_GROUPS] = 0x2904,
+       [MLXSW_RES_ID_ACL_MAX_GROUP_SIZE] = 0x2905,
+       [MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910,
+       [MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911,
+       [MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912,
        [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
        [MLXSW_RES_ID_MAX_VRS] = 0x2C01,
        [MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
index 003093abb1707f0a6cdefe465e073c005d2ee3f9..16484f24b7dbbaa2fe10170bd7cb46fee9832938 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
  *
@@ -57,6 +57,7 @@
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_mirred.h>
 #include <net/netevent.h>
+#include <net/tc_act/tc_sample.h>
 
 #include "spectrum.h"
 #include "pci.h"
@@ -137,8 +138,6 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
  */
 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
 
-static bool mlxsw_sp_port_dev_check(const struct net_device *dev);
-
 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
                                     const struct mlxsw_tx_info *tx_info)
 {
@@ -469,6 +468,16 @@ static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
        mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
 }
 
+static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                   bool enable, u32 rate)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char mpsc_pl[MLXSW_REG_MPSC_LEN];
+
+       mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
+}
+
 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                          bool is_up)
 {
@@ -948,15 +957,13 @@ out:
 /* Return the stats from a cache that is updated periodically,
  * as this function might get called in an atomic context.
  */
-static struct rtnl_link_stats64 *
+static void
 mlxsw_sp_port_get_stats64(struct net_device *dev,
                          struct rtnl_link_stats64 *stats)
 {
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 
        memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
-
-       return stats;
 }
 
 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
@@ -1164,8 +1171,8 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
 }
 
 static struct mlxsw_sp_port_mall_tc_entry *
-mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port,
-                               unsigned long cookie) {
+mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
+                                unsigned long cookie) {
        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
 
        list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
@@ -1177,17 +1184,15 @@ mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port,
 
 static int
 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
-                                     struct tc_cls_matchall_offload *cls,
+                                     struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
                                      const struct tc_action *a,
                                      bool ingress)
 {
-       struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
        struct net *net = dev_net(mlxsw_sp_port->dev);
        enum mlxsw_sp_span_type span_type;
        struct mlxsw_sp_port *to_port;
        struct net_device *to_dev;
        int ifindex;
-       int err;
 
        ifindex = tcf_mirred_ifindex(a);
        to_dev = __dev_get_by_index(net, ifindex);
@@ -1198,90 +1203,149 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
 
        if (!mlxsw_sp_port_dev_check(to_dev)) {
                netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        }
        to_port = netdev_priv(to_dev);
 
-       mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
-       if (!mall_tc_entry)
-               return -ENOMEM;
+       mirror->to_local_port = to_port->local_port;
+       mirror->ingress = ingress;
+       span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+       return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
+}
 
-       mall_tc_entry->cookie = cls->cookie;
-       mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
-       mall_tc_entry->mirror.to_local_port = to_port->local_port;
-       mall_tc_entry->mirror.ingress = ingress;
-       list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
+static void
+mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
+                                     struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       enum mlxsw_sp_span_type span_type;
+       struct mlxsw_sp_port *to_port;
 
-       span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
-       err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
+       to_port = mlxsw_sp->ports[mirror->to_local_port];
+       span_type = mirror->ingress ?
+                       MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+       mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
+}
+
+static int
+mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
+                                     struct tc_cls_matchall_offload *cls,
+                                     const struct tc_action *a,
+                                     bool ingress)
+{
+       int err;
+
+       if (!mlxsw_sp_port->sample)
+               return -EOPNOTSUPP;
+       if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
+               netdev_err(mlxsw_sp_port->dev, "sample already active\n");
+               return -EEXIST;
+       }
+       if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
+               netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
+               return -EOPNOTSUPP;
+       }
+
+       rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
+                          tcf_sample_psample_group(a));
+       mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
+       mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
+       mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
+
+       err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
        if (err)
-               goto err_mirror_add;
+               goto err_port_sample_set;
        return 0;
 
-err_mirror_add:
-       list_del(&mall_tc_entry->list);
-       kfree(mall_tc_entry);
+err_port_sample_set:
+       RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
        return err;
 }
 
+static void
+mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       if (!mlxsw_sp_port->sample)
+               return;
+
+       mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
+       RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
+}
+
 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
                                          __be16 protocol,
                                          struct tc_cls_matchall_offload *cls,
                                          bool ingress)
 {
+       struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
        const struct tc_action *a;
        LIST_HEAD(actions);
        int err;
 
        if (!tc_single_action(cls->exts)) {
                netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        }
 
-       tcf_exts_to_list(cls->exts, &actions);
-       list_for_each_entry(a, &actions, list) {
-               if (!is_tcf_mirred_egress_mirror(a) ||
-                   protocol != htons(ETH_P_ALL)) {
-                       return -ENOTSUPP;
-               }
+       mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
+       if (!mall_tc_entry)
+               return -ENOMEM;
+       mall_tc_entry->cookie = cls->cookie;
 
-               err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls,
+       tcf_exts_to_list(cls->exts, &actions);
+       a = list_first_entry(&actions, struct tc_action, list);
+
+       if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
+               struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
+
+               mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
+               mirror = &mall_tc_entry->mirror;
+               err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
+                                                           mirror, a, ingress);
+       } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
+               mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
+               err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
                                                            a, ingress);
-               if (err)
-                       return err;
+       } else {
+               err = -EOPNOTSUPP;
        }
 
+       if (err)
+               goto err_add_action;
+
+       list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
        return 0;
+
+err_add_action:
+       kfree(mall_tc_entry);
+       return err;
 }
 
 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
                                           struct tc_cls_matchall_offload *cls)
 {
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
-       enum mlxsw_sp_span_type span_type;
-       struct mlxsw_sp_port *to_port;
 
-       mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port,
-                                                       cls->cookie);
+       mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
+                                                        cls->cookie);
        if (!mall_tc_entry) {
                netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
                return;
        }
+       list_del(&mall_tc_entry->list);
 
        switch (mall_tc_entry->type) {
        case MLXSW_SP_PORT_MALL_MIRROR:
-               to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port];
-               span_type = mall_tc_entry->mirror.ingress ?
-                               MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
-
-               mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
+               mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
+                                                     &mall_tc_entry->mirror);
+               break;
+       case MLXSW_SP_PORT_MALL_SAMPLE:
+               mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
                break;
        default:
                WARN_ON(1);
        }
 
-       list_del(&mall_tc_entry->list);
        kfree(mall_tc_entry);
 }
 
@@ -1291,7 +1355,8 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
 
-       if (tc->type == TC_SETUP_MATCHALL) {
+       switch (tc->type) {
+       case TC_SETUP_MATCHALL:
                switch (tc->cls_mall->command) {
                case TC_CLSMATCHALL_REPLACE:
                        return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
@@ -1305,9 +1370,21 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
                default:
                        return -EINVAL;
                }
+       case TC_SETUP_CLSFLOWER:
+               switch (tc->cls_flower->command) {
+               case TC_CLSFLOWER_REPLACE:
+                       return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress,
+                                                      proto, tc->cls_flower);
+               case TC_CLSFLOWER_DESTROY:
+                       mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress,
+                                               tc->cls_flower);
+                       return 0;
+               default:
+                       return -EOPNOTSUPP;
+               }
        }
 
-       return -ENOTSUPP;
+       return -EOPNOTSUPP;
 }
 
 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
@@ -1323,8 +1400,6 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
        .ndo_get_offload_stats  = mlxsw_sp_port_get_offload_stats,
        .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
        .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
-       .ndo_neigh_construct    = mlxsw_sp_router_neigh_construct,
-       .ndo_neigh_destroy      = mlxsw_sp_router_neigh_destroy,
        .ndo_fdb_add            = switchdev_port_fdb_add,
        .ndo_fdb_del            = switchdev_port_fdb_del,
        .ndo_fdb_dump           = switchdev_port_fdb_dump,
@@ -1650,7 +1725,7 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
                break;
        default:
                WARN_ON(1);
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        }
        return 0;
 }
@@ -2256,6 +2331,13 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
                goto err_alloc_stats;
        }
 
+       mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
+                                       GFP_KERNEL);
+       if (!mlxsw_sp_port->sample) {
+               err = -ENOMEM;
+               goto err_alloc_sample;
+       }
+
        mlxsw_sp_port->hw_stats.cache =
                kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
 
@@ -2384,6 +2466,8 @@ err_dev_addr_init:
 err_port_swid_set:
        kfree(mlxsw_sp_port->hw_stats.cache);
 err_alloc_hw_stats:
+       kfree(mlxsw_sp_port->sample);
+err_alloc_sample:
        free_percpu(mlxsw_sp_port->pcpu_stats);
 err_alloc_stats:
        kfree(mlxsw_sp_port->untagged_vlans);
@@ -2429,8 +2513,9 @@ static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
        mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
        mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
        mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
-       free_percpu(mlxsw_sp_port->pcpu_stats);
        kfree(mlxsw_sp_port->hw_stats.cache);
+       kfree(mlxsw_sp_port->sample);
+       free_percpu(mlxsw_sp_port->pcpu_stats);
        kfree(mlxsw_sp_port->untagged_vlans);
        kfree(mlxsw_sp_port->active_vlans);
        WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
@@ -2731,6 +2816,41 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
        return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
 }
 
+static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
+                                            void *priv)
+{
+       struct mlxsw_sp *mlxsw_sp = priv;
+       struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+       struct psample_group *psample_group;
+       u32 size;
+
+       if (unlikely(!mlxsw_sp_port)) {
+               dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
+                                    local_port);
+               goto out;
+       }
+       if (unlikely(!mlxsw_sp_port->sample)) {
+               dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
+                                    local_port);
+               goto out;
+       }
+
+       size = mlxsw_sp_port->sample->truncate ?
+                 mlxsw_sp_port->sample->trunc_size : skb->len;
+
+       rcu_read_lock();
+       psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
+       if (!psample_group)
+               goto out_unlock;
+       psample_sample_packet(psample_group, skb, size,
+                             mlxsw_sp_port->dev->ifindex, 0,
+                             mlxsw_sp_port->sample->rate);
+out_unlock:
+       rcu_read_unlock();
+out:
+       consume_skb(skb);
+}
+
 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
        MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
                  _is_ctrl, SP_##_trap_group, DISCARD)
@@ -2766,6 +2886,9 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
        MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
        MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
        MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
+       /* PKT Sample trap */
+       MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
+                 false, SP_IP2ME, DISCARD)
 };
 
 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
@@ -2950,10 +3073,16 @@ static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
        else
                table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
 
-       if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
+       switch (type) {
+       case MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST:
                flood_table = MLXSW_SP_FLOOD_TABLE_UC;
-       else
-               flood_table = MLXSW_SP_FLOOD_TABLE_BM;
+               break;
+       case MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4:
+               flood_table = MLXSW_SP_FLOOD_TABLE_MC;
+               break;
+       default:
+               flood_table = MLXSW_SP_FLOOD_TABLE_BC;
+       }
 
        mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
                            flood_table);
@@ -3089,6 +3218,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
                goto err_span_init;
        }
 
+       err = mlxsw_sp_acl_init(mlxsw_sp);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
+               goto err_acl_init;
+       }
+
        err = mlxsw_sp_ports_create(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
@@ -3098,6 +3233,8 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
        return 0;
 
 err_ports_create:
+       mlxsw_sp_acl_fini(mlxsw_sp);
+err_acl_init:
        mlxsw_sp_span_fini(mlxsw_sp);
 err_span_init:
        mlxsw_sp_router_fini(mlxsw_sp);
@@ -3118,6 +3255,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
 
        mlxsw_sp_ports_remove(mlxsw_sp);
+       mlxsw_sp_acl_fini(mlxsw_sp);
        mlxsw_sp_span_fini(mlxsw_sp);
        mlxsw_sp_router_fini(mlxsw_sp);
        mlxsw_sp_switchdev_fini(mlxsw_sp);
@@ -3138,9 +3276,9 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
        .used_flood_tables              = 1,
        .used_flood_mode                = 1,
        .flood_mode                     = 3,
-       .max_fid_offset_flood_tables    = 2,
+       .max_fid_offset_flood_tables    = 3,
        .fid_offset_flood_table_size    = VLAN_N_VID - 1,
-       .max_fid_flood_tables           = 2,
+       .max_fid_flood_tables           = 3,
        .fid_flood_table_size           = MLXSW_SP_VFID_MAX,
        .used_max_ib_mc                 = 1,
        .max_ib_mc                      = 0,
@@ -3183,7 +3321,7 @@ static struct mlxsw_driver mlxsw_sp_driver = {
        .profile                        = &mlxsw_sp_config_profile,
 };
 
-static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
+bool mlxsw_sp_port_dev_check(const struct net_device *dev)
 {
        return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
 }
@@ -3341,6 +3479,8 @@ mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
        if (!r)
                return NULL;
 
+       INIT_LIST_HEAD(&r->nexthop_list);
+       INIT_LIST_HEAD(&r->neigh_list);
        ether_addr_copy(r->addr, l3_dev->dev_addr);
        r->mtu = l3_dev->mtu;
        r->ref_count = 1;
@@ -3409,6 +3549,8 @@ static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
        u16 fid = f->fid;
        u16 rif = r->rif;
 
+       mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
+
        mlxsw_sp->rifs[rif] = NULL;
        f->r = NULL;
 
@@ -3553,7 +3695,7 @@ static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
 
        table_type = mlxsw_sp_flood_table_type_get(fid);
        index = mlxsw_sp_flood_table_index_get(fid);
-       mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type,
+       mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BC, index, table_type,
                            1, MLXSW_PORT_ROUTER_PORT, set);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
 
@@ -3638,6 +3780,8 @@ void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_fid *f = r->f;
        u16 rif = r->rif;
 
+       mlxsw_sp_router_rif_gone_sync(mlxsw_sp, r);
+
        mlxsw_sp->rifs[rif] = NULL;
        f->r = NULL;
 
@@ -3927,6 +4071,9 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
        mlxsw_sp_port->learning = 1;
        mlxsw_sp_port->learning_sync = 1;
        mlxsw_sp_port->uc_flood = 1;
+       mlxsw_sp_port->mc_flood = 1;
+       mlxsw_sp_port->mc_router = 0;
+       mlxsw_sp_port->mc_disabled = 1;
        mlxsw_sp_port->bridged = 1;
 
        return 0;
@@ -3943,6 +4090,8 @@ static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
        mlxsw_sp_port->learning = 0;
        mlxsw_sp_port->learning_sync = 0;
        mlxsw_sp_port->uc_flood = 0;
+       mlxsw_sp_port->mc_flood = 0;
+       mlxsw_sp_port->mc_router = 0;
        mlxsw_sp_port->bridged = 0;
 
        /* Add implicit VLAN interface in the device, so that untagged
@@ -4605,6 +4754,9 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
        mlxsw_sp_vport->learning = 1;
        mlxsw_sp_vport->learning_sync = 1;
        mlxsw_sp_vport->uc_flood = 1;
+       mlxsw_sp_vport->mc_flood = 1;
+       mlxsw_sp_vport->mc_router = 0;
+       mlxsw_sp_vport->mc_disabled = 1;
        mlxsw_sp_vport->bridged = 1;
 
        return 0;
@@ -4625,6 +4777,8 @@ static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
        mlxsw_sp_vport->learning = 0;
        mlxsw_sp_vport->learning_sync = 0;
        mlxsw_sp_vport->uc_flood = 0;
+       mlxsw_sp_vport->mc_flood = 0;
+       mlxsw_sp_vport->mc_router = 0;
        mlxsw_sp_vport->bridged = 0;
 }
 
index cc1af19d699afc1b7b7ddfbc4ec9e7b1b24715fe..13ec85e7c392f8941ecf6441333d416ba4609f3a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * drivers/net/ethernet/mellanox/mlxsw/spectrum.h
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
  *
 #include <linux/dcbnl.h>
 #include <linux/in6.h>
 #include <linux/notifier.h>
+#include <net/psample.h>
+#include <net/pkt_cls.h>
 
 #include "port.h"
 #include "core.h"
+#include "core_acl_flex_keys.h"
+#include "core_acl_flex_actions.h"
 
 #define MLXSW_SP_VFID_BASE VLAN_N_VID
-#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */
+#define MLXSW_SP_VFID_MAX 1024 /* Bridged VLAN interfaces */
 
 #define MLXSW_SP_RFID_BASE 15360
 #define MLXSW_SP_INVALID_RIF 0xffff
@@ -104,6 +108,8 @@ struct mlxsw_sp_fid {
 };
 
 struct mlxsw_sp_rif {
+       struct list_head nexthop_list;
+       struct list_head neigh_list;
        struct net_device *dev;
        unsigned int ref_count;
        struct mlxsw_sp_fid *f;
@@ -229,6 +235,7 @@ struct mlxsw_sp_span_entry {
 
 enum mlxsw_sp_port_mall_action_type {
        MLXSW_SP_PORT_MALL_MIRROR,
+       MLXSW_SP_PORT_MALL_SAMPLE,
 };
 
 struct mlxsw_sp_port_mall_mirror_tc_entry {
@@ -249,17 +256,20 @@ struct mlxsw_sp_router {
        struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
        struct mlxsw_sp_vr *vrs;
        struct rhashtable neigh_ht;
+       struct rhashtable nexthop_group_ht;
+       struct rhashtable nexthop_ht;
        struct {
                struct delayed_work dw;
                unsigned long interval; /* ms */
        } neighs_update;
        struct delayed_work nexthop_probe_dw;
 #define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
-       struct list_head nexthop_group_list;
        struct list_head nexthop_neighs_list;
        bool aborted;
 };
 
+struct mlxsw_sp_acl;
+
 struct mlxsw_sp {
        struct {
                struct list_head list;
@@ -289,6 +299,7 @@ struct mlxsw_sp {
        u8 port_to_module[MLXSW_PORT_MAX_PORTS];
        struct mlxsw_sp_sb sb;
        struct mlxsw_sp_router router;
+       struct mlxsw_sp_acl *acl;
        struct {
                DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
        } kvdl;
@@ -315,15 +326,25 @@ struct mlxsw_sp_port_pcpu_stats {
        u32                     tx_dropped;
 };
 
+struct mlxsw_sp_port_sample {
+       struct psample_group __rcu *psample_group;
+       u32 trunc_size;
+       u32 rate;
+       bool truncate;
+};
+
 struct mlxsw_sp_port {
        struct net_device *dev;
        struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
        struct mlxsw_sp *mlxsw_sp;
        u8 local_port;
        u8 stp_state;
-       u8 learning:1,
+       u16 learning:1,
           learning_sync:1,
           uc_flood:1,
+          mc_flood:1,
+          mc_router:1,
+          mc_disabled:1,
           bridged:1,
           lagged:1,
           split:1;
@@ -361,8 +382,10 @@ struct mlxsw_sp_port {
                struct rtnl_link_stats64 *cache;
                struct delayed_work update_dw;
        } hw_stats;
+       struct mlxsw_sp_port_sample *sample;
 };
 
+bool mlxsw_sp_port_dev_check(const struct net_device *dev);
 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
 
@@ -489,7 +512,8 @@ mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
 
 enum mlxsw_sp_flood_table {
        MLXSW_SP_FLOOD_TABLE_UC,
-       MLXSW_SP_FLOOD_TABLE_BM,
+       MLXSW_SP_FLOOD_TABLE_BC,
+       MLXSW_SP_FLOOD_TABLE_MC,
 };
 
 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
@@ -582,14 +606,107 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
 
 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_router_neigh_construct(struct net_device *dev,
-                                   struct neighbour *n);
-void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
-                                  struct neighbour *n);
 int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
                                   unsigned long event, void *ptr);
+void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
+                                  struct mlxsw_sp_rif *r);
 
 int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
 void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
 
+struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
+
+struct mlxsw_sp_acl_rule_info {
+       unsigned int priority;
+       struct mlxsw_afk_element_values values;
+       struct mlxsw_afa_block *act_block;
+};
+
+enum mlxsw_sp_acl_profile {
+       MLXSW_SP_ACL_PROFILE_FLOWER,
+};
+
+struct mlxsw_sp_acl_profile_ops {
+       size_t ruleset_priv_size;
+       int (*ruleset_add)(struct mlxsw_sp *mlxsw_sp,
+                          void *priv, void *ruleset_priv);
+       void (*ruleset_del)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+       int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+                           struct net_device *dev, bool ingress);
+       void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv);
+       size_t rule_priv_size;
+       int (*rule_add)(struct mlxsw_sp *mlxsw_sp,
+                       void *ruleset_priv, void *rule_priv,
+                       struct mlxsw_sp_acl_rule_info *rulei);
+       void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv);
+};
+
+struct mlxsw_sp_acl_ops {
+       size_t priv_size;
+       int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
+       void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
+       const struct mlxsw_sp_acl_profile_ops *
+                       (*profile_ops)(struct mlxsw_sp *mlxsw_sp,
+                                      enum mlxsw_sp_acl_profile profile);
+};
+
+struct mlxsw_sp_acl_ruleset;
+
+struct mlxsw_sp_acl_ruleset *
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+                        struct net_device *dev, bool ingress,
+                        enum mlxsw_sp_acl_profile profile);
+void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
+                             struct mlxsw_sp_acl_ruleset *ruleset);
+
+struct mlxsw_sp_acl_rule_info *
+mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl);
+void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei);
+void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
+                                unsigned int priority);
+void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
+                                   enum mlxsw_afk_element element,
+                                   u32 key_value, u32 mask_value);
+void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
+                                   enum mlxsw_afk_element element,
+                                   const char *key_value,
+                                   const char *mask_value, unsigned int len);
+void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
+void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
+                                u16 group_id);
+int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
+int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
+                              struct mlxsw_sp_acl_rule_info *rulei,
+                              struct net_device *out_dev);
+
+struct mlxsw_sp_acl_rule;
+
+struct mlxsw_sp_acl_rule *
+mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
+                        struct mlxsw_sp_acl_ruleset *ruleset,
+                        unsigned long cookie);
+void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
+                              struct mlxsw_sp_acl_rule *rule);
+int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
+                         struct mlxsw_sp_acl_rule *rule);
+void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
+                          struct mlxsw_sp_acl_rule *rule);
+struct mlxsw_sp_acl_rule *
+mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
+                        struct mlxsw_sp_acl_ruleset *ruleset,
+                        unsigned long cookie);
+struct mlxsw_sp_acl_rule_info *
+mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
+
+int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
+
+extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops;
+
+int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+                           __be16 protocol, struct tc_cls_flower_offload *f);
+void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+                            struct tc_cls_flower_offload *f);
+
 #endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
new file mode 100644 (file)
index 0000000..8a18b3a
--- /dev/null
@@ -0,0 +1,572 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/rhashtable.h>
+#include <linux/netdevice.h>
+
+#include "reg.h"
+#include "core.h"
+#include "resources.h"
+#include "spectrum.h"
+#include "core_acl_flex_keys.h"
+#include "core_acl_flex_actions.h"
+#include "spectrum_acl_flex_keys.h"
+
+struct mlxsw_sp_acl {
+       struct mlxsw_afk *afk;
+       struct mlxsw_afa *afa;
+       const struct mlxsw_sp_acl_ops *ops;
+       struct rhashtable ruleset_ht;
+       unsigned long priv[0];
+       /* priv must always be the last item */
+};
+
+struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
+{
+       return acl->afk;
+}
+
+struct mlxsw_sp_acl_ruleset_ht_key {
+       struct net_device *dev; /* dev this ruleset is bound to */
+       bool ingress;
+       const struct mlxsw_sp_acl_profile_ops *ops;
+};
+
+struct mlxsw_sp_acl_ruleset {
+       struct rhash_head ht_node; /* Member of acl HT */
+       struct mlxsw_sp_acl_ruleset_ht_key ht_key;
+       struct rhashtable rule_ht;
+       unsigned int ref_count;
+       unsigned long priv[0];
+       /* priv must always be the last item */
+};
+
+struct mlxsw_sp_acl_rule {
+       struct rhash_head ht_node; /* Member of rule HT */
+       unsigned long cookie; /* HT key */
+       struct mlxsw_sp_acl_ruleset *ruleset;
+       struct mlxsw_sp_acl_rule_info *rulei;
+       unsigned long priv[0];
+       /* priv must always be the last item */
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
+       .key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
+       .key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
+       .head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
+       .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
+       .key_len = sizeof(unsigned long),
+       .key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
+       .head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
+       .automatic_shrinking = true,
+};
+
+static struct mlxsw_sp_acl_ruleset *
+mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
+                           const struct mlxsw_sp_acl_profile_ops *ops)
+{
+       struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+       struct mlxsw_sp_acl_ruleset *ruleset;
+       size_t alloc_size;
+       int err;
+
+       alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
+       ruleset = kzalloc(alloc_size, GFP_KERNEL);
+       if (!ruleset)
+               return ERR_PTR(-ENOMEM);
+       ruleset->ref_count = 1;
+       ruleset->ht_key.ops = ops;
+
+       err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
+       if (err)
+               goto err_rhashtable_init;
+
+       err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
+       if (err)
+               goto err_ops_ruleset_add;
+
+       return ruleset;
+
+err_ops_ruleset_add:
+       rhashtable_destroy(&ruleset->rule_ht);
+err_rhashtable_init:
+       kfree(ruleset);
+       return ERR_PTR(err);
+}
+
+static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
+                                        struct mlxsw_sp_acl_ruleset *ruleset)
+{
+       const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+       ops->ruleset_del(mlxsw_sp, ruleset->priv);
+       rhashtable_destroy(&ruleset->rule_ht);
+       kfree(ruleset);
+}
+
+static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+                                    struct mlxsw_sp_acl_ruleset *ruleset,
+                                    struct net_device *dev, bool ingress)
+{
+       const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+       struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+       int err;
+
+       ruleset->ht_key.dev = dev;
+       ruleset->ht_key.ingress = ingress;
+       err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
+                                    mlxsw_sp_acl_ruleset_ht_params);
+       if (err)
+               return err;
+       err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
+       if (err)
+               goto err_ops_ruleset_bind;
+       return 0;
+
+err_ops_ruleset_bind:
+       rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
+                              mlxsw_sp_acl_ruleset_ht_params);
+       return err;
+}
+
+static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+                                       struct mlxsw_sp_acl_ruleset *ruleset)
+{
+       const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+       struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+
+       ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
+       rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
+                              mlxsw_sp_acl_ruleset_ht_params);
+}
+
+static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
+{
+       ruleset->ref_count++;
+}
+
+static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
+                                        struct mlxsw_sp_acl_ruleset *ruleset)
+{
+       if (--ruleset->ref_count)
+               return;
+       mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
+       mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
+}
+
+struct mlxsw_sp_acl_ruleset *
+mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp,
+                        struct net_device *dev, bool ingress,
+                        enum mlxsw_sp_acl_profile profile)
+{
+       const struct mlxsw_sp_acl_profile_ops *ops;
+       struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+       struct mlxsw_sp_acl_ruleset_ht_key ht_key;
+       struct mlxsw_sp_acl_ruleset *ruleset;
+       int err;
+
+       ops = acl->ops->profile_ops(mlxsw_sp, profile);
+       if (!ops)
+               return ERR_PTR(-EINVAL);
+
+       memset(&ht_key, 0, sizeof(ht_key));
+       ht_key.dev = dev;
+       ht_key.ingress = ingress;
+       ht_key.ops = ops;
+       ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
+                                        mlxsw_sp_acl_ruleset_ht_params);
+       if (ruleset) {
+               mlxsw_sp_acl_ruleset_ref_inc(ruleset);
+               return ruleset;
+       }
+       ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
+       if (IS_ERR(ruleset))
+               return ruleset;
+       err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, ingress);
+       if (err)
+               goto err_ruleset_bind;
+       return ruleset;
+
+err_ruleset_bind:
+       mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
+       return ERR_PTR(err);
+}
+
+void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
+                             struct mlxsw_sp_acl_ruleset *ruleset)
+{
+       mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
+}
+
+struct mlxsw_sp_acl_rule_info *
+mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
+{
+       struct mlxsw_sp_acl_rule_info *rulei;
+       int err;
+
+       rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
+       if (!rulei)
+               return NULL;
+       rulei->act_block = mlxsw_afa_block_create(acl->afa);
+       if (IS_ERR(rulei->act_block)) {
+               err = PTR_ERR(rulei->act_block);
+               goto err_afa_block_create;
+       }
+       return rulei;
+
+err_afa_block_create:
+       kfree(rulei);
+       return ERR_PTR(err);
+}
+
+void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
+{
+       mlxsw_afa_block_destroy(rulei->act_block);
+       kfree(rulei);
+}
+
+int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
+{
+       return mlxsw_afa_block_commit(rulei->act_block);
+}
+
+void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
+                                unsigned int priority)
+{
+       rulei->priority = priority;
+}
+
+void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
+                                   enum mlxsw_afk_element element,
+                                   u32 key_value, u32 mask_value)
+{
+       mlxsw_afk_values_add_u32(&rulei->values, element,
+                                key_value, mask_value);
+}
+
+void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
+                                   enum mlxsw_afk_element element,
+                                   const char *key_value,
+                                   const char *mask_value, unsigned int len)
+{
+       mlxsw_afk_values_add_buf(&rulei->values, element,
+                                key_value, mask_value, len);
+}
+
+void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
+{
+       mlxsw_afa_block_continue(rulei->act_block);
+}
+
+void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
+                                u16 group_id)
+{
+       mlxsw_afa_block_jump(rulei->act_block, group_id);
+}
+
+int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
+{
+       return mlxsw_afa_block_append_drop(rulei->act_block);
+}
+
+int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
+                              struct mlxsw_sp_acl_rule_info *rulei,
+                              struct net_device *out_dev)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port;
+       u8 local_port;
+       bool in_port;
+
+       if (out_dev) {
+               if (!mlxsw_sp_port_dev_check(out_dev))
+                       return -EINVAL;
+               mlxsw_sp_port = netdev_priv(out_dev);
+               if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp)
+                       return -EINVAL;
+               local_port = mlxsw_sp_port->local_port;
+               in_port = false;
+       } else {
+               /* If out_dev is NULL, the caller wants to
+                * set forward to the ingress port.
+                */
+               local_port = 0;
+               in_port = true;
+       }
+       return mlxsw_afa_block_append_fwd(rulei->act_block,
+                                         local_port, in_port);
+}
+
+struct mlxsw_sp_acl_rule *
+mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
+                        struct mlxsw_sp_acl_ruleset *ruleset,
+                        unsigned long cookie)
+{
+       const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+       struct mlxsw_sp_acl_rule *rule;
+       int err;
+
+       mlxsw_sp_acl_ruleset_ref_inc(ruleset);
+       rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
+       if (!rule) {
+               err = -ENOMEM;
+               goto err_alloc;
+       }
+       rule->cookie = cookie;
+       rule->ruleset = ruleset;
+
+       rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
+       if (IS_ERR(rule->rulei)) {
+               err = PTR_ERR(rule->rulei);
+               goto err_rulei_create;
+       }
+       return rule;
+
+err_rulei_create:
+       kfree(rule);
+err_alloc:
+       mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
+       return ERR_PTR(err);
+}
+
+void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
+                              struct mlxsw_sp_acl_rule *rule)
+{
+       struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+
+       mlxsw_sp_acl_rulei_destroy(rule->rulei);
+       kfree(rule);
+       mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
+}
+
+int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
+                         struct mlxsw_sp_acl_rule *rule)
+{
+       struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+       const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+       int err;
+
+       err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
+       if (err)
+               return err;
+
+       err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
+                                    mlxsw_sp_acl_rule_ht_params);
+       if (err)
+               goto err_rhashtable_insert;
+
+       return 0;
+
+err_rhashtable_insert:
+       ops->rule_del(mlxsw_sp, rule->priv);
+       return err;
+}
+
+void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
+                          struct mlxsw_sp_acl_rule *rule)
+{
+       struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
+       const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
+
+       rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
+                              mlxsw_sp_acl_rule_ht_params);
+       ops->rule_del(mlxsw_sp, rule->priv);
+}
+
+struct mlxsw_sp_acl_rule *
+mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
+                        struct mlxsw_sp_acl_ruleset *ruleset,
+                        unsigned long cookie)
+{
+       return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
+                                      mlxsw_sp_acl_rule_ht_params);
+}
+
+struct mlxsw_sp_acl_rule_info *
+mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
+{
+       return rule->rulei;
+}
+
+#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1
+
+static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
+                                    char *enc_actions, bool is_first)
+{
+       struct mlxsw_sp *mlxsw_sp = priv;
+       char pefa_pl[MLXSW_REG_PEFA_LEN];
+       u32 kvdl_index;
+       int ret;
+       int err;
+
+       /* The first action set of a TCAM entry is stored directly in TCAM,
+        * not in the KVD linear area.
+        */
+       if (is_first)
+               return 0;
+
+       ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE);
+       if (ret < 0)
+               return ret;
+       kvdl_index = ret;
+       mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
+       if (err)
+               goto err_pefa_write;
+       *p_kvdl_index = kvdl_index;
+       return 0;
+
+err_pefa_write:
+       mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+       return err;
+}
+
+static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
+                                     bool is_first)
+{
+       struct mlxsw_sp *mlxsw_sp = priv;
+
+       if (is_first)
+               return;
+       mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+}
+
+static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
+                                          u8 local_port)
+{
+       struct mlxsw_sp *mlxsw_sp = priv;
+       char ppbs_pl[MLXSW_REG_PPBS_LEN];
+       u32 kvdl_index;
+       int ret;
+       int err;
+
+       ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1);
+       if (ret < 0)
+               return ret;
+       kvdl_index = ret;
+       mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
+       if (err)
+               goto err_ppbs_write;
+       *p_kvdl_index = kvdl_index;
+       return 0;
+
+err_ppbs_write:
+       mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+       return err;
+}
+
+static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
+{
+       struct mlxsw_sp *mlxsw_sp = priv;
+
+       mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
+}
+
+static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
+       .kvdl_set_add           = mlxsw_sp_act_kvdl_set_add,
+       .kvdl_set_del           = mlxsw_sp_act_kvdl_set_del,
+       .kvdl_fwd_entry_add     = mlxsw_sp_act_kvdl_fwd_entry_add,
+       .kvdl_fwd_entry_del     = mlxsw_sp_act_kvdl_fwd_entry_del,
+};
+
+int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
+{
+       const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
+       struct mlxsw_sp_acl *acl;
+       int err;
+
+       acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
+       if (!acl)
+               return -ENOMEM;
+       mlxsw_sp->acl = acl;
+
+       acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
+                                                      ACL_FLEX_KEYS),
+                                   mlxsw_sp_afk_blocks,
+                                   MLXSW_SP_AFK_BLOCKS_COUNT);
+       if (!acl->afk) {
+               err = -ENOMEM;
+               goto err_afk_create;
+       }
+
+       acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
+                                                      ACL_ACTIONS_PER_SET),
+                                   &mlxsw_sp_act_afa_ops, mlxsw_sp);
+       if (IS_ERR(acl->afa)) {
+               err = PTR_ERR(acl->afa);
+               goto err_afa_create;
+       }
+
+       err = rhashtable_init(&acl->ruleset_ht,
+                             &mlxsw_sp_acl_ruleset_ht_params);
+       if (err)
+               goto err_rhashtable_init;
+
+       err = acl_ops->init(mlxsw_sp, acl->priv);
+       if (err)
+               goto err_acl_ops_init;
+
+       acl->ops = acl_ops;
+       return 0;
+
+err_acl_ops_init:
+       rhashtable_destroy(&acl->ruleset_ht);
+err_rhashtable_init:
+       mlxsw_afa_destroy(acl->afa);
+err_afa_create:
+       mlxsw_afk_destroy(acl->afk);
+err_afk_create:
+       kfree(acl);
+       return err;
+}
+
+void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
+       const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;
+
+       acl_ops->fini(mlxsw_sp, acl->priv);
+       rhashtable_destroy(&acl->ruleset_ht);
+       mlxsw_afa_destroy(acl->afa);
+       mlxsw_afk_destroy(acl->afk);
+       kfree(acl);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
new file mode 100644 (file)
index 0000000..82b81cf
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
+#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H
+
+#include "core_acl_flex_keys.h"
+
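+/* Each array describes the layout of key elements within one hardware
+ * flexible key block: an element, its byte offset in the block, and for
+ * U32 elements also the bit shift and width.
+ */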
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_dmac[] = {
+       MLXSW_AFK_ELEMENT_INST_BUF(DMAC, 0x00, 6),
+       MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac[] = {
+       MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x00, 6),
+       MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_l2_smac_ex[] = {
+       MLXSW_AFK_ELEMENT_INST_BUF(SMAC, 0x02, 6),
+       MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_sip[] = {
+       MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32),
+       MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+       MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = {
+       MLXSW_AFK_ELEMENT_INST_U32(DST_IP4, 0x00, 0, 32),
+       MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+       MLXSW_AFK_ELEMENT_INST_U32(SRC_SYS_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_ex[] = {
+       MLXSW_AFK_ELEMENT_INST_U32(SRC_L4_PORT, 0x08, 0, 16),
+       MLXSW_AFK_ELEMENT_INST_U32(DST_L4_PORT, 0x0C, 0, 16),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_dip[] = {
+       MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_LO, 0x00, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_ex1[] = {
+       MLXSW_AFK_ELEMENT_INST_BUF(DST_IP6_HI, 0x00, 8),
+       MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x08, 0, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip[] = {
+       MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_LO, 0x00, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_sip_ex[] = {
+       MLXSW_AFK_ELEMENT_INST_BUF(SRC_IP6_HI, 0x00, 8),
+};
+
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_packet_type[] = {
+       MLXSW_AFK_ELEMENT_INST_U32(ETHERTYPE, 0x00, 0, 16),
+};
+
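+/* The first argument of MLXSW_AFK_BLOCK() is the hardware encoding of
+ * the key block, later programmed into the region via PTAR.
+ */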
+static const struct mlxsw_afk_block mlxsw_sp_afk_blocks[] = {
+       MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_l2_dmac),
+       MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_l2_smac),
+       MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_l2_smac_ex),
+       MLXSW_AFK_BLOCK(0x30, mlxsw_sp_afk_element_info_ipv4_sip),
+       MLXSW_AFK_BLOCK(0x31, mlxsw_sp_afk_element_info_ipv4_dip),
+       MLXSW_AFK_BLOCK(0x33, mlxsw_sp_afk_element_info_ipv4_ex),
+       MLXSW_AFK_BLOCK(0x60, mlxsw_sp_afk_element_info_ipv6_dip),
+       MLXSW_AFK_BLOCK(0x65, mlxsw_sp_afk_element_info_ipv6_ex1),
+       MLXSW_AFK_BLOCK(0x62, mlxsw_sp_afk_element_info_ipv6_sip),
+       MLXSW_AFK_BLOCK(0x63, mlxsw_sp_afk_element_info_ipv6_sip_ex),
+       MLXSW_AFK_BLOCK(0xB0, mlxsw_sp_afk_element_info_packet_type),
+};
+
+#define MLXSW_SP_AFK_BLOCKS_COUNT ARRAY_SIZE(mlxsw_sp_afk_blocks)
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
new file mode 100644 (file)
index 0000000..7382832
--- /dev/null
@@ -0,0 +1,1084 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/rhashtable.h>
+#include <linux/netdevice.h>
+#include <linux/parman.h>
+
+#include "reg.h"
+#include "core.h"
+#include "resources.h"
+#include "spectrum.h"
+#include "core_acl_flex_keys.h"
+
+struct mlxsw_sp_acl_tcam {
+       unsigned long *used_regions; /* bit array */
+       unsigned int max_regions;
+       unsigned long *used_groups;  /* bit array */
+       unsigned int max_groups;
+       unsigned int max_group_size;
+};
+
+static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+       struct mlxsw_sp_acl_tcam *tcam = priv;
+       u64 max_tcam_regions;
+       u64 max_regions;
+       u64 max_groups;
+       size_t alloc_size;
+       int err;
+
+       max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+                                             ACL_MAX_TCAM_REGIONS);
+       max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
+
+       /* Use 1:1 mapping between ACL region and TCAM region */
+       if (max_tcam_regions < max_regions)
+               max_regions = max_tcam_regions;
+
+       alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
+       tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
+       if (!tcam->used_regions)
+               return -ENOMEM;
+       tcam->max_regions = max_regions;
+
+       max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
+       alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
+       tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
+       if (!tcam->used_groups) {
+               err = -ENOMEM;
+               goto err_alloc_used_groups;
+       }
+       tcam->max_groups = max_groups;
+       tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+                                                 ACL_MAX_GROUP_SIZE);
+       return 0;
+
+err_alloc_used_groups:
+       kfree(tcam->used_regions);
+       return err;
+}
+
+static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
+{
+       struct mlxsw_sp_acl_tcam *tcam = priv;
+
+       kfree(tcam->used_groups);
+       kfree(tcam->used_regions);
+}
+
+static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
+                                          u16 *p_id)
+{
+       u16 id;
+
+       id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
+       if (id < tcam->max_regions) {
+               __set_bit(id, tcam->used_regions);
+               *p_id = id;
+               return 0;
+       }
+       return -ENOBUFS;
+}
+
+static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
+                                           u16 id)
+{
+       __clear_bit(id, tcam->used_regions);
+}
+
+static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
+                                         u16 *p_id)
+{
+       u16 id;
+
+       id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
+       if (id < tcam->max_groups) {
+               __set_bit(id, tcam->used_groups);
+               *p_id = id;
+               return 0;
+       }
+       return -ENOBUFS;
+}
+
+static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
+                                          u16 id)
+{
+       __clear_bit(id, tcam->used_groups);
+}
+
+struct mlxsw_sp_acl_tcam_pattern {
+       const enum mlxsw_afk_element *elements;
+       unsigned int elements_count;
+};
+
+struct mlxsw_sp_acl_tcam_group {
+       struct mlxsw_sp_acl_tcam *tcam;
+       u16 id;
+       struct list_head region_list;
+       unsigned int region_count;
+       struct rhashtable chunk_ht;
+       struct {
+               u16 local_port;
+               bool ingress;
+       } bound;
+       struct mlxsw_sp_acl_tcam_group_ops *ops;
+       const struct mlxsw_sp_acl_tcam_pattern *patterns;
+       unsigned int patterns_count;
+};
+
+struct mlxsw_sp_acl_tcam_region {
+       struct list_head list; /* Member of a TCAM group */
+       struct list_head chunk_list; /* List of chunks under this region */
+       struct parman *parman;
+       struct mlxsw_sp *mlxsw_sp;
+       struct mlxsw_sp_acl_tcam_group *group;
+       u16 id; /* ACL ID and region ID - they are the same */
+       char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
+       struct mlxsw_afk_key_info *key_info;
+       struct {
+               struct parman_prio parman_prio;
+               struct parman_item parman_item;
+               struct mlxsw_sp_acl_rule_info *rulei;
+       } catchall;
+};
+
+struct mlxsw_sp_acl_tcam_chunk {
+       struct list_head list; /* Member of a TCAM region */
+       struct rhash_head ht_node; /* Member of a chunk HT */
+       unsigned int priority; /* Priority within the region and group */
+       struct parman_prio parman_prio;
+       struct mlxsw_sp_acl_tcam_group *group;
+       struct mlxsw_sp_acl_tcam_region *region;
+       unsigned int ref_count;
+};
+
+struct mlxsw_sp_acl_tcam_entry {
+       struct parman_item parman_item;
+       struct mlxsw_sp_acl_tcam_chunk *chunk;
+};
+
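+/* Chunks are hashed by priority, so all rules that share a priority
+ * within one group map to the same chunk (and thus the same region).
+ */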
+static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
+       .key_len = sizeof(unsigned int),
+       .key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority),
+       .head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node),
+       .automatic_shrinking = true,
+};
+
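+/* Program the group's ordered region list into the device: the PAGT
+ * register takes the group id followed by the region ids in lookup
+ * precedence order.
+ */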
+static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
+                                         struct mlxsw_sp_acl_tcam_group *group)
+{
+       struct mlxsw_sp_acl_tcam_region *region;
+       char pagt_pl[MLXSW_REG_PAGT_LEN];
+       int acl_index = 0;
+
+       mlxsw_reg_pagt_pack(pagt_pl, group->id);
+       list_for_each_entry(region, &group->region_list, list)
+               mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id);
+       mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
+}
+
+static int
+mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
+                           struct mlxsw_sp_acl_tcam *tcam,
+                           struct mlxsw_sp_acl_tcam_group *group,
+                           const struct mlxsw_sp_acl_tcam_pattern *patterns,
+                           unsigned int patterns_count)
+{
+       int err;
+
+       group->tcam = tcam;
+       group->patterns = patterns;
+       group->patterns_count = patterns_count;
+       INIT_LIST_HEAD(&group->region_list);
+       err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
+       if (err)
+               return err;
+
+       err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+       if (err)
+               goto err_group_update;
+
+       err = rhashtable_init(&group->chunk_ht,
+                             &mlxsw_sp_acl_tcam_chunk_ht_params);
+       if (err)
+               goto err_rhashtable_init;
+
+       return 0;
+
+err_rhashtable_init:
+err_group_update:
+       mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
+       return err;
+}
+
+static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
+                                       struct mlxsw_sp_acl_tcam_group *group)
+{
+       struct mlxsw_sp_acl_tcam *tcam = group->tcam;
+
+       rhashtable_destroy(&group->chunk_ht);
+       mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
+       WARN_ON(!list_empty(&group->region_list));
+}
+
+static int
+mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
+                            struct mlxsw_sp_acl_tcam_group *group,
+                            struct net_device *dev, bool ingress)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port;
+       char ppbt_pl[MLXSW_REG_PPBT_LEN];
+
+       if (!mlxsw_sp_port_dev_check(dev))
+               return -EINVAL;
+
+       mlxsw_sp_port = netdev_priv(dev);
+       group->bound.local_port = mlxsw_sp_port->local_port;
+       group->bound.ingress = ingress;
+       mlxsw_reg_ppbt_pack(ppbt_pl,
+                           group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
+                                                  MLXSW_REG_PXBT_E_EACL,
+                           MLXSW_REG_PXBT_OP_BIND, group->bound.local_port,
+                           group->id);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
+}
+
+static void
+mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
+                              struct mlxsw_sp_acl_tcam_group *group)
+{
+       char ppbt_pl[MLXSW_REG_PPBT_LEN];
+
+       mlxsw_reg_ppbt_pack(ppbt_pl,
+                           group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
+                                                  MLXSW_REG_PXBT_E_EACL,
+                           MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port,
+                           group->id);
+       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
+}
+
+static unsigned int
+mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
+{
+       struct mlxsw_sp_acl_tcam_chunk *chunk;
+
+       if (list_empty(&region->chunk_list))
+               return 0;
+       /* As the region's priority, return the priority of its first chunk */
+       chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list);
+       return chunk->priority;
+}
+
+static unsigned int
+mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region)
+{
+       struct mlxsw_sp_acl_tcam_chunk *chunk;
+
+       if (list_empty(&region->chunk_list))
+               return 0;
+       chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list);
+       return chunk->priority;
+}
+
+static void
+mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group,
+                                struct mlxsw_sp_acl_tcam_region *region)
+{
+       struct mlxsw_sp_acl_tcam_region *region2;
+       struct list_head *pos;
+
+       /* Position the region inside the list according to priority */
+       list_for_each(pos, &group->region_list) {
+               region2 = list_entry(pos, typeof(*region2), list);
+               if (mlxsw_sp_acl_tcam_region_prio(region2) >
+                   mlxsw_sp_acl_tcam_region_prio(region))
+                       break;
+       }
+       list_add_tail(&region->list, pos);
+       group->region_count++;
+}
+
+static void
+mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group,
+                                struct mlxsw_sp_acl_tcam_region *region)
+{
+       group->region_count--;
+       list_del(&region->list);
+}
+
+static int
+mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
+                                     struct mlxsw_sp_acl_tcam_group *group,
+                                     struct mlxsw_sp_acl_tcam_region *region)
+{
+       int err;
+
+       if (group->region_count == group->tcam->max_group_size)
+               return -ENOBUFS;
+
+       mlxsw_sp_acl_tcam_group_list_add(group, region);
+
+       err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+       if (err)
+               goto err_group_update;
+       region->group = group;
+
+       return 0;
+
+err_group_update:
+       mlxsw_sp_acl_tcam_group_list_del(group, region);
+       mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+       return err;
+}
+
+static void
+mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
+                                     struct mlxsw_sp_acl_tcam_region *region)
+{
+       struct mlxsw_sp_acl_tcam_group *group = region->group;
+
+       mlxsw_sp_acl_tcam_group_list_del(group, region);
+       mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+}
+
+static struct mlxsw_sp_acl_tcam_region *
+mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group,
+                                   unsigned int priority,
+                                   struct mlxsw_afk_element_usage *elusage,
+                                   bool *p_need_split)
+{
+       struct mlxsw_sp_acl_tcam_region *region, *region2;
+       struct list_head *pos;
+       bool issubset;
+
+       list_for_each(pos, &group->region_list) {
+               region = list_entry(pos, typeof(*region), list);
+
+               /* First, check whether the requested priority actually
+                * belongs under one of the following regions.
+                */
+               if (pos->next != &group->region_list) { /* not last */
+                       region2 = list_entry(pos->next, typeof(*region2), list);
+                       if (priority >= mlxsw_sp_acl_tcam_region_prio(region2))
+                               continue;
+               }
+
+               issubset = mlxsw_afk_key_info_subset(region->key_info, elusage);
+
+               /* If the requested element usage would not fit and the
+                * priority is lower than that of the currently inspected
+                * region, we cannot use this region; return NULL to indicate
+                * that a new region has to be created.
+                */
+               if (!issubset &&
+                   priority < mlxsw_sp_acl_tcam_region_prio(region))
+                       return NULL;
+
+               /* If the requested element usage would not fit and the
+                * priority is higher than that of the currently inspected
+                * region, we cannot use this region either. There is still
+                * hope that the next region might fit, so let it be processed
+                * and eventually break at the check above.
+                */
+               if (!issubset &&
+                   priority > mlxsw_sp_acl_tcam_region_max_prio(region))
+                       continue;
+
+               /* Indicate if the region needs to be split in order to add
+                * the requested priority. Split is needed when requested
+                * element usage won't fit into the found region.
+                */
+               *p_need_split = !issubset;
+               return region;
+       }
+       return NULL; /* New region has to be created. */
+}
+
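+/* Pick the first predefined pattern that is a superset of the requested
+ * element usage; if none covers it, fall back to the exact usage. The
+ * patterns keep region keys uniform so that similar rules can share a
+ * region.
+ */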
+static void
+mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
+                                    struct mlxsw_afk_element_usage *elusage,
+                                    struct mlxsw_afk_element_usage *out)
+{
+       const struct mlxsw_sp_acl_tcam_pattern *pattern;
+       int i;
+
+       for (i = 0; i < group->patterns_count; i++) {
+               pattern = &group->patterns[i];
+               mlxsw_afk_element_usage_fill(out, pattern->elements,
+                                            pattern->elements_count);
+               if (mlxsw_afk_element_usage_subset(elusage, out))
+                       return;
+       }
+       memcpy(out, elusage, sizeof(*out));
+}
+
+#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
+#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
+
+static int
+mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
+                              struct mlxsw_sp_acl_tcam_region *region)
+{
+       struct mlxsw_afk_key_info *key_info = region->key_info;
+       char ptar_pl[MLXSW_REG_PTAR_LEN];
+       unsigned int encodings_count;
+       int i;
+       int err;
+
+       mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
+                           MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
+                           region->id, region->tcam_region_info);
+       encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
+       for (i = 0; i < encodings_count; i++) {
+               u16 encoding;
+
+               encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
+               mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
+       }
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
+       if (err)
+               return err;
+       mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
+       return 0;
+}
+
+static void
+mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
+                             struct mlxsw_sp_acl_tcam_region *region)
+{
+       char ptar_pl[MLXSW_REG_PTAR_LEN];
+
+       mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id,
+                           region->tcam_region_info);
+       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
+}
+
+static int
+mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp,
+                               struct mlxsw_sp_acl_tcam_region *region,
+                               u16 new_size)
+{
+       char ptar_pl[MLXSW_REG_PTAR_LEN];
+
+       mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
+                           new_size, region->id, region->tcam_region_info);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
+}
+
+static int
+mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
+                               struct mlxsw_sp_acl_tcam_region *region)
+{
+       char pacl_pl[MLXSW_REG_PACL_LEN];
+
+       mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
+                           region->tcam_region_info);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
+}
+
+static void
+mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
+                                struct mlxsw_sp_acl_tcam_region *region)
+{
+       char pacl_pl[MLXSW_REG_PACL_LEN];
+
+       mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
+                           region->tcam_region_info);
+       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
+}
+
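+/* Write a single TCAM entry at the given offset: the encoded key and
+ * mask blocks plus the first (inline) action set, via PTCE2.
+ */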
+static int
+mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
+                                     struct mlxsw_sp_acl_tcam_region *region,
+                                     unsigned int offset,
+                                     struct mlxsw_sp_acl_rule_info *rulei)
+{
+       char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+       char *act_set;
+       char *mask;
+       char *key;
+
+       mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+                            region->tcam_region_info, offset);
+       key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
+       mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
+       mlxsw_afk_encode(region->key_info, &rulei->values, key, mask);
+
+       /* Only the first action set belongs here, the rest is in KVD */
+       act_set = mlxsw_afa_block_first_set(rulei->act_block);
+       mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+}
+
+static void
+mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
+                                     struct mlxsw_sp_acl_tcam_region *region,
+                                     unsigned int offset)
+{
+       char ptce2_pl[MLXSW_REG_PTCE2_LEN];
+
+       mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
+                            region->tcam_region_info, offset);
+       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+}
+
+#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)
+
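+/* Each region is terminated by a catch-all entry at the lowest
+ * precedence (highest priority value) whose sole action is "continue",
+ * so lookups that match no rule in the region fall through cleanly.
+ */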
+static int
+mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
+                                     struct mlxsw_sp_acl_tcam_region *region)
+{
+       struct parman_prio *parman_prio = &region->catchall.parman_prio;
+       struct parman_item *parman_item = &region->catchall.parman_item;
+       struct mlxsw_sp_acl_rule_info *rulei;
+       int err;
+
+       parman_prio_init(region->parman, parman_prio,
+                        MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
+       err = parman_item_add(region->parman, parman_prio, parman_item);
+       if (err)
+               goto err_parman_item_add;
+
+       rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
+       if (IS_ERR(rulei)) {
+               err = PTR_ERR(rulei);
+               goto err_rulei_create;
+       }
+
+       mlxsw_sp_acl_rulei_act_continue(rulei);
+       err = mlxsw_sp_acl_rulei_commit(rulei);
+       if (err)
+               goto err_rulei_commit;
+
+       err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
+                                                   parman_item->index, rulei);
+       if (err)
+               goto err_rule_insert;
+       region->catchall.rulei = rulei;
+
+       return 0;
+
+err_rule_insert:
+err_rulei_commit:
+       mlxsw_sp_acl_rulei_destroy(rulei);
+err_rulei_create:
+       parman_item_remove(region->parman, parman_prio, parman_item);
+err_parman_item_add:
+       parman_prio_fini(parman_prio);
+       return err;
+}
+
+static void
+mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
+                                     struct mlxsw_sp_acl_tcam_region *region)
+{
+       struct parman_prio *parman_prio = &region->catchall.parman_prio;
+       struct parman_item *parman_item = &region->catchall.parman_item;
+       struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;
+
+       mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
+                                             parman_item->index);
+       mlxsw_sp_acl_rulei_destroy(rulei);
+       parman_item_remove(region->parman, parman_prio, parman_item);
+       parman_prio_fini(parman_prio);
+}
+
+static void
+mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp,
+                             struct mlxsw_sp_acl_tcam_region *region,
+                             u16 src_offset, u16 dst_offset, u16 size)
+{
+       char prcr_pl[MLXSW_REG_PRCR_LEN];
+
+       mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
+                           region->tcam_region_info, src_offset,
+                           region->tcam_region_info, dst_offset, size);
+       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
+}
+
+static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv,
+                                                 unsigned long new_count)
+{
+       struct mlxsw_sp_acl_tcam_region *region = priv;
+       struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+       u64 max_tcam_rules;
+
+       max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
+       if (new_count > max_tcam_rules)
+               return -EINVAL;
+       return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count);
+}
+
+static void mlxsw_sp_acl_tcam_region_parman_move(void *priv,
+                                                unsigned long from_index,
+                                                unsigned long to_index,
+                                                unsigned long count)
+{
+       struct mlxsw_sp_acl_tcam_region *region = priv;
+       struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
+
+       mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region,
+                                     from_index, to_index, count);
+}
+
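+/* parman (the linear priority array manager) decides where entries live
+ * inside the region: resize grows the region in RESIZE_STEP increments
+ * via PTAR, and move relocates entries via PRCR when insertions require
+ * reshuffling. LSORT is a simple linear-sort placement algorithm.
+ */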
+static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = {
+       .base_count     = MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
+       .resize_step    = MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
+       .resize         = mlxsw_sp_acl_tcam_region_parman_resize,
+       .move           = mlxsw_sp_acl_tcam_region_parman_move,
+       .algo           = PARMAN_ALGO_TYPE_LSORT,
+};
+
+static struct mlxsw_sp_acl_tcam_region *
+mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
+                               struct mlxsw_sp_acl_tcam *tcam,
+                               struct mlxsw_afk_element_usage *elusage)
+{
+       struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+       struct mlxsw_sp_acl_tcam_region *region;
+       int err;
+
+       region = kzalloc(sizeof(*region), GFP_KERNEL);
+       if (!region)
+               return ERR_PTR(-ENOMEM);
+       INIT_LIST_HEAD(&region->chunk_list);
+       region->mlxsw_sp = mlxsw_sp;
+
+       region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops,
+                                      region);
+       if (!region->parman) {
+               err = -ENOMEM;
+               goto err_parman_create;
+       }
+
+       region->key_info = mlxsw_afk_key_info_get(afk, elusage);
+       if (IS_ERR(region->key_info)) {
+               err = PTR_ERR(region->key_info);
+               goto err_key_info_get;
+       }
+
+       err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
+       if (err)
+               goto err_region_id_get;
+
+       err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
+       if (err)
+               goto err_tcam_region_alloc;
+
+       err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
+       if (err)
+               goto err_tcam_region_enable;
+
+       err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region);
+       if (err)
+               goto err_tcam_region_catchall_add;
+
+       return region;
+
+err_tcam_region_catchall_add:
+       mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
+err_tcam_region_enable:
+       mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
+err_tcam_region_alloc:
+       mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
+err_region_id_get:
+       mlxsw_afk_key_info_put(region->key_info);
+err_key_info_get:
+       parman_destroy(region->parman);
+err_parman_create:
+       kfree(region);
+       return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
+                                struct mlxsw_sp_acl_tcam_region *region)
+{
+       mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region);
+       mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
+       mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
+       mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
+       mlxsw_afk_key_info_put(region->key_info);
+       parman_destroy(region->parman);
+       kfree(region);
+}
+
+static int
+mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp,
+                             struct mlxsw_sp_acl_tcam_group *group,
+                             unsigned int priority,
+                             struct mlxsw_afk_element_usage *elusage,
+                             struct mlxsw_sp_acl_tcam_chunk *chunk)
+{
+       struct mlxsw_sp_acl_tcam_region *region;
+       bool region_created = false;
+       bool need_split;
+       int err;
+
+       region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage,
+                                                    &need_split);
+       if (region && need_split) {
+               /* According to priority, the chunk should belong to an
+                * existing region. However, this chunk needs elements that
+                * the region does not contain. We would need to split the
+                * existing region into two and create a new region for this
+                * chunk in between. This is not currently supported.
+                */
+               return -EOPNOTSUPP;
+       }
+       if (!region) {
+               struct mlxsw_afk_element_usage region_elusage;
+
+               mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
+                                                    &region_elusage);
+               region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam,
+                                                        &region_elusage);
+               if (IS_ERR(region))
+                       return PTR_ERR(region);
+               region_created = true;
+       }
+
+       chunk->region = region;
+       list_add_tail(&chunk->list, &region->chunk_list);
+
+       if (!region_created)
+               return 0;
+
+       err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region);
+       if (err)
+               goto err_group_region_attach;
+
+       return 0;
+
+err_group_region_attach:
+       mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
+       return err;
+}
+
+static void
+mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp,
+                               struct mlxsw_sp_acl_tcam_chunk *chunk)
+{
+       struct mlxsw_sp_acl_tcam_region *region = chunk->region;
+
+       list_del(&chunk->list);
+       if (list_empty(&region->chunk_list)) {
+               mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region);
+               mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
+       }
+}
+
+static struct mlxsw_sp_acl_tcam_chunk *
+mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
+                              struct mlxsw_sp_acl_tcam_group *group,
+                              unsigned int priority,
+                              struct mlxsw_afk_element_usage *elusage)
+{
+       struct mlxsw_sp_acl_tcam_chunk *chunk;
+       int err;
+
+       if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
+               return ERR_PTR(-EINVAL);
+
+       chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+       if (!chunk)
+               return ERR_PTR(-ENOMEM);
+       chunk->priority = priority;
+       chunk->group = group;
+       chunk->ref_count = 1;
+
+       err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority,
+                                           elusage, chunk);
+       if (err)
+               goto err_chunk_assoc;
+
+       parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority);
+
+       err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
+                                    mlxsw_sp_acl_tcam_chunk_ht_params);
+       if (err)
+               goto err_rhashtable_insert;
+
+       return chunk;
+
+err_rhashtable_insert:
+       parman_prio_fini(&chunk->parman_prio);
+       mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
+err_chunk_assoc:
+       kfree(chunk);
+       return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
+                               struct mlxsw_sp_acl_tcam_chunk *chunk)
+{
+       struct mlxsw_sp_acl_tcam_group *group = chunk->group;
+
+       rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
+                              mlxsw_sp_acl_tcam_chunk_ht_params);
+       parman_prio_fini(&chunk->parman_prio);
+       mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
+       kfree(chunk);
+}
+
+static struct mlxsw_sp_acl_tcam_chunk *
+mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp,
+                           struct mlxsw_sp_acl_tcam_group *group,
+                           unsigned int priority,
+                           struct mlxsw_afk_element_usage *elusage)
+{
+       struct mlxsw_sp_acl_tcam_chunk *chunk;
+
+       chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority,
+                                      mlxsw_sp_acl_tcam_chunk_ht_params);
+       if (chunk) {
+               if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info,
+                                                      elusage)))
+                       return ERR_PTR(-EINVAL);
+               chunk->ref_count++;
+               return chunk;
+       }
+       return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group,
+                                             priority, elusage);
+}
+
+static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
+                                       struct mlxsw_sp_acl_tcam_chunk *chunk)
+{
+       if (--chunk->ref_count)
+               return;
+       mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
+}
+
+static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
+                                      struct mlxsw_sp_acl_tcam_group *group,
+                                      struct mlxsw_sp_acl_tcam_entry *entry,
+                                      struct mlxsw_sp_acl_rule_info *rulei)
+{
+       struct mlxsw_sp_acl_tcam_chunk *chunk;
+       struct mlxsw_sp_acl_tcam_region *region;
+       int err;
+
+       chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority,
+                                           &rulei->values.elusage);
+       if (IS_ERR(chunk))
+               return PTR_ERR(chunk);
+
+       region = chunk->region;
+       err = parman_item_add(region->parman, &chunk->parman_prio,
+                             &entry->parman_item);
+       if (err)
+               goto err_parman_item_add;
+
+       err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
+                                                   entry->parman_item.index,
+                                                   rulei);
+       if (err)
+               goto err_rule_insert;
+       entry->chunk = chunk;
+
+       return 0;
+
+err_rule_insert:
+       parman_item_remove(region->parman, &chunk->parman_prio,
+                          &entry->parman_item);
+err_parman_item_add:
+       mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
+       return err;
+}
+
+static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
+                                       struct mlxsw_sp_acl_tcam_entry *entry)
+{
+       struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
+       struct mlxsw_sp_acl_tcam_region *region = chunk->region;
+
+       mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
+                                             entry->parman_item.index);
+       parman_item_remove(region->parman, &chunk->parman_prio,
+                          &entry->parman_item);
+       mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
+}
+
+static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
+       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
+       MLXSW_AFK_ELEMENT_DMAC,
+       MLXSW_AFK_ELEMENT_SMAC,
+       MLXSW_AFK_ELEMENT_ETHERTYPE,
+       MLXSW_AFK_ELEMENT_IP_PROTO,
+       MLXSW_AFK_ELEMENT_SRC_IP4,
+       MLXSW_AFK_ELEMENT_DST_IP4,
+       MLXSW_AFK_ELEMENT_DST_L4_PORT,
+       MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+};
+
+static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
+       MLXSW_AFK_ELEMENT_ETHERTYPE,
+       MLXSW_AFK_ELEMENT_IP_PROTO,
+       MLXSW_AFK_ELEMENT_SRC_IP6_HI,
+       MLXSW_AFK_ELEMENT_SRC_IP6_LO,
+       MLXSW_AFK_ELEMENT_DST_IP6_HI,
+       MLXSW_AFK_ELEMENT_DST_IP6_LO,
+       MLXSW_AFK_ELEMENT_DST_L4_PORT,
+       MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+};
+
+static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
+       {
+               .elements = mlxsw_sp_acl_tcam_pattern_ipv4,
+               .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
+       },
+       {
+               .elements = mlxsw_sp_acl_tcam_pattern_ipv6,
+               .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
+       },
+};
+
+#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
+       ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
+
+struct mlxsw_sp_acl_tcam_flower_ruleset {
+       struct mlxsw_sp_acl_tcam_group group;
+};
+
+struct mlxsw_sp_acl_tcam_flower_rule {
+       struct mlxsw_sp_acl_tcam_entry entry;
+};
+
+static int
+mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
+                                    void *priv, void *ruleset_priv)
+{
+       struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
+       struct mlxsw_sp_acl_tcam *tcam = priv;
+
+       return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
+                                          mlxsw_sp_acl_tcam_patterns,
+                                          MLXSW_SP_ACL_TCAM_PATTERNS_COUNT);
+}
+
+static void
+mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
+                                    void *ruleset_priv)
+{
+       struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
+
+       mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
+}
+
+static int
+mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
+                                     void *ruleset_priv,
+                                     struct net_device *dev, bool ingress)
+{
+       struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
+
+       return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
+                                           dev, ingress);
+}
+
+static void
+mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+                                       void *ruleset_priv)
+{
+       struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
+
+       mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group);
+}
+
+static int
+mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
+                                 void *ruleset_priv, void *rule_priv,
+                                 struct mlxsw_sp_acl_rule_info *rulei)
+{
+       struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
+       struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
+
+       return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
+                                          &rule->entry, rulei);
+}
+
+static void
+mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
+{
+       struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
+
+       mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
+}
+
+static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
+       .ruleset_priv_size      = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
+       .ruleset_add            = mlxsw_sp_acl_tcam_flower_ruleset_add,
+       .ruleset_del            = mlxsw_sp_acl_tcam_flower_ruleset_del,
+       .ruleset_bind           = mlxsw_sp_acl_tcam_flower_ruleset_bind,
+       .ruleset_unbind         = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
+       .rule_priv_size         = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
+       .rule_add               = mlxsw_sp_acl_tcam_flower_rule_add,
+       .rule_del               = mlxsw_sp_acl_tcam_flower_rule_del,
+};
+
+static const struct mlxsw_sp_acl_profile_ops *
+mlxsw_sp_acl_tcam_profile_ops_arr[] = {
+       [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
+};
+
+static const struct mlxsw_sp_acl_profile_ops *
+mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
+                             enum mlxsw_sp_acl_profile profile)
+{
+       const struct mlxsw_sp_acl_profile_ops *ops;
+
+       if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
+               return NULL;
+       ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
+       if (WARN_ON(!ops))
+               return NULL;
+       return ops;
+}
+
+const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = {
+       .priv_size              = sizeof(struct mlxsw_sp_acl_tcam),
+       .init                   = mlxsw_sp_acl_tcam_init,
+       .fini                   = mlxsw_sp_acl_tcam_fini,
+       .profile_ops            = mlxsw_sp_acl_tcam_profile_ops,
+};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
new file mode 100644 (file)
index 0000000..22ab429
--- /dev/null
@@ -0,0 +1,316 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/flow_dissector.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
+
+#include "spectrum.h"
+#include "core_acl_flex_keys.h"
+
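+/* Map offloaded tc actions onto ACL actions: a gact "shot" (drop)
+ * becomes a hardware drop and a mirred egress redirect becomes a
+ * forward to the target port; any other action is rejected.
+ */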
+static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
+                                        struct net_device *dev,
+                                        struct mlxsw_sp_acl_rule_info *rulei,
+                                        struct tcf_exts *exts)
+{
+       const struct tc_action *a;
+       LIST_HEAD(actions);
+       int err;
+
+       if (tc_no_actions(exts))
+               return 0;
+
+       tcf_exts_to_list(exts, &actions);
+       list_for_each_entry(a, &actions, list) {
+               if (is_tcf_gact_shot(a)) {
+                       err = mlxsw_sp_acl_rulei_act_drop(rulei);
+                       if (err)
+                               return err;
+               } else if (is_tcf_mirred_egress_redirect(a)) {
+                       int ifindex = tcf_mirred_ifindex(a);
+                       struct net_device *out_dev;
+
+                       out_dev = __dev_get_by_index(dev_net(dev), ifindex);
+                       if (out_dev == dev)
+                               out_dev = NULL;
+
+                       err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
+                                                        out_dev);
+                       if (err)
+                               return err;
+               } else {
+                       dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
+                       return -EOPNOTSUPP;
+               }
+       }
+       return 0;
+}
+
+static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
+                                      struct tc_cls_flower_offload *f)
+{
+       struct flow_dissector_key_ipv4_addrs *key =
+               skb_flow_dissector_target(f->dissector,
+                                         FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+                                         f->key);
+       struct flow_dissector_key_ipv4_addrs *mask =
+               skb_flow_dissector_target(f->dissector,
+                                         FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+                                         f->mask);
+
+       mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4,
+                                      ntohl(key->src), ntohl(mask->src));
+       mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4,
+                                      ntohl(key->dst), ntohl(mask->dst));
+}
+
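+/* The flexible key carries each IPv6 address as two 64-bit halves
+ * (the _IP6_HI and _IP6_LO elements), so split the 128-bit address at
+ * sizeof(key->src) / 2.
+ */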
+static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
+                                      struct tc_cls_flower_offload *f)
+{
+       struct flow_dissector_key_ipv6_addrs *key =
+               skb_flow_dissector_target(f->dissector,
+                                         FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+                                         f->key);
+       struct flow_dissector_key_ipv6_addrs *mask =
+               skb_flow_dissector_target(f->dissector,
+                                         FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+                                         f->mask);
+       size_t addr_half_size = sizeof(key->src) / 2;
+
+       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI,
+                                      &key->src.s6_addr[0],
+                                      &mask->src.s6_addr[0],
+                                      addr_half_size);
+       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO,
+                                      &key->src.s6_addr[addr_half_size],
+                                      &mask->src.s6_addr[addr_half_size],
+                                      addr_half_size);
+       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI,
+                                      &key->dst.s6_addr[0],
+                                      &mask->dst.s6_addr[0],
+                                      addr_half_size);
+       mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO,
+                                      &key->dst.s6_addr[addr_half_size],
+                                      &mask->dst.s6_addr[addr_half_size],
+                                      addr_half_size);
+}
+
+static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
+                                      struct mlxsw_sp_acl_rule_info *rulei,
+                                      struct tc_cls_flower_offload *f,
+                                      u8 ip_proto)
+{
+       struct flow_dissector_key_ports *key, *mask;
+
+       if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
+               return 0;
+
+       if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+               dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
+               return -EINVAL;
+       }
+
+       key = skb_flow_dissector_target(f->dissector,
+                                       FLOW_DISSECTOR_KEY_PORTS,
+                                       f->key);
+       mask = skb_flow_dissector_target(f->dissector,
+                                        FLOW_DISSECTOR_KEY_PORTS,
+                                        f->mask);
+       mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
+                                      ntohs(key->dst), ntohs(mask->dst));
+       mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
+                                      ntohs(key->src), ntohs(mask->src));
+       return 0;
+}
+
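+/* Reject filters using dissector keys that cannot be expressed in the
+ * flexible key, then translate each supported key and its mask into
+ * rule info, finishing with the actions.
+ */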
+static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
+                                struct net_device *dev,
+                                struct mlxsw_sp_acl_rule_info *rulei,
+                                struct tc_cls_flower_offload *f)
+{
+       u16 addr_type = 0;
+       u8 ip_proto = 0;
+       int err;
+
+       if (f->dissector->used_keys &
+           ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+             BIT(FLOW_DISSECTOR_KEY_BASIC) |
+             BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+             BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+             BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+             BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+               dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
+               return -EOPNOTSUPP;
+       }
+
+       mlxsw_sp_acl_rulei_priority(rulei, f->prio);
+
+       if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+               struct flow_dissector_key_control *key =
+                       skb_flow_dissector_target(f->dissector,
+                                                 FLOW_DISSECTOR_KEY_CONTROL,
+                                                 f->key);
+               addr_type = key->addr_type;
+       }
+
+       if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+               struct flow_dissector_key_basic *key =
+                       skb_flow_dissector_target(f->dissector,
+                                                 FLOW_DISSECTOR_KEY_BASIC,
+                                                 f->key);
+               struct flow_dissector_key_basic *mask =
+                       skb_flow_dissector_target(f->dissector,
+                                                 FLOW_DISSECTOR_KEY_BASIC,
+                                                 f->mask);
+               u16 n_proto_key = ntohs(key->n_proto);
+               u16 n_proto_mask = ntohs(mask->n_proto);
+
+               if (n_proto_key == ETH_P_ALL) {
+                       n_proto_key = 0;
+                       n_proto_mask = 0;
+               }
+               mlxsw_sp_acl_rulei_keymask_u32(rulei,
+                                              MLXSW_AFK_ELEMENT_ETHERTYPE,
+                                              n_proto_key, n_proto_mask);
+
+               ip_proto = key->ip_proto;
+               mlxsw_sp_acl_rulei_keymask_u32(rulei,
+                                              MLXSW_AFK_ELEMENT_IP_PROTO,
+                                              key->ip_proto, mask->ip_proto);
+       }
+
+       if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+               struct flow_dissector_key_eth_addrs *key =
+                       skb_flow_dissector_target(f->dissector,
+                                                 FLOW_DISSECTOR_KEY_ETH_ADDRS,
+                                                 f->key);
+               struct flow_dissector_key_eth_addrs *mask =
+                       skb_flow_dissector_target(f->dissector,
+                                                 FLOW_DISSECTOR_KEY_ETH_ADDRS,
+                                                 f->mask);
+
+               mlxsw_sp_acl_rulei_keymask_buf(rulei,
+                                              MLXSW_AFK_ELEMENT_DMAC,
+                                              key->dst, mask->dst,
+                                              sizeof(key->dst));
+               mlxsw_sp_acl_rulei_keymask_buf(rulei,
+                                              MLXSW_AFK_ELEMENT_SMAC,
+                                              key->src, mask->src,
+                                              sizeof(key->src));
+       }
+
+       if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
+               mlxsw_sp_flower_parse_ipv4(rulei, f);
+
+       if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
+               mlxsw_sp_flower_parse_ipv6(rulei, f);
+
+       err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
+       if (err)
+               return err;
+
+       return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts);
+}
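+
+/* The parse step thus rejects any dissector key the device cannot
+ * offload up front (-EOPNOTSUPP), translates each supported key into
+ * key/mask pairs on the rule info and finally hands f->exts to the
+ * action parser.
+ */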
+
+int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+                           __be16 protocol, struct tc_cls_flower_offload *f)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       struct net_device *dev = mlxsw_sp_port->dev;
+       struct mlxsw_sp_acl_rule_info *rulei;
+       struct mlxsw_sp_acl_ruleset *ruleset;
+       struct mlxsw_sp_acl_rule *rule;
+       int err;
+
+       ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
+                                          MLXSW_SP_ACL_PROFILE_FLOWER);
+       if (IS_ERR(ruleset))
+               return PTR_ERR(ruleset);
+
+       rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
+               goto err_rule_create;
+       }
+
+       rulei = mlxsw_sp_acl_rule_rulei(rule);
+       err = mlxsw_sp_flower_parse(mlxsw_sp, dev, rulei, f);
+       if (err)
+               goto err_flower_parse;
+
+       err = mlxsw_sp_acl_rulei_commit(rulei);
+       if (err)
+               goto err_rulei_commit;
+
+       err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
+       if (err)
+               goto err_rule_add;
+
+       mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+       return 0;
+
+err_rule_add:
+err_rulei_commit:
+err_flower_parse:
+       mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
+err_rule_create:
+       mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+       return err;
+}
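+
+/* The ruleset reference is dropped on success as well as on error:
+ * presumably the created rule holds its own reference, so the one
+ * taken by mlxsw_sp_acl_ruleset_get() only needs to live across rule
+ * setup.
+ */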
+
+void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
+                            struct tc_cls_flower_offload *f)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       struct mlxsw_sp_acl_ruleset *ruleset;
+       struct mlxsw_sp_acl_rule *rule;
+
+       ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
+                                          ingress,
+                                          MLXSW_SP_ACL_PROFILE_FLOWER);
+       if (WARN_ON(IS_ERR(ruleset)))
+               return;
+
+       rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
+       if (!WARN_ON(!rule)) {
+               mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
+               mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
+       }
+
+       mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
+}
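+
+/* Destroy mirrors replace: the rule is looked up by the flower cookie,
+ * removed from the device, freed, and the ruleset reference taken by
+ * the ruleset_get() above is dropped.
+ */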
index 9e494a446b7ea7812a409b87f92bde19a0355286..d7ac22d7f94029dc6f48d8746049f2014453b6a1 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/bitops.h>
 #include <linux/in6.h>
 #include <linux/notifier.h>
+#include <linux/inetdevice.h>
 #include <net/netevent.h>
 #include <net/neighbour.h>
 #include <net/arp.h>
@@ -108,7 +109,6 @@ mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
 }
 
 struct mlxsw_sp_fib_key {
-       struct net_device *dev;
        unsigned char addr[sizeof(struct in6_addr)];
        unsigned char prefix_len;
 };
@@ -121,95 +121,39 @@ enum mlxsw_sp_fib_entry_type {
 
 struct mlxsw_sp_nexthop_group;
 
-struct mlxsw_sp_fib_entry {
-       struct rhash_head ht_node;
+struct mlxsw_sp_fib_node {
+       struct list_head entry_list;
        struct list_head list;
+       struct rhash_head ht_node;
+       struct mlxsw_sp_vr *vr;
        struct mlxsw_sp_fib_key key;
+};
+
+struct mlxsw_sp_fib_entry_params {
+       u32 tb_id;
+       u32 prio;
+       u8 tos;
+       u8 type;
+};
+
+struct mlxsw_sp_fib_entry {
+       struct list_head list;
+       struct mlxsw_sp_fib_node *fib_node;
        enum mlxsw_sp_fib_entry_type type;
-       unsigned int ref_count;
-       u16 rif; /* used for action local */
-       struct mlxsw_sp_vr *vr;
-       struct fib_info *fi;
        struct list_head nexthop_group_node;
        struct mlxsw_sp_nexthop_group *nh_group;
+       struct mlxsw_sp_fib_entry_params params;
+       bool offloaded;
 };
 
 struct mlxsw_sp_fib {
        struct rhashtable ht;
-       struct list_head entry_list;
+       struct list_head node_list;
        unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
        struct mlxsw_sp_prefix_usage prefix_usage;
 };
 
-static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
-       .key_offset = offsetof(struct mlxsw_sp_fib_entry, key),
-       .head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node),
-       .key_len = sizeof(struct mlxsw_sp_fib_key),
-       .automatic_shrinking = true,
-};
-
-static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
-                                    struct mlxsw_sp_fib_entry *fib_entry)
-{
-       unsigned char prefix_len = fib_entry->key.prefix_len;
-       int err;
-
-       err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node,
-                                    mlxsw_sp_fib_ht_params);
-       if (err)
-               return err;
-       list_add_tail(&fib_entry->list, &fib->entry_list);
-       if (fib->prefix_ref_count[prefix_len]++ == 0)
-               mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
-       return 0;
-}
-
-static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
-                                     struct mlxsw_sp_fib_entry *fib_entry)
-{
-       unsigned char prefix_len = fib_entry->key.prefix_len;
-
-       if (--fib->prefix_ref_count[prefix_len] == 0)
-               mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
-       list_del(&fib_entry->list);
-       rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
-                              mlxsw_sp_fib_ht_params);
-}
-
-static struct mlxsw_sp_fib_entry *
-mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
-                         size_t addr_len, unsigned char prefix_len,
-                         struct net_device *dev)
-{
-       struct mlxsw_sp_fib_entry *fib_entry;
-
-       fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
-       if (!fib_entry)
-               return NULL;
-       fib_entry->key.dev = dev;
-       memcpy(fib_entry->key.addr, addr, addr_len);
-       fib_entry->key.prefix_len = prefix_len;
-       return fib_entry;
-}
-
-static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
-{
-       kfree(fib_entry);
-}
-
-static struct mlxsw_sp_fib_entry *
-mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
-                         size_t addr_len, unsigned char prefix_len,
-                         struct net_device *dev)
-{
-       struct mlxsw_sp_fib_key key;
-
-       memset(&key, 0, sizeof(key));
-       key.dev = dev;
-       memcpy(key.addr, addr, addr_len);
-       key.prefix_len = prefix_len;
-       return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
-}
+static const struct rhashtable_params mlxsw_sp_fib_ht_params;
 
 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
 {
@@ -222,7 +166,7 @@ static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
        err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
        if (err)
                goto err_rhashtable_init;
-       INIT_LIST_HEAD(&fib->entry_list);
+       INIT_LIST_HEAD(&fib->node_list);
        return fib;
 
 err_rhashtable_init:
@@ -232,6 +176,7 @@ err_rhashtable_init:
 
 static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
 {
+       WARN_ON(!list_empty(&fib->node_list));
        rhashtable_destroy(&fib->ht);
        kfree(fib);
 }
@@ -610,12 +555,11 @@ struct mlxsw_sp_neigh_key {
 };
 
 struct mlxsw_sp_neigh_entry {
+       struct list_head rif_list_node;
        struct rhash_head ht_node;
        struct mlxsw_sp_neigh_key key;
        u16 rif;
-       bool offloaded;
-       struct delayed_work dw;
-       struct mlxsw_sp_port *mlxsw_sp_port;
+       bool connected;
        unsigned char ha[ETH_ALEN];
        struct list_head nexthop_list; /* list of nexthops using
                                        * this neigh entry
@@ -629,105 +573,91 @@ static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
        .key_len = sizeof(struct mlxsw_sp_neigh_key),
 };
 
-static int
-mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
-                           struct mlxsw_sp_neigh_entry *neigh_entry)
-{
-       return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
-                                     &neigh_entry->ht_node,
-                                     mlxsw_sp_neigh_ht_params);
-}
-
-static void
-mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
-                           struct mlxsw_sp_neigh_entry *neigh_entry)
-{
-       rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
-                              &neigh_entry->ht_node,
-                              mlxsw_sp_neigh_ht_params);
-}
-
-static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work);
-
 static struct mlxsw_sp_neigh_entry *
-mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif)
+mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
+                          u16 rif)
 {
        struct mlxsw_sp_neigh_entry *neigh_entry;
 
-       neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC);
+       neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
        if (!neigh_entry)
                return NULL;
+
        neigh_entry->key.n = n;
        neigh_entry->rif = rif;
-       INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw);
        INIT_LIST_HEAD(&neigh_entry->nexthop_list);
+
        return neigh_entry;
 }
 
-static void
-mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry)
+static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
 {
        kfree(neigh_entry);
 }
 
-static struct mlxsw_sp_neigh_entry *
-mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
+static int
+mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
+                           struct mlxsw_sp_neigh_entry *neigh_entry)
 {
-       struct mlxsw_sp_neigh_key key;
+       return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
+                                     &neigh_entry->ht_node,
+                                     mlxsw_sp_neigh_ht_params);
+}
 
-       key.n = n;
-       return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
-                                     &key, mlxsw_sp_neigh_ht_params);
+static void
+mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
+                           struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+       rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
+                              &neigh_entry->ht_node,
+                              mlxsw_sp_neigh_ht_params);
 }
 
-int mlxsw_sp_router_neigh_construct(struct net_device *dev,
-                                   struct neighbour *n)
+static struct mlxsw_sp_neigh_entry *
+mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
 {
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_neigh_entry *neigh_entry;
        struct mlxsw_sp_rif *r;
        int err;
 
-       if (n->tbl != &arp_tbl)
-               return 0;
-
-       neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
-       if (neigh_entry)
-               return 0;
-
        r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
-       if (WARN_ON(!r))
-               return -EINVAL;
+       if (!r)
+               return ERR_PTR(-EINVAL);
 
-       neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif);
+       neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, r->rif);
        if (!neigh_entry)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
+
        err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
        if (err)
                goto err_neigh_entry_insert;
-       return 0;
+
+       list_add(&neigh_entry->rif_list_node, &r->neigh_list);
+
+       return neigh_entry;
 
 err_neigh_entry_insert:
-       mlxsw_sp_neigh_entry_destroy(neigh_entry);
-       return err;
+       mlxsw_sp_neigh_entry_free(neigh_entry);
+       return ERR_PTR(err);
 }
 
-void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
-                                  struct neighbour *n)
+static void
+mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+                            struct mlxsw_sp_neigh_entry *neigh_entry)
 {
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       struct mlxsw_sp_neigh_entry *neigh_entry;
+       list_del(&neigh_entry->rif_list_node);
+       mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
+       mlxsw_sp_neigh_entry_free(neigh_entry);
+}
 
-       if (n->tbl != &arp_tbl)
-               return;
+static struct mlxsw_sp_neigh_entry *
+mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
+{
+       struct mlxsw_sp_neigh_key key;
 
-       neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
-       if (!neigh_entry)
-               return;
-       mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
-       mlxsw_sp_neigh_entry_destroy(neigh_entry);
+       key.n = n;
+       return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
+                                     &key, mlxsw_sp_neigh_ht_params);
 }
 
 static void
@@ -866,13 +796,11 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
        /* Take the RTNL mutex here to prevent the lists from changing */
        rtnl_lock();
        list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
-                           nexthop_neighs_list_node) {
+                           nexthop_neighs_list_node)
                /* If this neigh has nexthops, make the kernel think it
                 * is active regardless of the traffic.
                 */
-               if (!list_empty(&neigh_entry->nexthop_list))
-                       neigh_event_send(neigh_entry->key.n, NULL);
-       }
+               neigh_event_send(neigh_entry->key.n, NULL);
        rtnl_unlock();
 }
 
@@ -916,11 +844,9 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
         */
        rtnl_lock();
        list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
-                           nexthop_neighs_list_node) {
-               if (!(neigh_entry->key.n->nud_state & NUD_VALID) &&
-                   !list_empty(&neigh_entry->nexthop_list))
+                           nexthop_neighs_list_node)
+               if (!neigh_entry->connected)
                        neigh_event_send(neigh_entry->key.n, NULL);
-       }
        rtnl_unlock();
 
        mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
@@ -932,79 +858,101 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_neigh_entry *neigh_entry,
                              bool removing);
 
-static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
+static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
+{
+       return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
+                       MLXSW_REG_RAUHT_OP_WRITE_DELETE;
+}
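+
+/* RAUHT programs unicast host (neighbour) entries in the device; the
+ * helper above just maps add/remove onto the corresponding write op.
+ */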
+
+static void
+mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
+                               struct mlxsw_sp_neigh_entry *neigh_entry,
+                               enum mlxsw_reg_rauht_op op)
 {
-       struct mlxsw_sp_neigh_entry *neigh_entry =
-               container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
        struct neighbour *n = neigh_entry->key.n;
-       struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       u32 dip = ntohl(*((__be32 *) n->primary_key));
        char rauht_pl[MLXSW_REG_RAUHT_LEN];
-       struct net_device *dev;
+
+       mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
+                             dip);
+       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
+}
+
+static void
+mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
+                           struct mlxsw_sp_neigh_entry *neigh_entry,
+                           bool adding)
+{
+       if (!adding && !neigh_entry->connected)
+               return;
+       neigh_entry->connected = adding;
+       if (neigh_entry->key.n->tbl == &arp_tbl)
+               mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
+                                               mlxsw_sp_rauht_op(adding));
+       else
+               WARN_ON_ONCE(1);
+}
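+
+/* Only IPv4 (arp_tbl) neighbours are reflected to the device here;
+ * any other table trips the WARN_ON_ONCE() above.
+ */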
+
+struct mlxsw_sp_neigh_event_work {
+       struct work_struct work;
+       struct mlxsw_sp *mlxsw_sp;
+       struct neighbour *n;
+};
+
+static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
+{
+       struct mlxsw_sp_neigh_event_work *neigh_work =
+               container_of(work, struct mlxsw_sp_neigh_event_work, work);
+       struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
+       struct mlxsw_sp_neigh_entry *neigh_entry;
+       struct neighbour *n = neigh_work->n;
+       unsigned char ha[ETH_ALEN];
        bool entry_connected;
        u8 nud_state, dead;
-       bool updating;
-       bool removing;
-       bool adding;
-       u32 dip;
-       int err;
 
+       /* If these parameters are changed after we release the lock,
+        * then we are guaranteed to receive another event letting us
+        * know about it.
+        */
        read_lock_bh(&n->lock);
-       dip = ntohl(*((__be32 *) n->primary_key));
-       memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha));
+       memcpy(ha, n->ha, ETH_ALEN);
        nud_state = n->nud_state;
        dead = n->dead;
-       dev = n->dev;
        read_unlock_bh(&n->lock);
 
+       rtnl_lock();
        entry_connected = nud_state & NUD_VALID && !dead;
-       adding = (!neigh_entry->offloaded) && entry_connected;
-       updating = neigh_entry->offloaded && entry_connected;
-       removing = neigh_entry->offloaded && !entry_connected;
-
-       if (adding || updating) {
-               mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
-                                     neigh_entry->rif,
-                                     neigh_entry->ha, dip);
-               err = mlxsw_reg_write(mlxsw_sp->core,
-                                     MLXSW_REG(rauht), rauht_pl);
-               if (err) {
-                       netdev_err(dev, "Could not add neigh %pI4h\n", &dip);
-                       neigh_entry->offloaded = false;
-               } else {
-                       neigh_entry->offloaded = true;
-               }
-               mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, false);
-       } else if (removing) {
-               mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE,
-                                     neigh_entry->rif,
-                                     neigh_entry->ha, dip);
-               err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht),
-                                     rauht_pl);
-               if (err) {
-                       netdev_err(dev, "Could not delete neigh %pI4h\n", &dip);
-                       neigh_entry->offloaded = true;
-               } else {
-                       neigh_entry->offloaded = false;
-               }
-               mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, true);
+       neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
+       if (!entry_connected && !neigh_entry)
+               goto out;
+       if (!neigh_entry) {
+               neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
+               if (IS_ERR(neigh_entry))
+                       goto out;
        }
 
+       memcpy(neigh_entry->ha, ha, ETH_ALEN);
+       mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
+       mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);
+
+       if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
+               mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
+
+out:
+       rtnl_unlock();
        neigh_release(n);
-       mlxsw_sp_port_dev_put(mlxsw_sp_port);
+       kfree(neigh_work);
 }
 
 int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
                                   unsigned long event, void *ptr)
 {
-       struct mlxsw_sp_neigh_entry *neigh_entry;
+       struct mlxsw_sp_neigh_event_work *neigh_work;
        struct mlxsw_sp_port *mlxsw_sp_port;
        struct mlxsw_sp *mlxsw_sp;
        unsigned long interval;
-       struct net_device *dev;
        struct neigh_parms *p;
        struct neighbour *n;
-       u32 dip;
 
        switch (event) {
        case NETEVENT_DELAY_PROBE_TIME_UPDATE:
@@ -1029,33 +977,31 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
                break;
        case NETEVENT_NEIGH_UPDATE:
                n = ptr;
-               dev = n->dev;
 
                if (n->tbl != &arp_tbl)
                        return NOTIFY_DONE;
 
-               mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev);
+               mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
                if (!mlxsw_sp_port)
                        return NOTIFY_DONE;
 
-               mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-               dip = ntohl(*((__be32 *) n->primary_key));
-               neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
-               if (WARN_ON(!neigh_entry)) {
+               neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
+               if (!neigh_work) {
                        mlxsw_sp_port_dev_put(mlxsw_sp_port);
-                       return NOTIFY_DONE;
+                       return NOTIFY_BAD;
                }
-               neigh_entry->mlxsw_sp_port = mlxsw_sp_port;
+
+               INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
+               neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+               neigh_work->n = n;
 
                /* Take a reference to ensure the neighbour won't be
                 * destructed until we drop the reference in the work
                 * item.
                 */
                neigh_clone(n);
-               if (!mlxsw_core_schedule_dw(&neigh_entry->dw, 0)) {
-                       neigh_release(n);
-                       mlxsw_sp_port_dev_put(mlxsw_sp_port);
-               }
+               mlxsw_core_schedule_work(&neigh_work->work);
+               mlxsw_sp_port_dev_put(mlxsw_sp_port);
                break;
        }
 
@@ -1093,11 +1039,40 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
        rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
 }
 
+static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
+                                   const struct mlxsw_sp_rif *r)
+{
+       char rauht_pl[MLXSW_REG_RAUHT_LEN];
+
+       mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
+                            r->rif, r->addr);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
+}
+
+static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
+                                        struct mlxsw_sp_rif *r)
+{
+       struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
+
+       mlxsw_sp_neigh_rif_flush(mlxsw_sp, r);
+       list_for_each_entry_safe(neigh_entry, tmp, &r->neigh_list,
+                                rif_list_node)
+               mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
+}
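+
+/* When a RIF goes away, DELETE_ALL flushes every host entry behind it
+ * with a single register write, and the per-RIF neigh_list lets the
+ * matching software state be freed without walking the whole hash
+ * table.
+ */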
+
+struct mlxsw_sp_nexthop_key {
+       struct fib_nh *fib_nh;
+};
+
 struct mlxsw_sp_nexthop {
        struct list_head neigh_list_node; /* member of neigh entry list */
+       struct list_head rif_list_node;
        struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
                                                * this belongs to
                                                */
+       struct rhash_head ht_node;
+       struct mlxsw_sp_nexthop_key key;
+       struct mlxsw_sp_rif *r;
        u8 should_offload:1, /* set indicates this neigh is connected and
                              * should be put in the KVD linear area of
                              * this group.
                              */
@@ -1110,16 +1085,81 @@ struct mlxsw_sp_nexthop {
        struct mlxsw_sp_neigh_entry *neigh_entry;
 };
 
+struct mlxsw_sp_nexthop_group_key {
+       struct fib_info *fi;
+};
+
 struct mlxsw_sp_nexthop_group {
-       struct list_head list; /* node in mlxsw->router.nexthop_group_list */
+       struct rhash_head ht_node;
        struct list_head fib_list; /* list of fib entries that use this group */
-       u8 adj_index_valid:1;
+       struct mlxsw_sp_nexthop_group_key key;
+       u8 adj_index_valid:1,
+          gateway:1; /* routes using the group use a gateway */
        u32 adj_index;
        u16 ecmp_size;
        u16 count;
        struct mlxsw_sp_nexthop nexthops[0];
+#define nh_rif nexthops[0].r
+};
+
+static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
+       .key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
+       .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
+       .key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
+};
+
+static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
+                                        struct mlxsw_sp_nexthop_group *nh_grp)
+{
+       return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_group_ht,
+                                     &nh_grp->ht_node,
+                                     mlxsw_sp_nexthop_group_ht_params);
+}
+
+static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
+                                         struct mlxsw_sp_nexthop_group *nh_grp)
+{
+       rhashtable_remove_fast(&mlxsw_sp->router.nexthop_group_ht,
+                              &nh_grp->ht_node,
+                              mlxsw_sp_nexthop_group_ht_params);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
+                             struct mlxsw_sp_nexthop_group_key key)
+{
+       return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_group_ht, &key,
+                                     mlxsw_sp_nexthop_group_ht_params);
+}
+
+static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
+       .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
+       .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
+       .key_len = sizeof(struct mlxsw_sp_nexthop_key),
 };
 
+static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
+                                  struct mlxsw_sp_nexthop *nh)
+{
+       return rhashtable_insert_fast(&mlxsw_sp->router.nexthop_ht,
+                                     &nh->ht_node, mlxsw_sp_nexthop_ht_params);
+}
+
+static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
+                                   struct mlxsw_sp_nexthop *nh)
+{
+       rhashtable_remove_fast(&mlxsw_sp->router.nexthop_ht, &nh->ht_node,
+                              mlxsw_sp_nexthop_ht_params);
+}
+
+static struct mlxsw_sp_nexthop *
+mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
+                       struct mlxsw_sp_nexthop_key key)
+{
+       return rhashtable_lookup_fast(&mlxsw_sp->router.nexthop_ht, &key,
+                                     mlxsw_sp_nexthop_ht_params);
+}
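+
+/* Nexthops and nexthop groups are now keyed by the kernel's fib_nh
+ * and fib_info pointers, turning lookups into O(1) rhashtable hits
+ * instead of the list scans removed further down.
+ */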
+
 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
                                             struct mlxsw_sp_vr *vr,
                                             u32 adj_index, u16 ecmp_size,
@@ -1144,9 +1184,9 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
        int err;
 
        list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
-               if (vr == fib_entry->vr)
+               if (vr == fib_entry->fib_node->vr)
                        continue;
-               vr = fib_entry->vr;
+               vr = fib_entry->fib_node->vr;
                err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
                                                        old_adj_index,
                                                        old_ecmp_size,
@@ -1234,6 +1274,11 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
        int i;
        int err;
 
+       if (!nh_grp->gateway) {
+               mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
+               return;
+       }
+
        for (i = 0; i < nh_grp->count; i++) {
                nh = &nh_grp->nexthops[i];
 
@@ -1336,42 +1381,63 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
 {
        struct mlxsw_sp_nexthop *nh;
 
-       /* Take RTNL mutex here to prevent lists from changes */
-       rtnl_lock();
        list_for_each_entry(nh, &neigh_entry->nexthop_list,
                            neigh_list_node) {
                __mlxsw_sp_nexthop_neigh_update(nh, removing);
                mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
        }
-       rtnl_unlock();
 }
 
-static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
-                                struct mlxsw_sp_nexthop_group *nh_grp,
-                                struct mlxsw_sp_nexthop *nh,
-                                struct fib_nh *fib_nh)
+static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
+                                     struct mlxsw_sp_rif *r)
+{
+       if (nh->r)
+               return;
+
+       nh->r = r;
+       list_add(&nh->rif_list_node, &r->nexthop_list);
+}
+
+static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
+{
+       if (!nh->r)
+               return;
+
+       list_del(&nh->rif_list_node);
+       nh->r = NULL;
+}
+
+static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
+                                      struct mlxsw_sp_nexthop *nh)
 {
        struct mlxsw_sp_neigh_entry *neigh_entry;
-       struct net_device *dev = fib_nh->nh_dev;
+       struct fib_nh *fib_nh = nh->key.fib_nh;
        struct neighbour *n;
        u8 nud_state, dead;
+       int err;
+
+       if (!nh->nh_grp->gateway || nh->neigh_entry)
+               return 0;
 
        /* Take a reference on the neigh here, ensuring that it would
         * not be destructed before the nexthop entry is finished.
         * The reference is taken either in neigh_lookup() or
-        * in neith_create() in case n is not found.
+        * in neigh_create() in case n is not found.
         */
-       n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev);
+       n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
        if (!n) {
-               n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev);
+               n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
                if (IS_ERR(n))
                        return PTR_ERR(n);
                neigh_event_send(n, NULL);
        }
        neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
        if (!neigh_entry) {
-               neigh_release(n);
-               return -EINVAL;
+               neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
+               if (IS_ERR(neigh_entry)) {
+                       err = -EINVAL;
+                       goto err_neigh_entry_create;
+               }
        }
 
        /* If that is the first nexthop connected to that neigh, add to
@@ -1381,7 +1447,6 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
                list_add_tail(&neigh_entry->nexthop_neighs_list_node,
                              &mlxsw_sp->router.nexthop_neighs_list);
 
-       nh->nh_grp = nh_grp;
        nh->neigh_entry = neigh_entry;
        list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
        read_lock_bh(&n->lock);
@@ -1391,23 +1456,126 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
        __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
 
        return 0;
-}
 
-static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
-                                 struct mlxsw_sp_nexthop *nh)
+err_neigh_entry_create:
+       neigh_release(n);
+       return err;
+}
+
+static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
+                                       struct mlxsw_sp_nexthop *nh)
 {
        struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
+       struct neighbour *n;
+
+       if (!neigh_entry)
+               return;
+       n = neigh_entry->key.n;
 
        __mlxsw_sp_nexthop_neigh_update(nh, true);
        list_del(&nh->neigh_list_node);
+       nh->neigh_entry = NULL;
 
        /* If that is the last nexthop connected to that neigh, remove from
         * nexthop_neighs_list
         */
-       if (list_empty(&nh->neigh_entry->nexthop_list))
-               list_del(&nh->neigh_entry->nexthop_neighs_list_node);
+       if (list_empty(&neigh_entry->nexthop_list))
+               list_del(&neigh_entry->nexthop_neighs_list_node);
+
+       if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
+               mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
+
+       neigh_release(n);
+}
+
+static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
+                                struct mlxsw_sp_nexthop_group *nh_grp,
+                                struct mlxsw_sp_nexthop *nh,
+                                struct fib_nh *fib_nh)
+{
+       struct net_device *dev = fib_nh->nh_dev;
+       struct in_device *in_dev;
+       struct mlxsw_sp_rif *r;
+       int err;
+
+       nh->nh_grp = nh_grp;
+       nh->key.fib_nh = fib_nh;
+       err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
+       if (err)
+               return err;
+
+       in_dev = __in_dev_get_rtnl(dev);
+       if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+           fib_nh->nh_flags & RTNH_F_LINKDOWN)
+               return 0;
 
-       neigh_release(neigh_entry->key.n);
+       r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+       if (!r)
+               return 0;
+       mlxsw_sp_nexthop_rif_init(nh, r);
+
+       err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
+       if (err)
+               goto err_nexthop_neigh_init;
+
+       return 0;
+
+err_nexthop_neigh_init:
+       mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
+       return err;
+}
+
+static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
+                                 struct mlxsw_sp_nexthop *nh)
+{
+       mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
+       mlxsw_sp_nexthop_rif_fini(nh);
+       mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
+}
+
+static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
+                                  unsigned long event, struct fib_nh *fib_nh)
+{
+       struct mlxsw_sp_nexthop_key key;
+       struct mlxsw_sp_nexthop *nh;
+       struct mlxsw_sp_rif *r;
+
+       if (mlxsw_sp->router.aborted)
+               return;
+
+       key.fib_nh = fib_nh;
+       nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
+       if (WARN_ON_ONCE(!nh))
+               return;
+
+       r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
+       if (!r)
+               return;
+
+       switch (event) {
+       case FIB_EVENT_NH_ADD:
+               mlxsw_sp_nexthop_rif_init(nh, r);
+               mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
+               break;
+       case FIB_EVENT_NH_DEL:
+               mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
+               mlxsw_sp_nexthop_rif_fini(nh);
+               break;
+       }
+
+       mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+}
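+
+/* On FIB_EVENT_NH_ADD/DEL the nexthop object itself persists; only
+ * its RIF and neighbour bindings are set up or torn down before the
+ * owning group is refreshed.
+ */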
+
+static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
+                                          struct mlxsw_sp_rif *r)
+{
+       struct mlxsw_sp_nexthop *nh, *tmp;
+
+       list_for_each_entry_safe(nh, tmp, &r->nexthop_list, rif_list_node) {
+               mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
+               mlxsw_sp_nexthop_rif_fini(nh);
+               mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+       }
 }
 
 static struct mlxsw_sp_nexthop_group *
@@ -1426,7 +1594,9 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
        if (!nh_grp)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&nh_grp->fib_list);
+       nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
        nh_grp->count = fi->fib_nhs;
+       nh_grp->key.fi = fi;
        for (i = 0; i < nh_grp->count; i++) {
                nh = &nh_grp->nexthops[i];
                fib_nh = &fi->fib_nh[i];
@@ -1434,13 +1604,18 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
                if (err)
                        goto err_nexthop_init;
        }
-       list_add_tail(&nh_grp->list, &mlxsw_sp->router.nexthop_group_list);
+       err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
+       if (err)
+               goto err_nexthop_group_insert;
        mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
        return nh_grp;
 
+err_nexthop_group_insert:
 err_nexthop_init:
-       for (i--; i >= 0; i--)
+       for (i--; i >= 0; i--) {
+               nh = &nh_grp->nexthops[i];
                mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
+       }
        kfree(nh_grp);
        return ERR_PTR(err);
 }
@@ -1452,7 +1627,7 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_nexthop *nh;
        int i;
 
-       list_del(&nh_grp->list);
+       mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
        for (i = 0; i < nh_grp->count; i++) {
                nh = &nh_grp->nexthops[i];
                mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
@@ -1462,59 +1637,15 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
        kfree(nh_grp);
 }
 
-static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
-                                  struct fib_info *fi)
-{
-       int i;
-
-       for (i = 0; i < fi->fib_nhs; i++) {
-               struct fib_nh *fib_nh = &fi->fib_nh[i];
-               struct neighbour *n = nh->neigh_entry->key.n;
-
-               if (memcmp(n->primary_key, &fib_nh->nh_gw,
-                          sizeof(fib_nh->nh_gw)) == 0 &&
-                   n->dev == fib_nh->nh_dev)
-                       return true;
-       }
-       return false;
-}
-
-static bool mlxsw_sp_nexthop_group_match(struct mlxsw_sp_nexthop_group *nh_grp,
-                                        struct fib_info *fi)
-{
-       int i;
-
-       if (nh_grp->count != fi->fib_nhs)
-               return false;
-       for (i = 0; i < nh_grp->count; i++) {
-               struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
-
-               if (!mlxsw_sp_nexthop_match(nh, fi))
-                       return false;
-       }
-       return true;
-}
-
-static struct mlxsw_sp_nexthop_group *
-mlxsw_sp_nexthop_group_find(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
-{
-       struct mlxsw_sp_nexthop_group *nh_grp;
-
-       list_for_each_entry(nh_grp, &mlxsw_sp->router.nexthop_group_list,
-                           list) {
-               if (mlxsw_sp_nexthop_group_match(nh_grp, fi))
-                       return nh_grp;
-       }
-       return NULL;
-}
-
 static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_fib_entry *fib_entry,
                                      struct fib_info *fi)
 {
+       struct mlxsw_sp_nexthop_group_key key;
        struct mlxsw_sp_nexthop_group *nh_grp;
 
-       nh_grp = mlxsw_sp_nexthop_group_find(mlxsw_sp, fi);
+       key.fi = fi;
+       nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
        if (!nh_grp) {
                nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
                if (IS_ERR(nh_grp))
@@ -1536,13 +1667,82 @@ static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
        mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
 }
 
+static bool
+mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
+{
+       struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
+
+       if (fib_entry->params.tos)
+               return false;
+
+       switch (fib_entry->type) {
+       case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
+               return !!nh_group->adj_index_valid;
+       case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
+               return !!nh_group->nh_rif;
+       default:
+               return false;
+       }
+}
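+
+/* A remote entry is offloadable once its group has a valid adjacency
+ * index, a local entry once it has a RIF; TOS-qualified routes are
+ * never offloaded and end up with the trap action instead.
+ */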
+
+static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
+{
+       fib_entry->offloaded = true;
+
+       switch (fib_entry->fib_node->vr->proto) {
+       case MLXSW_SP_L3_PROTO_IPV4:
+               fib_info_offload_inc(fib_entry->nh_group->key.fi);
+               break;
+       case MLXSW_SP_L3_PROTO_IPV6:
+               WARN_ON_ONCE(1);
+       }
+}
+
+static void
+mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
+{
+       switch (fib_entry->fib_node->vr->proto) {
+       case MLXSW_SP_L3_PROTO_IPV4:
+               fib_info_offload_dec(fib_entry->nh_group->key.fi);
+               break;
+       case MLXSW_SP_L3_PROTO_IPV6:
+               WARN_ON_ONCE(1);
+       }
+
+       fib_entry->offloaded = false;
+}
+
+static void
+mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
+                                  enum mlxsw_reg_ralue_op op, int err)
+{
+       switch (op) {
+       case MLXSW_REG_RALUE_OP_WRITE_DELETE:
+               if (!fib_entry->offloaded)
+                       return;
+               return mlxsw_sp_fib_entry_offload_unset(fib_entry);
+       case MLXSW_REG_RALUE_OP_WRITE_WRITE:
+               if (err)
+                       return;
+               if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
+                   !fib_entry->offloaded)
+                       mlxsw_sp_fib_entry_offload_set(fib_entry);
+               else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
+                        fib_entry->offloaded)
+                       mlxsw_sp_fib_entry_offload_unset(fib_entry);
+               return;
+       default:
+               return;
+       }
+}
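+
+/* The refresh keeps the entry's offloaded flag and the kernel's
+ * fib_info offload count in sync with what the last RALUE write
+ * actually did; a failed write leaves the state untouched.
+ */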
+
 static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
                                         struct mlxsw_sp_fib_entry *fib_entry,
                                         enum mlxsw_reg_ralue_op op)
 {
        char ralue_pl[MLXSW_REG_RALUE_LEN];
-       u32 *p_dip = (u32 *) fib_entry->key.addr;
-       struct mlxsw_sp_vr *vr = fib_entry->vr;
+       u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
+       struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
        enum mlxsw_reg_ralue_trap_action trap_action;
        u16 trap_id = 0;
        u32 adjacency_index = 0;
@@ -1552,7 +1752,7 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
         * with provided ECMP size. Otherwise, setup trap and pass
         * traffic to kernel.
         */
-       if (fib_entry->nh_group->adj_index_valid) {
+       if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
                trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
                adjacency_index = fib_entry->nh_group->adj_index;
                ecmp_size = fib_entry->nh_group->ecmp_size;
@@ -1563,7 +1763,8 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
 
        mlxsw_reg_ralue_pack4(ralue_pl,
                              (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
-                             vr->id, fib_entry->key.prefix_len, *p_dip);
+                             vr->id, fib_entry->fib_node->key.prefix_len,
+                             *p_dip);
        mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
                                        adjacency_index, ecmp_size);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
@@ -1573,16 +1774,27 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_fib_entry *fib_entry,
                                        enum mlxsw_reg_ralue_op op)
 {
+       struct mlxsw_sp_rif *r = fib_entry->nh_group->nh_rif;
+       enum mlxsw_reg_ralue_trap_action trap_action;
        char ralue_pl[MLXSW_REG_RALUE_LEN];
-       u32 *p_dip = (u32 *) fib_entry->key.addr;
-       struct mlxsw_sp_vr *vr = fib_entry->vr;
+       u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
+       struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
+       u16 trap_id = 0;
+       u16 rif = 0;
+
+       if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
+               trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
+               rif = r->rif;
+       } else {
+               trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
+               trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
+       }
 
        mlxsw_reg_ralue_pack4(ralue_pl,
                              (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
-                             vr->id, fib_entry->key.prefix_len, *p_dip);
-       mlxsw_reg_ralue_act_local_pack(ralue_pl,
-                                      MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
-                                      fib_entry->rif);
+                             vr->id, fib_entry->fib_node->key.prefix_len,
+                             *p_dip);
+       mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
 }
 
@@ -1591,12 +1803,13 @@ static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
                                       enum mlxsw_reg_ralue_op op)
 {
        char ralue_pl[MLXSW_REG_RALUE_LEN];
-       u32 *p_dip = (u32 *) fib_entry->key.addr;
-       struct mlxsw_sp_vr *vr = fib_entry->vr;
+       u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
+       struct mlxsw_sp_vr *vr = fib_entry->fib_node->vr;
 
        mlxsw_reg_ralue_pack4(ralue_pl,
                              (enum mlxsw_reg_ralxx_protocol) vr->proto, op,
-                             vr->id, fib_entry->key.prefix_len, *p_dip);
+                             vr->id, fib_entry->fib_node->key.prefix_len,
+                             *p_dip);
        mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
 }
@@ -1620,13 +1833,17 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_fib_entry *fib_entry,
                                 enum mlxsw_reg_ralue_op op)
 {
-       switch (fib_entry->vr->proto) {
+       int err = -EINVAL;
+
+       switch (fib_entry->fib_node->vr->proto) {
        case MLXSW_SP_L3_PROTO_IPV4:
-               return mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
+               err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
+               break;
        case MLXSW_SP_L3_PROTO_IPV6:
-               return -EINVAL;
+               return err;
        }
-       return -EINVAL;
+       mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
+       return err;
 }
 
 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
@@ -1644,14 +1861,11 @@ static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
 }
 
 static int
-mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
-                               const struct fib_entry_notifier_info *fen_info,
-                               struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
+                            const struct fib_entry_notifier_info *fen_info,
+                            struct mlxsw_sp_fib_entry *fib_entry)
 {
        struct fib_info *fi = fen_info->fi;
-       struct mlxsw_sp_rif *r = NULL;
-       int nhsel;
-       int err;
 
        if (fen_info->type == RTN_LOCAL || fen_info->type == RTN_BROADCAST) {
                fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
@@ -1659,58 +1873,177 @@ mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
        }
        if (fen_info->type != RTN_UNICAST)
                return -EINVAL;
+       if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
+               fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+       else
+               fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+       return 0;
+}
 
-       for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
-               const struct fib_nh *nh = &fi->fib_nh[nhsel];
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
+                          struct mlxsw_sp_fib_node *fib_node,
+                          const struct fib_entry_notifier_info *fen_info)
+{
+       struct mlxsw_sp_fib_entry *fib_entry;
+       int err;
 
-               if (!nh->nh_dev)
-                       continue;
-               r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, nh->nh_dev);
-               if (!r) {
-                       /* In case router interface is not found for
-                        * at least one of the nexthops, that means
-                        * the nexthop points to some device unrelated
-                        * to us. Set trap and pass the packets for
-                        * this prefix to kernel.
-                        */
-                       break;
-               }
+       fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
+       if (!fib_entry) {
+               err = -ENOMEM;
+               goto err_fib_entry_alloc;
        }
 
-       if (!r) {
-               fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
-               return 0;
-       }
+       err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
+       if (err)
+               goto err_fib4_entry_type_set;
 
-       if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
-               fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
-               fib_entry->rif = r->rif;
-       } else {
-               fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
-               err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
-               if (err)
-                       return err;
-       }
-       fib_info_offload_inc(fen_info->fi);
-       return 0;
+       err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi);
+       if (err)
+               goto err_nexthop_group_get;
+
+       fib_entry->params.prio = fen_info->fi->fib_priority;
+       fib_entry->params.tb_id = fen_info->tb_id;
+       fib_entry->params.type = fen_info->type;
+       fib_entry->params.tos = fen_info->tos;
+
+       fib_entry->fib_node = fib_node;
+
+       return fib_entry;
+
+err_nexthop_group_get:
+err_fib4_entry_type_set:
+       kfree(fib_entry);
+err_fib_entry_alloc:
+       return ERR_PTR(err);
 }
 
-static void
-mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
-                               struct mlxsw_sp_fib_entry *fib_entry)
+static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+                                       struct mlxsw_sp_fib_entry *fib_entry)
 {
-       if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
-               fib_info_offload_dec(fib_entry->fi);
-       if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
-               mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
+       mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
+       kfree(fib_entry);
 }
 
+static struct mlxsw_sp_fib_node *
+mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
+                      const struct fib_entry_notifier_info *fen_info);
+
 static struct mlxsw_sp_fib_entry *
-mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
-                      const struct fib_entry_notifier_info *fen_info)
+mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
+                          const struct fib_entry_notifier_info *fen_info)
 {
        struct mlxsw_sp_fib_entry *fib_entry;
-       struct fib_info *fi = fen_info->fi;
+       struct mlxsw_sp_fib_node *fib_node;
+
+       fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
+       if (IS_ERR(fib_node))
+               return NULL;
+
+       list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
+               if (fib_entry->params.tb_id == fen_info->tb_id &&
+                   fib_entry->params.tos == fen_info->tos &&
+                   fib_entry->params.type == fen_info->type &&
+                   fib_entry->nh_group->key.fi == fen_info->fi) {
+                       return fib_entry;
+               }
+       }
+
+       return NULL;
+}
+
+static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
+       .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
+       .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
+       .key_len = sizeof(struct mlxsw_sp_fib_key),
+       .automatic_shrinking = true,
+};
+
+static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
+                                   struct mlxsw_sp_fib_node *fib_node)
+{
+       return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
+                                     mlxsw_sp_fib_ht_params);
+}
+
+static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
+                                    struct mlxsw_sp_fib_node *fib_node)
+{
+       rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
+                              mlxsw_sp_fib_ht_params);
+}
+
+static struct mlxsw_sp_fib_node *
+mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
+                        size_t addr_len, unsigned char prefix_len)
+{
+       struct mlxsw_sp_fib_key key;
+
+       memset(&key, 0, sizeof(key));
+       memcpy(key.addr, addr, addr_len);
+       key.prefix_len = prefix_len;
+       return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
+}
+
+static struct mlxsw_sp_fib_node *
+mlxsw_sp_fib_node_create(struct mlxsw_sp_vr *vr, const void *addr,
+                        size_t addr_len, unsigned char prefix_len)
+{
+       struct mlxsw_sp_fib_node *fib_node;
+
+       fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
+       if (!fib_node)
+               return NULL;
+
+       INIT_LIST_HEAD(&fib_node->entry_list);
+       list_add(&fib_node->list, &vr->fib->node_list);
+       memcpy(fib_node->key.addr, addr, addr_len);
+       fib_node->key.prefix_len = prefix_len;
+       mlxsw_sp_fib_node_insert(vr->fib, fib_node);
+       fib_node->vr = vr;
+
+       return fib_node;
+}
+
+static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
+{
+       mlxsw_sp_fib_node_remove(fib_node->vr->fib, fib_node);
+       list_del(&fib_node->list);
+       WARN_ON(!list_empty(&fib_node->entry_list));
+       kfree(fib_node);
+}
+
+static bool
+mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
+                                const struct mlxsw_sp_fib_entry *fib_entry)
+{
+       return list_first_entry(&fib_node->entry_list,
+                               struct mlxsw_sp_fib_entry, list) == fib_entry;
+}
+
+static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
+{
+       unsigned char prefix_len = fib_node->key.prefix_len;
+       struct mlxsw_sp_fib *fib = fib_node->vr->fib;
+
+       if (fib->prefix_ref_count[prefix_len]++ == 0)
+               mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
+}
+
+static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
+{
+       unsigned char prefix_len = fib_node->key.prefix_len;
+       struct mlxsw_sp_fib *fib = fib_node->vr->fib;
+
+       if (--fib->prefix_ref_count[prefix_len] == 0)
+               mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
+}
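
mlxsw_sp_fib_node_prefix_inc()/dec() reference-count routes per prefix length and touch the prefix-usage bitmap only on the 0-to-1 and 1-to-0 transitions; that bitmap is what later drives LPM tree selection. A standalone sketch of the transition logic (the names and the 64-bit bitmap are illustrative):

#include <stdio.h>

#define MAX_PREFIX_LEN 32

static unsigned int ref_count[MAX_PREFIX_LEN + 1];
static unsigned long long usage_bitmap;  /* bit N set <=> some /N route exists */

static void prefix_inc(unsigned char prefix_len)
{
	if (ref_count[prefix_len]++ == 0)       /* first user: mark length in use */
		usage_bitmap |= 1ULL << prefix_len;
}

static void prefix_dec(unsigned char prefix_len)
{
	if (--ref_count[prefix_len] == 0)       /* last user gone: clear the bit */
		usage_bitmap &= ~(1ULL << prefix_len);
}

int main(void)
{
	prefix_inc(24);
	prefix_inc(24);
	prefix_dec(24);
	printf("/24 in use: %s\n", (usage_bitmap >> 24) & 1 ? "yes" : "no");
	prefix_dec(24);
	printf("/24 in use: %s\n", (usage_bitmap >> 24) & 1 ? "yes" : "no");
	return 0;
}
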
+
+static struct mlxsw_sp_fib_node *
+mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
+                      const struct fib_entry_notifier_info *fen_info)
+{
+       struct mlxsw_sp_fib_node *fib_node;
        struct mlxsw_sp_vr *vr;
        int err;
 
@@ -1719,113 +2052,258 @@ mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
        if (IS_ERR(vr))
                return ERR_CAST(vr);
 
-       fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
-                                             sizeof(fen_info->dst),
-                                             fen_info->dst_len, fi->fib_dev);
-       if (fib_entry) {
-               /* Already exists, just take a reference */
-               fib_entry->ref_count++;
-               return fib_entry;
-       }
-       fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fen_info->dst,
-                                             sizeof(fen_info->dst),
-                                             fen_info->dst_len, fi->fib_dev);
-       if (!fib_entry) {
+       fib_node = mlxsw_sp_fib_node_lookup(vr->fib, &fen_info->dst,
+                                           sizeof(fen_info->dst),
+                                           fen_info->dst_len);
+       if (fib_node)
+               return fib_node;
+
+       fib_node = mlxsw_sp_fib_node_create(vr, &fen_info->dst,
+                                           sizeof(fen_info->dst),
+                                           fen_info->dst_len);
+       if (!fib_node) {
                err = -ENOMEM;
-               goto err_fib_entry_create;
+               goto err_fib_node_create;
        }
-       fib_entry->vr = vr;
-       fib_entry->fi = fi;
-       fib_entry->ref_count = 1;
 
-       err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fen_info, fib_entry);
-       if (err)
-               goto err_fib4_entry_init;
+       return fib_node;
 
-       return fib_entry;
-
-err_fib4_entry_init:
-       mlxsw_sp_fib_entry_destroy(fib_entry);
-err_fib_entry_create:
+err_fib_node_create:
        mlxsw_sp_vr_put(mlxsw_sp, vr);
-
        return ERR_PTR(err);
 }
 
+static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
+                                  struct mlxsw_sp_fib_node *fib_node)
+{
+       struct mlxsw_sp_vr *vr = fib_node->vr;
+
+       if (!list_empty(&fib_node->entry_list))
+               return;
+       mlxsw_sp_fib_node_destroy(fib_node);
+       mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
+
 static struct mlxsw_sp_fib_entry *
-mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
-                       const struct fib_entry_notifier_info *fen_info)
+mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
+                             const struct mlxsw_sp_fib_entry_params *params)
 {
-       struct mlxsw_sp_vr *vr;
+       struct mlxsw_sp_fib_entry *fib_entry;
 
-       vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id,
-                             MLXSW_SP_L3_PROTO_IPV4);
-       if (!vr)
-               return NULL;
+       list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
+               if (fib_entry->params.tb_id > params->tb_id)
+                       continue;
+               if (fib_entry->params.tb_id != params->tb_id)
+                       break;
+               if (fib_entry->params.tos > params->tos)
+                       continue;
+               if (fib_entry->params.prio >= params->prio ||
+                   fib_entry->params.tos < params->tos)
+                       return fib_entry;
+       }
 
-       return mlxsw_sp_fib_entry_lookup(vr->fib, &fen_info->dst,
-                                        sizeof(fen_info->dst),
-                                        fen_info->dst_len,
-                                        fen_info->fi->fib_dev);
+       return NULL;
 }
 
-static void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
-                                  struct mlxsw_sp_fib_entry *fib_entry)
+static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
+                                         struct mlxsw_sp_fib_entry *new_entry)
 {
-       struct mlxsw_sp_vr *vr = fib_entry->vr;
+       struct mlxsw_sp_fib_node *fib_node;
 
-       if (--fib_entry->ref_count == 0) {
-               mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
-               mlxsw_sp_fib_entry_destroy(fib_entry);
+       if (WARN_ON(!fib_entry))
+               return -EINVAL;
+
+       fib_node = fib_entry->fib_node;
+       list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
+               if (fib_entry->params.tb_id != new_entry->params.tb_id ||
+                   fib_entry->params.tos != new_entry->params.tos ||
+                   fib_entry->params.prio != new_entry->params.prio)
+                       break;
        }
-       mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+       list_add_tail(&new_entry->list, &fib_entry->list);
+       return 0;
 }
 
-static void mlxsw_sp_fib_entry_put_all(struct mlxsw_sp *mlxsw_sp,
-                                      struct mlxsw_sp_fib_entry *fib_entry)
+static int
+mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
+                              struct mlxsw_sp_fib_entry *new_entry,
+                              bool replace, bool append)
 {
-       unsigned int last_ref_count;
+       struct mlxsw_sp_fib_entry *fib_entry;
 
-       do {
-               last_ref_count = fib_entry->ref_count;
-               mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
-       } while (last_ref_count != 1);
+       fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);
+
+       if (append)
+               return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
+       if (replace && WARN_ON(!fib_entry))
+               return -EINVAL;
+
+       /* Insert the new entry before the replaced one, so that we can
+        * later remove the replaced entry.
+        */
+       if (fib_entry) {
+               list_add_tail(&new_entry->list, &fib_entry->list);
+       } else {
+               struct mlxsw_sp_fib_entry *last;
+
+               list_for_each_entry(last, &fib_node->entry_list, list) {
+                       if (new_entry->params.tb_id > last->params.tb_id)
+                               break;
+                       fib_entry = last;
+               }
+
+               if (fib_entry)
+                       list_add(&new_entry->list, &fib_entry->list);
+               else
+                       list_add(&new_entry->list, &fib_node->entry_list);
+       }
+
+       return 0;
+}
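
Entries within a node are kept ordered so that the entry at the head of the list is always the one that belongs in hardware; the walk above keeps higher table IDs (and then TOS and priority) toward the front. A simplified sketch of a descending ordered insert on a single key:

#include <stdio.h>

struct entry {
	int tb_id;                      /* higher table ID = higher precedence */
	struct entry *next;
};

/* Keep the list sorted with the highest tb_id first; the head entry is
 * the one the driver would actually offload to hardware.
 */
static void insert_sorted(struct entry **head, struct entry *ent)
{
	struct entry **pos = head;

	while (*pos && (*pos)->tb_id >= ent->tb_id)
		pos = &(*pos)->next;
	ent->next = *pos;
	*pos = ent;
}

int main(void)
{
	struct entry a = { .tb_id = 254 }, b = { .tb_id = 255 }, *e;
	struct entry *head = NULL;

	insert_sorted(&head, &a);
	insert_sorted(&head, &b);       /* 255 > 254: becomes the new head */
	for (e = head; e; e = e->next)
		printf("tb_id %d\n", e->tb_id);
	return 0;
}
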
+
+static void
+mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
+{
+       list_del(&fib_entry->list);
+}
+
+static int
+mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
+                            const struct mlxsw_sp_fib_node *fib_node,
+                            struct mlxsw_sp_fib_entry *fib_entry)
+{
+       if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
+               return 0;
+
+       /* To prevent packet loss, overwrite the previously offloaded
+        * entry.
+        */
+       if (!list_is_singular(&fib_node->entry_list)) {
+               enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
+               struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
+
+               mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
+       }
+
+       return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+}
+
+static void
+mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
+                            const struct mlxsw_sp_fib_node *fib_node,
+                            struct mlxsw_sp_fib_entry *fib_entry)
+{
+       if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
+               return;
+
+       /* Promote the next entry by overwriting the deleted entry */
+       if (!list_is_singular(&fib_node->entry_list)) {
+               struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
+               enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
+
+               mlxsw_sp_fib_entry_update(mlxsw_sp, n);
+               mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
+               return;
+       }
+
+       mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+}
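
mlxsw_sp_fib4_node_entry_del() avoids a forwarding gap: when the offloaded entry is removed but a backup exists, the backup is written over the old hardware entry in a single operation instead of delete-then-add. A toy model of that promote-by-overwrite rule, with hw_write()/hw_delete() standing in for the RALUE register writes:

#include <stdio.h>

/* Toy model: the hardware holds at most one route per prefix. */
static const char *hw_route;

static void hw_write(const char *route) { hw_route = route; }
static void hw_delete(void)             { hw_route = NULL; }

/* Deleting the currently offloaded entry: if a backup exists, overwrite
 * in place so there is never a window with no route programmed.
 */
static void del_offloaded(const char *next)
{
	if (next)
		hw_write(next);         /* promote: single overwrite */
	else
		hw_delete();
}

int main(void)
{
	hw_write("entry A");
	del_offloaded("entry B");
	printf("hw now holds: %s\n", hw_route ? hw_route : "(nothing)");
	return 0;
}
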
+
+static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
+                                        struct mlxsw_sp_fib_entry *fib_entry,
+                                        bool replace, bool append)
+{
+       struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
+       int err;
+
+       err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
+                                            append);
+       if (err)
+               return err;
+
+       err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
+       if (err)
+               goto err_fib4_node_entry_add;
+
+       mlxsw_sp_fib_node_prefix_inc(fib_node);
+
+       return 0;
+
+err_fib4_node_entry_add:
+       mlxsw_sp_fib4_node_list_remove(fib_entry);
+       return err;
+}
+
+static void
+mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
+                               struct mlxsw_sp_fib_entry *fib_entry)
+{
+       struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
+
+       mlxsw_sp_fib_node_prefix_dec(fib_node);
+       mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
+       mlxsw_sp_fib4_node_list_remove(fib_entry);
+}
+
+static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
+                                       struct mlxsw_sp_fib_entry *fib_entry,
+                                       bool replace)
+{
+       struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
+       struct mlxsw_sp_fib_entry *replaced;
+
+       if (!replace)
+               return;
+
+       /* We inserted the new entry before the replaced one */
+       replaced = list_next_entry(fib_entry, list);
+
+       mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
+       mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
+       mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
 }
 
-static int mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
-                                   struct fib_entry_notifier_info *fen_info)
+static int
+mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
+                        const struct fib_entry_notifier_info *fen_info,
+                        bool replace, bool append)
 {
        struct mlxsw_sp_fib_entry *fib_entry;
-       struct mlxsw_sp_vr *vr;
+       struct mlxsw_sp_fib_node *fib_node;
        int err;
 
        if (mlxsw_sp->router.aborted)
                return 0;
 
-       fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fen_info);
-       if (IS_ERR(fib_entry)) {
-               dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB4 entry being added.\n");
-               return PTR_ERR(fib_entry);
+       fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
+       if (IS_ERR(fib_node)) {
+               dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
+               return PTR_ERR(fib_node);
        }
 
-       if (fib_entry->ref_count != 1)
-               return 0;
+       fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
+       if (IS_ERR(fib_entry)) {
+               dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
+               err = PTR_ERR(fib_entry);
+               goto err_fib4_entry_create;
+       }
 
-       vr = fib_entry->vr;
-       err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
+       err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
+                                           append);
        if (err) {
-               dev_warn(mlxsw_sp->bus_info->dev, "Failed to insert FIB4 entry being added.\n");
-               goto err_fib_entry_insert;
+               dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
+               goto err_fib4_node_entry_link;
        }
-       err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
-       if (err)
-               goto err_fib_entry_add;
+
+       mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);
+
        return 0;
 
-err_fib_entry_add:
-       mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
-err_fib_entry_insert:
-       mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
+err_fib4_node_entry_link:
+       mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
+err_fib4_entry_create:
+       mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
        return err;
 }
 
@@ -1833,20 +2311,19 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
                                     struct fib_entry_notifier_info *fen_info)
 {
        struct mlxsw_sp_fib_entry *fib_entry;
+       struct mlxsw_sp_fib_node *fib_node;
 
        if (mlxsw_sp->router.aborted)
                return;
 
-       fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fen_info);
-       if (!fib_entry)
+       fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
+       if (WARN_ON(!fib_entry))
                return;
+       fib_node = fib_entry->fib_node;
 
-       if (fib_entry->ref_count == 1) {
-               mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
-               mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
-       }
-
-       mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
+       mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
+       mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
+       mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
 }
 
 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
@@ -1880,10 +2357,42 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
 }
 
+static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
+                                    struct mlxsw_sp_fib_node *fib_node)
+{
+       struct mlxsw_sp_fib_entry *fib_entry, *tmp;
+
+       list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
+               bool do_break = &tmp->list == &fib_node->entry_list;
+
+               mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
+               mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
+               mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
+               /* Break when the entry list is empty and the node was
+                * freed. Otherwise, we'll access freed memory in the
+                * next iteration.
+                */
+               if (do_break)
+                       break;
+       }
+}
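
The do_break test in the flush above exists because destroying the last entry can free the fib_node that embeds the list head, so the iterator must not take another step afterwards. A self-contained demonstration of the idiom, with just enough of an intrusive list reimplemented in userspace to run:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
}

struct item { int v; struct list_head list; };
#define ITEM(p) ((struct item *)((char *)(p) - offsetof(struct item, list)))

int main(void)
{
	struct list_head head, *pos, *tmp;
	int i;

	list_init(&head);
	for (i = 0; i < 3; i++) {
		struct item *it = malloc(sizeof(*it));

		it->v = i;
		list_add_tail(&it->list, &head);
	}

	/* In the driver, 'head' is embedded in the fib_node, which the
	 * per-entry teardown may free once the list empties.  The loop
	 * increment reads pos->next, so test for the last element
	 * *before* the destructive call and break instead.
	 */
	for (pos = head.next, tmp = pos->next; pos != &head;
	     pos = tmp, tmp = pos->next) {
		int do_break = (tmp == &head);
		struct item *it = ITEM(pos);

		list_del(&it->list);
		printf("freeing %d\n", it->v);
		free(it);
		if (do_break)
			break;
	}
	return 0;
}
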
+
+static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
+                                   struct mlxsw_sp_fib_node *fib_node)
+{
+       switch (fib_node->vr->proto) {
+       case MLXSW_SP_L3_PROTO_IPV4:
+               mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
+               break;
+       case MLXSW_SP_L3_PROTO_IPV6:
+               WARN_ON_ONCE(1);
+               break;
+       }
+}
+
 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
 {
-       struct mlxsw_sp_fib_entry *fib_entry;
-       struct mlxsw_sp_fib_entry *tmp;
+       struct mlxsw_sp_fib_node *fib_node, *tmp;
        struct mlxsw_sp_vr *vr;
        int i;
 
@@ -1893,14 +2402,11 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
                if (!vr->used)
                        continue;
 
-               list_for_each_entry_safe(fib_entry, tmp,
-                                        &vr->fib->entry_list, list) {
-                       bool do_break = &tmp->list == &vr->fib->entry_list;
+               list_for_each_entry_safe(fib_node, tmp, &vr->fib->node_list,
+                                        list) {
+                       bool do_break = &tmp->list == &vr->fib->node_list;
 
-                       mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
-                       mlxsw_sp_fib_entry_remove(fib_entry->vr->fib,
-                                                 fib_entry);
-                       mlxsw_sp_fib_entry_put_all(mlxsw_sp, fib_entry);
+                       mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
                        if (do_break)
                                break;
                }
@@ -1921,6 +2427,28 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
                dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
 }
 
+static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
+{
+       char ritr_pl[MLXSW_REG_RITR_LEN];
+       int err;
+
+       mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+       if (WARN_ON_ONCE(err))
+               return err;
+
+       mlxsw_reg_ritr_enable_set(ritr_pl, false);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
+                                  struct mlxsw_sp_rif *r)
+{
+       mlxsw_sp_router_rif_disable(mlxsw_sp, r->rif);
+       mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, r);
+       mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, r);
+}
+
 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
 {
        char rgcr_pl[MLXSW_REG_RGCR_LEN];
@@ -1964,8 +2492,11 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
 }
 
 struct mlxsw_sp_fib_event_work {
-       struct delayed_work dw;
-       struct fib_entry_notifier_info fen_info;
+       struct work_struct work;
+       union {
+               struct fib_entry_notifier_info fen_info;
+               struct fib_nh_notifier_info fnh_info;
+       };
        struct mlxsw_sp *mlxsw_sp;
        unsigned long event;
 };
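
FIB notifiers can run in atomic context, so the notifier copies its payload by value into this work item, one union member per event family, and the handler below replays it later under RTNL; the patch also swaps the delayed-work wrapper for a plain work_struct since no delay is wanted. A compact sketch of the discriminated-union hand-off (types and fields are illustrative):

#include <stdio.h>
#include <string.h>

enum fib_event { ENTRY_ADD, NH_ADD };

struct entry_info { int dst, dst_len; };
struct nh_info    { int nh_id; };

/* One deferred-work record; 'event' says which union member is live. */
struct fib_work {
	enum fib_event event;
	union {
		struct entry_info fen_info;
		struct nh_info fnh_info;
	};
};

static void work_handler(struct fib_work *w)
{
	switch (w->event) {
	case ENTRY_ADD:
		printf("add %#x/%d\n", w->fen_info.dst, w->fen_info.dst_len);
		break;
	case NH_ADD:
		printf("nexthop %d\n", w->fnh_info.nh_id);
		break;
	}
}

int main(void)
{
	struct entry_info ei = { .dst = 0x0a000000, .dst_len = 8 };
	struct fib_work w = { .event = ENTRY_ADD };

	/* The notifier copies the payload by value; the original may be
	 * gone by the time the work runs, hence the fib_info_hold() in
	 * the real driver.
	 */
	memcpy(&w.fen_info, &ei, sizeof(ei));
	work_handler(&w);
	return 0;
}
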
@@ -1973,15 +2504,21 @@ struct mlxsw_sp_fib_event_work {
 static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
 {
        struct mlxsw_sp_fib_event_work *fib_work =
-               container_of(work, struct mlxsw_sp_fib_event_work, dw.work);
+               container_of(work, struct mlxsw_sp_fib_event_work, work);
        struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
+       bool replace, append;
        int err;
 
        /* Protect internal structures from changes */
        rtnl_lock();
        switch (fib_work->event) {
+       case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+       case FIB_EVENT_ENTRY_APPEND: /* fall through */
        case FIB_EVENT_ENTRY_ADD:
-               err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info);
+               replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
+               append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
+               err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
+                                              replace, append);
                if (err)
                        mlxsw_sp_router_fib4_abort(mlxsw_sp);
                fib_info_put(fib_work->fen_info.fi);
@@ -1994,6 +2531,12 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
        case FIB_EVENT_RULE_DEL:
                mlxsw_sp_router_fib4_abort(mlxsw_sp);
                break;
+       case FIB_EVENT_NH_ADD: /* fall through */
+       case FIB_EVENT_NH_DEL:
+               mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
+                                      fib_work->fnh_info.fib_nh);
+               fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
+               break;
        }
        rtnl_unlock();
        kfree(fib_work);
@@ -2014,11 +2557,13 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
        if (WARN_ON(!fib_work))
                return NOTIFY_BAD;
 
-       INIT_DELAYED_WORK(&fib_work->dw, mlxsw_sp_router_fib_event_work);
+       INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
        fib_work->mlxsw_sp = mlxsw_sp;
        fib_work->event = event;
 
        switch (event) {
+       case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+       case FIB_EVENT_ENTRY_APPEND: /* fall through */
        case FIB_EVENT_ENTRY_ADD: /* fall through */
        case FIB_EVENT_ENTRY_DEL:
                memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
@@ -2027,9 +2572,14 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
                 */
                fib_info_hold(fib_work->fen_info.fi);
                break;
+       case FIB_EVENT_NH_ADD: /* fall through */
+       case FIB_EVENT_NH_DEL:
+               memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
+               fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
+               break;
        }
 
-       mlxsw_core_schedule_odw(&fib_work->dw, 0);
+       mlxsw_core_schedule_work(&fib_work->work);
 
        return NOTIFY_DONE;
 }
@@ -2051,11 +2601,20 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
        int err;
 
        INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
-       INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
        err = __mlxsw_sp_router_init(mlxsw_sp);
        if (err)
                return err;
 
+       err = rhashtable_init(&mlxsw_sp->router.nexthop_ht,
+                             &mlxsw_sp_nexthop_ht_params);
+       if (err)
+               goto err_nexthop_ht_init;
+
+       err = rhashtable_init(&mlxsw_sp->router.nexthop_group_ht,
+                             &mlxsw_sp_nexthop_group_ht_params);
+       if (err)
+               goto err_nexthop_group_ht_init;
+
        mlxsw_sp_lpm_init(mlxsw_sp);
        err = mlxsw_sp_vrs_init(mlxsw_sp);
        if (err)
@@ -2078,6 +2637,10 @@ err_register_fib_notifier:
 err_neigh_init:
        mlxsw_sp_vrs_fini(mlxsw_sp);
 err_vrs_init:
+       rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
+err_nexthop_group_ht_init:
+       rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
+err_nexthop_ht_init:
        __mlxsw_sp_router_fini(mlxsw_sp);
        return err;
 }
@@ -2087,5 +2650,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
        unregister_fib_notifier(&mlxsw_sp->fib_nb);
        mlxsw_sp_neigh_fini(mlxsw_sp);
        mlxsw_sp_vrs_fini(mlxsw_sp);
+       rhashtable_destroy(&mlxsw_sp->router.nexthop_group_ht);
+       rhashtable_destroy(&mlxsw_sp->router.nexthop_ht);
        __mlxsw_sp_router_fini(mlxsw_sp);
 }
index b87ba7d36bc4af98b7464b14c536b9626abf15fe..598727d578c16e924ac5b25a98a7d622e02dc06a 100644
@@ -71,8 +71,21 @@ mlxsw_sp_port_orig_get(struct net_device *dev,
                       struct mlxsw_sp_port *mlxsw_sp_port)
 {
        struct mlxsw_sp_port *mlxsw_sp_vport;
+       struct mlxsw_sp_fid *fid;
        u16 vid;
 
+       if (netif_is_bridge_master(dev)) {
+               fid = mlxsw_sp_vfid_find(mlxsw_sp_port->mlxsw_sp,
+                                        dev);
+               if (fid) {
+                       mlxsw_sp_vport =
+                               mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
+                                                               fid->fid);
+                       WARN_ON(!mlxsw_sp_vport);
+                       return mlxsw_sp_vport;
+               }
+       }
+
        if (!is_vlan_dev(dev))
                return mlxsw_sp_port;
 
@@ -166,9 +179,10 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
        return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
 }
 
-static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
-                                    u16 idx_begin, u16 idx_end, bool uc_set,
-                                    bool bm_set)
+static int __mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                          u16 idx_begin, u16 idx_end,
+                                          enum mlxsw_sp_flood_table table,
+                                          bool set)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 local_port = mlxsw_sp_port->local_port;
@@ -186,31 +200,48 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
        if (!sftr_pl)
                return -ENOMEM;
 
-       mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
-                           table_type, range, local_port, uc_set);
+       mlxsw_reg_sftr_pack(sftr_pl, table, idx_begin,
+                           table_type, range, local_port, set);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+       kfree(sftr_pl);
+       return err;
+}
+
+static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                    u16 idx_begin, u16 idx_end, bool uc_set,
+                                    bool bc_set, bool mc_set)
+{
+       int err;
+
+       err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
+                                             MLXSW_SP_FLOOD_TABLE_UC, uc_set);
        if (err)
-               goto buffer_out;
+               return err;
 
-       mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
-                           table_type, range, local_port, bm_set);
-       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+       err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
+                                             MLXSW_SP_FLOOD_TABLE_BC, bc_set);
        if (err)
                goto err_flood_bm_set;
 
-       goto buffer_out;
+       err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
+                                             MLXSW_SP_FLOOD_TABLE_MC, mc_set);
+       if (err)
+               goto err_flood_mc_set;
+       return 0;
 
+err_flood_mc_set:
+       __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
+                                       MLXSW_SP_FLOOD_TABLE_BC, !bc_set);
 err_flood_bm_set:
-       mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
-                           table_type, range, local_port, !uc_set);
-       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
-buffer_out:
-       kfree(sftr_pl);
+       __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, idx_begin, idx_end,
+                                       MLXSW_SP_FLOOD_TABLE_UC, !uc_set);
        return err;
 }
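
The reworked flood helper programs the UC, BC and MC tables in sequence and, if a later write fails, unwinds the ones already applied in reverse order by writing the complement of the requested value. A runnable sketch of that rollback pattern:

#include <stdio.h>

enum table { UC, BC, MC };

static int table_set(enum table t, int on)
{
	/* pretend MC programming fails, to exercise the unwind path */
	if (t == MC && on)
		return -1;
	printf("table %d -> %d\n", t, on);
	return 0;
}

/* Mirror of the driver's pattern: apply UC, BC, MC in order; on a
 * later failure, undo the earlier steps in reverse by writing the
 * complement of what was requested.
 */
static int flood_set(int uc, int bc, int mc)
{
	int err;

	err = table_set(UC, uc);
	if (err)
		return err;
	err = table_set(BC, bc);
	if (err)
		goto err_bc;
	err = table_set(MC, mc);
	if (err)
		goto err_mc;
	return 0;

err_mc:
	table_set(BC, !bc);     /* undo BC */
err_bc:
	table_set(UC, !uc);     /* undo UC */
	return err;
}

int main(void)
{
	printf("result: %d\n", flood_set(1, 1, 1));
	return 0;
}
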
 
-static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
-                                     bool set)
+static int mlxsw_sp_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                        enum mlxsw_sp_flood_table table,
+                                        bool set)
 {
        struct net_device *dev = mlxsw_sp_port->dev;
        u16 vid, last_visited_vid;
@@ -220,13 +251,13 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
                u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
                u16 vfid = mlxsw_sp_fid_to_vfid(fid);
 
-               return  __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
-                                                 set, true);
+               return __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vfid,
+                                                      vfid, table, set);
        }
 
        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) {
-               err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, set,
-                                               true);
+               err = __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid,
+                                                     table, set);
                if (err) {
                        last_visited_vid = vid;
                        goto err_port_flood_set;
@@ -237,21 +268,53 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
 err_port_flood_set:
        for_each_set_bit(vid, mlxsw_sp_port->active_vlans, last_visited_vid)
-               __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid, vid, !set, true);
+               __mlxsw_sp_port_flood_table_set(mlxsw_sp_port, vid, vid, table,
+                                               !set);
        netdev_err(dev, "Failed to configure flooding\n");
        return err;
 }
 
+static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                        struct switchdev_trans *trans,
+                                        bool mc_disabled)
+{
+       int set;
+       int err = 0;
+
+       if (switchdev_trans_ph_prepare(trans))
+               return 0;
+
+       if (mlxsw_sp_port->mc_router != mlxsw_sp_port->mc_flood) {
+               set = mc_disabled ?
+                       mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
+               err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
+                                                   MLXSW_SP_FLOOD_TABLE_MC,
+                                                   set);
+       }
+
+       if (!err)
+               mlxsw_sp_port->mc_disabled = mc_disabled;
+
+       return err;
+}
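
Several call sites now pick the multicast flood state with the same expression: while snooping is disabled (mc_disabled set) the generic mc_flood flag applies; once snooping is enabled, only ports marked as multicast routers keep flooding. A sketch of the selection:

#include <stdio.h>

/* mc_disabled means IGMP snooping is off on the bridge.  Snooping off:
 * flood multicast per the generic mc_flood flag.  Snooping on: flood
 * only toward ports marked as multicast routers.
 */
static int mc_flood_value(int mc_disabled, int mc_flood, int mc_router)
{
	return mc_disabled ? mc_flood : mc_router;
}

int main(void)
{
	printf("snooping off, non-router port: flood=%d\n",
	       mc_flood_value(1, 1, 0));
	printf("snooping on,  non-router port: flood=%d\n",
	       mc_flood_value(0, 1, 0));
	return 0;
}
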
+
 int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
                             bool set)
 {
+       bool mc_set = set;
        u16 vfid;
 
        /* In case of vFIDs, index into the flooding table is relative to
         * the start of the vFIDs range.
         */
        vfid = mlxsw_sp_fid_to_vfid(fid);
-       return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
+
+       if (set)
+               mc_set = mlxsw_sp_vport->mc_disabled ?
+                        mlxsw_sp_vport->mc_flood : mlxsw_sp_vport->mc_router;
+
+       return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set,
+                                        mc_set);
 }
 
 static int mlxsw_sp_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -297,8 +360,9 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
                return 0;
 
        if ((uc_flood ^ brport_flags) & BR_FLOOD) {
-               err = mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
-                                                !mlxsw_sp_port->uc_flood);
+               err = mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
+                                                   MLXSW_SP_FLOOD_TABLE_UC,
+                                                   !mlxsw_sp_port->uc_flood);
                if (err)
                        return err;
        }
@@ -318,8 +382,9 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
 err_port_learning_set:
        if ((uc_flood ^ brport_flags) & BR_FLOOD)
-               mlxsw_sp_port_uc_flood_set(mlxsw_sp_port,
-                                          mlxsw_sp_port->uc_flood);
+               mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
+                                             MLXSW_SP_FLOOD_TABLE_UC,
+                                             mlxsw_sp_port->uc_flood);
        return err;
 }
 
@@ -371,6 +436,22 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
        return 0;
 }
 
+static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                           struct switchdev_trans *trans,
+                                           bool is_port_mc_router)
+{
+       if (switchdev_trans_ph_prepare(trans))
+               return 0;
+
+       mlxsw_sp_port->mc_router = is_port_mc_router;
+       if (!mlxsw_sp_port->mc_disabled)
+               return mlxsw_sp_port_flood_table_set(mlxsw_sp_port,
+                                                    MLXSW_SP_FLOOD_TABLE_MC,
+                                                    is_port_mc_router);
+
+       return 0;
+}
+
 static int mlxsw_sp_port_attr_set(struct net_device *dev,
                                  const struct switchdev_attr *attr,
                                  struct switchdev_trans *trans)
@@ -400,6 +481,14 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
                                                     attr->orig_dev,
                                                     attr->u.vlan_filtering);
                break;
+       case SWITCHDEV_ATTR_ID_PORT_MROUTER:
+               err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans,
+                                                      attr->u.mrouter);
+               break;
+       case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+               err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
+                                                   attr->u.mc_disabled);
+               break;
        default:
                err = -EOPNOTSUPP;
                break;
@@ -545,6 +634,7 @@ static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
 static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
                                  u16 fid_begin, u16 fid_end)
 {
+       bool mc_flood;
        int fid, err;
 
        for (fid = fid_begin; fid <= fid_end; fid++) {
@@ -553,8 +643,12 @@ static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
                        goto err_port_fid_join;
        }
 
+       mc_flood = mlxsw_sp_port->mc_disabled ?
+                       mlxsw_sp_port->mc_flood : mlxsw_sp_port->mc_router;
+
        err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
-                                       mlxsw_sp_port->uc_flood, true);
+                                       mlxsw_sp_port->uc_flood, true,
+                                       mc_flood);
        if (err)
                goto err_port_flood_set;
 
@@ -570,7 +664,7 @@ err_port_fid_map:
        for (fid--; fid >= fid_begin; fid--)
                mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
        __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
-                                 false);
+                                 false, false);
 err_port_flood_set:
        fid = fid_end;
 err_port_fid_join:
@@ -588,7 +682,7 @@ static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
                mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
 
        __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
-                                 false);
+                                 false, false);
 
        for (fid = fid_begin; fid <= fid_end; fid++)
                __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
index 2e88115e87359777279872efaad3581078aaa59e..ec1e886d4566fb098aefc6e4d82d6f69ea62173b 100644
@@ -382,7 +382,7 @@ static int mlxsw_sx_port_change_mtu(struct net_device *dev, int mtu)
        return 0;
 }
 
-static struct rtnl_link_stats64 *
+static void
 mlxsw_sx_port_get_stats64(struct net_device *dev,
                          struct rtnl_link_stats64 *stats)
 {
@@ -411,7 +411,6 @@ mlxsw_sx_port_get_stats64(struct net_device *dev,
                tx_dropped      += p->tx_dropped;
        }
        stats->tx_dropped       = tx_dropped;
-       return stats;
 }
 
 static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
@@ -734,7 +733,7 @@ static u32 mlxsw_sx_from_ptys_advert_link(u32 ptys_eth_proto)
 }
 
 static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
-                                           struct ethtool_cmd *cmd)
+                                           struct ethtool_link_ksettings *cmd)
 {
        u32 speed = SPEED_UNKNOWN;
        u8 duplex = DUPLEX_UNKNOWN;
@@ -751,8 +750,8 @@ static void mlxsw_sx_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
                }
        }
 out:
-       ethtool_cmd_speed_set(cmd, speed);
-       cmd->duplex = duplex;
+       cmd->base.speed = speed;
+       cmd->base.duplex = duplex;
 }
 
 static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
@@ -777,8 +776,9 @@ static u8 mlxsw_sx_port_connector_port(u32 ptys_eth_proto)
        return PORT_OTHER;
 }
 
-static int mlxsw_sx_port_get_settings(struct net_device *dev,
-                                     struct ethtool_cmd *cmd)
+static int
+mlxsw_sx_port_get_link_ksettings(struct net_device *dev,
+                                struct ethtool_link_ksettings *cmd)
 {
        struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
        struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
@@ -786,6 +786,7 @@ static int mlxsw_sx_port_get_settings(struct net_device *dev,
        u32 eth_proto_cap;
        u32 eth_proto_admin;
        u32 eth_proto_oper;
+       u32 supported, advertising, lp_advertising;
        int err;
 
        mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
@@ -797,18 +798,24 @@ static int mlxsw_sx_port_get_settings(struct net_device *dev,
        mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap,
                                  &eth_proto_admin, &eth_proto_oper);
 
-       cmd->supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
+       supported = mlxsw_sx_from_ptys_supported_port(eth_proto_cap) |
                         mlxsw_sx_from_ptys_supported_link(eth_proto_cap) |
                         SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-       cmd->advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
+       advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_admin);
        mlxsw_sx_from_ptys_speed_duplex(netif_carrier_ok(dev),
                                        eth_proto_oper, cmd);
 
        eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
-       cmd->port = mlxsw_sx_port_connector_port(eth_proto_oper);
-       cmd->lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
+       cmd->base.port = mlxsw_sx_port_connector_port(eth_proto_oper);
+       lp_advertising = mlxsw_sx_from_ptys_advert_link(eth_proto_oper);
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+                                               lp_advertising);
 
-       cmd->transceiver = XCVR_INTERNAL;
        return 0;
 }
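
The get_settings to get_link_ksettings conversion replaces single u32 mode masks with arbitrarily wide bitmaps, and ethtool_convert_legacy_u32_to_link_mode() bridges the two. A sketch of what that expansion amounts to, assuming the straight bit-N-to-bit-N mapping that holds for the first 32 link modes:

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))
#define NMODES 64                       /* illustrative bitmap width */
#define NLONGS ((NMODES + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Expand a legacy 32-bit mode mask into a wide bitmap: bit N of the
 * mask becomes bit N of the bitmap, upper bits cleared.
 */
static void legacy_u32_to_link_mode(unsigned long *bitmap, unsigned int legacy)
{
	int i;

	for (i = 0; i < NLONGS; i++)
		bitmap[i] = 0;
	for (i = 0; i < 32; i++)
		if (legacy & (1u << i))
			bitmap[i / BITS_PER_LONG] |= 1ul << (i % BITS_PER_LONG);
}

int main(void)
{
	unsigned long modes[NLONGS];

	/* 0x1 | 0x4: 10baseT/Half and 100baseT/Half in legacy positions */
	legacy_u32_to_link_mode(modes, 0x1 | 0x4);
	printf("word0 = %#lx\n", modes[0]);
	return 0;
}
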
 
@@ -848,8 +855,9 @@ static u32 mlxsw_sx_to_ptys_upper_speed(u32 upper_speed)
        return ptys_proto;
 }
 
-static int mlxsw_sx_port_set_settings(struct net_device *dev,
-                                     struct ethtool_cmd *cmd)
+static int
+mlxsw_sx_port_set_link_ksettings(struct net_device *dev,
+                                const struct ethtool_link_ksettings *cmd)
 {
        struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
        struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
@@ -858,13 +866,17 @@ static int mlxsw_sx_port_set_settings(struct net_device *dev,
        u32 eth_proto_new;
        u32 eth_proto_cap;
        u32 eth_proto_admin;
+       u32 advertising;
        bool is_up;
        int err;
 
-       speed = ethtool_cmd_speed(cmd);
+       speed = cmd->base.speed;
+
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
 
-       eth_proto_new = cmd->autoneg == AUTONEG_ENABLE ?
-               mlxsw_sx_to_ptys_advert_link(cmd->advertising) :
+       eth_proto_new = cmd->base.autoneg == AUTONEG_ENABLE ?
+               mlxsw_sx_to_ptys_advert_link(advertising) :
                mlxsw_sx_to_ptys_speed(speed);
 
        mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sx_port->local_port, 0);
@@ -921,8 +933,8 @@ static const struct ethtool_ops mlxsw_sx_port_ethtool_ops = {
        .get_strings            = mlxsw_sx_port_get_strings,
        .get_ethtool_stats      = mlxsw_sx_port_get_stats,
        .get_sset_count         = mlxsw_sx_port_get_sset_count,
-       .get_settings           = mlxsw_sx_port_get_settings,
-       .set_settings           = mlxsw_sx_port_set_settings,
+       .get_link_ksettings     = mlxsw_sx_port_get_link_ksettings,
+       .set_link_ksettings     = mlxsw_sx_port_set_link_ksettings,
 };
 
 static int mlxsw_sx_port_attr_get(struct net_device *dev,
index 7ab275deacacbc51165eed9d5ec3f54281533566..02ea48b15eb57f7533a838806107732b7ea97c26 100644
@@ -54,6 +54,7 @@ enum {
        MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
        MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
        MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+       MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
        MLXSW_TRAP_ID_ARPBC = 0x50,
        MLXSW_TRAP_ID_ARPUC = 0x51,
        MLXSW_TRAP_ID_MTUERROR = 0x52,
index 20cb85bc0c5f8660a259dfb8dfd7213df71373a1..bd51e057e915063e401c957c6f606b28fee2084a 100644
@@ -519,7 +519,7 @@ static int ks8695_rx(struct ks8695_priv *ksp, int budget)
                        /* Relinquish the SKB to the network layer */
                        skb_put(skb, pktlen);
                        skb->protocol = eth_type_trans(skb, ndev);
-                       netif_receive_skb(skb);
+                       napi_gro_receive(&ksp->napi, skb);
 
                        /* Record stats */
                        ndev->stats.rx_packets++;
@@ -561,18 +561,17 @@ rx_finished:
 static int ks8695_poll(struct napi_struct *napi, int budget)
 {
        struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
-       unsigned long  work_done;
-
        unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
        unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
+       int work_done;
 
        work_done = ks8695_rx(ksp, budget);
 
-       if (work_done < budget) {
+       if (work_done < budget && napi_complete_done(napi, work_done)) {
                unsigned long flags;
+
                spin_lock_irqsave(&ksp->rx_lock, flags);
-               __napi_complete(napi);
-               /*enable rx interrupt*/
+               /* enable rx interrupt */
                writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
                spin_unlock_irqrestore(&ksp->rx_lock, flags);
        }
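
The poll handler above moves from __napi_complete() to napi_complete_done(), which reports whether the poll really completed; only then may the driver re-enable its RX interrupt. A toy model of the budget contract (the rescheduling races the real API guards against are omitted):

#include <stdio.h>

static int pending = 5;                 /* packets waiting in the RX ring */
static int irq_enabled;

static int rx_process(int budget)
{
	int done = 0;

	while (pending > 0 && done < budget) {
		pending--;
		done++;
	}
	return done;
}

/* Poll contract: consume at most 'budget' packets; only when strictly
 * less than the budget was used may the driver stop polling and
 * re-enable the device interrupt.
 */
static void poll(int budget)
{
	int work_done = rx_process(budget);

	if (work_done < budget)
		irq_enabled = 1;
	printf("work_done=%d irq_enabled=%d\n", work_done, irq_enabled);
}

int main(void)
{
	poll(4);        /* budget exhausted: stay in polling mode */
	poll(4);        /* only 1 left: complete and re-enable IRQ */
	return 0;
}
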
@@ -855,85 +854,94 @@ ks8695_set_msglevel(struct net_device *ndev, u32 value)
 }
 
 /**
- *     ks8695_wan_get_settings - Get device-specific settings.
+ *     ks8695_wan_get_link_ksettings - Get device-specific settings.
  *     @ndev: The network device to read settings from
  *     @cmd: The ethtool structure to read into
  */
 static int
-ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+ks8695_wan_get_link_ksettings(struct net_device *ndev,
+                             struct ethtool_link_ksettings *cmd)
 {
        struct ks8695_priv *ksp = netdev_priv(ndev);
        u32 ctrl;
+       u32 supported, advertising;
 
        /* All ports on the KS8695 support these... */
-       cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+       supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
                          SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
                          SUPPORTED_TP | SUPPORTED_MII);
-       cmd->transceiver = XCVR_INTERNAL;
 
-       cmd->advertising = ADVERTISED_TP | ADVERTISED_MII;
-       cmd->port = PORT_MII;
-       cmd->supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
-       cmd->phy_address = 0;
+       advertising = ADVERTISED_TP | ADVERTISED_MII;
+       cmd->base.port = PORT_MII;
+       supported |= (SUPPORTED_Autoneg | SUPPORTED_Pause);
+       cmd->base.phy_address = 0;
 
        ctrl = readl(ksp->phyiface_regs + KS8695_WMC);
        if ((ctrl & WMC_WAND) == 0) {
                /* auto-negotiation is enabled */
-               cmd->advertising |= ADVERTISED_Autoneg;
+               advertising |= ADVERTISED_Autoneg;
                if (ctrl & WMC_WANA100F)
-                       cmd->advertising |= ADVERTISED_100baseT_Full;
+                       advertising |= ADVERTISED_100baseT_Full;
                if (ctrl & WMC_WANA100H)
-                       cmd->advertising |= ADVERTISED_100baseT_Half;
+                       advertising |= ADVERTISED_100baseT_Half;
                if (ctrl & WMC_WANA10F)
-                       cmd->advertising |= ADVERTISED_10baseT_Full;
+                       advertising |= ADVERTISED_10baseT_Full;
                if (ctrl & WMC_WANA10H)
-                       cmd->advertising |= ADVERTISED_10baseT_Half;
+                       advertising |= ADVERTISED_10baseT_Half;
                if (ctrl & WMC_WANAP)
-                       cmd->advertising |= ADVERTISED_Pause;
-               cmd->autoneg = AUTONEG_ENABLE;
+                       advertising |= ADVERTISED_Pause;
+               cmd->base.autoneg = AUTONEG_ENABLE;
 
-               ethtool_cmd_speed_set(cmd,
-                                     (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10);
-               cmd->duplex = (ctrl & WMC_WDS) ?
+               cmd->base.speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
+               cmd->base.duplex = (ctrl & WMC_WDS) ?
                        DUPLEX_FULL : DUPLEX_HALF;
        } else {
                /* auto-negotiation is disabled */
-               cmd->autoneg = AUTONEG_DISABLE;
+               cmd->base.autoneg = AUTONEG_DISABLE;
 
-               ethtool_cmd_speed_set(cmd, ((ctrl & WMC_WANF100) ?
-                                           SPEED_100 : SPEED_10));
-               cmd->duplex = (ctrl & WMC_WANFF) ?
+               cmd->base.speed = (ctrl & WMC_WANF100) ?
+                                           SPEED_100 : SPEED_10;
+               cmd->base.duplex = (ctrl & WMC_WANFF) ?
                        DUPLEX_FULL : DUPLEX_HALF;
        }
 
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
+
        return 0;
 }
 
 /**
- *     ks8695_wan_set_settings - Set device-specific settings.
+ *     ks8695_wan_set_link_ksettings - Set device-specific settings.
  *     @ndev: The network device to configure
  *     @cmd: The settings to configure
  */
 static int
-ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
+ks8695_wan_set_link_ksettings(struct net_device *ndev,
+                             const struct ethtool_link_ksettings *cmd)
 {
        struct ks8695_priv *ksp = netdev_priv(ndev);
        u32 ctrl;
+       u32 advertising;
 
-       if ((cmd->speed != SPEED_10) && (cmd->speed != SPEED_100))
-               return -EINVAL;
-       if ((cmd->duplex != DUPLEX_HALF) && (cmd->duplex != DUPLEX_FULL))
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
+
+       if ((cmd->base.speed != SPEED_10) && (cmd->base.speed != SPEED_100))
                return -EINVAL;
-       if (cmd->port != PORT_MII)
+       if ((cmd->base.duplex != DUPLEX_HALF) &&
+           (cmd->base.duplex != DUPLEX_FULL))
                return -EINVAL;
-       if (cmd->transceiver != XCVR_INTERNAL)
+       if (cmd->base.port != PORT_MII)
                return -EINVAL;
-       if ((cmd->autoneg != AUTONEG_DISABLE) &&
-           (cmd->autoneg != AUTONEG_ENABLE))
+       if ((cmd->base.autoneg != AUTONEG_DISABLE) &&
+           (cmd->base.autoneg != AUTONEG_ENABLE))
                return -EINVAL;
 
-       if (cmd->autoneg == AUTONEG_ENABLE) {
-               if ((cmd->advertising & (ADVERTISED_10baseT_Half |
+       if (cmd->base.autoneg == AUTONEG_ENABLE) {
+               if ((advertising & (ADVERTISED_10baseT_Half |
                                ADVERTISED_10baseT_Full |
                                ADVERTISED_100baseT_Half |
                                ADVERTISED_100baseT_Full)) == 0)
@@ -943,13 +951,13 @@ ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
 
                ctrl &= ~(WMC_WAND | WMC_WANA100F | WMC_WANA100H |
                          WMC_WANA10F | WMC_WANA10H);
-               if (cmd->advertising & ADVERTISED_100baseT_Full)
+               if (advertising & ADVERTISED_100baseT_Full)
                        ctrl |= WMC_WANA100F;
-               if (cmd->advertising & ADVERTISED_100baseT_Half)
+               if (advertising & ADVERTISED_100baseT_Half)
                        ctrl |= WMC_WANA100H;
-               if (cmd->advertising & ADVERTISED_10baseT_Full)
+               if (advertising & ADVERTISED_10baseT_Full)
                        ctrl |= WMC_WANA10F;
-               if (cmd->advertising & ADVERTISED_10baseT_Half)
+               if (advertising & ADVERTISED_10baseT_Half)
                        ctrl |= WMC_WANA10H;
 
                /* force a re-negotiation */
@@ -962,9 +970,9 @@ ks8695_wan_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
                ctrl |= WMC_WAND;
                ctrl &= ~(WMC_WANF100 | WMC_WANFF);
 
-               if (cmd->speed == SPEED_100)
+               if (cmd->base.speed == SPEED_100)
                        ctrl |= WMC_WANF100;
-               if (cmd->duplex == DUPLEX_FULL)
+               if (cmd->base.duplex == DUPLEX_FULL)
                        ctrl |= WMC_WANFF;
 
                writel(ctrl, ksp->phyiface_regs + KS8695_WMC);
@@ -1043,12 +1051,12 @@ static const struct ethtool_ops ks8695_ethtool_ops = {
 static const struct ethtool_ops ks8695_wan_ethtool_ops = {
        .get_msglevel   = ks8695_get_msglevel,
        .set_msglevel   = ks8695_set_msglevel,
-       .get_settings   = ks8695_wan_get_settings,
-       .set_settings   = ks8695_wan_set_settings,
        .nway_reset     = ks8695_wan_nwayreset,
        .get_link       = ethtool_op_get_link,
        .get_pauseparam = ks8695_wan_get_pause,
        .get_drvinfo    = ks8695_get_drvinfo,
+       .get_link_ksettings = ks8695_wan_get_link_ksettings,
+       .set_link_ksettings = ks8695_wan_set_link_ksettings,
 };
 
 /* Network device interface functions */
index e7e1aff40bd9f2fe676b03ab524de5eaa2a815de..279ee4612981b0af8d482674c6f1f5525988a6f7 100644
@@ -84,7 +84,6 @@ union ks8851_tx_hdr {
  * @rc_ier: Cached copy of KS_IER.
  * @rc_ccr: Cached copy of KS_CCR.
  * @rc_rxqcr: Cached copy of KS_RXQCR.
- * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
  * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
  * @vdd_reg:   Optional regulator supplying the chip
  * @vdd_io: Optional digital power supply for IO
@@ -120,7 +119,6 @@ struct ks8851_net {
        u16                     rc_ier;
        u16                     rc_rxqcr;
        u16                     rc_ccr;
-       u16                     eeprom_size;
 
        struct mii_if_info      mii;
        struct ks8851_rxctrl    rxctrl;
@@ -1088,16 +1086,18 @@ static void ks8851_set_msglevel(struct net_device *dev, u32 to)
        ks->msg_enable = to;
 }
 
-static int ks8851_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ks8851_get_link_ksettings(struct net_device *dev,
+                                    struct ethtool_link_ksettings *cmd)
 {
        struct ks8851_net *ks = netdev_priv(dev);
-       return mii_ethtool_gset(&ks->mii, cmd);
+       return mii_ethtool_get_link_ksettings(&ks->mii, cmd);
 }
 
-static int ks8851_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int ks8851_set_link_ksettings(struct net_device *dev,
+                                    const struct ethtool_link_ksettings *cmd)
 {
        struct ks8851_net *ks = netdev_priv(dev);
-       return mii_ethtool_sset(&ks->mii, cmd);
+       return mii_ethtool_set_link_ksettings(&ks->mii, cmd);
 }
 
 static u32 ks8851_get_link(struct net_device *dev)
@@ -1253,13 +1253,13 @@ static const struct ethtool_ops ks8851_ethtool_ops = {
        .get_drvinfo    = ks8851_get_drvinfo,
        .get_msglevel   = ks8851_get_msglevel,
        .set_msglevel   = ks8851_set_msglevel,
-       .get_settings   = ks8851_get_settings,
-       .set_settings   = ks8851_set_settings,
        .get_link       = ks8851_get_link,
        .nway_reset     = ks8851_nway_reset,
        .get_eeprom_len = ks8851_get_eeprom_len,
        .get_eeprom     = ks8851_get_eeprom,
        .set_eeprom     = ks8851_set_eeprom,
+       .get_link_ksettings = ks8851_get_link_ksettings,
+       .set_link_ksettings = ks8851_set_link_ksettings,
 };
 
 /* MII interface controls */
@@ -1533,11 +1533,6 @@ static int ks8851_probe(struct spi_device *spi)
        /* cache the contents of the CCR register for EEPROM, etc. */
        ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR);
 
-       if (ks->rc_ccr & CCR_EEPROM)
-               ks->eeprom_size = 128;
-       else
-               ks->eeprom_size = 0;
-
        ks8851_read_selftest(ks);
        ks8851_init_mac(ks);
 
index db628078a4e6f549b0d6a7a5baa97edcab9ba2b0..7647f7bdbcb89527fd709405a339ea6ef0b129fc 100644
@@ -1311,16 +1311,18 @@ static void ks_set_msglevel(struct net_device *netdev, u32 to)
        ks->msg_enable = to;
 }
 
-static int ks_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+static int ks_get_link_ksettings(struct net_device *netdev,
+                                struct ethtool_link_ksettings *cmd)
 {
        struct ks_net *ks = netdev_priv(netdev);
-       return mii_ethtool_gset(&ks->mii, cmd);
+       return mii_ethtool_get_link_ksettings(&ks->mii, cmd);
 }
 
-static int ks_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+static int ks_set_link_ksettings(struct net_device *netdev,
+                                const struct ethtool_link_ksettings *cmd)
 {
        struct ks_net *ks = netdev_priv(netdev);
-       return mii_ethtool_sset(&ks->mii, cmd);
+       return mii_ethtool_set_link_ksettings(&ks->mii, cmd);
 }
 
 static u32 ks_get_link(struct net_device *netdev)
@@ -1339,10 +1341,10 @@ static const struct ethtool_ops ks_ethtool_ops = {
        .get_drvinfo    = ks_get_drvinfo,
        .get_msglevel   = ks_get_msglevel,
        .set_msglevel   = ks_set_msglevel,
-       .get_settings   = ks_get_settings,
-       .set_settings   = ks_set_settings,
        .get_link       = ks_get_link,
        .nway_reset     = ks_nway_reset,
+       .get_link_ksettings = ks_get_link_ksettings,
+       .set_link_ksettings = ks_set_link_ksettings,
 };
 
 /* MII interface controls */
index 97f6ef1fa7d06a4b273be0e9ffc91c335dadc06a..ee38c18c2d2dcc3c3e468573194de7b87a4d52ac 100644
@@ -5944,7 +5944,7 @@ static u16 eeprom_data[EEPROM_SIZE] = { 0 };
 /* These functions use the MII functions in mii.c. */
 
 /**
- * netdev_get_settings - get network device settings
+ * netdev_get_link_ksettings - get network device settings
  * @dev:       Network device.
  * @cmd:       Ethtool command.
  *
@@ -5952,23 +5952,26 @@ static u16 eeprom_data[EEPROM_SIZE] = { 0 };
  *
  * Return 0 if successful; otherwise an error code.
  */
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+                                    struct ethtool_link_ksettings *cmd)
 {
        struct dev_priv *priv = netdev_priv(dev);
        struct dev_info *hw_priv = priv->adapter;
 
        mutex_lock(&hw_priv->lock);
-       mii_ethtool_gset(&priv->mii_if, cmd);
-       cmd->advertising |= SUPPORTED_TP;
+       mii_ethtool_get_link_ksettings(&priv->mii_if, cmd);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
        mutex_unlock(&hw_priv->lock);
 
        /* Save advertised settings for workaround in next function. */
-       priv->advertising = cmd->advertising;
+       ethtool_convert_link_mode_to_legacy_u32(&priv->advertising,
+                                               cmd->link_modes.advertising);
+
        return 0;
 }
 
 /**
- * netdev_set_settings - set network device settings
+ * netdev_set_link_ksettings - set network device settings
  * @dev:       Network device.
  * @cmd:       Ethtool command.
  *
@@ -5976,54 +5979,65 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  *
  * Return 0 if successful; otherwise an error code.
  */
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_set_link_ksettings(struct net_device *dev,
+                                    const struct ethtool_link_ksettings *cmd)
 {
        struct dev_priv *priv = netdev_priv(dev);
        struct dev_info *hw_priv = priv->adapter;
        struct ksz_port *port = &priv->port;
-       u32 speed = ethtool_cmd_speed(cmd);
+       struct ethtool_link_ksettings copy_cmd;
+       u32 speed = cmd->base.speed;
+       u32 advertising;
        int rc;
 
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
+
        /*
         * ethtool utility does not change advertised setting if auto
         * negotiation is not specified explicitly.
         */
-       if (cmd->autoneg && priv->advertising == cmd->advertising) {
-               cmd->advertising |= ADVERTISED_ALL;
+       if (cmd->base.autoneg && priv->advertising == advertising) {
+               advertising |= ADVERTISED_ALL;
                if (10 == speed)
-                       cmd->advertising &=
+                       advertising &=
                                ~(ADVERTISED_100baseT_Full |
                                ADVERTISED_100baseT_Half);
                else if (100 == speed)
-                       cmd->advertising &=
+                       advertising &=
                                ~(ADVERTISED_10baseT_Full |
                                ADVERTISED_10baseT_Half);
-               if (0 == cmd->duplex)
-                       cmd->advertising &=
+               if (0 == cmd->base.duplex)
+                       advertising &=
                                ~(ADVERTISED_100baseT_Full |
                                ADVERTISED_10baseT_Full);
-               else if (1 == cmd->duplex)
-                       cmd->advertising &=
+               else if (1 == cmd->base.duplex)
+                       advertising &=
                                ~(ADVERTISED_100baseT_Half |
                                ADVERTISED_10baseT_Half);
        }
        mutex_lock(&hw_priv->lock);
-       if (cmd->autoneg &&
-                       (cmd->advertising & ADVERTISED_ALL) ==
-                       ADVERTISED_ALL) {
+       if (cmd->base.autoneg &&
+           (advertising & ADVERTISED_ALL) == ADVERTISED_ALL) {
                port->duplex = 0;
                port->speed = 0;
                port->force_link = 0;
        } else {
-               port->duplex = cmd->duplex + 1;
+               port->duplex = cmd->base.duplex + 1;
                if (1000 != speed)
                        port->speed = speed;
-               if (cmd->autoneg)
+               if (cmd->base.autoneg)
                        port->force_link = 0;
                else
                        port->force_link = 1;
        }
-       rc = mii_ethtool_sset(&priv->mii_if, cmd);
+
+       memcpy(&copy_cmd, cmd, sizeof(copy_cmd));
+       ethtool_convert_legacy_u32_to_link_mode(copy_cmd.link_modes.advertising,
+                                               advertising);
+       rc = mii_ethtool_set_link_ksettings(
+               &priv->mii_if,
+               (const struct ethtool_link_ksettings *)&copy_cmd);
        mutex_unlock(&hw_priv->lock);
        return rc;
 }
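The ksz884x workaround above is written against the legacy ADVERTISED_* u32 masks, and the handler can no longer edit the request in place: cmd is const and the advertised modes now live in a bitmap. The pattern, sketched below assuming a 10/100-only MAC (the u32 conversion is lossy for modes without a legacy bit, which is harmless for such hardware), is to collapse the bitmap to a u32, adjust it, and expand it back into a local copy:

static int example_set(struct net_device *dev,
                       const struct ethtool_link_ksettings *cmd)
{
        struct ethtool_link_ksettings copy = *cmd;
        u32 advertising;

        /* bitmap -> legacy mask */
        ethtool_convert_link_mode_to_legacy_u32(&advertising,
                                                cmd->link_modes.advertising);
        advertising &= ~ADVERTISED_100baseT_Half;       /* example edit */

        /* edited mask -> bitmap, in a writable copy */
        ethtool_convert_legacy_u32_to_link_mode(copy.link_modes.advertising,
                                                advertising);
        return 0;       /* a real driver would now hand &copy to its helper */
}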
@@ -6597,8 +6611,6 @@ static int netdev_set_features(struct net_device *dev,
 }
 
 static const struct ethtool_ops netdev_ethtool_ops = {
-       .get_settings           = netdev_get_settings,
-       .set_settings           = netdev_set_settings,
        .nway_reset             = netdev_nway_reset,
        .get_link               = netdev_get_link,
        .get_drvinfo            = netdev_get_drvinfo,
@@ -6617,6 +6629,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
        .get_strings            = netdev_get_strings,
        .get_sset_count         = netdev_get_sset_count,
        .get_ethtool_stats      = netdev_get_ethtool_stats,
+       .get_link_ksettings     = netdev_get_link_ksettings,
+       .set_link_ksettings     = netdev_set_link_ksettings,
 };
 
 /*
index 045b9106c0ff50ba46acde30c22f46dab36e4fd8..f6ecfa778660f81293420e2301ebbd3f320b6891 100644 (file)
@@ -1487,27 +1487,30 @@ enc28j60_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 }
 
 static int
-enc28j60_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+enc28j60_get_link_ksettings(struct net_device *dev,
+                           struct ethtool_link_ksettings *cmd)
 {
        struct enc28j60_net *priv = netdev_priv(dev);
 
-       cmd->transceiver = XCVR_INTERNAL;
-       cmd->supported  = SUPPORTED_10baseT_Half
-                       | SUPPORTED_10baseT_Full
-                       | SUPPORTED_TP;
-       ethtool_cmd_speed_set(cmd,  SPEED_10);
-       cmd->duplex     = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
-       cmd->port       = PORT_TP;
-       cmd->autoneg    = AUTONEG_DISABLE;
+       ethtool_link_ksettings_zero_link_mode(cmd, supported);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Full);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+
+       cmd->base.speed = SPEED_10;
+       cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+       cmd->base.port  = PORT_TP;
+       cmd->base.autoneg = AUTONEG_DISABLE;
 
        return 0;
 }
 
 static int
-enc28j60_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+enc28j60_set_link_ksettings(struct net_device *dev,
+                           const struct ethtool_link_ksettings *cmd)
 {
-       return enc28j60_setlink(dev, cmd->autoneg,
-                               ethtool_cmd_speed(cmd), cmd->duplex);
+       return enc28j60_setlink(dev, cmd->base.autoneg,
+                               cmd->base.speed, cmd->base.duplex);
 }
 
 static u32 enc28j60_get_msglevel(struct net_device *dev)
@@ -1523,11 +1526,11 @@ static void enc28j60_set_msglevel(struct net_device *dev, u32 val)
 }
 
 static const struct ethtool_ops enc28j60_ethtool_ops = {
-       .get_settings   = enc28j60_get_settings,
-       .set_settings   = enc28j60_set_settings,
        .get_drvinfo    = enc28j60_get_drvinfo,
        .get_msglevel   = enc28j60_get_msglevel,
        .set_msglevel   = enc28j60_set_msglevel,
+       .get_link_ksettings = enc28j60_get_link_ksettings,
+       .set_link_ksettings = enc28j60_set_link_ksettings,
 };
 
 static int enc28j60_chipset_init(struct net_device *dev)
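For a fixed-capability MAC like the ENC28J60 above, the new get handler is a direct move of the scalar fields into cmd->base, while the supported mask is built bit by bit with the type-safe helper macros: zero the bitmap first, then add individual modes. A condensed sketch of that pattern:

static int example_get(struct net_device *dev,
                       struct ethtool_link_ksettings *cmd)
{
        /* start from an empty supported bitmap, then set known modes */
        ethtool_link_ksettings_zero_link_mode(cmd, supported);
        ethtool_link_ksettings_add_link_mode(cmd, supported, 10baseT_Half);
        ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

        cmd->base.speed = SPEED_10;
        cmd->base.duplex = DUPLEX_HALF;
        cmd->base.port = PORT_TP;
        cmd->base.autoneg = AUTONEG_DISABLE;
        return 0;
}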
index fbce6166504e480949f46982390668dff9af4659..f831238d9793abe8b90b36b1e521cb3943230e2c 100644 (file)
@@ -940,29 +940,33 @@ static void encx24j600_get_drvinfo(struct net_device *dev,
                sizeof(info->bus_info));
 }
 
-static int encx24j600_get_settings(struct net_device *dev,
-                                  struct ethtool_cmd *cmd)
+static int encx24j600_get_link_ksettings(struct net_device *dev,
+                                        struct ethtool_link_ksettings *cmd)
 {
        struct encx24j600_priv *priv = netdev_priv(dev);
+       u32 supported;
 
-       cmd->transceiver = XCVR_INTERNAL;
-       cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
+       supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
                         SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
                         SUPPORTED_Autoneg | SUPPORTED_TP;
 
-       ethtool_cmd_speed_set(cmd, priv->speed);
-       cmd->duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
-       cmd->port = PORT_TP;
-       cmd->autoneg = priv->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+
+       cmd->base.speed = priv->speed;
+       cmd->base.duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
+       cmd->base.port = PORT_TP;
+       cmd->base.autoneg = priv->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
 
        return 0;
 }
 
-static int encx24j600_set_settings(struct net_device *dev,
-                                  struct ethtool_cmd *cmd)
+static int
+encx24j600_set_link_ksettings(struct net_device *dev,
+                             const struct ethtool_link_ksettings *cmd)
 {
-       return encx24j600_setlink(dev, cmd->autoneg,
-                                 ethtool_cmd_speed(cmd), cmd->duplex);
+       return encx24j600_setlink(dev, cmd->base.autoneg,
+                                 cmd->base.speed, cmd->base.duplex);
 }
 
 static u32 encx24j600_get_msglevel(struct net_device *dev)
@@ -980,13 +984,13 @@ static void encx24j600_set_msglevel(struct net_device *dev, u32 val)
 }
 
 static const struct ethtool_ops encx24j600_ethtool_ops = {
-       .get_settings = encx24j600_get_settings,
-       .set_settings = encx24j600_set_settings,
        .get_drvinfo = encx24j600_get_drvinfo,
        .get_msglevel = encx24j600_get_msglevel,
        .set_msglevel = encx24j600_set_msglevel,
        .get_regs_len = encx24j600_get_regs_len,
        .get_regs = encx24j600_get_regs,
+       .get_link_ksettings = encx24j600_get_link_ksettings,
+       .set_link_ksettings = encx24j600_set_link_ksettings,
 };
 
 static const struct net_device_ops encx24j600_netdev_ops = {
index 9774b50cff6e6b3e9a71b06198bbddbe5aa503e5..06c9f4100cb9bd8c0abecada5fa922c7e779fc51 100644 (file)
@@ -269,7 +269,7 @@ rx_next:
        }
 
        if (rx < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rx);
        }
 
        priv->reg_imr |= RPKT_FINISH_M;
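napi_complete() is replaced here by napi_complete_done(), which reports how much work the poll actually did; the core uses that count (for instance to gate the gro_flush_timeout heuristic), and napi_complete() itself is just the variant that reports 0. The conversion keeps the usual poll shape, sketched with a hypothetical example_clean_rx() helper:

static int example_poll(struct napi_struct *napi, int budget)
{
        int work = example_clean_rx(napi, budget);      /* hypothetical */

        /* only complete when the budget was not exhausted */
        if (work < budget)
                napi_complete_done(napi, work);
        return work;
}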
@@ -436,7 +436,7 @@ static void moxart_mac_set_rx_mode(struct net_device *ndev)
        spin_unlock_irq(&priv->txlock);
 }
 
-static struct net_device_ops moxart_netdev_ops = {
+static const struct net_device_ops moxart_netdev_ops = {
        .ndo_open               = moxart_mac_open,
        .ndo_stop               = moxart_mac_stop,
        .ndo_start_xmit         = moxart_mac_start_xmit,
index e506ca876d0d319aaf70abdd72792bb2a0224964..b171ed2015fe479b6d7d099f14e188a3dd8cda00 100644 (file)
@@ -191,21 +191,6 @@ struct myri10ge_slice_state {
        int cpu;
        __be32 __iomem *dca_tag;
 #endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       unsigned int state;
-#define SLICE_STATE_IDLE       0
-#define SLICE_STATE_NAPI       1       /* NAPI owns this slice */
-#define SLICE_STATE_POLL       2       /* poll owns this slice */
-#define SLICE_LOCKED (SLICE_STATE_NAPI | SLICE_STATE_POLL)
-#define SLICE_STATE_NAPI_YIELD 4       /* NAPI yielded this slice */
-#define SLICE_STATE_POLL_YIELD 8       /* poll yielded this slice */
-#define SLICE_USER_PEND (SLICE_STATE_POLL | SLICE_STATE_POLL_YIELD)
-       spinlock_t lock;
-       unsigned long lock_napi_yield;
-       unsigned long lock_poll_yield;
-       unsigned long busy_poll_miss;
-       unsigned long busy_poll_cnt;
-#endif /* CONFIG_NET_RX_BUSY_POLL */
        char irq_desc[32];
 };
 
@@ -378,8 +363,8 @@ static inline void put_be32(__be32 val, __be32 __iomem * p)
        __raw_writel((__force __u32) val, (__force void __iomem *)p);
 }
 
-static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
-                                                   struct rtnl_link_stats64 *stats);
+static void myri10ge_get_stats(struct net_device *dev,
+                              struct rtnl_link_stats64 *stats);
 
 static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
 {
@@ -925,92 +910,6 @@ abort:
        return status;
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
-{
-       spin_lock_init(&ss->lock);
-       ss->state = SLICE_STATE_IDLE;
-}
-
-static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
-{
-       bool rc = true;
-       spin_lock(&ss->lock);
-       if ((ss->state & SLICE_LOCKED)) {
-               WARN_ON((ss->state & SLICE_STATE_NAPI));
-               ss->state |= SLICE_STATE_NAPI_YIELD;
-               rc = false;
-               ss->lock_napi_yield++;
-       } else
-               ss->state = SLICE_STATE_NAPI;
-       spin_unlock(&ss->lock);
-       return rc;
-}
-
-static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
-{
-       spin_lock(&ss->lock);
-       WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD)));
-       ss->state = SLICE_STATE_IDLE;
-       spin_unlock(&ss->lock);
-}
-
-static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
-{
-       bool rc = true;
-       spin_lock_bh(&ss->lock);
-       if ((ss->state & SLICE_LOCKED)) {
-               ss->state |= SLICE_STATE_POLL_YIELD;
-               rc = false;
-               ss->lock_poll_yield++;
-       } else
-               ss->state |= SLICE_STATE_POLL;
-       spin_unlock_bh(&ss->lock);
-       return rc;
-}
-
-static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
-{
-       spin_lock_bh(&ss->lock);
-       WARN_ON((ss->state & SLICE_STATE_NAPI));
-       ss->state = SLICE_STATE_IDLE;
-       spin_unlock_bh(&ss->lock);
-}
-
-static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
-{
-       WARN_ON(!(ss->state & SLICE_LOCKED));
-       return (ss->state & SLICE_USER_PEND);
-}
-#else /* CONFIG_NET_RX_BUSY_POLL */
-static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
-{
-}
-
-static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
-{
-       return false;
-}
-
-static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
-{
-}
-
-static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
-{
-       return false;
-}
-
-static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
-{
-}
-
-static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
-{
-       return false;
-}
-#endif
-
 static int myri10ge_reset(struct myri10ge_priv *mgp)
 {
        struct myri10ge_cmd cmd;
@@ -1426,7 +1325,6 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
        struct pci_dev *pdev = mgp->pdev;
        struct net_device *dev = mgp->dev;
        u8 *va;
-       bool polling;
 
        if (len <= mgp->small_bytes) {
                rx = &ss->rx_small;
@@ -1441,15 +1339,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
        va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
        prefetch(va);
 
-       /* When busy polling in user context, allocate skb and copy headers to
-        * skb's linear memory ourselves.  When not busy polling, use the napi
-        * gro api.
-        */
-       polling = myri10ge_ss_busy_polling(ss);
-       if (polling)
-               skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
-       else
-               skb = napi_get_frags(&ss->napi);
+       skb = napi_get_frags(&ss->napi);
        if (unlikely(skb == NULL)) {
                ss->stats.rx_dropped++;
                for (i = 0, remainder = len; remainder > 0; i++) {
@@ -1489,27 +1379,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
        myri10ge_vlan_rx(mgp->dev, va, skb);
        skb_record_rx_queue(skb, ss - &mgp->ss[0]);
 
-       if (polling) {
-               int hlen;
-
-               /* myri10ge_vlan_rx might have moved the header, so compute
-                * length and address again.
-                */
-               hlen = MYRI10GE_HLEN > skb->len ? skb->len : MYRI10GE_HLEN;
-               va = page_address(skb_frag_page(&rx_frags[0])) +
-                       rx_frags[0].page_offset;
-               /* Copy header into the skb linear memory */
-               skb_copy_to_linear_data(skb, va, hlen);
-               rx_frags[0].page_offset += hlen;
-               rx_frags[0].size -= hlen;
-               skb->data_len -= hlen;
-               skb->tail += hlen;
-               skb->protocol = eth_type_trans(skb, dev);
-               skb_mark_napi_id(skb, &ss->napi);
-               netif_receive_skb(skb);
-       }
-       else
-               napi_gro_frags(&ss->napi);
+       napi_gro_frags(&ss->napi);
 
        return 1;
 }
@@ -1669,49 +1539,16 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
        if (ss->mgp->dca_enabled)
                myri10ge_update_dca(ss);
 #endif
-       /* Try later if the busy_poll handler is running. */
-       if (!myri10ge_ss_lock_napi(ss))
-               return budget;
-
        /* process as many rx events as NAPI will allow */
        work_done = myri10ge_clean_rx_done(ss, budget);
 
-       myri10ge_ss_unlock_napi(ss);
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                put_be32(htonl(3), ss->irq_claim);
        }
        return work_done;
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int myri10ge_busy_poll(struct napi_struct *napi)
-{
-       struct myri10ge_slice_state *ss =
-           container_of(napi, struct myri10ge_slice_state, napi);
-       struct myri10ge_priv *mgp = ss->mgp;
-       int work_done;
-
-       /* Poll only when the link is up */
-       if (mgp->link_state != MXGEFW_LINK_UP)
-               return LL_FLUSH_FAILED;
-
-       if (!myri10ge_ss_lock_poll(ss))
-               return LL_FLUSH_BUSY;
-
-       /* Process a small number of packets */
-       work_done = myri10ge_clean_rx_done(ss, 4);
-       if (work_done)
-               ss->busy_poll_cnt += work_done;
-       else
-               ss->busy_poll_miss++;
-
-       myri10ge_ss_unlock_poll(ss);
-
-       return work_done;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 static irqreturn_t myri10ge_intr(int irq, void *arg)
 {
        struct myri10ge_slice_state *ss = arg;
@@ -1773,15 +1610,16 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
 }
 
 static int
-myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+myri10ge_get_link_ksettings(struct net_device *netdev,
+                           struct ethtool_link_ksettings *cmd)
 {
        struct myri10ge_priv *mgp = netdev_priv(netdev);
        char *ptr;
        int i;
 
-       cmd->autoneg = AUTONEG_DISABLE;
-       ethtool_cmd_speed_set(cmd, SPEED_10000);
-       cmd->duplex = DUPLEX_FULL;
+       cmd->base.autoneg = AUTONEG_DISABLE;
+       cmd->base.speed = SPEED_10000;
+       cmd->base.duplex = DUPLEX_FULL;
 
        /*
         * parse the product code to determine the interface type
@@ -1806,16 +1644,12 @@ myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
                ptr++;
        if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
                /* We've found either an XFP, quad ribbon fiber, or SFP+ */
-               cmd->port = PORT_FIBRE;
-               cmd->supported |= SUPPORTED_FIBRE;
-               cmd->advertising |= ADVERTISED_FIBRE;
+               cmd->base.port = PORT_FIBRE;
+               ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
        } else {
-               cmd->port = PORT_OTHER;
+               cmd->base.port = PORT_OTHER;
        }
-       if (*ptr == 'R' || *ptr == 'S')
-               cmd->transceiver = XCVR_EXTERNAL;
-       else
-               cmd->transceiver = XCVR_INTERNAL;
 
        return 0;
 }
@@ -1919,10 +1753,6 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
        "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
        "rx_small_cnt", "rx_big_cnt",
        "wake_queue", "stop_queue", "tx_linearized",
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       "rx_lock_napi_yield", "rx_lock_poll_yield", "rx_busy_poll_miss",
-       "rx_busy_poll_cnt",
-#endif
 };
 
 #define MYRI10GE_NET_STATS_LEN      21
@@ -2022,12 +1852,6 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
                data[i++] = (unsigned int)ss->tx.wake_queue;
                data[i++] = (unsigned int)ss->tx.stop_queue;
                data[i++] = (unsigned int)ss->tx.linearized;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-               data[i++] = ss->lock_napi_yield;
-               data[i++] = ss->lock_poll_yield;
-               data[i++] = ss->busy_poll_miss;
-               data[i++] = ss->busy_poll_cnt;
-#endif
        }
 }
 
@@ -2098,7 +1922,6 @@ myri10ge_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
 }
 
 static const struct ethtool_ops myri10ge_ethtool_ops = {
-       .get_settings = myri10ge_get_settings,
        .get_drvinfo = myri10ge_get_drvinfo,
        .get_coalesce = myri10ge_get_coalesce,
        .set_coalesce = myri10ge_set_coalesce,
@@ -2112,6 +1935,7 @@ static const struct ethtool_ops myri10ge_ethtool_ops = {
        .set_msglevel = myri10ge_set_msglevel,
        .get_msglevel = myri10ge_get_msglevel,
        .set_phys_id = myri10ge_phys_id,
+       .get_link_ksettings = myri10ge_get_link_ksettings,
 };
 
 static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
@@ -2589,9 +2413,6 @@ static int myri10ge_open(struct net_device *dev)
                        goto abort_with_rings;
                }
 
-               /* Initialize the slice spinlock and state used for polling */
-               myri10ge_ss_init_lock(ss);
-
                /* must happen prior to any irq */
                napi_enable(&(ss)->napi);
        }
@@ -2668,19 +2489,9 @@ static int myri10ge_close(struct net_device *dev)
 
        del_timer_sync(&mgp->watchdog_timer);
        mgp->running = MYRI10GE_ETH_STOPPING;
-       for (i = 0; i < mgp->num_slices; i++) {
+       for (i = 0; i < mgp->num_slices; i++)
                napi_disable(&mgp->ss[i].napi);
-               local_bh_disable(); /* myri10ge_ss_lock_napi needs this */
-               /* Lock the slice to prevent the busy_poll handler from
-                * accessing it.  Later when we bring the NIC up, myri10ge_open
-                * resets the slice including this lock.
-                */
-               while (!myri10ge_ss_lock_napi(&mgp->ss[i])) {
-                       pr_info("Slice %d locked\n", i);
-                       mdelay(1);
-               }
-               local_bh_enable();
-       }
+
        netif_carrier_off(dev);
 
        netif_tx_stop_all_queues(dev);
@@ -3119,8 +2930,8 @@ drop:
        return NETDEV_TX_OK;
 }
 
-static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
-                                                   struct rtnl_link_stats64 *stats)
+static void myri10ge_get_stats(struct net_device *dev,
+                              struct rtnl_link_stats64 *stats)
 {
        const struct myri10ge_priv *mgp = netdev_priv(dev);
        const struct myri10ge_slice_netstats *slice_stats;
@@ -3135,7 +2946,6 @@ static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
                stats->rx_dropped += slice_stats->rx_dropped;
                stats->tx_dropped += slice_stats->tx_dropped;
        }
-       return stats;
 }
 
 static void myri10ge_set_multicast_list(struct net_device *dev)
@@ -3954,9 +3764,6 @@ static const struct net_device_ops myri10ge_netdev_ops = {
        .ndo_change_mtu         = myri10ge_change_mtu,
        .ndo_set_rx_mode        = myri10ge_set_multicast_list,
        .ndo_set_mac_address    = myri10ge_set_mac_address,
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       .ndo_busy_poll          = myri10ge_busy_poll,
-#endif
 };
 
 static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
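The large deletion above removes myri10ge's private busy-poll machinery: the per-slice SLICE_STATE_* locking, the ndo_busy_poll hook, and the skb-copy receive path it required. With busy polling handled in the networking core, where sk_busy_loop() drives the ordinary NAPI poll callback directly, none of this per-driver plumbing is needed; registering the NAPI context is enough for the core to find and busy-poll it. A minimal sketch, assuming a driver-private example_priv and the example_poll() shown earlier:

        /* NAPI_POLL_WEIGHT is the conventional budget; nothing else is
         * required for core busy polling to reach example_poll()
         */
        netif_napi_add(netdev, &priv->napi, example_poll, NAPI_POLL_WEIGHT);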
index 90eac63f9606a446f696bf120379f19776b6fec0..18af2a23a933a9bfa5638f01df34cd35a1e40287 100644 (file)
@@ -640,8 +640,10 @@ static int netdev_set_wol(struct net_device *dev, u32 newval);
 static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
 static int netdev_set_sopass(struct net_device *dev, u8 *newval);
 static int netdev_get_sopass(struct net_device *dev, u8 *data);
-static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
-static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd);
+static int netdev_get_ecmd(struct net_device *dev,
+                          struct ethtool_link_ksettings *ecmd);
+static int netdev_set_ecmd(struct net_device *dev,
+                          const struct ethtool_link_ksettings *ecmd);
 static void enable_wol_mode(struct net_device *dev, int enable_intr);
 static int netdev_close(struct net_device *dev);
 static int netdev_get_regs(struct net_device *dev, u8 *buf);
@@ -2265,7 +2267,7 @@ static int natsemi_poll(struct napi_struct *napi, int budget)
                np->intr_status = readl(ioaddr + IntrStatus);
        } while (np->intr_status);
 
-       napi_complete(napi);
+       napi_complete_done(napi, work_done);
 
        /* Reenable interrupts providing nothing is trying to shut
         * the chip down. */
@@ -2584,7 +2586,8 @@ static int get_eeprom_len(struct net_device *dev)
        return np->eeprom_size;
 }
 
-static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int get_link_ksettings(struct net_device *dev,
+                             struct ethtool_link_ksettings *ecmd)
 {
        struct netdev_private *np = netdev_priv(dev);
        spin_lock_irq(&np->lock);
@@ -2593,7 +2596,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
        return 0;
 }
 
-static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int set_link_ksettings(struct net_device *dev,
+                             const struct ethtool_link_ksettings *ecmd)
 {
        struct netdev_private *np = netdev_priv(dev);
        int res;
@@ -2689,8 +2693,6 @@ static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo = get_drvinfo,
        .get_regs_len = get_regs_len,
        .get_eeprom_len = get_eeprom_len,
-       .get_settings = get_settings,
-       .set_settings = set_settings,
        .get_wol = get_wol,
        .set_wol = set_wol,
        .get_regs = get_regs,
@@ -2699,6 +2701,8 @@ static const struct ethtool_ops ethtool_ops = {
        .nway_reset = nway_reset,
        .get_link = get_link,
        .get_eeprom = get_eeprom,
+       .get_link_ksettings = get_link_ksettings,
+       .set_link_ksettings = set_link_ksettings,
 };
 
 static int netdev_set_wol(struct net_device *dev, u32 newval)
@@ -2828,29 +2832,32 @@ static int netdev_get_sopass(struct net_device *dev, u8 *data)
        return 0;
 }
 
-static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int netdev_get_ecmd(struct net_device *dev,
+                          struct ethtool_link_ksettings *ecmd)
 {
        struct netdev_private *np = netdev_priv(dev);
+       u32 supported, advertising;
        u32 tmp;
 
-       ecmd->port        = dev->if_port;
-       ethtool_cmd_speed_set(ecmd, np->speed);
-       ecmd->duplex      = np->duplex;
-       ecmd->autoneg     = np->autoneg;
-       ecmd->advertising = 0;
+       ecmd->base.port   = dev->if_port;
+       ecmd->base.speed  = np->speed;
+       ecmd->base.duplex = np->duplex;
+       ecmd->base.autoneg = np->autoneg;
+       advertising = 0;
+
        if (np->advertising & ADVERTISE_10HALF)
-               ecmd->advertising |= ADVERTISED_10baseT_Half;
+               advertising |= ADVERTISED_10baseT_Half;
        if (np->advertising & ADVERTISE_10FULL)
-               ecmd->advertising |= ADVERTISED_10baseT_Full;
+               advertising |= ADVERTISED_10baseT_Full;
        if (np->advertising & ADVERTISE_100HALF)
-               ecmd->advertising |= ADVERTISED_100baseT_Half;
+               advertising |= ADVERTISED_100baseT_Half;
        if (np->advertising & ADVERTISE_100FULL)
-               ecmd->advertising |= ADVERTISED_100baseT_Full;
-       ecmd->supported   = (SUPPORTED_Autoneg |
+               advertising |= ADVERTISED_100baseT_Full;
+       supported   = (SUPPORTED_Autoneg |
                SUPPORTED_10baseT_Half  | SUPPORTED_10baseT_Full  |
                SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
                SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
-       ecmd->phy_address = np->phy_addr_external;
+       ecmd->base.phy_address = np->phy_addr_external;
        /*
         * We intentionally report the phy address of the external
         * phy, even if the internal phy is used. This is necessary
@@ -2870,62 +2877,70 @@ static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
         */
 
        /* set information based on active port type */
-       switch (ecmd->port) {
+       switch (ecmd->base.port) {
        default:
        case PORT_TP:
-               ecmd->advertising |= ADVERTISED_TP;
-               ecmd->transceiver = XCVR_INTERNAL;
+               advertising |= ADVERTISED_TP;
                break;
        case PORT_MII:
-               ecmd->advertising |= ADVERTISED_MII;
-               ecmd->transceiver = XCVR_EXTERNAL;
+               advertising |= ADVERTISED_MII;
                break;
        case PORT_FIBRE:
-               ecmd->advertising |= ADVERTISED_FIBRE;
-               ecmd->transceiver = XCVR_EXTERNAL;
+               advertising |= ADVERTISED_FIBRE;
                break;
        }
 
        /* if autonegotiation is on, try to return the active speed/duplex */
-       if (ecmd->autoneg == AUTONEG_ENABLE) {
-               ecmd->advertising |= ADVERTISED_Autoneg;
+       if (ecmd->base.autoneg == AUTONEG_ENABLE) {
+               advertising |= ADVERTISED_Autoneg;
                tmp = mii_nway_result(
                        np->advertising & mdio_read(dev, MII_LPA));
                if (tmp == LPA_100FULL || tmp == LPA_100HALF)
-                       ethtool_cmd_speed_set(ecmd, SPEED_100);
+                       ecmd->base.speed = SPEED_100;
                else
-                       ethtool_cmd_speed_set(ecmd, SPEED_10);
+                       ecmd->base.speed = SPEED_10;
                if (tmp == LPA_100FULL || tmp == LPA_10FULL)
-                       ecmd->duplex = DUPLEX_FULL;
+                       ecmd->base.duplex = DUPLEX_FULL;
                else
-                       ecmd->duplex = DUPLEX_HALF;
+                       ecmd->base.duplex = DUPLEX_HALF;
        }
 
        /* ignore maxtxpkt, maxrxpkt for now */
 
+       ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
+                                               advertising);
+
        return 0;
 }
 
-static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int netdev_set_ecmd(struct net_device *dev,
+                          const struct ethtool_link_ksettings *ecmd)
 {
        struct netdev_private *np = netdev_priv(dev);
+       u32 advertising;
 
-       if (ecmd->port != PORT_TP && ecmd->port != PORT_MII && ecmd->port != PORT_FIBRE)
-               return -EINVAL;
-       if (ecmd->transceiver != XCVR_INTERNAL && ecmd->transceiver != XCVR_EXTERNAL)
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               ecmd->link_modes.advertising);
+
+       if (ecmd->base.port != PORT_TP &&
+           ecmd->base.port != PORT_MII &&
+           ecmd->base.port != PORT_FIBRE)
                return -EINVAL;
-       if (ecmd->autoneg == AUTONEG_ENABLE) {
-               if ((ecmd->advertising & (ADVERTISED_10baseT_Half |
+       if (ecmd->base.autoneg == AUTONEG_ENABLE) {
+               if ((advertising & (ADVERTISED_10baseT_Half |
                                          ADVERTISED_10baseT_Full |
                                          ADVERTISED_100baseT_Half |
                                          ADVERTISED_100baseT_Full)) == 0) {
                        return -EINVAL;
                }
-       } else if (ecmd->autoneg == AUTONEG_DISABLE) {
-               u32 speed = ethtool_cmd_speed(ecmd);
+       } else if (ecmd->base.autoneg == AUTONEG_DISABLE) {
+               u32 speed = ecmd->base.speed;
                if (speed != SPEED_10 && speed != SPEED_100)
                        return -EINVAL;
-               if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+               if (ecmd->base.duplex != DUPLEX_HALF &&
+                   ecmd->base.duplex != DUPLEX_FULL)
                        return -EINVAL;
        } else {
                return -EINVAL;
@@ -2936,8 +2951,8 @@ static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
         * transceiver are really not going to work so don't let the
         * user select them.
         */
-       if (np->ignore_phy && (ecmd->autoneg == AUTONEG_ENABLE ||
-                              ecmd->port == PORT_TP))
+       if (np->ignore_phy && (ecmd->base.autoneg == AUTONEG_ENABLE ||
+                              ecmd->base.port == PORT_TP))
                return -EINVAL;
 
        /*
@@ -2956,30 +2971,30 @@ static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
        /* WHEW! now lets bang some bits */
 
        /* save the parms */
-       dev->if_port          = ecmd->port;
-       np->autoneg           = ecmd->autoneg;
-       np->phy_addr_external = ecmd->phy_address & PhyAddrMask;
+       dev->if_port          = ecmd->base.port;
+       np->autoneg           = ecmd->base.autoneg;
+       np->phy_addr_external = ecmd->base.phy_address & PhyAddrMask;
        if (np->autoneg == AUTONEG_ENABLE) {
                /* advertise only what has been requested */
                np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
-               if (ecmd->advertising & ADVERTISED_10baseT_Half)
+               if (advertising & ADVERTISED_10baseT_Half)
                        np->advertising |= ADVERTISE_10HALF;
-               if (ecmd->advertising & ADVERTISED_10baseT_Full)
+               if (advertising & ADVERTISED_10baseT_Full)
                        np->advertising |= ADVERTISE_10FULL;
-               if (ecmd->advertising & ADVERTISED_100baseT_Half)
+               if (advertising & ADVERTISED_100baseT_Half)
                        np->advertising |= ADVERTISE_100HALF;
-               if (ecmd->advertising & ADVERTISED_100baseT_Full)
+               if (advertising & ADVERTISED_100baseT_Full)
                        np->advertising |= ADVERTISE_100FULL;
        } else {
-               np->speed  = ethtool_cmd_speed(ecmd);
-               np->duplex = ecmd->duplex;
+               np->speed  = ecmd->base.speed;
+               np->duplex = ecmd->base.duplex;
                /* user overriding the initial full duplex parm? */
                if (np->duplex == DUPLEX_HALF)
                        np->full_duplex = 0;
        }
 
        /* get the right phy enabled */
-       if (ecmd->port == PORT_TP)
+       if (ecmd->base.port == PORT_TP)
                switch_port_internal(dev);
        else
                switch_port_external(dev);
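One detail of the natsemi conversion above recurs across this series: the legacy -EINVAL check on ecmd->transceiver disappears rather than being translated, because the link_ksettings request used here carries no transceiver field to validate. What remains is checking base.port, base.autoneg, base.speed and base.duplex plus the converted advertising mask, roughly along these lines (a sketch for a 10/100 device, not this driver's exact logic):

static int example_validate(const struct ethtool_link_ksettings *cmd)
{
        u32 advertising;

        ethtool_convert_link_mode_to_legacy_u32(&advertising,
                                                cmd->link_modes.advertising);

        if (cmd->base.autoneg == AUTONEG_ENABLE)
                return advertising ? 0 : -EINVAL;
        if (cmd->base.speed != SPEED_10 && cmd->base.speed != SPEED_100)
                return -EINVAL;
        if (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL)
                return -EINVAL;
        return 0;
}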
index f9d2eb9a920a8720b83cf95d12fabe0a9a44ebb3..729095db3e08680b07489f7cb46eb6784a722394 100644 (file)
@@ -1217,12 +1217,13 @@ static struct net_device_stats *ns83820_get_stats(struct net_device *ndev)
 }
 
 /* Let ethtool retrieve info */
-static int ns83820_get_settings(struct net_device *ndev,
-                               struct ethtool_cmd *cmd)
+static int ns83820_get_link_ksettings(struct net_device *ndev,
+                                     struct ethtool_link_ksettings *cmd)
 {
        struct ns83820 *dev = PRIV(ndev);
        u32 cfg, tanar, tbicr;
        int fullduplex   = 0;
+       u32 supported;
 
        /*
         * Here's the list of available ethtool commands from other drivers:
@@ -1244,44 +1245,47 @@ static int ns83820_get_settings(struct net_device *ndev,
 
        fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
 
-       cmd->supported = SUPPORTED_Autoneg;
+       supported = SUPPORTED_Autoneg;
 
        if (dev->CFG_cache & CFG_TBI_EN) {
                /* we have optical interface */
-               cmd->supported |= SUPPORTED_1000baseT_Half |
+               supported |= SUPPORTED_1000baseT_Half |
                                        SUPPORTED_1000baseT_Full |
                                        SUPPORTED_FIBRE;
-               cmd->port       = PORT_FIBRE;
+               cmd->base.port       = PORT_FIBRE;
        } else {
                /* we have copper */
-               cmd->supported |= SUPPORTED_10baseT_Half |
+               supported |= SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half |
                        SUPPORTED_1000baseT_Full |
                        SUPPORTED_MII;
-               cmd->port = PORT_MII;
+               cmd->base.port = PORT_MII;
        }
 
-       cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF;
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+
+       cmd->base.duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF;
        switch (cfg / CFG_SPDSTS0 & 3) {
        case 2:
-               ethtool_cmd_speed_set(cmd, SPEED_1000);
+               cmd->base.speed = SPEED_1000;
                break;
        case 1:
-               ethtool_cmd_speed_set(cmd, SPEED_100);
+               cmd->base.speed = SPEED_100;
                break;
        default:
-               ethtool_cmd_speed_set(cmd, SPEED_10);
+               cmd->base.speed = SPEED_10;
                break;
        }
-       cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE)
+       cmd->base.autoneg = (tbicr & TBICR_MR_AN_ENABLE)
                ? AUTONEG_ENABLE : AUTONEG_DISABLE;
        return 0;
 }
 
 /* Let ethtool change settings */
-static int ns83820_set_settings(struct net_device *ndev,
-                               struct ethtool_cmd *cmd)
+static int ns83820_set_link_ksettings(struct net_device *ndev,
+                                     const struct ethtool_link_ksettings *cmd)
 {
        struct ns83820 *dev = PRIV(ndev);
        u32 cfg, tanar;
@@ -1306,10 +1310,10 @@ static int ns83820_set_settings(struct net_device *ndev,
        spin_lock(&dev->tx_lock);
 
        /* Set duplex */
-       if (cmd->duplex != fullduplex) {
+       if (cmd->base.duplex != fullduplex) {
                if (have_optical) {
                        /*set full duplex*/
-                       if (cmd->duplex == DUPLEX_FULL) {
+                       if (cmd->base.duplex == DUPLEX_FULL) {
                                /* force full duplex */
                                writel(readl(dev->base + TXCFG)
                                        | TXCFG_CSI | TXCFG_HBI | TXCFG_ATP,
@@ -1333,7 +1337,7 @@ static int ns83820_set_settings(struct net_device *ndev,
 
        /* Set autonegotiation */
        if (1) {
-               if (cmd->autoneg == AUTONEG_ENABLE) {
+               if (cmd->base.autoneg == AUTONEG_ENABLE) {
                        /* restart auto negotiation */
                        writel(TBICR_MR_AN_ENABLE | TBICR_MR_RESTART_AN,
                                dev->base + TBICR);
@@ -1348,7 +1352,7 @@ static int ns83820_set_settings(struct net_device *ndev,
                }
 
                printk(KERN_INFO "%s: autoneg %s via ethtool\n", ndev->name,
-                               cmd->autoneg ? "ENABLED" : "DISABLED");
+                               cmd->base.autoneg ? "ENABLED" : "DISABLED");
        }
 
        phy_intr(ndev);
@@ -1375,10 +1379,10 @@ static u32 ns83820_get_link(struct net_device *ndev)
 }
 
 static const struct ethtool_ops ops = {
-       .get_settings    = ns83820_get_settings,
-       .set_settings    = ns83820_set_settings,
        .get_drvinfo     = ns83820_get_drvinfo,
-       .get_link        = ns83820_get_link
+       .get_link        = ns83820_get_link,
+       .get_link_ksettings = ns83820_get_link_ksettings,
+       .set_link_ksettings = ns83820_set_link_ksettings,
 };
 
 static inline void ns83820_disable_interrupts(struct ns83820 *dev)
index 564f682fa4dc326dba8f5bc142a62c881479ed12..c5c1d0e0c16fbd57e7f848c5d738fff1df054dcb 100644 (file)
@@ -2783,7 +2783,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
        s2io_chk_rx_buffers(nic, ring);
 
        if (pkts_processed < budget_org) {
-               napi_complete(napi);
+               napi_complete_done(napi, pkts_processed);
                /*Re Enable MSI-Rx Vector*/
                addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
                addr += 7 - ring->ring_no;
@@ -2817,7 +2817,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
                        break;
        }
        if (pkts_processed < budget_org) {
-               napi_complete(napi);
+               napi_complete_done(napi, pkts_processed);
                /* Re enable the Rx interrupts for the ring */
                writeq(0, &bar0->rx_traffic_mask);
                readl(&bar0->rx_traffic_mask);
@@ -5300,10 +5300,10 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
 }
 
 /**
- * s2io_ethtool_sset - Sets different link parameters.
+ * s2io_ethtool_set_link_ksettings - Sets different link parameters.
  * @sp : private member of the device structure, which is a pointer to the
  * s2io_nic structure.
- * @info: pointer to the structure with parameters given by ethtool to set
+ * @cmd: pointer to the structure with parameters given by ethtool to set
  * link information.
  * Description:
  * The function sets different link parameters provided by the user onto
@@ -5312,13 +5312,14 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
  * 0 on success.
  */
 
-static int s2io_ethtool_sset(struct net_device *dev,
-                            struct ethtool_cmd *info)
+static int
+s2io_ethtool_set_link_ksettings(struct net_device *dev,
+                               const struct ethtool_link_ksettings *cmd)
 {
        struct s2io_nic *sp = netdev_priv(dev);
-       if ((info->autoneg == AUTONEG_ENABLE) ||
-           (ethtool_cmd_speed(info) != SPEED_10000) ||
-           (info->duplex != DUPLEX_FULL))
+       if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
+           (cmd->base.speed != SPEED_10000) ||
+           (cmd->base.duplex != DUPLEX_FULL))
                return -EINVAL;
        else {
                s2io_close(sp->dev);
@@ -5329,10 +5330,10 @@ static int s2io_ethtool_sset(struct net_device *dev,
 }
 
 /**
- * s2io_ethtol_gset - Return link specific information.
+ * s2io_ethtool_get_link_ksettings - Return link specific information.
  * @sp : private member of the device structure, pointer to the
  *      s2io_nic structure.
- * @info : pointer to the structure with parameters given by ethtool
+ * @cmd : pointer to the structure with parameters given by ethtool
  * to return link information.
  * Description:
  * Returns link specific information like speed, duplex etc.. to ethtool.
@@ -5340,25 +5341,31 @@ static int s2io_ethtool_sset(struct net_device *dev,
  * return 0 on success.
  */
 
-static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
+static int
+s2io_ethtool_get_link_ksettings(struct net_device *dev,
+                               struct ethtool_link_ksettings *cmd)
 {
        struct s2io_nic *sp = netdev_priv(dev);
-       info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
-       info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
-       info->port = PORT_FIBRE;
 
-       /* info->transceiver */
-       info->transceiver = XCVR_EXTERNAL;
+       ethtool_link_ksettings_zero_link_mode(cmd, supported);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+
+       ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+       cmd->base.port = PORT_FIBRE;
 
        if (netif_carrier_ok(sp->dev)) {
-               ethtool_cmd_speed_set(info, SPEED_10000);
-               info->duplex = DUPLEX_FULL;
+               cmd->base.speed = SPEED_10000;
+               cmd->base.duplex = DUPLEX_FULL;
        } else {
-               ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
-               info->duplex = DUPLEX_UNKNOWN;
+               cmd->base.speed = SPEED_UNKNOWN;
+               cmd->base.duplex = DUPLEX_UNKNOWN;
        }
 
-       info->autoneg = AUTONEG_DISABLE;
+       cmd->base.autoneg = AUTONEG_DISABLE;
        return 0;
 }
 
@@ -6626,8 +6633,6 @@ static int s2io_set_features(struct net_device *dev, netdev_features_t features)
 }
 
 static const struct ethtool_ops netdev_ethtool_ops = {
-       .get_settings = s2io_ethtool_gset,
-       .set_settings = s2io_ethtool_sset,
        .get_drvinfo = s2io_ethtool_gdrvinfo,
        .get_regs_len = s2io_ethtool_get_regs_len,
        .get_regs = s2io_ethtool_gregs,
@@ -6643,6 +6648,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
        .set_phys_id = s2io_ethtool_set_led,
        .get_ethtool_stats = s2io_get_ethtool_stats,
        .get_sset_count = s2io_get_sset_count,
+       .get_link_ksettings = s2io_ethtool_get_link_ksettings,
+       .set_link_ksettings = s2io_ethtool_set_link_ksettings,
 };
 
 /**
index 9a2967016c18aa15f307e46c1542c7c1804e3217..db55e6d89cf45b7305c44fab69ac4d21cd90181f 100644 (file)
@@ -38,9 +38,9 @@ static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
 };
 
 /**
- * vxge_ethtool_sset - Sets different link parameters.
+ * vxge_ethtool_set_link_ksettings - Sets different link parameters.
  * @dev: device pointer.
- * @info: pointer to the structure with parameters given by ethtool to set
+ * @cmd: pointer to the structure with parameters given by ethtool to set
  * link information.
  *
  * The function sets different link parameters provided by the user onto
@@ -48,44 +48,51 @@ static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
  * Return value:
  * 0 on success.
  */
-static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
+static int
+vxge_ethtool_set_link_ksettings(struct net_device *dev,
+                               const struct ethtool_link_ksettings *cmd)
 {
        /* We currently only support 10Gb/FULL */
-       if ((info->autoneg == AUTONEG_ENABLE) ||
-           (ethtool_cmd_speed(info) != SPEED_10000) ||
-           (info->duplex != DUPLEX_FULL))
+       if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
+           (cmd->base.speed != SPEED_10000) ||
+           (cmd->base.duplex != DUPLEX_FULL))
                return -EINVAL;
 
        return 0;
 }
 
 /**
- * vxge_ethtool_gset - Return link specific information.
+ * vxge_ethtool_get_link_ksettings - Return link specific information.
  * @dev: device pointer.
- * @info: pointer to the structure with parameters given by ethtool
+ * @cmd: pointer to the structure with parameters given by ethtool
  * to return link information.
  *
  * Returns link specific information like speed, duplex etc.. to ethtool.
  * Return value :
  * return 0 on success.
  */
-static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
+static int vxge_ethtool_get_link_ksettings(struct net_device *dev,
+                                          struct ethtool_link_ksettings *cmd)
 {
-       info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
-       info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
-       info->port = PORT_FIBRE;
+       ethtool_link_ksettings_zero_link_mode(cmd, supported);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
 
-       info->transceiver = XCVR_EXTERNAL;
+       ethtool_link_ksettings_zero_link_mode(cmd, advertising);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseT_Full);
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
+
+       cmd->base.port = PORT_FIBRE;
 
        if (netif_carrier_ok(dev)) {
-               ethtool_cmd_speed_set(info, SPEED_10000);
-               info->duplex = DUPLEX_FULL;
+               cmd->base.speed = SPEED_10000;
+               cmd->base.duplex = DUPLEX_FULL;
        } else {
-               ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
-               info->duplex = DUPLEX_UNKNOWN;
+               cmd->base.speed = SPEED_UNKNOWN;
+               cmd->base.duplex = DUPLEX_UNKNOWN;
        }
 
-       info->autoneg = AUTONEG_DISABLE;
+       cmd->base.autoneg = AUTONEG_DISABLE;
        return 0;
 }
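The disappearance of ethtool_cmd_speed_set() in these 10GbE drivers is not just renaming. The legacy struct ethtool_cmd stored the speed split across two u16 fields (speed and speed_hi), so an accessor was needed to split values above 65535; struct ethtool_link_ksettings keeps a single u32, so a plain assignment suffices:

        /* legacy: ethtool_cmd_speed_set(info, SPEED_10000) split the
         * value into info->speed / info->speed_hi; now it is simply:
         */
        cmd->base.speed = SPEED_10000;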
 
@@ -1126,8 +1133,6 @@ static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
 }
 
 static const struct ethtool_ops vxge_ethtool_ops = {
-       .get_settings           = vxge_ethtool_gset,
-       .set_settings           = vxge_ethtool_sset,
        .get_drvinfo            = vxge_ethtool_gdrvinfo,
        .get_regs_len           = vxge_ethtool_get_regs_len,
        .get_regs               = vxge_ethtool_gregs,
@@ -1139,6 +1144,8 @@ static const struct ethtool_ops vxge_ethtool_ops = {
        .get_sset_count         = vxge_ethtool_get_sset_count,
        .get_ethtool_stats      = vxge_get_ethtool_stats,
        .flash_device           = vxge_fw_flash,
+       .get_link_ksettings     = vxge_ethtool_get_link_ksettings,
+       .set_link_ksettings     = vxge_ethtool_set_link_ksettings,
 };
 
 void vxge_initialize_ethtool_ops(struct net_device *ndev)
index e07b936f64ecc85babfe4eeadaf4b666dcd8d335..6a4310af5d970eae8821357c61701847804d1b72 100644 (file)
@@ -1823,8 +1823,8 @@ static int vxge_poll_msix(struct napi_struct *napi, int budget)
        vxge_hw_vpath_poll_rx(ring->handle);
        pkts_processed = ring->pkts_processed;
 
-       if (ring->pkts_processed < budget_org) {
-               napi_complete(napi);
+       if (pkts_processed < budget_org) {
+               napi_complete_done(napi, pkts_processed);
 
                /* Re enable the Rx interrupts for the vpath */
                vxge_hw_channel_msix_unmask(
@@ -1863,7 +1863,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
        VXGE_COMPLETE_ALL_TX(vdev);
 
        if (pkts_processed < budget_org) {
-               napi_complete(napi);
+               napi_complete_done(napi, pkts_processed);
                /* Re enable the Rx interrupts for the ring */
                vxge_hw_device_unmask_all(hldev);
                vxge_hw_device_flush_io(hldev);
@@ -3111,7 +3111,7 @@ static int vxge_change_mtu(struct net_device *dev, int new_mtu)
  * @stats: pointer to struct rtnl_link_stats64
  *
  */
-static struct rtnl_link_stats64 *
+static void
 vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
 {
        struct vxgedev *vdev = netdev_priv(dev);
@@ -3150,8 +3150,6 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
                net_stats->tx_bytes += bytes;
                net_stats->tx_errors += txstats->tx_errors;
        }
-
-       return net_stats;
 }
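The .ndo_get_stats64 change seen in myri10ge and vxge comes from the same development cycle: the hook now returns void, since every implementation simply returned the stats pointer it was handed. Conversions delete the trailing return and keep the accumulation logic, along these lines (example_priv is a hypothetical private struct):

static void example_get_stats64(struct net_device *dev,
                                struct rtnl_link_stats64 *stats)
{
        const struct example_priv *priv = netdev_priv(dev);

        stats->rx_packets = priv->rx_packets;
        stats->tx_packets = priv->tx_packets;
        /* no return value; the core reads *stats after this call */
}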
 
 static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
index 9508ad782c3062606eb49233fa5424494de332e2..967d7ca8c28cbea3b07ffc6ce8fe6b82b582408d 100644 (file)
@@ -15,21 +15,21 @@ config NET_VENDOR_NETRONOME
 
 if NET_VENDOR_NETRONOME
 
-config NFP_NETVF
-       tristate "Netronome(R) NFP4000/NFP6000 VF NIC driver"
+config NFP
+       tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
        depends on PCI && PCI_MSI
        depends on VXLAN || VXLAN=n
        ---help---
-         This driver supports SR-IOV virtual functions of
-         the Netronome(R) NFP4000/NFP6000 cards working as
-         a advanced Ethernet NIC.
+         This driver supports the Netronome(R) NFP4000/NFP6000 based
+         cards working as an advanced Ethernet NIC.  It works with both
+         SR-IOV physical and virtual functions.
 
-config NFP_NET_DEBUG
-       bool "Debug support for Netronome(R) NFP3200/NFP6000 NIC drivers"
-       depends on NFP_NET || NFP_NETVF
+config NFP_DEBUG
+       bool "Debug support for Netronome(R) NFP4000/NFP6000 NIC drivers"
+       depends on NFP
        ---help---
          Enable extra sanity checks and debugfs support in
-         Netronome(R) NFP3200/NFP6000 NIC PF and VF drivers.
+         Netronome(R) NFP4000/NFP6000 NIC drivers.
          Note: selecting this option may adversely impact
                performance.
 
index dcb7b383f6348137eb39544392bcfd47459a8408..7fb3b84b5556db05dee586bab885cf11339670b9 100644 (file)
@@ -2,4 +2,4 @@
 # Makefile for the Netronome network device drivers
 #
 
-obj-$(CONFIG_NFP_NETVF) += nfp/
+obj-$(CONFIG_NFP) += nfp/
index 0efb2ba9a558f9679aa96362c4a38236a19cdf5e..6933afa69df2e28b8ed24ced53d7108ccc308647 100644 (file)
@@ -1,15 +1,28 @@
-obj-$(CONFIG_NFP_NETVF)        += nfp_netvf.o
+obj-$(CONFIG_NFP)      += nfp.o
 
-nfp_netvf-objs := \
+nfp-objs := \
+           nfpcore/nfp6000_pcie.o \
+           nfpcore/nfp_cppcore.o \
+           nfpcore/nfp_cpplib.o \
+           nfpcore/nfp_hwinfo.o \
+           nfpcore/nfp_mip.o \
+           nfpcore/nfp_nffw.o \
+           nfpcore/nfp_nsp.o \
+           nfpcore/nfp_nsp_eth.o \
+           nfpcore/nfp_resource.o \
+           nfpcore/nfp_rtsym.o \
+           nfpcore/nfp_target.o \
+           nfp_main.o \
            nfp_net_common.o \
            nfp_net_ethtool.o \
            nfp_net_offload.o \
+           nfp_net_main.o \
            nfp_netvf_main.o
 
 ifeq ($(CONFIG_BPF_SYSCALL),y)
-nfp_netvf-objs += \
+nfp-objs += \
            nfp_bpf_verifier.o \
            nfp_bpf_jit.o
 endif
 
-nfp_netvf-$(CONFIG_NFP_NET_DEBUG) += nfp_net_debugfs.o
+nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o
index 76a19f1796af71a338e0f47636ef70dcaa6ae8ba..9513c80f7be5c9bfa2ddc4802ebe6cc508f43c52 100644 (file)
@@ -39,8 +39,6 @@
 #include <linux/list.h>
 #include <linux/types.h>
 
-#define FIELD_FIT(mask, val)  (!((((u64)val) << __bf_shf(mask)) & ~(mask)))
-
 /* For branch fixup logic use up-most byte of branch instruction as scratch
  * area.  Remember to clear this before sending instructions to HW!
  */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
new file mode 100644 (file)
index 0000000..db52b6a
--- /dev/null
@@ -0,0 +1,415 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_main.c
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ *          Alejandro Lucero <alejandro.lucero@netronome.com>
+ *          Jason McMullan <jason.mcmullan@netronome.com>
+ *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/vermagic.h>
+
+#include "nfpcore/nfp.h"
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nsp_eth.h"
+
+#include "nfpcore/nfp6000_pcie.h"
+
+#include "nfp_main.h"
+#include "nfp_net.h"
+
+static const char nfp_driver_name[] = "nfp";
+const char nfp_driver_version[] = VERMAGIC_STRING;
+
+static const struct pci_device_id nfp_pci_device_ids[] = {
+       { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000,
+         PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
+         PCI_ANY_ID, 0,
+       },
+       { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000,
+         PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
+         PCI_ANY_ID, 0,
+       },
+       { 0, } /* Required last entry. */
+};
+MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids);
+
+static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+#ifdef CONFIG_PCI_IOV
+       struct nfp_pf *pf = pci_get_drvdata(pdev);
+       int err;
+
+       err = pci_enable_sriov(pdev, num_vfs);
+       if (err) {
+               dev_warn(&pdev->dev, "Failed to enable PCI sriov: %d\n", err);
+               return err;
+       }
+
+       pf->num_vfs = num_vfs;
+
+       dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs);
+
+       return num_vfs;
+#endif
+       return 0;
+}
+
+static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
+{
+#ifdef CONFIG_PCI_IOV
+       struct nfp_pf *pf = pci_get_drvdata(pdev);
+
+       /* If the VFs are assigned we cannot shut down SR-IOV without
+        * causing issues, so just leave the hardware available but
+        * disabled
+        */
+       if (pci_vfs_assigned(pdev)) {
+               dev_warn(&pdev->dev, "Disabling while VFs assigned - VFs will not be deallocated\n");
+               return -EPERM;
+       }
+
+       pf->num_vfs = 0;
+
+       pci_disable_sriov(pdev);
+       dev_dbg(&pdev->dev, "Removed VFs.\n");
+#endif
+       return 0;
+}
+
+static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+       if (num_vfs == 0)
+               return nfp_pcie_sriov_disable(pdev);
+       else
+               return nfp_pcie_sriov_enable(pdev, num_vfs);
+}
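
The three functions above implement the PCI core's .sriov_configure contract: a request of 0 VFs disables SR-IOV (refused with -EPERM while VFs are still assigned to guests), and a positive request returns the number of VFs actually enabled. A minimal sketch of the same contract follows; it is not buildable outside a kernel tree and "mydrv" is a hypothetical name:

    static int mydrv_sriov_configure(struct pci_dev *pdev, int num_vfs)
    {
            if (num_vfs == 0) {
                    if (pci_vfs_assigned(pdev))
                            return -EPERM;  /* VFs still in use by guests */
                    pci_disable_sriov(pdev);
                    return 0;
            }

            /* pci_enable_sriov() returns 0 on success, negative errno on error */
            return pci_enable_sriov(pdev, num_vfs) ?: num_vfs;
    }
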
+
+/**
+ * nfp_net_fw_find() - Find the correct firmware image for netdev mode
+ * @pdev:      PCI Device structure
+ * @pf:                NFP PF Device structure
+ *
+ * Return: firmware if found and requested successfully.
+ */
+static const struct firmware *
+nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf)
+{
+       const struct firmware *fw = NULL;
+       struct nfp_eth_table_port *port;
+       const char *fw_model;
+       char fw_name[256];
+       int spc, err = 0;
+       int i, j;
+
+       if (!pf->eth_tbl) {
+               dev_err(&pdev->dev, "Error: can't identify media config\n");
+               return NULL;
+       }
+
+       fw_model = nfp_hwinfo_lookup(pf->cpp, "assembly.partno");
+       if (!fw_model) {
+               dev_err(&pdev->dev, "Error: can't read part number\n");
+               return NULL;
+       }
+
+       spc = ARRAY_SIZE(fw_name);
+       spc -= snprintf(fw_name, spc, "netronome/nic_%s", fw_model);
+
+       for (i = 0; spc > 0 && i < pf->eth_tbl->count; i += j) {
+               port = &pf->eth_tbl->ports[i];
+               j = 1;
+               while (i + j < pf->eth_tbl->count &&
+                      port->speed == port[j].speed)
+                       j++;
+
+               spc -= snprintf(&fw_name[ARRAY_SIZE(fw_name) - spc], spc,
+                               "_%dx%d", j, port->speed / 1000);
+       }
+
+       if (spc <= 0)
+               return NULL;
+
+       spc -= snprintf(&fw_name[ARRAY_SIZE(fw_name) - spc], spc, ".nffw");
+       if (spc <= 0)
+               return NULL;
+
+       err = request_firmware(&fw, fw_name, &pdev->dev);
+       if (err)
+               return NULL;
+
+       dev_info(&pdev->dev, "Loading FW image: %s\n", fw_name);
+
+       return fw;
+}
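
nfp_net_fw_find() builds the firmware file name by exploiting snprintf()'s return value (the length it wanted to write) to track remaining space, always appending at &fw_name[size - spc]. A standalone, runnable rendering of that accounting; the part number and the single 4x10G port group are invented sample data:

    #include <stdio.h>

    int main(void)
    {
            char fw_name[256];
            const int size = sizeof(fw_name);
            int spc = size;

            spc -= snprintf(fw_name, spc, "netronome/nic_%s", "AMDA0097-0001");
            /* one group of 4 ports at 10G, as the speed-grouping loop would emit */
            spc -= snprintf(&fw_name[size - spc], spc, "_%dx%d", 4, 10);
            spc -= snprintf(&fw_name[size - spc], spc, ".nffw");

            if (spc > 0)    /* name fit: netronome/nic_AMDA0097-0001_4x10.nffw */
                    printf("%s\n", fw_name);
            return 0;
    }
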
+
+/**
+ * nfp_fw_load() - Load the firmware image
+ * @pdev:       PCI Device structure
+ * @pf:                NFP PF Device structure
+ * @nsp:       NFP NSP handle
+ *
+ * Return: -ERRNO, 0 for no firmware loaded, 1 for firmware loaded
+ */
+static int
+nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp)
+{
+       const struct firmware *fw;
+       u16 interface;
+       int err;
+
+       interface = nfp_cpp_interface(pf->cpp);
+       if (NFP_CPP_INTERFACE_UNIT_of(interface) != 0) {
+               /* Only Unit 0 should reset or load firmware */
+               dev_info(&pdev->dev, "Firmware will be loaded by partner\n");
+               return 0;
+       }
+
+       fw = nfp_net_fw_find(pdev, pf);
+       if (!fw)
+               return 0;
+
+       dev_info(&pdev->dev, "Soft-reset, loading FW image\n");
+       err = nfp_nsp_device_soft_reset(nsp);
+       if (err < 0) {
+               dev_err(&pdev->dev, "Failed to soft reset the NFP: %d\n",
+                       err);
+               goto exit_release_fw;
+       }
+
+       err = nfp_nsp_load_fw(nsp, fw);
+
+       if (err < 0) {
+               dev_err(&pdev->dev, "FW loading failed: %d\n", err);
+               goto exit_release_fw;
+       }
+
+       dev_info(&pdev->dev, "Finished loading FW image\n");
+
+exit_release_fw:
+       release_firmware(fw);
+
+       return err < 0 ? err : 1;
+}
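
Note the three-way return contract: negative errno on failure, 0 when nothing was loaded (no image found, or a partner PF owns loading), 1 when this function loaded the firmware. The probe path below collapses that into a boolean:

    err = nfp_fw_load(pdev, pf, nsp);       /* <0, 0 or 1 */
    if (err < 0)
            goto err_eth_tbl_free;
    pf->fw_loaded = !!err;  /* remember whether this PF owns the firmware */
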
+
+static void nfp_fw_unload(struct nfp_pf *pf)
+{
+       struct nfp_nsp *nsp;
+       int err;
+
+       nsp = nfp_nsp_open(pf->cpp);
+       if (IS_ERR(nsp)) {
+               nfp_err(pf->cpp, "Reset failed, can't open NSP\n");
+               return;
+       }
+
+       err = nfp_nsp_device_soft_reset(nsp);
+       if (err < 0)
+               dev_warn(&pf->pdev->dev, "Couldn't unload firmware: %d\n", err);
+       else
+               dev_info(&pf->pdev->dev, "Firmware safely unloaded\n");
+
+       nfp_nsp_close(nsp);
+}
+
+static int nfp_pci_probe(struct pci_dev *pdev,
+                        const struct pci_device_id *pci_id)
+{
+       struct nfp_nsp *nsp;
+       struct nfp_pf *pf;
+       int err;
+
+       err = pci_enable_device(pdev);
+       if (err < 0)
+               return err;
+
+       pci_set_master(pdev);
+
+       err = dma_set_mask_and_coherent(&pdev->dev,
+                                       DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS));
+       if (err)
+               goto err_pci_disable;
+
+       err = pci_request_regions(pdev, nfp_driver_name);
+       if (err < 0) {
+               dev_err(&pdev->dev, "Unable to reserve pci resources.\n");
+               goto err_pci_disable;
+       }
+
+       pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+       if (!pf) {
+               err = -ENOMEM;
+               goto err_rel_regions;
+       }
+       INIT_LIST_HEAD(&pf->ports);
+       pci_set_drvdata(pdev, pf);
+       pf->pdev = pdev;
+
+       pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev);
+       if (IS_ERR_OR_NULL(pf->cpp)) {
+               err = PTR_ERR(pf->cpp);
+               if (err >= 0)
+                       err = -ENOMEM;
+               goto err_disable_msix;
+       }
+
+       nsp = nfp_nsp_open(pf->cpp);
+       if (IS_ERR(nsp)) {
+               err = PTR_ERR(nsp);
+               goto err_cpp_free;
+       }
+
+       err = nfp_nsp_wait(nsp);
+       if (err < 0) {
+               nfp_nsp_close(nsp);
+               goto err_cpp_free;
+       }
+
+       pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
+
+       err = nfp_fw_load(pdev, pf, nsp);
+       nfp_nsp_close(nsp);
+       if (err < 0) {
+               dev_err(&pdev->dev, "Failed to load FW\n");
+               goto err_eth_tbl_free;
+       }
+
+       pf->fw_loaded = !!err;
+
+       err = nfp_net_pci_probe(pf);
+       if (err)
+               goto err_fw_unload;
+
+       return 0;
+
+err_fw_unload:
+       if (pf->fw_loaded)
+               nfp_fw_unload(pf);
+err_eth_tbl_free:
+       kfree(pf->eth_tbl);
+err_cpp_free:
+       nfp_cpp_free(pf->cpp);
+err_disable_msix:
+       pci_set_drvdata(pdev, NULL);
+       kfree(pf);
+err_rel_regions:
+       pci_release_regions(pdev);
+err_pci_disable:
+       pci_disable_device(pdev);
+
+       return err;
+}
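
nfp_pci_probe() uses the kernel's canonical goto-unwind style: every acquisition gets a matching label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A compact, runnable user-space rendering of the shape, with malloc() standing in for the real resources:

    #include <stdlib.h>

    static int probe(void)
    {
            char *regions, *pf;
            int err;

            regions = malloc(64);           /* like pci_request_regions() */
            if (!regions)
                    return -1;

            pf = malloc(64);                /* like kzalloc() of struct nfp_pf */
            if (!pf) {
                    err = -1;
                    goto err_rel_regions;
            }

            /* each later step adds its own label above err_rel_regions */
            free(pf);
            free(regions);
            return 0;

    err_rel_regions:
            free(regions);
            return err;
    }

    int main(void) { return probe(); }
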
+
+static void nfp_pci_remove(struct pci_dev *pdev)
+{
+       struct nfp_pf *pf = pci_get_drvdata(pdev);
+
+       if (!list_empty(&pf->ports))
+               nfp_net_pci_remove(pf);
+
+       nfp_pcie_sriov_disable(pdev);
+
+       if (pf->fw_loaded)
+               nfp_fw_unload(pf);
+
+       pci_set_drvdata(pdev, NULL);
+       nfp_cpp_free(pf->cpp);
+
+       kfree(pf->eth_tbl);
+       kfree(pf);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+}
+
+static struct pci_driver nfp_pci_driver = {
+       .name                   = nfp_driver_name,
+       .id_table               = nfp_pci_device_ids,
+       .probe                  = nfp_pci_probe,
+       .remove                 = nfp_pci_remove,
+       .sriov_configure        = nfp_pcie_sriov_configure,
+};
+
+static int __init nfp_main_init(void)
+{
+       int err;
+
+       pr_info("%s: NFP PCIe Driver, Copyright (C) 2014-2017 Netronome Systems\n",
+               nfp_driver_name);
+
+       nfp_net_debugfs_create();
+
+       err = pci_register_driver(&nfp_pci_driver);
+       if (err < 0)
+               goto err_destroy_debugfs;
+
+       err = pci_register_driver(&nfp_netvf_pci_driver);
+       if (err)
+               goto err_unreg_pf;
+
+       return err;
+
+err_unreg_pf:
+       pci_unregister_driver(&nfp_pci_driver);
+err_destroy_debugfs:
+       nfp_net_debugfs_destroy();
+       return err;
+}
+
+static void __exit nfp_main_exit(void)
+{
+       pci_unregister_driver(&nfp_netvf_pci_driver);
+       pci_unregister_driver(&nfp_pci_driver);
+       nfp_net_debugfs_destroy();
+}
+
+module_init(nfp_main_init);
+module_exit(nfp_main_exit);
+
+MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_1x40.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_4x10.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0096-0001_2x10.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_2x40.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_4x10_1x40.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_8x10.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x10.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw");
+
+MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("The Netronome Flow Processor (NFP) driver.");
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
new file mode 100644 (file)
index 0000000..6c40fa3
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_main.h
+ * Author: Jason McMullan <jason.mcmullan@netronome.com>
+ */
+
+#ifndef NFP_MAIN_H
+#define NFP_MAIN_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+
+struct dentry;
+struct pci_dev;
+
+struct nfp_cpp;
+struct nfp_cpp_area;
+struct nfp_eth_table;
+
+/**
+ * struct nfp_pf - NFP PF-specific device structure
+ * @pdev:              Backpointer to PCI device
+ * @cpp:               Pointer to the CPP handle
+ * @ctrl_area:         Pointer to the CPP area for the control BAR
+ * @tx_area:           Pointer to the CPP area for the TX queues
+ * @rx_area:           Pointer to the CPP area for the FL/RX queues
+ * @irq_entries:       Array of MSI-X entries for all ports
+ * @num_vfs:           Number of SR-IOV VFs enabled
+ * @fw_loaded:         Is the firmware loaded?
+ * @eth_tbl:           NSP ETH table
+ * @ddir:              Per-device debugfs directory
+ * @num_ports:         Number of adapter ports
+ * @ports:             Linked list of port structures (struct nfp_net)
+ */
+struct nfp_pf {
+       struct pci_dev *pdev;
+
+       struct nfp_cpp *cpp;
+
+       struct nfp_cpp_area *ctrl_area;
+       struct nfp_cpp_area *tx_area;
+       struct nfp_cpp_area *rx_area;
+
+       struct msix_entry *irq_entries;
+
+       unsigned int num_vfs;
+
+       bool fw_loaded;
+
+       struct nfp_eth_table *eth_tbl;
+
+       struct dentry *ddir;
+
+       unsigned int num_ports;
+       struct list_head ports;
+};
+
+extern struct pci_driver nfp_netvf_pci_driver;
+
+int nfp_net_pci_probe(struct nfp_pf *pf);
+void nfp_net_pci_remove(struct nfp_pf *pf);
+
+#endif /* NFP_MAIN_H */
index 2115f446031ef46c3e2c6c586d58f31e064efa38..d37d2391b4feecaebad95c874be615ae6aeb7271 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 Netronome Systems, Inc.
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -43,6 +43,7 @@
 #define _NFP_NET_H_
 
 #include <linux/interrupt.h>
+#include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/io-64-nonatomic-hi-lo.h>
@@ -83,6 +84,7 @@
 #define NFP_NET_NON_Q_VECTORS          2
 #define NFP_NET_IRQ_LSC_IDX            0
 #define NFP_NET_IRQ_EXN_IDX            1
+#define NFP_NET_MIN_PORT_IRQS          (NFP_NET_NON_Q_VECTORS + 1)
 
 /* Queue/Ring definitions */
 #define NFP_NET_MAX_TX_RINGS   64      /* Max. # of Tx rings per device */
@@ -345,7 +347,7 @@ struct nfp_net_rx_ring {
  * @tx_ring:        Pointer to TX ring
  * @rx_ring:        Pointer to RX ring
  * @xdp_ring:      Pointer to an extra TX ring for XDP
- * @irq_idx:        Index into MSI-X table
+ * @irq_entry:      MSI-X table entry (use for talking to the device)
  * @rx_sync:       Seqlock for atomic updates of RX stats
  * @rx_pkts:        Number of received packets
  * @rx_bytes:      Number of received bytes
@@ -362,6 +364,7 @@ struct nfp_net_rx_ring {
  * @tx_lso:        Counter of LSO packets sent
  * @tx_errors:     How many TX errors were encountered
  * @tx_busy:        How often was TX busy (no space)?
+ * @irq_vector:     Interrupt vector number (use for talking to the OS)
  * @handler:        Interrupt handler for this ring vector
  * @name:           Name of the interrupt vector
  * @affinity_mask:  SMP affinity mask for this vector
@@ -378,7 +381,7 @@ struct nfp_net_r_vector {
        struct nfp_net_tx_ring *tx_ring;
        struct nfp_net_rx_ring *rx_ring;
 
-       int irq_idx;
+       u16 irq_entry;
 
        struct u64_stats_sync rx_sync;
        u64 rx_pkts;
@@ -400,6 +403,7 @@ struct nfp_net_r_vector {
        u64 tx_errors;
        u64 tx_busy;
 
+       u32 irq_vector;
        irq_handler_t handler;
        char name[IFNAMSIZ + 8];
        cpumask_t affinity_mask;
@@ -431,20 +435,13 @@ struct nfp_stat_pair {
  * struct nfp_net - NFP network device structure
  * @pdev:               Backpointer to PCI device
  * @netdev:             Backpointer to net_device structure
- * @nfp_fallback:       Is the driver used in fallback mode?
  * @is_vf:              Is the driver attached to a VF?
- * @fw_loaded:          Is the firmware loaded?
  * @bpf_offload_skip_sw:  Offloaded BPF program will not be rerun by cls_bpf
  * @bpf_offload_xdp:   Offloaded BPF program is XDP
  * @ctrl:               Local copy of the control register/word.
  * @fl_bufsz:           Currently configured size of the freelist buffers
  * @rx_offset:         Offset in the RX buffers where packet data starts
  * @xdp_prog:          Installed XDP program
- * @cpp:                Pointer to the CPP handle
- * @nfp_dev_cpp:        Pointer to the NFP Device handle
- * @ctrl_area:          Pointer to the CPP area for the control BAR
- * @tx_area:            Pointer to the CPP area for the TX queues
- * @rx_area:            Pointer to the CPP area for the FL/RX queues
  * @fw_ver:             Firmware version
  * @cap:                Capabilities advertised by the Firmware
  * @max_mtu:            Maximum support MTU advertised by the Firmware
@@ -494,14 +491,13 @@ struct nfp_stat_pair {
  * @tx_bar:             Pointer to mapped TX queues
  * @rx_bar:             Pointer to mapped FL/RX queues
  * @debugfs_dir:       Device directory in debugfs
+ * @port_list:         Entry on device port list
  */
 struct nfp_net {
        struct pci_dev *pdev;
        struct net_device *netdev;
 
-       unsigned nfp_fallback:1;
        unsigned is_vf:1;
-       unsigned fw_loaded:1;
        unsigned bpf_offload_skip_sw:1;
        unsigned bpf_offload_xdp:1;
 
@@ -515,18 +511,6 @@ struct nfp_net {
        struct nfp_net_tx_ring *tx_rings;
        struct nfp_net_rx_ring *rx_rings;
 
-#ifdef CONFIG_PCI_IOV
-       unsigned int num_vfs;
-       struct vf_data_storage *vfinfo;
-       int vf_rate_link_speed;
-#endif
-
-       struct nfp_cpp *cpp;
-       struct platform_device *nfp_dev_cpp;
-       struct nfp_cpp_area *ctrl_area;
-       struct nfp_cpp_area *tx_area;
-       struct nfp_cpp_area *rx_area;
-
        struct nfp_net_fw_version fw_ver;
        u32 cap;
        u32 max_mtu;
@@ -589,11 +573,12 @@ struct nfp_net {
        u8 __iomem *qcp_cfg;
 
        u8 __iomem *ctrl_bar;
-       u8 __iomem *q_bar;
        u8 __iomem *tx_bar;
        u8 __iomem *rx_bar;
 
        struct dentry *debugfs_dir;
+
+       struct list_head port_list;
 };
 
 struct nfp_net_ring_set {
@@ -770,8 +755,7 @@ static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
 }
 
 /* Globals */
-extern const char nfp_net_driver_name[];
-extern const char nfp_net_driver_version[];
+extern const char nfp_driver_version[];
 
 /* Prototypes */
 void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
@@ -789,17 +773,24 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update);
 void nfp_net_rss_write_itbl(struct nfp_net *nn);
 void nfp_net_rss_write_key(struct nfp_net *nn);
 void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
-int nfp_net_irqs_alloc(struct nfp_net *nn);
-void nfp_net_irqs_disable(struct nfp_net *nn);
+
+unsigned int
+nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
+                  unsigned int min_irqs, unsigned int want_irqs);
+void nfp_net_irqs_disable(struct pci_dev *pdev);
+void
+nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
+                   unsigned int n);
 int
 nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
                      struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx);
 
-#ifdef CONFIG_NFP_NET_DEBUG
+#ifdef CONFIG_NFP_DEBUG
 void nfp_net_debugfs_create(void);
 void nfp_net_debugfs_destroy(void);
-void nfp_net_debugfs_adapter_add(struct nfp_net *nn);
-void nfp_net_debugfs_adapter_del(struct nfp_net *nn);
+struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev);
+void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id);
+void nfp_net_debugfs_dir_clean(struct dentry **dir);
 #else
 static inline void nfp_net_debugfs_create(void)
 {
@@ -809,14 +800,20 @@ static inline void nfp_net_debugfs_destroy(void)
 {
 }
 
-static inline void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
+static inline struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
+{
+       return NULL;
+}
+
+static inline void
+nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
 {
 }
 
-static inline void nfp_net_debugfs_adapter_del(struct nfp_net *nn)
+static inline void nfp_net_debugfs_dir_clean(struct dentry **dir)
 {
 }
-#endif /* CONFIG_NFP_NET_DEBUG */
+#endif /* CONFIG_NFP_DEBUG */
 
 void nfp_net_filter_stats_timer(unsigned long data);
 int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf);
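
The CONFIG_NFP_DEBUG block above is the standard kernel pattern for optional features: real prototypes when the option is enabled, empty static inline stubs when it is not, so call sites never need an #ifdef. A runnable miniature, with MYDEBUG standing in for the Kconfig symbol:

    #include <stdio.h>

    #ifdef MYDEBUG
    void debugfs_port_add(int id) { printf("debug dir for port %d\n", id); }
    #else
    static inline void debugfs_port_add(int id) { }     /* compiles away */
    #endif

    int main(void)
    {
            debugfs_port_add(0);    /* unconditional call site, no #ifdef */
            return 0;
    }
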
index e8d448109e03d518c1b67e3046c640cfd2c71795..074259cc8e066d3a04fbe499af53e10cd292bdc8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 Netronome Systems, Inc.
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -42,6 +42,7 @@
  */
 
 #include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -280,72 +281,76 @@ static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
 }
 
 /**
- * nfp_net_msix_alloc() - Try to allocate MSI-X irqs
- * @nn:       NFP Network structure
- * @nr_vecs:  Number of MSI-X vectors to allocate
- *
- * For MSI-X we want at least NFP_NET_NON_Q_VECTORS + 1 vectors.
+ * nfp_net_irqs_alloc() - allocates MSI-X irqs
+ * @pdev:        PCI device structure
+ * @irq_entries: Array to be initialized and used to hold the irq entries
+ * @min_irqs:    Minimum acceptable number of interrupts
+ * @wanted_irqs: Target number of interrupts to allocate
  *
- * Return: Number of MSI-X vectors obtained or 0 on error.
+ * Return: Number of irqs obtained or 0 on error.
  */
-static int nfp_net_msix_alloc(struct nfp_net *nn, int nr_vecs)
+unsigned int
+nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
+                  unsigned int min_irqs, unsigned int wanted_irqs)
 {
-       struct pci_dev *pdev = nn->pdev;
-       int nvecs;
-       int i;
+       unsigned int i;
+       int got_irqs;
 
-       for (i = 0; i < nr_vecs; i++)
-               nn->irq_entries[i].entry = i;
+       for (i = 0; i < wanted_irqs; i++)
+               irq_entries[i].entry = i;
 
-       nvecs = pci_enable_msix_range(pdev, nn->irq_entries,
-                                     NFP_NET_NON_Q_VECTORS + 1, nr_vecs);
-       if (nvecs < 0) {
-               nn_warn(nn, "Failed to enable MSI-X. Wanted %d-%d (err=%d)\n",
-                       NFP_NET_NON_Q_VECTORS + 1, nr_vecs, nvecs);
+       got_irqs = pci_enable_msix_range(pdev, irq_entries,
+                                        min_irqs, wanted_irqs);
+       if (got_irqs < 0) {
+               dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
+                       min_irqs, wanted_irqs, got_irqs);
                return 0;
        }
 
-       return nvecs;
+       if (got_irqs < wanted_irqs)
+               dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
+                        wanted_irqs, got_irqs);
+
+       return got_irqs;
 }
 
 /**
- * nfp_net_irqs_alloc() - allocates MSI-X irqs
- * @nn:       NFP Network structure
+ * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
+ * @nn:                 NFP Network structure
+ * @irq_entries: Table of allocated interrupts
+ * @n:          Size of @irq_entries (number of entries to grab)
  *
- * Return: Number of irqs obtained or 0 on error.
+ * After interrupts are allocated with nfp_net_irqs_alloc() this function
+ * should be called to assign them to a specific netdev (port).
  */
-int nfp_net_irqs_alloc(struct nfp_net *nn)
+void
+nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
+                   unsigned int n)
 {
-       int wanted_irqs;
-       unsigned int n;
-
-       wanted_irqs = nn->num_r_vecs + NFP_NET_NON_Q_VECTORS;
-
-       n = nfp_net_msix_alloc(nn, wanted_irqs);
-       if (n == 0) {
-               nn_err(nn, "Failed to allocate MSI-X IRQs\n");
-               return 0;
-       }
-
        nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
        nn->num_r_vecs = nn->max_r_vecs;
 
-       if (n < wanted_irqs)
-               nn_warn(nn, "Unable to allocate %d vectors. Got %d instead\n",
-                       wanted_irqs, n);
+       memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
 
-       return n;
+       if (nn->num_rx_rings > nn->num_r_vecs ||
+           nn->num_tx_rings > nn->num_r_vecs)
+               nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
+                       nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs);
+
+       nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
+       nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
+       nn->num_stack_tx_rings = nn->num_tx_rings;
 }
 
 /**
  * nfp_net_irqs_disable() - Disable interrupts
- * @nn:       NFP Network structure
+ * @pdev:        PCI device structure
  *
  * Undoes what @nfp_net_irqs_alloc() does.
  */
-void nfp_net_irqs_disable(struct nfp_net *nn)
+void nfp_net_irqs_disable(struct pci_dev *pdev)
 {
-       pci_disable_msix(nn->pdev);
+       pci_disable_msix(pdev);
 }
 
 /**
@@ -409,10 +414,13 @@ out:
 static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
 {
        struct nfp_net *nn = data;
+       struct msix_entry *entry;
+
+       entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];
 
        nfp_net_read_link_status(nn);
 
-       nfp_net_irq_unmask(nn, NFP_NET_IRQ_LSC_IDX);
+       nfp_net_irq_unmask(nn, entry->entry);
 
        return IRQ_HANDLED;
 }
@@ -475,32 +483,28 @@ nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
 }
 
 /**
- * nfp_net_irqs_assign() - Assign IRQs and setup rvecs.
+ * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
  * @netdev:   netdev structure
  */
-static void nfp_net_irqs_assign(struct net_device *netdev)
+static void nfp_net_vecs_init(struct net_device *netdev)
 {
        struct nfp_net *nn = netdev_priv(netdev);
        struct nfp_net_r_vector *r_vec;
        int r;
 
-       if (nn->num_rx_rings > nn->num_r_vecs ||
-           nn->num_tx_rings > nn->num_r_vecs)
-               nn_warn(nn, "More rings (%d,%d) than vectors (%d).\n",
-                       nn->num_rx_rings, nn->num_tx_rings, nn->num_r_vecs);
-
-       nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
-       nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
-       nn->num_stack_tx_rings = nn->num_tx_rings;
-
        nn->lsc_handler = nfp_net_irq_lsc;
        nn->exn_handler = nfp_net_irq_exn;
 
        for (r = 0; r < nn->max_r_vecs; r++) {
+               struct msix_entry *entry;
+
+               entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
+
                r_vec = &nn->r_vecs[r];
                r_vec->nfp_net = nn;
                r_vec->handler = nfp_net_irq_rxtx;
-               r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r;
+               r_vec->irq_entry = entry->entry;
+               r_vec->irq_vector = entry->vector;
 
                cpumask_set_cpu(r, &r_vec->affinity_mask);
        }
@@ -533,7 +537,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
                       entry->vector, err);
                return err;
        }
-       nn_writeb(nn, ctrl_offset, vector_idx);
+       nn_writeb(nn, ctrl_offset, entry->entry);
 
        return 0;
 }
@@ -1459,7 +1463,7 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
                dev_kfree_skb_any(skb);
 }
 
-static void
+static bool
 nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
                   struct nfp_net_tx_ring *tx_ring,
                   struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off,
@@ -1473,13 +1477,13 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
 
        if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
                nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
-               return;
+               return false;
        }
 
        new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr);
        if (unlikely(!new_frag)) {
                nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
-               return;
+               return false;
        }
        nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
 
@@ -1509,6 +1513,7 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
 
        tx_ring->wr_p++;
        tx_ring->wr_ptr_add++;
+       return true;
 }
 
 static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
@@ -1613,12 +1618,15 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
                        case XDP_PASS:
                                break;
                        case XDP_TX:
-                               nfp_net_tx_xdp_buf(nn, rx_ring, tx_ring, rxbuf,
-                                                  pkt_off, pkt_len);
+                               if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring,
+                                                                tx_ring, rxbuf,
+                                                                pkt_off, pkt_len)))
+                                       trace_xdp_exception(nn->netdev, xdp_prog, act);
                                continue;
                        default:
                                bpf_warn_invalid_xdp_action(act);
                        case XDP_ABORTED:
+                               trace_xdp_exception(nn->netdev, xdp_prog, act);
                        case XDP_DROP:
                                nfp_net_rx_give_one(rx_ring, rxbuf->frag,
                                                    rxbuf->dma_addr);
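
This hunk feeds the then-new trace_xdp_exception() tracepoint from two places: an XDP_TX verdict that could not be honoured (ring full or no replacement buffer) and the XDP_ABORTED path, with the unknown-action default deliberately falling through. A runnable miniature of the verdict handling; the helper names are placeholders, not driver functions:

    #include <stdio.h>

    enum xdp_action { XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX };

    static int xmit_back(void) { return 0; }   /* "fails" to show the trace path */
    static void trace_exception(int act) { printf("xdp exception, act=%d\n", act); }
    static void recycle(void) { printf("buffer recycled\n"); }

    int main(void)
    {
            enum xdp_action act = XDP_TX;      /* sample verdict */

            switch (act) {
            case XDP_PASS:
                    break;                     /* hand the frame to the stack */
            case XDP_TX:
                    if (!xmit_back())
                            trace_exception(act);
                    break;
            default:                           /* unknown verdict */
                    /* fall through */
            case XDP_ABORTED:
                    trace_exception(act);
                    /* fall through */
            case XDP_DROP:
                    recycle();
                    break;
            }
            return 0;
    }
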
@@ -1701,7 +1709,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
 
        if (pkts_polled < budget) {
                napi_complete_done(napi, pkts_polled);
-               nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_idx);
+               nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
        }
 
        return pkts_polled;
@@ -1983,7 +1991,6 @@ static int
 nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
                       int idx)
 {
-       struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
        int err;
 
        /* Setup NAPI */
@@ -1992,17 +1999,19 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 
        snprintf(r_vec->name, sizeof(r_vec->name),
                 "%s-rxtx-%d", nn->netdev->name, idx);
-       err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
+       err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
+                         r_vec);
        if (err) {
                netif_napi_del(&r_vec->napi);
-               nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
+               nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
                return err;
        }
-       disable_irq(entry->vector);
+       disable_irq(r_vec->irq_vector);
 
-       irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
+       irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
 
-       nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
+       nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
+              r_vec->irq_entry);
 
        return 0;
 }
@@ -2010,11 +2019,9 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 static void
 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
 {
-       struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
-
-       irq_set_affinity_hint(entry->vector, NULL);
+       irq_set_affinity_hint(r_vec->irq_vector, NULL);
        netif_napi_del(&r_vec->napi);
-       free_irq(entry->vector, r_vec);
+       free_irq(r_vec->irq_vector, r_vec);
 }
 
 /**
@@ -2143,7 +2150,7 @@ nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
        /* Write the DMA address, size and MSI-X info to the device */
        nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
        nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
-       nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_idx);
+       nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
 }
 
 static void
@@ -2152,7 +2159,7 @@ nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
 {
        nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
        nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
-       nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_idx);
+       nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
 }
 
 static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
@@ -2246,7 +2253,7 @@ static void nfp_net_open_stack(struct nfp_net *nn)
 
        for (r = 0; r < nn->num_r_vecs; r++) {
                napi_enable(&nn->r_vecs[r].napi);
-               enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
+               enable_irq(nn->r_vecs[r].irq_vector);
        }
 
        netif_tx_wake_all_queues(nn->netdev);
@@ -2370,7 +2377,7 @@ static void nfp_net_close_stack(struct nfp_net *nn)
        nn->link_up = false;
 
        for (r = 0; r < nn->num_r_vecs; r++) {
-               disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
+               disable_irq(nn->r_vecs[r].irq_vector);
                napi_disable(&nn->r_vecs[r].napi);
        }
 
@@ -2638,8 +2645,8 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
        return nfp_net_ring_reconfig(nn, &nn->xdp_prog, &rx, NULL);
 }
 
-static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
-                                               struct rtnl_link_stats64 *stats)
+static void nfp_net_stat64(struct net_device *netdev,
+                          struct rtnl_link_stats64 *stats)
 {
        struct nfp_net *nn = netdev_priv(netdev);
        int r;
@@ -2669,8 +2676,6 @@ static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
                stats->tx_bytes += data[1];
                stats->tx_errors += data[2];
        }
-
-       return stats;
 }
 
 static bool nfp_net_ebpf_capable(struct nfp_net *nn)
@@ -3256,7 +3261,7 @@ int nfp_net_netdev_init(struct net_device *netdev)
        netif_carrier_off(netdev);
 
        nfp_net_set_ethtool_ops(netdev);
-       nfp_net_irqs_assign(netdev);
+       nfp_net_vecs_init(netdev);
 
        return register_netdev(netdev);
 }
index c66f3f954aa8816b6817f37a49e25dedcb99a8de..6e9372a1837579928bb24b5435e2062dc0c534b8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 Netronome Systems, Inc.
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -202,16 +202,17 @@ static const struct file_operations nfp_xdp_q_fops = {
        .llseek = seq_lseek
 };
 
-void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
+void nfp_net_debugfs_port_add(struct nfp_net *nn, struct dentry *ddir, int id)
 {
        struct dentry *queues, *tx, *rx, *xdp;
-       char int_name[16];
+       char name[20];
        int i;
 
        if (IS_ERR_OR_NULL(nfp_dir))
                return;
 
-       nn->debugfs_dir = debugfs_create_dir(pci_name(nn->pdev), nfp_dir);
+       sprintf(name, "port%d", id);
+       nn->debugfs_dir = debugfs_create_dir(name, ddir);
        if (IS_ERR_OR_NULL(nn->debugfs_dir))
                return;
 
@@ -227,24 +228,38 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
                return;
 
        for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) {
-               sprintf(int_name, "%d", i);
-               debugfs_create_file(int_name, S_IRUSR, rx,
+               sprintf(name, "%d", i);
+               debugfs_create_file(name, S_IRUSR, rx,
                                    &nn->r_vecs[i], &nfp_rx_q_fops);
-               debugfs_create_file(int_name, S_IRUSR, xdp,
+               debugfs_create_file(name, S_IRUSR, xdp,
                                    &nn->r_vecs[i], &nfp_xdp_q_fops);
        }
 
        for (i = 0; i < min(nn->max_tx_rings, nn->max_r_vecs); i++) {
-               sprintf(int_name, "%d", i);
-               debugfs_create_file(int_name, S_IRUSR, tx,
+               sprintf(name, "%d", i);
+               debugfs_create_file(name, S_IRUSR, tx,
                                    &nn->r_vecs[i], &nfp_tx_q_fops);
        }
 }
 
-void nfp_net_debugfs_adapter_del(struct nfp_net *nn)
+struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev)
 {
-       debugfs_remove_recursive(nn->debugfs_dir);
-       nn->debugfs_dir = NULL;
+       struct dentry *dev_dir;
+
+       if (IS_ERR_OR_NULL(nfp_dir))
+               return NULL;
+
+       dev_dir = debugfs_create_dir(pci_name(pdev), nfp_dir);
+       if (IS_ERR_OR_NULL(dev_dir))
+               return NULL;
+
+       return dev_dir;
+}
+
+void nfp_net_debugfs_dir_clean(struct dentry **dir)
+{
+       debugfs_remove_recursive(*dir);
+       *dir = NULL;
 }
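
Taken together, nfp_net_debugfs_device_add() and nfp_net_debugfs_port_add() turn the old one-directory-per-netdev layout into a two-level hierarchy, roughly as below (assuming the driver's top-level debugfs directory is named nfp_net and the queue subdirectories keep their rx/tx/xdp names):

    nfp_net/<pci-id>/                per-device directory (pf->ddir)
    nfp_net/<pci-id>/port0/          per-port directory (nn->debugfs_dir)
    nfp_net/<pci-id>/port0/queue/rx/0
    nfp_net/<pci-id>/port0/queue/tx/0
    nfp_net/<pci-id>/port0/queue/xdp/0
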
 
 void nfp_net_debugfs_create(void)
index 1b26e964657421fe9608b791a204a48b3e0e4b5f..255f30252550902e75d4c14576a015db5883e5d4 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 Netronome Systems, Inc.
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
@@ -132,9 +132,9 @@ static void nfp_net_get_drvinfo(struct net_device *netdev,
 {
        struct nfp_net *nn = netdev_priv(netdev);
 
-       strlcpy(drvinfo->driver, nfp_net_driver_name, sizeof(drvinfo->driver));
-       strlcpy(drvinfo->version, nfp_net_driver_version,
-               sizeof(drvinfo->version));
+       strlcpy(drvinfo->driver, nn->pdev->driver->name,
+               sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version));
 
        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                 "%d.%d.%d.%d",
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
new file mode 100644 (file)
index 0000000..eccd310
--- /dev/null
@@ -0,0 +1,585 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_net_main.c
+ * Netronome network device driver: Main entry point
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ *          Alejandro Lucero <alejandro.lucero@netronome.com>
+ *          Jason McMullan <jason.mcmullan@netronome.com>
+ *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/msi.h>
+#include <linux/random.h>
+
+#include "nfpcore/nfp.h"
+#include "nfpcore/nfp_cpp.h"
+#include "nfpcore/nfp_nffw.h"
+#include "nfpcore/nfp_nsp_eth.h"
+#include "nfpcore/nfp6000_pcie.h"
+
+#include "nfp_net_ctrl.h"
+#include "nfp_net.h"
+#include "nfp_main.h"
+
+#define NFP_PF_CSR_SLICE_SIZE  (32 * 1024)
+
+static int nfp_is_ready(struct nfp_cpp *cpp)
+{
+       const char *cp;
+       long state;
+       int err;
+
+       cp = nfp_hwinfo_lookup(cpp, "board.state");
+       if (!cp)
+               return 0;
+
+       err = kstrtol(cp, 0, &state);
+       if (err < 0)
+               return 0;
+
+       return state == 15;
+}
+
+/**
+ * nfp_net_map_area() - Helper function to map an area
+ * @cpp:    NFP CPP handler
+ * @name:   Name for the area
+ * @isl:    CPP island ID
+ * @target: CPP target
+ * @addr:   CPP address
+ * @size:   Size of the area
+ * @area:   Area handle (returned).
+ *
+ * This function is primarily to simplify the code in the main probe
+ * function. To undo the effect of this function, call
+ * @nfp_cpp_area_release_free(*area);
+ *
+ * Return: Pointer to memory mapped area or ERR_PTR
+ */
+static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp,
+                                   const char *name, int isl, int target,
+                                   unsigned long long addr, unsigned long size,
+                                   struct nfp_cpp_area **area)
+{
+       u8 __iomem *res;
+       u32 dest;
+       int err;
+
+       dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, isl);
+
+       *area = nfp_cpp_area_alloc_with_name(cpp, dest, name, addr, size);
+       if (!*area) {
+               err = -EIO;
+               goto err_area;
+       }
+
+       err = nfp_cpp_area_acquire(*area);
+       if (err < 0)
+               goto err_acquire;
+
+       res = nfp_cpp_area_iomem(*area);
+       if (!res) {
+               err = -EIO;
+               goto err_map;
+       }
+
+       return res;
+
+err_map:
+       nfp_cpp_area_release(*area);
+err_acquire:
+       nfp_cpp_area_free(*area);
+err_area:
+       return (u8 __iomem *)ERR_PTR(err);
+}
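
nfp_net_map_area() reports failure through the mapping pointer itself using the kernel's ERR_PTR convention, so one return value carries both outcomes. A self-contained user-space miniature of the convention (the real macros live in <linux/err.h>):

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    /* user-space stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() */
    static void *ERR_PTR(long err) { return (void *)(intptr_t)err; }
    static long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
    static int IS_ERR(const void *p) { return (uintptr_t)p >= (uintptr_t)-4095; }

    static void *map_area(int fail)
    {
            static char area[64];

            return fail ? ERR_PTR(-EIO) : area;
    }

    int main(void)
    {
            void *p = map_area(1);

            if (IS_ERR(p))
                    printf("mapping failed: %ld\n", PTR_ERR(p));  /* -5 */
            return 0;
    }
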
+
+static void
+nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,
+                           unsigned int id)
+{
+       u8 mac_addr[ETH_ALEN];
+       const char *mac_str;
+       char name[32];
+
+       snprintf(name, sizeof(name), "eth%d.mac", id);
+
+       mac_str = nfp_hwinfo_lookup(cpp, name);
+       if (!mac_str) {
+               dev_warn(&nn->pdev->dev,
+                        "Can't lookup MAC address. Generate\n");
+               eth_hw_addr_random(nn->netdev);
+               return;
+       }
+
+       if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+                  &mac_addr[0], &mac_addr[1], &mac_addr[2],
+                  &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
+               dev_warn(&nn->pdev->dev,
+                        "Can't parse MAC address (%s). Generate.\n", mac_str);
+               eth_hw_addr_random(nn->netdev);
+               return;
+       }
+
+       ether_addr_copy(nn->netdev->dev_addr, mac_addr);
+       ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+}
+
+/**
+ * nfp_net_get_mac_addr() - Get the MAC address.
+ * @nn:       NFP Network structure
+ * @pf:              NFP PF device structure
+ * @id:              NFP port id
+ *
+ * First try to get the MAC address from the NSP ETH table. If that
+ * fails, try HWInfo. As a last resort, generate a random address.
+ */
+static void
+nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id)
+{
+       int i;
+
+       for (i = 0; pf->eth_tbl && i < pf->eth_tbl->count; i++)
+               if (pf->eth_tbl->ports[i].eth_index == id) {
+                       const u8 *mac_addr = pf->eth_tbl->ports[i].mac_addr;
+
+                       ether_addr_copy(nn->netdev->dev_addr, mac_addr);
+                       ether_addr_copy(nn->netdev->perm_addr, mac_addr);
+                       return;
+               }
+
+       nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id);
+}
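
The HWInfo fallback above parses a textual MAC with sscanf() and %hhx conversions, accepting the address only when all six fields match. A runnable extract of just the parsing step, with an invented sample string:

    #include <stdio.h>

    int main(void)
    {
            const char *mac_str = "00:15:4d:12:34:56";  /* sample value */
            unsigned char m[6];

            if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
                       &m[0], &m[1], &m[2], &m[3], &m[4], &m[5]) != 6) {
                    fprintf(stderr, "can't parse, would fall back to random\n");
                    return 1;
            }
            printf("parsed %02x:%02x:%02x:%02x:%02x:%02x\n",
                   m[0], m[1], m[2], m[3], m[4], m[5]);
            return 0;
    }
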
+
+static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
+{
+       char name[256];
+       u16 interface;
+       int pcie_pf;
+       int err = 0;
+       u64 val;
+
+       interface = nfp_cpp_interface(pf->cpp);
+       pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);
+
+       snprintf(name, sizeof(name), "nfd_cfg_pf%d_num_ports", pcie_pf);
+
+       val = nfp_rtsym_read_le(pf->cpp, name, &err);
+       /* Default to one port */
+       if (err) {
+               if (err != -ENOENT)
+                       nfp_err(pf->cpp, "Unable to read adapter port count\n");
+               val = 1;
+       }
+
+       return val;
+}
+
+static unsigned int
+nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar,
+                    unsigned int stride, u32 start_off, u32 num_off)
+{
+       unsigned int i, min_qc, max_qc;
+
+       min_qc = readl(ctrl_bar + start_off);
+       max_qc = min_qc;
+
+       for (i = 0; i < pf->num_ports; i++) {
+               /* To make our lives simpler only accept configuration where
+                * queues are allocated to PFs in order (queues of PFn all have
+                * indexes lower than PFn+1).
+                */
+               if (max_qc > readl(ctrl_bar + start_off))
+                       return 0;
+
+               max_qc = readl(ctrl_bar + start_off);
+               max_qc += readl(ctrl_bar + num_off) * stride;
+               ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
+       }
+
+       return max_qc - min_qc;
+}
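
nfp_net_pf_total_qcs() computes the span from the lowest start queue to one past the highest, rejecting layouts where a later PF's queues start below an earlier one's. With an array standing in for the readl() accesses (queue layout invented, stride folded into the counts), the computation is:

    #include <stdio.h>

    int main(void)
    {
            /* {start_q, num_q} per port, in PF order */
            unsigned int ports[][2] = { {0, 8}, {8, 8}, {16, 4} };
            unsigned int n = 3, min_qc, max_qc, i;

            min_qc = max_qc = ports[0][0];
            for (i = 0; i < n; i++) {
                    if (max_qc > ports[i][0])   /* out-of-order layout */
                            return 1;
                    max_qc = ports[i][0] + ports[i][1];
            }
            printf("QCs to map: %u\n", max_qc - min_qc);    /* 20 */
            return 0;
    }
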
+
+static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
+{
+       const struct nfp_rtsym *ctrl_sym;
+       u8 __iomem *ctrl_bar;
+       char pf_symbol[256];
+       u16 interface;
+       int pcie_pf;
+
+       interface = nfp_cpp_interface(pf->cpp);
+       pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);
+
+       snprintf(pf_symbol, sizeof(pf_symbol), "_pf%d_net_bar0", pcie_pf);
+
+       ctrl_sym = nfp_rtsym_lookup(pf->cpp, pf_symbol);
+       if (!ctrl_sym) {
+               dev_err(&pf->pdev->dev,
+                       "Failed to find PF BAR0 symbol %s\n", pf_symbol);
+               return NULL;
+       }
+
+       if (ctrl_sym->size < pf->num_ports * NFP_PF_CSR_SLICE_SIZE) {
+               dev_err(&pf->pdev->dev,
+                       "PF BAR0 too small to contain %d ports\n",
+                       pf->num_ports);
+               return NULL;
+       }
+
+       ctrl_bar = nfp_net_map_area(pf->cpp, "net.ctrl",
+                                   ctrl_sym->domain, ctrl_sym->target,
+                                   ctrl_sym->addr, ctrl_sym->size,
+                                   &pf->ctrl_area);
+       if (IS_ERR(ctrl_bar)) {
+               dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n",
+                       PTR_ERR(ctrl_bar));
+               return NULL;
+       }
+
+       return ctrl_bar;
+}
+
+static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
+{
+       struct nfp_net *nn;
+
+       while (!list_empty(&pf->ports)) {
+               nn = list_first_entry(&pf->ports, struct nfp_net, port_list);
+               list_del(&nn->port_list);
+
+               nfp_net_netdev_free(nn);
+       }
+}
+
+static struct nfp_net *
+nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
+                            void __iomem *tx_bar, void __iomem *rx_bar,
+                            int stride, struct nfp_net_fw_version *fw_ver)
+{
+       u32 n_tx_rings, n_rx_rings;
+       struct nfp_net *nn;
+
+       n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
+       n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
+
+       /* Allocate and initialise the netdev */
+       nn = nfp_net_netdev_alloc(pf->pdev, n_tx_rings, n_rx_rings);
+       if (IS_ERR(nn))
+               return nn;
+
+       nn->fw_ver = *fw_ver;
+       nn->ctrl_bar = ctrl_bar;
+       nn->tx_bar = tx_bar;
+       nn->rx_bar = rx_bar;
+       nn->is_vf = 0;
+       nn->stride_rx = stride;
+       nn->stride_tx = stride;
+
+       return nn;
+}
+
+static int
+nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
+                           unsigned int id)
+{
+       int err;
+
+       /* Get MAC address */
+       nfp_net_get_mac_addr(nn, pf, id);
+
+       /* Get ME clock frequency from ctrl BAR
+        * XXX for now frequency is hardcoded until we figure out how
+        * to get the value from nfp-hwinfo into ctrl bar
+        */
+       nn->me_freq_mhz = 1200;
+
+       err = nfp_net_netdev_init(nn->netdev);
+       if (err)
+               return err;
+
+       nfp_net_debugfs_port_add(nn, pf->ddir, id);
+
+       nfp_net_info(nn);
+
+       return 0;
+}
+
+static int
+nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
+                        void __iomem *tx_bar, void __iomem *rx_bar,
+                        int stride, struct nfp_net_fw_version *fw_ver)
+{
+       u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
+       struct nfp_net *nn;
+       unsigned int i;
+       int err;
+
+       prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
+       prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
+
+       for (i = 0; i < pf->num_ports; i++) {
+               tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
+               tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
+               tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
+               rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ;
+               prev_tx_base = tgt_tx_base;
+               prev_rx_base = tgt_rx_base;
+
+               nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar, rx_bar,
+                                                 stride, fw_ver);
+               if (IS_ERR(nn)) {
+                       err = PTR_ERR(nn);
+                       goto err_free_prev;
+               }
+               list_add_tail(&nn->port_list, &pf->ports);
+
+               ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
+       }
+
+       return 0;
+
+err_free_prev:
+       nfp_net_pf_free_netdevs(pf);
+       return err;
+}
+
+static int
+nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
+                        void __iomem *ctrl_bar, void __iomem *tx_bar,
+                        void __iomem *rx_bar, int stride,
+                        struct nfp_net_fw_version *fw_ver)
+{
+       unsigned int id, wanted_irqs, num_irqs, ports_left, irqs_left;
+       struct nfp_net *nn;
+       int err;
+
+       /* Allocate the netdevs and do basic init */
+       err = nfp_net_pf_alloc_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
+                                      stride, fw_ver);
+       if (err)
+               return err;
+
+       /* Get MSI-X vectors */
+       wanted_irqs = 0;
+       list_for_each_entry(nn, &pf->ports, port_list)
+               wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs;
+       pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
+                                 GFP_KERNEL);
+       if (!pf->irq_entries) {
+               err = -ENOMEM;
+               goto err_nn_free;
+       }
+
+       num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
+                                     NFP_NET_MIN_PORT_IRQS * pf->num_ports,
+                                     wanted_irqs);
+       if (!num_irqs) {
+               nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
+               err = -ENOMEM;
+               goto err_vec_free;
+       }
+
+       /* Distribute IRQs to ports */
+       irqs_left = num_irqs;
+       ports_left = pf->num_ports;
+       list_for_each_entry(nn, &pf->ports, port_list) {
+               unsigned int n;
+
+               n = DIV_ROUND_UP(irqs_left, ports_left);
+               nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
+                                   n);
+               irqs_left -= n;
+               ports_left--;
+       }
+
+       /* Finish netdev init and register */
+       id = 0;
+       list_for_each_entry(nn, &pf->ports, port_list) {
+               err = nfp_net_pf_init_port_netdev(pf, nn, id);
+               if (err)
+                       goto err_prev_deinit;
+
+               id++;
+       }
+
+       return 0;
+
+err_prev_deinit:
+       list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
+               nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
+               nfp_net_netdev_clean(nn->netdev);
+       }
+       nfp_net_irqs_disable(pf->pdev);
+err_vec_free:
+       kfree(pf->irq_entries);
+err_nn_free:
+       nfp_net_pf_free_netdevs(pf);
+       return err;
+}
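
The distribution loop above deals the granted vectors out as evenly as possible: each port takes ceil(irqs_left / ports_left), so earlier ports get at most one vector more than later ones and the counts always sum to the total. A runnable check of the arithmetic, with sample counts:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int irqs_left = 10, ports_left = 4;    /* sample values */
            unsigned int port = 0;

            while (ports_left) {
                    unsigned int n = DIV_ROUND_UP(irqs_left, ports_left);

                    printf("port %u gets %u IRQs\n", port++, n);
                    irqs_left -= n;
                    ports_left--;
            }
            return 0;       /* prints 3, 3, 2, 2 */
    }
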
+
+/*
+ * PCI device functions
+ */
+int nfp_net_pci_probe(struct nfp_pf *pf)
+{
+       u8 __iomem *ctrl_bar, *tx_bar, *rx_bar;
+       u32 total_tx_qcs, total_rx_qcs;
+       struct nfp_net_fw_version fw_ver;
+       u32 tx_area_sz, rx_area_sz;
+       u32 start_q;
+       int stride;
+       int err;
+
+       /* Verify that the board has completed initialization */
+       if (!nfp_is_ready(pf->cpp)) {
+               nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
+               return -EINVAL;
+       }
+
+       pf->num_ports = nfp_net_pf_get_num_ports(pf);
+
+       ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
+       if (!ctrl_bar)
+               return pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;
+
+       nfp_net_get_fw_version(&fw_ver, ctrl_bar);
+       if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+               nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
+                       fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
+               err = -EINVAL;
+               goto err_ctrl_unmap;
+       }
+
+       /* Determine stride */
+       if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
+               stride = 2;
+               nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
+       } else {
+               switch (fw_ver.major) {
+               case 1 ... 4:
+                       stride = 4;
+                       break;
+               default:
+                       nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
+                               fw_ver.resv, fw_ver.class,
+                               fw_ver.major, fw_ver.minor);
+                       err = -EINVAL;
+                       goto err_ctrl_unmap;
+               }
+       }
+
+       /* Find how many QC structs need to be mapped */
+       total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
+                                           NFP_NET_CFG_START_TXQ,
+                                           NFP_NET_CFG_MAX_TXRINGS);
+       total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
+                                           NFP_NET_CFG_START_RXQ,
+                                           NFP_NET_CFG_MAX_RXRINGS);
+       if (!total_tx_qcs || !total_rx_qcs) {
+               nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n",
+                       total_tx_qcs, total_rx_qcs);
+               err = -EINVAL;
+               goto err_ctrl_unmap;
+       }
+
+       tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs;
+       rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs;
+
+       /* Map TX queues */
+       start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
+       tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0,
+                                 NFP_PCIE_QUEUE(start_q),
+                                 tx_area_sz, &pf->tx_area);
+       if (IS_ERR(tx_bar)) {
+               nfp_err(pf->cpp, "Failed to map TX area.\n");
+               err = PTR_ERR(tx_bar);
+               goto err_ctrl_unmap;
+       }
+
+       /* Map RX queues */
+       start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
+       rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0,
+                                 NFP_PCIE_QUEUE(start_q),
+                                 rx_area_sz, &pf->rx_area);
+       if (IS_ERR(rx_bar)) {
+               nfp_err(pf->cpp, "Failed to map RX area.\n");
+               err = PTR_ERR(rx_bar);
+               goto err_unmap_tx;
+       }
+
+       pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
+
+       err = nfp_net_pf_spawn_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
+                                      stride, &fw_ver);
+       if (err)
+               goto err_clean_ddir;
+
+       return 0;
+
+err_clean_ddir:
+       nfp_net_debugfs_dir_clean(&pf->ddir);
+       nfp_cpp_area_release_free(pf->rx_area);
+err_unmap_tx:
+       nfp_cpp_area_release_free(pf->tx_area);
+err_ctrl_unmap:
+       nfp_cpp_area_release_free(pf->ctrl_area);
+       return err;
+}
+
+void nfp_net_pci_remove(struct nfp_pf *pf)
+{
+       struct nfp_net *nn;
+
+       list_for_each_entry(nn, &pf->ports, port_list) {
+               nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
+
+               nfp_net_netdev_clean(nn->netdev);
+       }
+
+       nfp_net_pf_free_netdevs(pf);
+
+       nfp_net_debugfs_dir_clean(&pf->ddir);
+
+       nfp_net_irqs_disable(pf->pdev);
+       kfree(pf->irq_entries);
+
+       nfp_cpp_area_release_free(pf->rx_area);
+       nfp_cpp_area_release_free(pf->tx_area);
+       nfp_cpp_area_release_free(pf->ctrl_area);
+}
index d065235034d484e8e33160eb1cbf8f3b6b5d3a04..39407f7cc586c948319b963aae3fb455a7a6cb8f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2015 Netronome Systems, Inc.
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
  *
  * This software is dual licensed under the GNU General Public License Version 2,
  * June 1991 as shown in the file COPYING in the top-level directory of this
 
 #include "nfp_net_ctrl.h"
 #include "nfp_net.h"
+#include "nfp_main.h"
+
+/**
+ * struct nfp_net_vf - NFP VF-specific device structure
+ * @nn:                NFP Net structure for this device
+ * @irq_entries: Pre-allocated array of MSI-X entries
+ * @q_bar:     Pointer to mapped QC memory (NULL if TX/RX mapped directly)
+ * @ddir:      Per-device debugfs directory
+ */
+struct nfp_net_vf {
+       struct nfp_net *nn;
+
+       struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS +
+                                     NFP_NET_MAX_TX_RINGS];
+       u8 __iomem *q_bar;
+
+       struct dentry *ddir;
+};
+
+static const char nfp_net_driver_name[] = "nfp_netvf";
 
-const char nfp_net_driver_name[] = "nfp_netvf";
-const char nfp_net_driver_version[] = "0.1";
 #define PCI_DEVICE_NFP6000VF           0x6003
 static const struct pci_device_id nfp_netvf_pci_device_ids[] = {
        { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_NFP6000VF,
@@ -82,15 +100,22 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
        u32 tx_bar_off, rx_bar_off;
        u32 tx_bar_sz, rx_bar_sz;
        int tx_bar_no, rx_bar_no;
+       struct nfp_net_vf *vf;
+       unsigned int num_irqs;
        u8 __iomem *ctrl_bar;
        struct nfp_net *nn;
        u32 startq;
        int stride;
        int err;
 
+       vf = kzalloc(sizeof(*vf), GFP_KERNEL);
+       if (!vf)
+               return -ENOMEM;
+       pci_set_drvdata(pdev, vf);
+
        err = pci_enable_device_mem(pdev);
        if (err)
-               return err;
+               goto err_free_vf;
 
        err = pci_request_regions(pdev, nfp_net_driver_name);
        if (err) {
@@ -182,6 +207,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
                err = PTR_ERR(nn);
                goto err_ctrl_unmap;
        }
+       vf->nn = nn;
 
        nn->fw_ver = fw_ver;
        nn->ctrl_bar = ctrl_bar;
@@ -205,17 +231,17 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
                        bar_sz = (rx_bar_off + rx_bar_sz) - bar_off;
 
                map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off;
-               nn->q_bar = ioremap_nocache(map_addr, bar_sz);
-               if (!nn->q_bar) {
+               vf->q_bar = ioremap_nocache(map_addr, bar_sz);
+               if (!vf->q_bar) {
                        nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
                        err = -EIO;
                        goto err_netdev_free;
                }
 
                /* TX queues */
-               nn->tx_bar = nn->q_bar + (tx_bar_off - bar_off);
+               nn->tx_bar = vf->q_bar + (tx_bar_off - bar_off);
                /* RX queues */
-               nn->rx_bar = nn->q_bar + (rx_bar_off - bar_off);
+               nn->rx_bar = vf->q_bar + (rx_bar_off - bar_off);
        } else {
                resource_size_t map_addr;
 
@@ -240,12 +266,15 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 
        nfp_netvf_get_mac_addr(nn);
 
-       err = nfp_net_irqs_alloc(nn);
-       if (!err) {
+       num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
+                                     NFP_NET_MIN_PORT_IRQS,
+                                     NFP_NET_NON_Q_VECTORS + nn->num_r_vecs);
+       if (!num_irqs) {
                nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
                err = -EIO;
                goto err_unmap_rx;
        }
+       nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs);
 
        /* Get ME clock frequency from ctrl BAR
         * XXX for now frequency is hardcoded until we figure out how
@@ -257,25 +286,23 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
        if (err)
                goto err_irqs_disable;
 
-       pci_set_drvdata(pdev, nn);
-
        nfp_net_info(nn);
-       nfp_net_debugfs_adapter_add(nn);
+       vf->ddir = nfp_net_debugfs_device_add(pdev);
+       nfp_net_debugfs_port_add(nn, vf->ddir, 0);
 
        return 0;
 
 err_irqs_disable:
-       nfp_net_irqs_disable(nn);
+       nfp_net_irqs_disable(pdev);
 err_unmap_rx:
-       if (!nn->q_bar)
+       if (!vf->q_bar)
                iounmap(nn->rx_bar);
 err_unmap_tx:
-       if (!nn->q_bar)
+       if (!vf->q_bar)
                iounmap(nn->tx_bar);
        else
-               iounmap(nn->q_bar);
+               iounmap(vf->q_bar);
 err_netdev_free:
-       pci_set_drvdata(pdev, NULL);
        nfp_net_netdev_free(nn);
 err_ctrl_unmap:
        iounmap(ctrl_bar);
@@ -283,71 +310,47 @@ err_pci_regions:
        pci_release_regions(pdev);
 err_pci_disable:
        pci_disable_device(pdev);
+err_free_vf:
+       pci_set_drvdata(pdev, NULL);
+       kfree(vf);
        return err;
 }
 
 static void nfp_netvf_pci_remove(struct pci_dev *pdev)
 {
-       struct nfp_net *nn = pci_get_drvdata(pdev);
+       struct nfp_net_vf *vf = pci_get_drvdata(pdev);
+       struct nfp_net *nn = vf->nn;
 
        /* Note: the order is slightly different from above, as we need
         * to keep the nn pointer around until we have freed everything.
         */
-       nfp_net_debugfs_adapter_del(nn);
+       nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
+       nfp_net_debugfs_dir_clean(&vf->ddir);
 
        nfp_net_netdev_clean(nn->netdev);
 
-       nfp_net_irqs_disable(nn);
+       nfp_net_irqs_disable(pdev);
 
-       if (!nn->q_bar) {
+       if (!vf->q_bar) {
                iounmap(nn->rx_bar);
                iounmap(nn->tx_bar);
        } else {
-               iounmap(nn->q_bar);
+               iounmap(vf->q_bar);
        }
        iounmap(nn->ctrl_bar);
 
-       pci_set_drvdata(pdev, NULL);
-
        nfp_net_netdev_free(nn);
 
        pci_release_regions(pdev);
        pci_disable_device(pdev);
+
+       pci_set_drvdata(pdev, NULL);
+       kfree(vf);
 }
 
-static struct pci_driver nfp_netvf_pci_driver = {
+struct pci_driver nfp_netvf_pci_driver = {
        .name        = nfp_net_driver_name,
        .id_table    = nfp_netvf_pci_device_ids,
        .probe       = nfp_netvf_pci_probe,
        .remove      = nfp_netvf_pci_remove,
 };
-
-static int __init nfp_netvf_init(void)
-{
-       int err;
-
-       pr_info("%s: NFP VF Network driver, Copyright (C) 2014-2015 Netronome Systems\n",
-               nfp_net_driver_name);
-
-       nfp_net_debugfs_create();
-       err = pci_register_driver(&nfp_netvf_pci_driver);
-       if (err) {
-               nfp_net_debugfs_destroy();
-               return err;
-       }
-
-       return 0;
-}
-
-static void __exit nfp_netvf_exit(void)
-{
-       pci_unregister_driver(&nfp_netvf_pci_driver);
-       nfp_net_debugfs_destroy();
-}
-
-module_init(nfp_netvf_init);
-module_exit(nfp_netvf_exit);
-
-MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("NFP VF network device driver");
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h
new file mode 100644 (file)
index 0000000..6cee638
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NFP_CRC32_H
+#define NFP_CRC32_H
+
+#include <linux/kernel.h>
+#include <linux/crc32.h>
+
+/**
+ * crc32_posix_end() - Finalize POSIX CRC32 working state
+ * @crc:       Current CRC32 working state
+ * @total_len: Total length of data that was CRC32'd
+ *
+ * Return: Final POSIX CRC32 value
+ */
+static inline u32 crc32_posix_end(u32 crc, size_t total_len)
+{
+       /* Extend with the length of the string. */
+       while (total_len != 0) {
+               u8 c = total_len & 0xff;
+
+               crc = crc32_be(crc, &c, 1);
+               total_len >>= 8;
+       }
+
+       return ~crc;
+}
+
+static inline u32 crc32_posix(const void *buff, size_t len)
+{
+       return crc32_posix_end(crc32_be(0, buff, len), len);
+}
+
+#endif /* NFP_CRC32_H */
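
The helpers above reproduce the POSIX cksum(1) CRC: after running CRC-32 (polynomial 0x04C11DB7, MSB-first) over the data, the length is mixed in one byte at a time, least significant byte first, and the result is inverted. A minimal userspace sketch of the same computation, substituting a bit-at-a-time routine for the kernel's crc32_be() (the expected output, 930766865, is the commonly documented cksum of "123456789"):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bit-at-a-time MSB-first CRC-32 (poly 0x04C11DB7), standing in for crc32_be() */
static uint32_t crc32_be_sw(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= (uint32_t)*p++ << 24;
		for (int i = 0; i < 8; i++)
			crc = (crc << 1) ^ ((crc & 0x80000000u) ? 0x04C11DB7u : 0);
	}
	return crc;
}

/* Same steps as crc32_posix()/crc32_posix_end() above: CRC the data,
 * then the length bytes (LSB first), then invert.
 */
static uint32_t crc32_posix_sw(const void *buf, size_t len)
{
	uint32_t crc = crc32_be_sw(0, buf, len);
	size_t total;

	for (total = len; total != 0; total >>= 8) {
		uint8_t c = total & 0xff;

		crc = crc32_be_sw(crc, &c, 1);
	}
	return ~crc;
}

int main(void)
{
	const char s[] = "123456789";

	printf("%u\n", crc32_posix_sw(s, strlen(s)));	/* expect 930766865 */
	return 0;
}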
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h
new file mode 100644 (file)
index 0000000..3d70a85
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp.h
+ * Interface for NFP device access and query functions.
+ */
+
+#ifndef __NFP_H__
+#define __NFP_H__
+
+#include <linux/device.h>
+
+#include "nfp_cpp.h"
+
+/* Implemented in nfp_hwinfo.c */
+
+const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup);
+
+/* Implemented in nfp_nsp.c */
+
+struct nfp_nsp;
+struct firmware;
+
+struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp);
+void nfp_nsp_close(struct nfp_nsp *state);
+int nfp_nsp_wait(struct nfp_nsp *state);
+int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
+int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
+int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_write_eth_table(struct nfp_nsp *state,
+                           const void *buf, unsigned int size);
+
+/* Implemented in nfp_resource.c */
+
+#define NFP_RESOURCE_TBL_TARGET                NFP_CPP_TARGET_MU
+#define NFP_RESOURCE_TBL_BASE          0x8100000000ULL
+
+/* NFP Resource Table self-identifier */
+#define NFP_RESOURCE_TBL_NAME          "nfp.res"
+#define NFP_RESOURCE_TBL_KEY           0x00000000 /* Special key for entry 0 */
+
+/* All other keys are CRC32-POSIX of the 8-byte identification string */
+
+/* ARM/PCI vNIC Interfaces 0..3 */
+#define NFP_RESOURCE_VNIC_PCI_0                "vnic.p0"
+#define NFP_RESOURCE_VNIC_PCI_1                "vnic.p1"
+#define NFP_RESOURCE_VNIC_PCI_2                "vnic.p2"
+#define NFP_RESOURCE_VNIC_PCI_3                "vnic.p3"
+
+/* NFP Hardware Info Database */
+#define NFP_RESOURCE_NFP_HWINFO                "nfp.info"
+
+/* Service Processor */
+#define NFP_RESOURCE_NSP               "nfp.sp"
+
+/* Netronome Flow Firmware Table */
+#define NFP_RESOURCE_NFP_NFFW          "nfp.nffw"
+
+/* MAC Statistics Accumulator */
+#define NFP_RESOURCE_MAC_STATISTICS    "mac.stat"
+
+struct nfp_resource *
+nfp_resource_acquire(struct nfp_cpp *cpp, const char *name);
+
+void nfp_resource_release(struct nfp_resource *res);
+
+u32 nfp_resource_cpp_id(struct nfp_resource *res);
+
+const char *nfp_resource_name(struct nfp_resource *res);
+
+u64 nfp_resource_address(struct nfp_resource *res);
+
+u64 nfp_resource_size(struct nfp_resource *res);
+
+#endif /* !__NFP_H__ */
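
As the comment above notes, every resource table key except entry 0 is the CRC32-POSIX of the 8-byte identification string. A hedged sketch of the derivation, assuming short names such as NFP_RESOURCE_NSP ("nfp.sp") are NUL-padded to the full 8-byte field before hashing (this header does not spell out the padding), and reusing the crc32_posix_sw() routine sketched after crc32.h above:

#include <stdint.h>
#include <string.h>

uint32_t crc32_posix_sw(const void *buf, size_t len);	/* from the crc32.h sketch */

/* Key for a resource name such as NFP_RESOURCE_NSP ("nfp.sp").
 * Assumption (not stated in this header): shorter names are NUL-padded
 * to the full 8-byte identification field before hashing.
 */
static uint32_t nfp_resource_key(const char *name)
{
	char id[8] = { 0 };

	memcpy(id, name, strnlen(name, sizeof(id)));
	return crc32_posix_sw(id, sizeof(id));
}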
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h
new file mode 100644 (file)
index 0000000..0e497a6
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NFP6000_NFP6000_H
+#define NFP6000_NFP6000_H
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+/* CPP Target IDs */
+#define NFP_CPP_TARGET_INVALID          0
+#define NFP_CPP_TARGET_NBI              1
+#define NFP_CPP_TARGET_QDR              2
+#define NFP_CPP_TARGET_ILA              6
+#define NFP_CPP_TARGET_MU               7
+#define NFP_CPP_TARGET_PCIE             9
+#define NFP_CPP_TARGET_ARM              10
+#define NFP_CPP_TARGET_CRYPTO           12
+#define NFP_CPP_TARGET_ISLAND_XPB       14      /* Shared with CAP */
+#define NFP_CPP_TARGET_ISLAND_CAP       14      /* Shared with XPB */
+#define NFP_CPP_TARGET_CT_XPB           14
+#define NFP_CPP_TARGET_LOCAL_SCRATCH    15
+#define NFP_CPP_TARGET_CLS              NFP_CPP_TARGET_LOCAL_SCRATCH
+
+#define NFP_ISL_EMEM0                  24
+
+#define NFP_MU_ADDR_ACCESS_TYPE_MASK   3ULL
+#define NFP_MU_ADDR_ACCESS_TYPE_DIRECT 2ULL
+
+#define PUSHPULL(_pull, _push)         ((_pull) << 4 | (_push) << 0)
+#define PUSH_WIDTH(_pushpull)          pushpull_width((_pushpull) >> 0)
+#define PULL_WIDTH(_pushpull)          pushpull_width((_pushpull) >> 4)
+
+static inline int pushpull_width(int pp)
+{
+       pp &= 0xf;
+
+       if (pp == 0)
+               return -EINVAL;
+       return 2 << pp;
+}
+
+static inline int nfp_cppat_mu_locality_lsb(int mode, bool addr40)
+{
+       switch (mode) {
+       case 0 ... 3:
+               return addr40 ? 38 : 30;
+       default:
+               return -EINVAL;
+       }
+}
+
+int nfp_target_pushpull(u32 cpp_id, u64 address);
+int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address,
+                  u32 *cpp_target_id, u64 *cpp_target_address,
+                  const u32 *imb_table);
+
+#endif /* NFP6000_NFP6000_H */
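
pushpull_width() above decodes a 4-bit width code as 2 << code bytes, so code 1 means 4-byte transfers and code 2 means 8-byte transfers; PUSHPULL() packs the pull code into bits 7:4 and the push code into bits 3:0. A self-contained sketch of the encode/decode round trip:

#include <assert.h>
#include <errno.h>

#define PUSHPULL(_pull, _push)	((_pull) << 4 | (_push) << 0)

/* Same decode rule as pushpull_width() above */
static int pushpull_width_sw(int pp)
{
	pp &= 0xf;
	return pp ? 2 << pp : -EINVAL;
}

int main(void)
{
	int pp = PUSHPULL(2, 1);	/* pull code 2, push code 1 -> 0x21 */

	assert(pushpull_width_sw(pp >> 0) == 4);	/* push: 2 << 1 */
	assert(pushpull_width_sw(pp >> 4) == 8);	/* pull: 2 << 2 */
	return 0;
}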
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h
new file mode 100644 (file)
index 0000000..40fb199
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_xpb.h
+ * Author: Jason McMullan <jason.mcmullan@netronome.com>
+ */
+
+#ifndef NFP6000_XPB_H
+#define NFP6000_XPB_H
+
+/* For use with NFP6000 Databook "XPB Addressing" section
+ */
+#define NFP_XPB_OVERLAY(island)  (((island) & 0x3f) << 24)
+
+#define NFP_XPB_ISLAND(island)   (NFP_XPB_OVERLAY(island) + 0x60000)
+
+#define NFP_XPB_ISLAND_of(offset) (((offset) >> 24) & 0x3F)
+
+/* For use with NFP6000 Databook "XPB Island and Device IDs" chapter
+ */
+#define NFP_XPB_DEVICE(island, slave, device) \
+       (NFP_XPB_OVERLAY(island) | \
+        (((slave) & 3) << 22) | \
+        (((device) & 0x3f) << 16))
+
+#endif /* NFP6000_XPB_H */
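
The macros above pack an XPB address as island in bits 29:24, slave in bits 23:22, and device in bits 21:16. A worked example with arbitrary, illustrative field values:

#include <assert.h>

#define NFP_XPB_OVERLAY(island)	(((island) & 0x3f) << 24)
#define NFP_XPB_DEVICE(island, slave, device) \
	(NFP_XPB_OVERLAY(island) | (((slave) & 3) << 22) | (((device) & 0x3f) << 16))

int main(void)
{
	/* island 8 -> 0x08000000, slave 1 -> 0x00400000, device 0x10 -> 0x00100000 */
	assert(NFP_XPB_DEVICE(8, 1, 0x10) == 0x08500000);
	return 0;
}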
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c
new file mode 100644 (file)
index 0000000..15cc3e7
--- /dev/null
@@ -0,0 +1,1364 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp6000_pcie.c
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ *          Jason McMullan <jason.mcmullan@netronome.com>
+ *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
+ *
+ * Multiplexes the NFP BARs between NFP internal resources and
+ * implements the PCIe specific interface for generic CPP bus access.
+ *
+ * The BARs are managed with refcounts and are allocated/acquired
+ * using target, token and offset/size matching.  The generic CPP bus
+ * abstraction builds upon this BAR interface.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/sort.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include "nfp_cpp.h"
+
+#include "nfp6000/nfp6000.h"
+
+#include "nfp6000_pcie.h"
+
+#define NFP_PCIE_BAR(_pf)      (0x30000 + ((_pf) & 7) * 0xc0)
+#define NFP_PCIE_BAR_EXPLICIT_BAR0(_x, _y) \
+       (0x00000080 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
+#define   NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(_x)     (((_x) & 0x3) << 30)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType_of(_x)  (((_x) >> 30) & 0x3)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR0_Token(_x)          (((_x) & 0x3) << 28)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR0_Token_of(_x)       (((_x) >> 28) & 0x3)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR0_Address(_x)        (((_x) & 0xffffff) << 0)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR0_Address_of(_x)     (((_x) >> 0) & 0xffffff)
+#define NFP_PCIE_BAR_EXPLICIT_BAR1(_x, _y) \
+       (0x00000084 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
+#define   NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(_x)      (((_x) & 0x7f) << 24)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef_of(_x)   (((_x) >> 24) & 0x7f)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(_x)     (((_x) & 0x3ff) << 14)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster_of(_x)  (((_x) >> 14) & 0x3ff)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(_x)        (((_x) & 0x3fff) << 0)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef_of(_x)     (((_x) >> 0) & 0x3fff)
+#define NFP_PCIE_BAR_EXPLICIT_BAR2(_x, _y) \
+       (0x00000088 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3)))
+#define   NFP_PCIE_BAR_EXPLICIT_BAR2_Target(_x)         (((_x) & 0xf) << 28)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR2_Target_of(_x)      (((_x) >> 28) & 0xf)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR2_Action(_x)         (((_x) & 0x1f) << 23)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR2_Action_of(_x)      (((_x) >> 23) & 0x1f)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR2_Length(_x)         (((_x) & 0x1f) << 18)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR2_Length_of(_x)      (((_x) >> 18) & 0x1f)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(_x)       (((_x) & 0xff) << 10)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask_of(_x)    (((_x) >> 10) & 0xff)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(_x)   (((_x) & 0x3ff) << 0)
+#define   NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster_of(_x) (((_x) >> 0) & 0x3ff)
+
+#define   NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(_x)  (((_x) & 0x1f) << 16)
+#define   NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(_x) (((_x) >> 16) & 0x1f)
+#define   NFP_PCIE_BAR_PCIE2CPP_BaseAddress(_x)         (((_x) & 0xffff) << 0)
+#define   NFP_PCIE_BAR_PCIE2CPP_BaseAddress_of(_x)      (((_x) >> 0) & 0xffff)
+#define   NFP_PCIE_BAR_PCIE2CPP_LengthSelect(_x)        (((_x) & 0x3) << 27)
+#define   NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(_x)     (((_x) >> 27) & 0x3)
+#define     NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT    0
+#define     NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT    1
+#define     NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE    3
+#define   NFP_PCIE_BAR_PCIE2CPP_MapType(_x)             (((_x) & 0x7) << 29)
+#define   NFP_PCIE_BAR_PCIE2CPP_MapType_of(_x)          (((_x) >> 29) & 0x7)
+#define     NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED         0
+#define     NFP_PCIE_BAR_PCIE2CPP_MapType_BULK          1
+#define     NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET        2
+#define     NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL       3
+#define     NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0     4
+#define     NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1     5
+#define     NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2     6
+#define     NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3     7
+#define   NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(_x)  (((_x) & 0xf) << 23)
+#define   NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(_x) (((_x) >> 23) & 0xf)
+#define   NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(_x)   (((_x) & 0x3) << 21)
+#define   NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(_x) (((_x) >> 21) & 0x3)
+#define NFP_PCIE_EM                                     0x020000
+#define NFP_PCIE_SRAM                                   0x000000
+
+#define NFP_PCIE_P2C_FIXED_SIZE(bar)               (1 << (bar)->bitsize)
+#define NFP_PCIE_P2C_BULK_SIZE(bar)                (1 << (bar)->bitsize)
+#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2))
+#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4))
+#define NFP_PCIE_P2C_GENERAL_SIZE(bar)             (1 << ((bar)->bitsize - 4))
+
+#define NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
+       (0x400 + ((bar) * 8 + (slot)) * 4)
+
+#define NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(bar, slot) \
+       (((bar) * 8 + (slot)) * 4)
+
+/* The number of explicit BARs to reserve.
+ * Minimum is 0, maximum is 4 on the NFP6000.
+ */
+#define NFP_PCIE_EXPLICIT_BARS         2
+
+struct nfp6000_pcie;
+struct nfp6000_area_priv;
+
+/**
+ * struct nfp_bar - describes BAR configuration and usage
+ * @nfp:       backlink to owner
+ * @barcfg:    cached contents of BAR config CSR
+ * @base:      the BAR's base CPP offset
+ * @mask:       mask for the BAR aperture (read only)
+ * @bitsize:   bitsize of BAR aperture (read only)
+ * @index:     index of the BAR
+ * @refcnt:    number of current users
+ * @iomem:     mapped IO memory
+ * @resource:  iomem resource window
+ */
+struct nfp_bar {
+       struct nfp6000_pcie *nfp;
+       u32 barcfg;
+       u64 base;          /* CPP address base */
+       u64 mask;          /* Bit mask of the bar */
+       u32 bitsize;       /* Bit size of the bar */
+       int index;
+       atomic_t refcnt;
+
+       void __iomem *iomem;
+       struct resource *resource;
+};
+
+#define NFP_PCI_BAR_MAX    (PCI_64BIT_BAR_COUNT * 8)
+
+struct nfp6000_pcie {
+       struct pci_dev *pdev;
+       struct device *dev;
+
+       /* PCI BAR management */
+       spinlock_t bar_lock;            /* Protect the PCI2CPP BAR cache */
+       int bars;
+       struct nfp_bar bar[NFP_PCI_BAR_MAX];
+       wait_queue_head_t bar_waiters;
+
+       /* Reserved BAR access */
+       struct {
+               void __iomem *csr;
+               void __iomem *em;
+               void __iomem *expl[4];
+       } iomem;
+
+       /* Explicit IO access */
+       struct {
+               struct mutex mutex; /* Lock access to this explicit group */
+               u8 master_id;
+               u8 signal_ref;
+               void __iomem *data;
+               struct {
+                       void __iomem *addr;
+                       int bitsize;
+                       int free[4];
+               } group[4];
+       } expl;
+};
+
+static u32 nfp_bar_maptype(struct nfp_bar *bar)
+{
+       return NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg);
+}
+
+static resource_size_t nfp_bar_resource_len(struct nfp_bar *bar)
+{
+       return pci_resource_len(bar->nfp->pdev, (bar->index / 8) * 2) / 8;
+}
+
+static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar)
+{
+       return pci_resource_start(bar->nfp->pdev, (bar->index / 8) * 2)
+               + nfp_bar_resource_len(bar) * (bar->index & 7);
+}
+
+#define TARGET_WIDTH_32    4
+#define TARGET_WIDTH_64    8
+
+static int
+compute_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar,
+           u32 *bar_config, u64 *bar_base,
+           int tgt, int act, int tok, u64 offset, size_t size, int width)
+{
+       int bitsize;
+       u32 newcfg;
+
+       if (tgt >= NFP_CPP_NUM_TARGETS)
+               return -EINVAL;
+
+       switch (width) {
+       case 8:
+               newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+                       NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT);
+               break;
+       case 4:
+               newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+                       NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT);
+               break;
+       case 0:
+               newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect(
+                       NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (act != NFP_CPP_ACTION_RW && act != 0) {
+               /* Fixed CPP mapping with specific action */
+               u64 mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1);
+
+               newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType(
+                         NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED);
+               newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
+               newcfg |= NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(act);
+               newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok);
+
+               if ((offset & mask) != ((offset + size - 1) & mask))
+                       return -EINVAL;
+               offset &= mask;
+
+               bitsize = 40 - 16;
+       } else {
+               u64 mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1);
+
+               /* Bulk mapping */
+               newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType(
+                       NFP_PCIE_BAR_PCIE2CPP_MapType_BULK);
+               newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt);
+               newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok);
+
+               if ((offset & mask) != ((offset + size - 1) & mask))
+                       return -EINVAL;
+
+               offset &= mask;
+
+               bitsize = 40 - 21;
+       }
+
+       if (bar->bitsize < bitsize)
+               return -EINVAL;
+
+       newcfg |= offset >> bitsize;
+
+       if (bar_base)
+               *bar_base = offset;
+
+       if (bar_config)
+               *bar_config = newcfg;
+
+       return 0;
+}
+
+static int
+nfp6000_bar_write(struct nfp6000_pcie *nfp, struct nfp_bar *bar, u32 newcfg)
+{
+       int base, slot;
+       int xbar;
+
+       base = bar->index >> 3;
+       slot = bar->index & 7;
+
+       if (nfp->iomem.csr) {
+               xbar = NFP_PCIE_CPP_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
+               writel(newcfg, nfp->iomem.csr + xbar);
+               /* Readback to ensure BAR is flushed */
+               readl(nfp->iomem.csr + xbar);
+       } else {
+               xbar = NFP_PCIE_CFG_BAR_PCIETOCPPEXPANSIONBAR(base, slot);
+               pci_write_config_dword(nfp->pdev, xbar, newcfg);
+       }
+
+       bar->barcfg = newcfg;
+
+       return 0;
+}
+
+static int
+reconfigure_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar,
+               int tgt, int act, int tok, u64 offset, size_t size, int width)
+{
+       u64 newbase;
+       u32 newcfg;
+       int err;
+
+       err = compute_bar(nfp, bar, &newcfg, &newbase,
+                         tgt, act, tok, offset, size, width);
+       if (err)
+               return err;
+
+       bar->base = newbase;
+
+       return nfp6000_bar_write(nfp, bar, newcfg);
+}
+
+/* Check if BAR can be used with the given parameters. */
+static int matching_bar(struct nfp_bar *bar, u32 tgt, u32 act, u32 tok,
+                       u64 offset, size_t size, int width)
+{
+       int bartgt, baract, bartok;
+       int barwidth;
+       u32 maptype;
+
+       maptype = NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg);
+       bartgt = NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(bar->barcfg);
+       bartok = NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(bar->barcfg);
+       baract = NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(bar->barcfg);
+
+       barwidth = NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(bar->barcfg);
+       switch (barwidth) {
+       case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT:
+               barwidth = 4;
+               break;
+       case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT:
+               barwidth = 8;
+               break;
+       case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE:
+               barwidth = 0;
+               break;
+       default:
+               barwidth = -1;
+               break;
+       }
+
+       switch (maptype) {
+       case NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET:
+               bartok = -1;
+               /* FALLTHROUGH */
+       case NFP_PCIE_BAR_PCIE2CPP_MapType_BULK:
+               baract = NFP_CPP_ACTION_RW;
+               if (act == 0)
+                       act = NFP_CPP_ACTION_RW;
+               /* FALLTHROUGH */
+       case NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED:
+               break;
+       default:
+               /* We don't match explicit bars through the area interface */
+               return 0;
+       }
+
+       /* Make sure to match up the width */
+       if (barwidth != width)
+               return 0;
+
+       if ((bartgt < 0 || bartgt == tgt) &&
+           (bartok < 0 || bartok == tok) &&
+           (baract == act) &&
+           bar->base <= offset &&
+           (bar->base + (1 << bar->bitsize)) >= (offset + size))
+               return 1;
+
+       /* No match */
+       return 0;
+}
+
+static int
+find_matching_bar(struct nfp6000_pcie *nfp,
+                 u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
+{
+       int n;
+
+       for (n = 0; n < nfp->bars; n++) {
+               struct nfp_bar *bar = &nfp->bar[n];
+
+               if (matching_bar(bar, tgt, act, tok, offset, size, width))
+                       return n;
+       }
+
+       return -1;
+}
+
+/* Return EAGAIN if no resource is available */
+static int
+find_unused_bar_noblock(struct nfp6000_pcie *nfp,
+                       int tgt, int act, int tok,
+                       u64 offset, size_t size, int width)
+{
+       int n, invalid = 0;
+
+       for (n = 0; n < nfp->bars; n++) {
+               struct nfp_bar *bar = &nfp->bar[n];
+               int err;
+
+               if (bar->bitsize == 0) {
+                       invalid++;
+                       continue;
+               }
+
+               if (atomic_read(&bar->refcnt) != 0)
+                       continue;
+
+               /* Just check to see if we can make it fit... */
+               err = compute_bar(nfp, bar, NULL, NULL,
+                                 tgt, act, tok, offset, size, width);
+
+               if (err < 0)
+                       invalid++;
+               else
+                       return n;
+       }
+
+       return (n == invalid) ? -EINVAL : -EAGAIN;
+}
+
+static int
+find_unused_bar_and_lock(struct nfp6000_pcie *nfp,
+                        int tgt, int act, int tok,
+                        u64 offset, size_t size, int width)
+{
+       unsigned long flags;
+       int n;
+
+       spin_lock_irqsave(&nfp->bar_lock, flags);
+
+       n = find_unused_bar_noblock(nfp, tgt, act, tok, offset, size, width);
+       if (n < 0)
+               spin_unlock_irqrestore(&nfp->bar_lock, flags);
+       else
+               __release(&nfp->bar_lock);
+
+       return n;
+}
+
+static void nfp_bar_get(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
+{
+       atomic_inc(&bar->refcnt);
+}
+
+static void nfp_bar_put(struct nfp6000_pcie *nfp, struct nfp_bar *bar)
+{
+       if (atomic_dec_and_test(&bar->refcnt))
+               wake_up_interruptible(&nfp->bar_waiters);
+}
+
+static int
+nfp_wait_for_bar(struct nfp6000_pcie *nfp, int *barnum,
+                u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width)
+{
+       return wait_event_interruptible(nfp->bar_waiters,
+               (*barnum = find_unused_bar_and_lock(nfp, tgt, act, tok,
+                                                   offset, size, width))
+                                       != -EAGAIN);
+}
+
+static int
+nfp_alloc_bar(struct nfp6000_pcie *nfp,
+             u32 tgt, u32 act, u32 tok,
+             u64 offset, size_t size, int width, int nonblocking)
+{
+       unsigned long irqflags;
+       int barnum, retval;
+
+       if (size > (1 << 24))
+               return -EINVAL;
+
+       spin_lock_irqsave(&nfp->bar_lock, irqflags);
+       barnum = find_matching_bar(nfp, tgt, act, tok, offset, size, width);
+       if (barnum >= 0) {
+               /* Found a perfect match. */
+               nfp_bar_get(nfp, &nfp->bar[barnum]);
+               spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
+               return barnum;
+       }
+
+       barnum = find_unused_bar_noblock(nfp, tgt, act, tok,
+                                        offset, size, width);
+       if (barnum < 0) {
+               if (nonblocking)
+                       goto err_nobar;
+
+               /* Wait until a BAR becomes available.  The
+                * find_unused_bar_and_lock() helper returns with the
+                * bar_lock held if a free BAR is found.
+                */
+               spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
+               retval = nfp_wait_for_bar(nfp, &barnum, tgt, act, tok,
+                                         offset, size, width);
+               if (retval)
+                       return retval;
+               __acquire(&nfp->bar_lock);
+       }
+
+       nfp_bar_get(nfp, &nfp->bar[barnum]);
+       retval = reconfigure_bar(nfp, &nfp->bar[barnum],
+                                tgt, act, tok, offset, size, width);
+       if (retval < 0) {
+               nfp_bar_put(nfp, &nfp->bar[barnum]);
+               barnum = retval;
+       }
+
+err_nobar:
+       spin_unlock_irqrestore(&nfp->bar_lock, irqflags);
+       return barnum;
+}
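
nfp_alloc_bar() above is a classic blocking allocator: reuse a matching BAR if one exists, otherwise claim a free one, otherwise (unless nonblocking) sleep on bar_waiters until nfp_bar_put() wakes a waiter; the __acquire()/__release() annotations keep sparse's lock tracking consistent across the lock handoff in find_unused_bar_and_lock(). A minimal userspace analogue of the wait-for-free-slot pattern, using pthreads (all names hypothetical, not part of the driver):

#include <pthread.h>
#include <stdbool.h>

#define NSLOT 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waiters = PTHREAD_COND_INITIALIZER;
static bool busy[NSLOT];

/* Claim any free slot; sleep until one is released if none is available. */
static int slot_get(void)
{
	int n;

	pthread_mutex_lock(&lock);
	for (;;) {
		for (n = 0; n < NSLOT; n++)
			if (!busy[n])
				goto found;
		pthread_cond_wait(&waiters, &lock);	/* like bar_waiters */
	}
found:
	busy[n] = true;
	pthread_mutex_unlock(&lock);
	return n;
}

/* Release a slot and wake one waiter, like nfp_bar_put(). */
static void slot_put(int n)
{
	pthread_mutex_lock(&lock);
	busy[n] = false;
	pthread_cond_signal(&waiters);
	pthread_mutex_unlock(&lock);
}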
+
+static void disable_bars(struct nfp6000_pcie *nfp);
+
+static int bar_cmp(const void *aptr, const void *bptr)
+{
+       const struct nfp_bar *a = aptr, *b = bptr;
+
+       if (a->bitsize == b->bitsize)
+               return a->index - b->index;
+       else
+               return a->bitsize - b->bitsize;
+}
+
+/* Map all PCI bars and fetch the actual BAR configurations from the
+ * board.  We assume that the BAR with the PCIe config block is
+ * already mapped.
+ *
+ * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM)
+ * BAR0.1: Reserved for XPB access (for MSI-X access to PCIe PBA)
+ * BAR0.2: --
+ * BAR0.3: --
+ * BAR0.4: Reserved for Explicit 0.0-0.3 access
+ * BAR0.5: Reserved for Explicit 1.0-1.3 access
+ * BAR0.6: Reserved for Explicit 2.0-2.3 access
+ * BAR0.7: Reserved for Explicit 3.0-3.3 access
+ *
+ * BAR1.0-BAR1.7: --
+ * BAR2.0-BAR2.7: --
+ */
+static int enable_bars(struct nfp6000_pcie *nfp, u16 interface)
+{
+       const u32 barcfg_msix_general =
+               NFP_PCIE_BAR_PCIE2CPP_MapType(
+                       NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) |
+               NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT;
+       const u32 barcfg_msix_xpb =
+               NFP_PCIE_BAR_PCIE2CPP_MapType(
+                       NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) |
+               NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT |
+               NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(
+                       NFP_CPP_TARGET_ISLAND_XPB);
+       const u32 barcfg_explicit[4] = {
+               NFP_PCIE_BAR_PCIE2CPP_MapType(
+                       NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0),
+               NFP_PCIE_BAR_PCIE2CPP_MapType(
+                       NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1),
+               NFP_PCIE_BAR_PCIE2CPP_MapType(
+                       NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2),
+               NFP_PCIE_BAR_PCIE2CPP_MapType(
+                       NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3),
+       };
+       struct nfp_bar *bar;
+       int i, bars_free;
+       int expl_groups;
+
+       bar = &nfp->bar[0];
+       for (i = 0; i < ARRAY_SIZE(nfp->bar); i++, bar++) {
+               struct resource *res;
+
+               res = &nfp->pdev->resource[(i >> 3) * 2];
+
+               /* Skip over BARs that are not IORESOURCE_MEM */
+               if (!(resource_type(res) & IORESOURCE_MEM)) {
+                       bar--;
+                       continue;
+               }
+
+               bar->resource = res;
+               bar->barcfg = 0;
+
+               bar->nfp = nfp;
+               bar->index = i;
+               bar->mask = nfp_bar_resource_len(bar) - 1;
+               bar->bitsize = fls(bar->mask);
+               bar->base = 0;
+               bar->iomem = NULL;
+       }
+
+       nfp->bars = bar - &nfp->bar[0];
+       if (nfp->bars < 8) {
+               dev_err(nfp->dev, "No usable BARs found!\n");
+               return -EINVAL;
+       }
+
+       bars_free = nfp->bars;
+
+       /* Convert unit ID (0..3) to signal master/data master ID (0x40..0x70)
+        */
+       mutex_init(&nfp->expl.mutex);
+
+       nfp->expl.master_id = ((NFP_CPP_INTERFACE_UNIT_of(interface) & 3) + 4)
+               << 4;
+       nfp->expl.signal_ref = 0x10;
+
+       /* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */
+       bar = &nfp->bar[0];
+       bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
+                                    nfp_bar_resource_len(bar));
+       if (bar->iomem) {
+               dev_info(nfp->dev,
+                        "BAR0.0 RESERVED: General Mapping/MSI-X SRAM\n");
+               atomic_inc(&bar->refcnt);
+               bars_free--;
+
+               nfp6000_bar_write(nfp, bar, barcfg_msix_general);
+
+               nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000;
+       }
+
+       if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 ||
+           nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) {
+               nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0);
+               expl_groups = 4;
+       } else {
+               int pf = nfp->pdev->devfn & 7;
+
+               nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf);
+               expl_groups = 1;
+       }
+       nfp->iomem.em = bar->iomem + NFP_PCIE_EM;
+
+       /* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */
+       bar = &nfp->bar[1];
+       dev_info(nfp->dev, "BAR0.1 RESERVED: PCIe XPB/MSI-X PBA\n");
+       atomic_inc(&bar->refcnt);
+       bars_free--;
+
+       nfp6000_bar_write(nfp, bar, barcfg_msix_xpb);
+
+       /* Use BAR0.4..BAR0.7 for EXPL IO */
+       for (i = 0; i < 4; i++) {
+               int j;
+
+               if (i >= NFP_PCIE_EXPLICIT_BARS || i >= expl_groups) {
+                       nfp->expl.group[i].bitsize = 0;
+                       continue;
+               }
+
+               bar = &nfp->bar[4 + i];
+               bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar),
+                                            nfp_bar_resource_len(bar));
+               if (bar->iomem) {
+                       dev_info(nfp->dev,
+                                "BAR0.%d RESERVED: Explicit%d Mapping\n",
+                                4 + i, i);
+                       atomic_inc(&bar->refcnt);
+                       bars_free--;
+
+                       nfp->expl.group[i].bitsize = bar->bitsize;
+                       nfp->expl.group[i].addr = bar->iomem;
+                       nfp6000_bar_write(nfp, bar, barcfg_explicit[i]);
+
+                       for (j = 0; j < 4; j++)
+                               nfp->expl.group[i].free[j] = true;
+               }
+               nfp->iomem.expl[i] = bar->iomem;
+       }
+
+       /* Sort bars by bit size - use the smallest possible first. */
+       sort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]),
+            bar_cmp, NULL);
+
+       dev_info(nfp->dev, "%d NFP PCI2CPP BARs, %d free\n",
+                nfp->bars, bars_free);
+
+       return 0;
+}
+
+static void disable_bars(struct nfp6000_pcie *nfp)
+{
+       struct nfp_bar *bar = &nfp->bar[0];
+       int n;
+
+       for (n = 0; n < nfp->bars; n++, bar++) {
+               if (bar->iomem) {
+                       iounmap(bar->iomem);
+                       bar->iomem = NULL;
+               }
+       }
+}
+
+/*
+ * Generic CPP bus access interface.
+ */
+
+struct nfp6000_area_priv {
+       atomic_t refcnt;
+
+       struct nfp_bar *bar;
+       u32 bar_offset;
+
+       u32 target;
+       u32 action;
+       u32 token;
+       u64 offset;
+       struct {
+               int read;
+               int write;
+               int bar;
+       } width;
+       size_t size;
+
+       void __iomem *iomem;
+       phys_addr_t phys;
+       struct resource resource;
+};
+
+static int nfp6000_area_init(struct nfp_cpp_area *area, u32 dest,
+                            unsigned long long address, unsigned long size)
+{
+       struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+       u32 target = NFP_CPP_ID_TARGET_of(dest);
+       u32 action = NFP_CPP_ID_ACTION_of(dest);
+       u32 token = NFP_CPP_ID_TOKEN_of(dest);
+       int pp;
+
+       pp = nfp_target_pushpull(NFP_CPP_ID(target, action, token), address);
+       if (pp < 0)
+               return pp;
+
+       priv->width.read = PUSH_WIDTH(pp);
+       priv->width.write = PULL_WIDTH(pp);
+       if (priv->width.read > 0 &&
+           priv->width.write > 0 &&
+           priv->width.read != priv->width.write) {
+               return -EINVAL;
+       }
+
+       if (priv->width.read > 0)
+               priv->width.bar = priv->width.read;
+       else
+               priv->width.bar = priv->width.write;
+
+       atomic_set(&priv->refcnt, 0);
+       priv->bar = NULL;
+
+       priv->target = target;
+       priv->action = action;
+       priv->token = token;
+       priv->offset = address;
+       priv->size = size;
+       memset(&priv->resource, 0, sizeof(priv->resource));
+
+       return 0;
+}
+
+static void nfp6000_area_cleanup(struct nfp_cpp_area *area)
+{
+}
+
+static void priv_area_get(struct nfp_cpp_area *area)
+{
+       struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+
+       atomic_inc(&priv->refcnt);
+}
+
+static int priv_area_put(struct nfp_cpp_area *area)
+{
+       struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+
+       if (WARN_ON(!atomic_read(&priv->refcnt)))
+               return 0;
+
+       return atomic_dec_and_test(&priv->refcnt);
+}
+
+static int nfp6000_area_acquire(struct nfp_cpp_area *area)
+{
+       struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
+       struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+       int barnum, err;
+
+       if (priv->bar) {
+               /* Already allocated. */
+               priv_area_get(area);
+               return 0;
+       }
+
+       barnum = nfp_alloc_bar(nfp, priv->target, priv->action, priv->token,
+                              priv->offset, priv->size, priv->width.bar, 1);
+
+       if (barnum < 0) {
+               err = barnum;
+               goto err_alloc_bar;
+       }
+       priv->bar = &nfp->bar[barnum];
+
+       /* Calculate offset into BAR. */
+       if (nfp_bar_maptype(priv->bar) ==
+           NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) {
+               priv->bar_offset = priv->offset &
+                       (NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1);
+               priv->bar_offset += NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(
+                       priv->bar, priv->target);
+               priv->bar_offset += NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(
+                       priv->bar, priv->token);
+       } else {
+               priv->bar_offset = priv->offset & priv->bar->mask;
+       }
+
+       /* We don't actually try to acquire the resource area using
+        * request_resource.  This would prevent sharing the mapped
+        * BAR between multiple CPP areas and prevent us from
+        * effectively utilizing the limited amount of BAR resources.
+        */
+       priv->phys = nfp_bar_resource_start(priv->bar) + priv->bar_offset;
+       priv->resource.name = nfp_cpp_area_name(area);
+       priv->resource.start = priv->phys;
+       priv->resource.end = priv->resource.start + priv->size - 1;
+       priv->resource.flags = IORESOURCE_MEM;
+
+       /* If the bar is already mapped in, use its mapping */
+       if (priv->bar->iomem)
+               priv->iomem = priv->bar->iomem + priv->bar_offset;
+       else
+               /* Must have been too big. Sub-allocate. */
+               priv->iomem = ioremap_nocache(priv->phys, priv->size);
+
+       if (IS_ERR_OR_NULL(priv->iomem)) {
+               dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n",
+                       (int)priv->size, priv->bar->index);
+               err = !priv->iomem ? -ENOMEM : PTR_ERR(priv->iomem);
+               priv->iomem = NULL;
+               goto err_iomem_remap;
+       }
+
+       priv_area_get(area);
+       return 0;
+
+err_iomem_remap:
+       nfp_bar_put(nfp, priv->bar);
+       priv->bar = NULL;
+err_alloc_bar:
+       return err;
+}
+
+static void nfp6000_area_release(struct nfp_cpp_area *area)
+{
+       struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area));
+       struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+
+       if (!priv_area_put(area))
+               return;
+
+       if (!priv->bar->iomem)
+               iounmap(priv->iomem);
+
+       nfp_bar_put(nfp, priv->bar);
+
+       priv->bar = NULL;
+       priv->iomem = NULL;
+}
+
+static phys_addr_t nfp6000_area_phys(struct nfp_cpp_area *area)
+{
+       struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+
+       return priv->phys;
+}
+
+static void __iomem *nfp6000_area_iomem(struct nfp_cpp_area *area)
+{
+       struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+
+       return priv->iomem;
+}
+
+static struct resource *nfp6000_area_resource(struct nfp_cpp_area *area)
+{
+       /* Use the BAR resource as the resource for the CPP area.
+        * This enables us to share the BAR among multiple CPP areas
+        * without resource conflicts.
+        */
+       struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area);
+
+       return priv->bar->resource;
+}
+
+static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr,
+                            unsigned long offset, unsigned int length)
+{
+       u64 __maybe_unused *wrptr64 = kernel_vaddr;
+       const u64 __iomem __maybe_unused *rdptr64;
+       struct nfp6000_area_priv *priv;
+       u32 *wrptr32 = kernel_vaddr;
+       const u32 __iomem *rdptr32;
+       int n, width;
+       bool is_64;
+
+       priv = nfp_cpp_area_priv(area);
+       rdptr64 = priv->iomem + offset;
+       rdptr32 = priv->iomem + offset;
+
+       if (offset + length > priv->size)
+               return -EFAULT;
+
+       width = priv->width.read;
+
+       if (width <= 0)
+               return -EINVAL;
+
+       /* Unaligned? Translate to an explicit access */
+       if ((priv->offset + offset) & (width - 1))
+               return nfp_cpp_explicit_read(nfp_cpp_area_cpp(area),
+                                            NFP_CPP_ID(priv->target,
+                                                       priv->action,
+                                                       priv->token),
+                                            priv->offset + offset,
+                                            kernel_vaddr, length, width);
+
+       is_64 = width == TARGET_WIDTH_64;
+
+       /* MU reads via a PCIe2CPP BAR support 32-bit (and other) lengths */
+       if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
+           priv->action == NFP_CPP_ACTION_RW)
+               is_64 = false;
+
+       if (is_64) {
+               if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
+                       return -EINVAL;
+       } else {
+               if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
+                       return -EINVAL;
+       }
+
+       if (WARN_ON(!priv->bar))
+               return -EFAULT;
+
+       if (is_64)
+#ifndef __raw_readq
+               return -EINVAL;
+#else
+               for (n = 0; n < length; n += sizeof(u64))
+                       *wrptr64++ = __raw_readq(rdptr64++);
+#endif
+       else
+               for (n = 0; n < length; n += sizeof(u32))
+                       *wrptr32++ = __raw_readl(rdptr32++);
+
+       return n;
+}
+
+static int
+nfp6000_area_write(struct nfp_cpp_area *area,
+                  const void *kernel_vaddr,
+                  unsigned long offset, unsigned int length)
+{
+       const u64 __maybe_unused *rdptr64 = kernel_vaddr;
+       u64 __iomem __maybe_unused *wrptr64;
+       const u32 *rdptr32 = kernel_vaddr;
+       struct nfp6000_area_priv *priv;
+       u32 __iomem *wrptr32;
+       int n, width;
+       bool is_64;
+
+       priv = nfp_cpp_area_priv(area);
+       wrptr64 = priv->iomem + offset;
+       wrptr32 = priv->iomem + offset;
+
+       if (offset + length > priv->size)
+               return -EFAULT;
+
+       width = priv->width.write;
+
+       if (width <= 0)
+               return -EINVAL;
+
+       /* Unaligned? Translate to an explicit access */
+       if ((priv->offset + offset) & (width - 1))
+               return nfp_cpp_explicit_write(nfp_cpp_area_cpp(area),
+                                             NFP_CPP_ID(priv->target,
+                                                        priv->action,
+                                                        priv->token),
+                                             priv->offset + offset,
+                                             kernel_vaddr, length, width);
+
+       is_64 = width == TARGET_WIDTH_64;
+
+       /* MU writes via a PCIe2CPP BAR support 32-bit (and other) lengths */
+       if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) &&
+           priv->action == NFP_CPP_ACTION_RW)
+               is_64 = false;
+
+       if (is_64) {
+               if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0)
+                       return -EINVAL;
+       } else {
+               if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0)
+                       return -EINVAL;
+       }
+
+       if (WARN_ON(!priv->bar))
+               return -EFAULT;
+
+       if (is_64)
+#ifndef __raw_writeq
+               return -EINVAL;
+#else
+               for (n = 0; n < length; n += sizeof(u64)) {
+                       __raw_writeq(*rdptr64++, wrptr64++);
+                       wmb();
+               }
+#endif
+       else
+               for (n = 0; n < length; n += sizeof(u32)) {
+                       __raw_writel(*rdptr32++, wrptr32++);
+                       wmb();
+               }
+
+       return n;
+}
+
+struct nfp6000_explicit_priv {
+       struct nfp6000_pcie *nfp;
+       struct {
+               int group;
+               int area;
+       } bar;
+       int bitsize;
+       void __iomem *data;
+       void __iomem *addr;
+};
+
+static int nfp6000_explicit_acquire(struct nfp_cpp_explicit *expl)
+{
+       struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_explicit_cpp(expl));
+       struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
+       int i, j;
+
+       mutex_lock(&nfp->expl.mutex);
+       for (i = 0; i < ARRAY_SIZE(nfp->expl.group); i++) {
+               if (!nfp->expl.group[i].bitsize)
+                       continue;
+
+               for (j = 0; j < ARRAY_SIZE(nfp->expl.group[i].free); j++) {
+                       u16 data_offset;
+
+                       if (!nfp->expl.group[i].free[j])
+                               continue;
+
+                       priv->nfp = nfp;
+                       priv->bar.group = i;
+                       priv->bar.area = j;
+                       priv->bitsize = nfp->expl.group[i].bitsize - 2;
+
+                       data_offset = (priv->bar.group << 9) +
+                               (priv->bar.area << 7);
+                       priv->data = nfp->expl.data + data_offset;
+                       priv->addr = nfp->expl.group[i].addr +
+                               (priv->bar.area << priv->bitsize);
+                       nfp->expl.group[i].free[j] = false;
+
+                       mutex_unlock(&nfp->expl.mutex);
+                       return 0;
+               }
+       }
+       mutex_unlock(&nfp->expl.mutex);
+
+       return -EAGAIN;
+}
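For readers tracing the offset arithmetic above, a quick worked example with illustrative values: with bar.group = 1 and bar.area = 2, the per-area push/pull window starts at data_offset = (1 << 9) + (2 << 7) = 0x300 within the shared expl.data mapping, and the kickoff address priv->addr is that area's (1 << bitsize)-byte slice of the group's explicit BAR.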
+
+static void nfp6000_explicit_release(struct nfp_cpp_explicit *expl)
+{
+       struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
+       struct nfp6000_pcie *nfp = priv->nfp;
+
+       mutex_lock(&nfp->expl.mutex);
+       nfp->expl.group[priv->bar.group].free[priv->bar.area] = true;
+       mutex_unlock(&nfp->expl.mutex);
+}
+
+static int nfp6000_explicit_put(struct nfp_cpp_explicit *expl,
+                               const void *buff, size_t len)
+{
+       struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
+       const u32 *src = buff;
+       size_t i;
+
+       for (i = 0; i < len; i += sizeof(u32))
+               writel(*(src++), priv->data + i);
+
+       return i;
+}
+
+static int
+nfp6000_explicit_do(struct nfp_cpp_explicit *expl,
+                   const struct nfp_cpp_explicit_command *cmd, u64 address)
+{
+       struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
+       u8 signal_master, signal_ref, data_master;
+       struct nfp6000_pcie *nfp = priv->nfp;
+       int sigmask = 0;
+       u16 data_ref;
+       u32 csr[3];
+
+       if (cmd->siga_mode)
+               sigmask |= 1 << cmd->siga;
+       if (cmd->sigb_mode)
+               sigmask |= 1 << cmd->sigb;
+
+       signal_master = cmd->signal_master;
+       if (!signal_master)
+               signal_master = nfp->expl.master_id;
+
+       signal_ref = cmd->signal_ref;
+       if (signal_master == nfp->expl.master_id)
+               signal_ref = nfp->expl.signal_ref +
+                       ((priv->bar.group * 4 + priv->bar.area) << 1);
+
+       data_master = cmd->data_master;
+       if (!data_master)
+               data_master = nfp->expl.master_id;
+
+       data_ref = cmd->data_ref;
+       if (data_master == nfp->expl.master_id)
+               data_ref = 0x1000 +
+                       (priv->bar.group << 9) + (priv->bar.area << 7);
+
+       csr[0] = NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(sigmask) |
+               NFP_PCIE_BAR_EXPLICIT_BAR0_Token(
+                       NFP_CPP_ID_TOKEN_of(cmd->cpp_id)) |
+               NFP_PCIE_BAR_EXPLICIT_BAR0_Address(address >> 16);
+
+       csr[1] = NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(signal_ref) |
+               NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(data_master) |
+               NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(data_ref);
+
+       csr[2] = NFP_PCIE_BAR_EXPLICIT_BAR2_Target(
+                       NFP_CPP_ID_TARGET_of(cmd->cpp_id)) |
+               NFP_PCIE_BAR_EXPLICIT_BAR2_Action(
+                       NFP_CPP_ID_ACTION_of(cmd->cpp_id)) |
+               NFP_PCIE_BAR_EXPLICIT_BAR2_Length(cmd->len) |
+               NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(cmd->byte_mask) |
+               NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(signal_master);
+
+       if (nfp->iomem.csr) {
+               writel(csr[0], nfp->iomem.csr +
+                      NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group,
+                                                 priv->bar.area));
+               writel(csr[1], nfp->iomem.csr +
+                      NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group,
+                                                 priv->bar.area));
+               writel(csr[2], nfp->iomem.csr +
+                      NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group,
+                                                 priv->bar.area));
+               /* Readback to ensure BAR is flushed */
+               readl(nfp->iomem.csr +
+                     NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group,
+                                                priv->bar.area));
+               readl(nfp->iomem.csr +
+                     NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group,
+                                                priv->bar.area));
+               readl(nfp->iomem.csr +
+                     NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group,
+                                                priv->bar.area));
+       } else {
+               pci_write_config_dword(nfp->pdev, 0x400 +
+                                      NFP_PCIE_BAR_EXPLICIT_BAR0(
+                                              priv->bar.group, priv->bar.area),
+                                      csr[0]);
+
+               pci_write_config_dword(nfp->pdev, 0x400 +
+                                      NFP_PCIE_BAR_EXPLICIT_BAR1(
+                                              priv->bar.group, priv->bar.area),
+                                      csr[1]);
+
+               pci_write_config_dword(nfp->pdev, 0x400 +
+                                      NFP_PCIE_BAR_EXPLICIT_BAR2(
+                                              priv->bar.group, priv->bar.area),
+                                      csr[2]);
+       }
+
+       /* Issue the 'kickoff' transaction */
+       readb(priv->addr + (address & ((1 << priv->bitsize) - 1)));
+
+       return sigmask;
+}
+
+static int nfp6000_explicit_get(struct nfp_cpp_explicit *expl,
+                               void *buff, size_t len)
+{
+       struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl);
+       u32 *dst = buff;
+       size_t i;
+
+       for (i = 0; i < len; i += sizeof(u32))
+               *(dst++) = readl(priv->data + i);
+
+       return i;
+}
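Taken together, acquire/put/do/get implement one explicit transaction. A minimal sketch of how a caller might drive these hooks through the public nfp_cpp_explicit_* API declared in nfp_cpp.h below — the target ID, length encoding, and byte mask here are illustrative assumptions, not values taken from this patch:

static int example_explicit_read64(struct nfp_cpp *cpp, u64 addr, u64 *val)
{
	struct nfp_cpp_explicit *expl;
	int err;

	expl = nfp_cpp_explicit_acquire(cpp);	/* may need a retry loop */
	if (!expl)
		return -EBUSY;

	/* One pull of data from the MU target, all byte lanes enabled
	 * (len/mask semantics assumed for this sketch)
	 */
	err = nfp_cpp_explicit_set_target(expl,
			NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0),
			1, 0xff);
	if (!err)
		err = nfp_cpp_explicit_do(expl, addr); /* program CSRs, kick off */
	if (err >= 0)
		err = nfp_cpp_explicit_get(expl, val, sizeof(*val));

	nfp_cpp_explicit_release(expl);
	return err < 0 ? err : 0;
}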
+
+static int nfp6000_init(struct nfp_cpp *cpp)
+{
+       nfp_cpp_area_cache_add(cpp, SZ_64K);
+       nfp_cpp_area_cache_add(cpp, SZ_64K);
+       nfp_cpp_area_cache_add(cpp, SZ_256K);
+
+       return 0;
+}
+
+static void nfp6000_free(struct nfp_cpp *cpp)
+{
+       struct nfp6000_pcie *nfp = nfp_cpp_priv(cpp);
+
+       disable_bars(nfp);
+       kfree(nfp);
+}
+
+static void nfp6000_read_serial(struct device *dev, u8 *serial)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       int pos;
+       u32 reg;
+
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+       if (!pos) {
+               memset(serial, 0, NFP_SERIAL_LEN);
+               return;
+       }
+
+       pci_read_config_dword(pdev, pos + 4, &reg);
+       put_unaligned_be16(reg >> 16, serial + 4);
+       pci_read_config_dword(pdev, pos + 8, &reg);
+       put_unaligned_be32(reg, serial);
+}
+
+static u16 nfp6000_get_interface(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       int pos;
+       u32 reg;
+
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+       if (!pos)
+               return NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff);
+
+       pci_read_config_dword(pdev, pos + 4, &reg);
+
+       return reg & 0xffff;
+}
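As a usage sketch, the returned value decodes with the NFP_CPP_INTERFACE_*_of() accessors from nfp_cpp.h (the pdev pointer is assumed to come from the caller):

	u16 ifc = nfp6000_get_interface(&pdev->dev);

	if (NFP_CPP_INTERFACE_TYPE_of(ifc) == NFP_CPP_INTERFACE_TYPE_PCI)
		dev_dbg(&pdev->dev, "PCI unit %d, channel %d\n",
			NFP_CPP_INTERFACE_UNIT_of(ifc),
			NFP_CPP_INTERFACE_CHANNEL_of(ifc));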
+
+static const struct nfp_cpp_operations nfp6000_pcie_ops = {
+       .owner                  = THIS_MODULE,
+
+       .init                   = nfp6000_init,
+       .free                   = nfp6000_free,
+
+       .read_serial            = nfp6000_read_serial,
+       .get_interface          = nfp6000_get_interface,
+
+       .area_priv_size         = sizeof(struct nfp6000_area_priv),
+       .area_init              = nfp6000_area_init,
+       .area_cleanup           = nfp6000_area_cleanup,
+       .area_acquire           = nfp6000_area_acquire,
+       .area_release           = nfp6000_area_release,
+       .area_phys              = nfp6000_area_phys,
+       .area_iomem             = nfp6000_area_iomem,
+       .area_resource          = nfp6000_area_resource,
+       .area_read              = nfp6000_area_read,
+       .area_write             = nfp6000_area_write,
+
+       .explicit_priv_size     = sizeof(struct nfp6000_explicit_priv),
+       .explicit_acquire       = nfp6000_explicit_acquire,
+       .explicit_release       = nfp6000_explicit_release,
+       .explicit_put           = nfp6000_explicit_put,
+       .explicit_do            = nfp6000_explicit_do,
+       .explicit_get           = nfp6000_explicit_get,
+};
+
+/**
+ * nfp_cpp_from_nfp6000_pcie() - Build a NFP CPP bus from a NFP6000 PCI device
+ * @pdev:      NFP6000 PCI device
+ *
+ * Return: NFP CPP handle
+ */
+struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev)
+{
+       struct nfp6000_pcie *nfp;
+       u16 interface;
+       int err;
+
+       /* Start of card initialization */
+       dev_info(&pdev->dev,
+                "Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n");
+
+       nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
+       if (!nfp) {
+               err = -ENOMEM;
+               goto err_ret;
+       }
+
+       nfp->dev = &pdev->dev;
+       nfp->pdev = pdev;
+       init_waitqueue_head(&nfp->bar_waiters);
+       spin_lock_init(&nfp->bar_lock);
+
+       interface = nfp6000_get_interface(&pdev->dev);
+
+       if (NFP_CPP_INTERFACE_TYPE_of(interface) !=
+           NFP_CPP_INTERFACE_TYPE_PCI) {
+               dev_err(&pdev->dev,
+                       "Interface type %d is not the expected %d\n",
+                       NFP_CPP_INTERFACE_TYPE_of(interface),
+                       NFP_CPP_INTERFACE_TYPE_PCI);
+               err = -ENODEV;
+               goto err_free_nfp;
+       }
+
+       if (NFP_CPP_INTERFACE_CHANNEL_of(interface) !=
+           NFP_CPP_INTERFACE_CHANNEL_PEROPENER) {
+               dev_err(&pdev->dev, "Interface channel %d is not the expected %d\n",
+                       NFP_CPP_INTERFACE_CHANNEL_of(interface),
+                       NFP_CPP_INTERFACE_CHANNEL_PEROPENER);
+               err = -ENODEV;
+               goto err_free_nfp;
+       }
+
+       err = enable_bars(nfp, interface);
+       if (err)
+               goto err_free_nfp;
+
+       /* Probe for all the common NFP devices */
+       return nfp_cpp_from_operations(&nfp6000_pcie_ops, &pdev->dev, nfp);
+
+err_free_nfp:
+       kfree(nfp);
+err_ret:
+       dev_err(&pdev->dev, "NFP6000 PCI setup failed\n");
+       return ERR_PTR(err);
+}
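A hypothetical caller (the function name below is illustrative; the real driver wires this up in its PCI probe path elsewhere) would handle the return value with the usual ERR_PTR() conventions:

static int example_pci_probe(struct pci_dev *pdev)
{
	struct nfp_cpp *cpp = nfp_cpp_from_nfp6000_pcie(pdev);

	if (IS_ERR(cpp))
		return PTR_ERR(cpp);

	/* ... use the handle via nfp_cpp_read()/nfp_cpp_write() ... */

	nfp_cpp_free(cpp);
	return 0;
}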
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h
new file mode 100644 (file)
index 0000000..245d8aa
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp6000_pcie.h
+ * Author: Jason McMullan <jason.mcmullan@netronome.com>
+ */
+
+#ifndef NFP6000_PCIE_H
+#define NFP6000_PCIE_H
+
+#include "nfp_cpp.h"
+
+struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev);
+
+#endif /* NFP6000_PCIE_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h
new file mode 100644 (file)
index 0000000..31fe922
--- /dev/null
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_arm.h
+ * Definitions for ARM-based registers and memory spaces
+ */
+
+#ifndef NFP_ARM_H
+#define NFP_ARM_H
+
+#define NFP_ARM_QUEUE(_q)              (0x100000 + (0x800 * ((_q) & 0xff)))
+#define NFP_ARM_IM                     0x200000
+#define NFP_ARM_EM                     0x300000
+#define NFP_ARM_GCSR                   0x400000
+#define NFP_ARM_MPCORE                 0x800000
+#define NFP_ARM_PL310                  0xa00000
+/* Register Type: BulkBARConfig */
+#define NFP_ARM_GCSR_BULK_BAR(_bar)    (0x0 + (0x4 * ((_bar) & 0x7)))
+#define   NFP_ARM_GCSR_BULK_BAR_TYPE                    (0x1 << 31)
+#define     NFP_ARM_GCSR_BULK_BAR_TYPE_BULK             (0x0)
+#define     NFP_ARM_GCSR_BULK_BAR_TYPE_EXPA             (0x80000000)
+#define   NFP_ARM_GCSR_BULK_BAR_TGT(_x)                 (((_x) & 0xf) << 27)
+#define   NFP_ARM_GCSR_BULK_BAR_TGT_of(_x)              (((_x) >> 27) & 0xf)
+#define   NFP_ARM_GCSR_BULK_BAR_TOK(_x)                 (((_x) & 0x3) << 25)
+#define   NFP_ARM_GCSR_BULK_BAR_TOK_of(_x)              (((_x) >> 25) & 0x3)
+#define   NFP_ARM_GCSR_BULK_BAR_LEN                     (0x1 << 24)
+#define     NFP_ARM_GCSR_BULK_BAR_LEN_32BIT             (0x0)
+#define     NFP_ARM_GCSR_BULK_BAR_LEN_64BIT             (0x1000000)
+#define   NFP_ARM_GCSR_BULK_BAR_ADDR(_x)                ((_x) & 0x7ff)
+#define   NFP_ARM_GCSR_BULK_BAR_ADDR_of(_x)             ((_x) & 0x7ff)
+/* Register Type: ExpansionBARConfig */
+#define NFP_ARM_GCSR_EXPA_BAR(_bar)    (0x20 + (0x4 * ((_bar) & 0xf)))
+#define   NFP_ARM_GCSR_EXPA_BAR_TYPE                    (0x1 << 31)
+#define     NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPA             (0x0)
+#define     NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPL             (0x80000000)
+#define   NFP_ARM_GCSR_EXPA_BAR_TGT(_x)                 (((_x) & 0xf) << 27)
+#define   NFP_ARM_GCSR_EXPA_BAR_TGT_of(_x)              (((_x) >> 27) & 0xf)
+#define   NFP_ARM_GCSR_EXPA_BAR_TOK(_x)                 (((_x) & 0x3) << 25)
+#define   NFP_ARM_GCSR_EXPA_BAR_TOK_of(_x)              (((_x) >> 25) & 0x3)
+#define   NFP_ARM_GCSR_EXPA_BAR_LEN                     (0x1 << 24)
+#define     NFP_ARM_GCSR_EXPA_BAR_LEN_32BIT             (0x0)
+#define     NFP_ARM_GCSR_EXPA_BAR_LEN_64BIT             (0x1000000)
+#define   NFP_ARM_GCSR_EXPA_BAR_ACT(_x)                 (((_x) & 0x1f) << 19)
+#define   NFP_ARM_GCSR_EXPA_BAR_ACT_of(_x)              (((_x) >> 19) & 0x1f)
+#define     NFP_ARM_GCSR_EXPA_BAR_ACT_DERIVED           (0)
+#define   NFP_ARM_GCSR_EXPA_BAR_ADDR(_x)                ((_x) & 0x7fff)
+#define   NFP_ARM_GCSR_EXPA_BAR_ADDR_of(_x)             ((_x) & 0x7fff)
+/* Register Type: ExplicitBARConfig0_Reg */
+#define NFP_ARM_GCSR_EXPL0_BAR(_bar)   (0x60 + (0x4 * ((_bar) & 0x7)))
+#define   NFP_ARM_GCSR_EXPL0_BAR_ADDR(_x)               ((_x) & 0x3ffff)
+#define   NFP_ARM_GCSR_EXPL0_BAR_ADDR_of(_x)            ((_x) & 0x3ffff)
+/* Register Type: ExplicitBARConfig1_Reg */
+#define NFP_ARM_GCSR_EXPL1_BAR(_bar)   (0x80 + (0x4 * ((_bar) & 0x7)))
+#define   NFP_ARM_GCSR_EXPL1_BAR_POSTED                 (0x1 << 31)
+#define   NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF(_x)         (((_x) & 0x7f) << 24)
+#define   NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF_of(_x)      (((_x) >> 24) & 0x7f)
+#define   NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER(_x)        (((_x) & 0xff) << 16)
+#define   NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER_of(_x)     (((_x) >> 16) & 0xff)
+#define   NFP_ARM_GCSR_EXPL1_BAR_DATA_REF(_x)           ((_x) & 0x3fff)
+#define   NFP_ARM_GCSR_EXPL1_BAR_DATA_REF_of(_x)        ((_x) & 0x3fff)
+/* Register Type: ExplicitBARConfig2_Reg */
+#define NFP_ARM_GCSR_EXPL2_BAR(_bar)   (0xa0 + (0x4 * ((_bar) & 0x7)))
+#define   NFP_ARM_GCSR_EXPL2_BAR_TGT(_x)                (((_x) & 0xf) << 28)
+#define   NFP_ARM_GCSR_EXPL2_BAR_TGT_of(_x)             (((_x) >> 28) & 0xf)
+#define   NFP_ARM_GCSR_EXPL2_BAR_ACT(_x)                (((_x) & 0x1f) << 23)
+#define   NFP_ARM_GCSR_EXPL2_BAR_ACT_of(_x)             (((_x) >> 23) & 0x1f)
+#define   NFP_ARM_GCSR_EXPL2_BAR_LEN(_x)                (((_x) & 0x1f) << 18)
+#define   NFP_ARM_GCSR_EXPL2_BAR_LEN_of(_x)             (((_x) >> 18) & 0x1f)
+#define   NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK(_x)          (((_x) & 0xff) << 10)
+#define   NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK_of(_x)       (((_x) >> 10) & 0xff)
+#define   NFP_ARM_GCSR_EXPL2_BAR_TOK(_x)                (((_x) & 0x3) << 8)
+#define   NFP_ARM_GCSR_EXPL2_BAR_TOK_of(_x)             (((_x) >> 8) & 0x3)
+#define   NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER(_x)      ((_x) & 0xff)
+#define   NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER_of(_x)   ((_x) & 0xff)
+/* Register Type: PostedCommandSignal */
+#define NFP_ARM_GCSR_EXPL_POST(_bar)   (0xc0 + (0x4 * ((_bar) & 0x7)))
+#define   NFP_ARM_GCSR_EXPL_POST_SIG_B(_x)              (((_x) & 0x7f) << 25)
+#define   NFP_ARM_GCSR_EXPL_POST_SIG_B_of(_x)           (((_x) >> 25) & 0x7f)
+#define   NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS              (0x1 << 24)
+#define     NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PULL       (0x0)
+#define     NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PUSH       (0x1000000)
+#define   NFP_ARM_GCSR_EXPL_POST_SIG_A(_x)              (((_x) & 0x7f) << 17)
+#define   NFP_ARM_GCSR_EXPL_POST_SIG_A_of(_x)           (((_x) >> 17) & 0x7f)
+#define   NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS              (0x1 << 16)
+#define     NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PULL       (0x0)
+#define     NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PUSH       (0x10000)
+#define   NFP_ARM_GCSR_EXPL_POST_SIG_B_RCVD             (0x1 << 7)
+#define   NFP_ARM_GCSR_EXPL_POST_SIG_B_VALID            (0x1 << 6)
+#define   NFP_ARM_GCSR_EXPL_POST_SIG_A_RCVD             (0x1 << 5)
+#define   NFP_ARM_GCSR_EXPL_POST_SIG_A_VALID            (0x1 << 4)
+#define   NFP_ARM_GCSR_EXPL_POST_CMD_COMPLETE           (0x1)
+/* Register Type: MPCoreBaseAddress */
+#define NFP_ARM_GCSR_MPCORE_BASE       0x00e0
+#define   NFP_ARM_GCSR_MPCORE_BASE_ADDR(_x)             (((_x) & 0x7ffff) << 13)
+#define   NFP_ARM_GCSR_MPCORE_BASE_ADDR_of(_x)          (((_x) >> 13) & 0x7ffff)
+/* Register Type: PL310BaseAddress */
+#define NFP_ARM_GCSR_PL310_BASE        0x00e4
+#define   NFP_ARM_GCSR_PL310_BASE_ADDR(_x)              (((_x) & 0xfffff) << 12)
+#define   NFP_ARM_GCSR_PL310_BASE_ADDR_of(_x)           (((_x) >> 12) & 0xfffff)
+/* Register Type: MPCoreConfig */
+#define NFP_ARM_GCSR_MP0_CFG           0x00e8
+#define   NFP_ARM_GCSR_MP0_CFG_SPI_BOOT                 (0x1 << 14)
+#define   NFP_ARM_GCSR_MP0_CFG_ENDIAN(_x)               (((_x) & 0x3) << 12)
+#define   NFP_ARM_GCSR_MP0_CFG_ENDIAN_of(_x)            (((_x) >> 12) & 0x3)
+#define     NFP_ARM_GCSR_MP0_CFG_ENDIAN_LITTLE          (0)
+#define     NFP_ARM_GCSR_MP0_CFG_ENDIAN_BIG             (1)
+#define   NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR             (0x1 << 8)
+#define     NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR_LO        (0x0)
+#define     NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR_HI        (0x100)
+#define   NFP_ARM_GCSR_MP0_CFG_OUTCLK_EN(_x)            (((_x) & 0xf) << 4)
+#define   NFP_ARM_GCSR_MP0_CFG_OUTCLK_EN_of(_x)         (((_x) >> 4) & 0xf)
+#define   NFP_ARM_GCSR_MP0_CFG_ARMID(_x)                ((_x) & 0xf)
+#define   NFP_ARM_GCSR_MP0_CFG_ARMID_of(_x)             ((_x) & 0xf)
+/* Register Type: MPCoreIDCacheDataError */
+#define NFP_ARM_GCSR_MP0_CACHE_ERR     0x00ec
+#define   NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D7             (0x1 << 15)
+#define   NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D6             (0x1 << 14)
+#define   NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D5             (0x1 << 13)
+#define   NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D4             (0x1 << 12)
+#define   NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D3             (0x1 << 11)
+#define   NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D2             (0x1 << 10)
+#define   NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D1             (0x1 << 9)
+#define   NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D0             (0x1 << 8)
+#define   NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I7             (0x1 << 7)
+#define   NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I6             (0x1 << 6)
+#define   NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I5             (0x1 << 5)
+#define   NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I4             (0x1 << 4)
+#define   NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I3             (0x1 << 3)
+#define   NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I2             (0x1 << 2)
+#define   NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I1             (0x1 << 1)
+#define   NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I0             (0x1)
+/* Register Type: ARMDFT */
+#define NFP_ARM_GCSR_DFT               0x0100
+#define   NFP_ARM_GCSR_DFT_DBG_REQ                      (0x1 << 20)
+#define   NFP_ARM_GCSR_DFT_DBG_EN                       (0x1 << 19)
+#define   NFP_ARM_GCSR_DFT_WFE_EVT_TRG                  (0x1 << 18)
+#define   NFP_ARM_GCSR_DFT_ETM_WFI_RDY                  (0x1 << 17)
+#define   NFP_ARM_GCSR_DFT_ETM_PWR_ON                   (0x1 << 16)
+#define   NFP_ARM_GCSR_DFT_BIST_FAIL_of(_x)             (((_x) >> 8) & 0xf)
+#define   NFP_ARM_GCSR_DFT_BIST_DONE_of(_x)             (((_x) >> 4) & 0xf)
+#define   NFP_ARM_GCSR_DFT_BIST_RUN(_x)                 ((_x) & 0x7)
+#define   NFP_ARM_GCSR_DFT_BIST_RUN_of(_x)              ((_x) & 0x7)
+
+/* Gasket CSRs */
+/* NOTE: These cannot be remapped, and are always at this location.
+ */
+#define NFP_ARM_GCSR_START     (0xd6000000 + NFP_ARM_GCSR)
+#define NFP_ARM_GCSR_SIZE      SZ_64K
+
+/* BAR CSRs
+ */
+#define NFP_ARM_GCSR_BULK_BITS 11
+#define NFP_ARM_GCSR_EXPA_BITS 15
+#define NFP_ARM_GCSR_EXPL_BITS 18
+
+#define NFP_ARM_GCSR_BULK_SHIFT        (40 - 11)
+#define NFP_ARM_GCSR_EXPA_SHIFT        (40 - 15)
+#define NFP_ARM_GCSR_EXPL_SHIFT        (40 - 18)
+
+#define NFP_ARM_GCSR_BULK_SIZE (1 << NFP_ARM_GCSR_BULK_SHIFT)
+#define NFP_ARM_GCSR_EXPA_SIZE (1 << NFP_ARM_GCSR_EXPA_SHIFT)
+#define NFP_ARM_GCSR_EXPL_SIZE (1 << NFP_ARM_GCSR_EXPL_SHIFT)
+
+#define NFP_ARM_GCSR_EXPL2_CSR(target, action, length, \
+                              byte_mask, token, signal_master) \
+       (NFP_ARM_GCSR_EXPL2_BAR_TGT(target) | \
+        NFP_ARM_GCSR_EXPL2_BAR_ACT(action) | \
+        NFP_ARM_GCSR_EXPL2_BAR_LEN(length) | \
+        NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK(byte_mask) | \
+        NFP_ARM_GCSR_EXPL2_BAR_TOK(token) | \
+        NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER(signal_master))
+#define NFP_ARM_GCSR_EXPL1_CSR(posted, signal_ref, data_master, data_ref) \
+       (((posted) ? NFP_ARM_GCSR_EXPL1_BAR_POSTED : 0) | \
+        NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF(signal_ref) | \
+        NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER(data_master) | \
+        NFP_ARM_GCSR_EXPL1_BAR_DATA_REF(data_ref))
+#define NFP_ARM_GCSR_EXPL0_CSR(address) \
+       NFP_ARM_GCSR_EXPL0_BAR_ADDR((address) >> NFP_ARM_GCSR_EXPL_SHIFT)
+#define NFP_ARM_GCSR_EXPL_POST_EXPECT_A(sig_ref, is_push, is_required) \
+       (NFP_ARM_GCSR_EXPL_POST_SIG_A(sig_ref) | \
+        ((is_push) ? NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PUSH : \
+                     NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PULL) | \
+        ((is_required) ? NFP_ARM_GCSR_EXPL_POST_SIG_A_VALID : 0))
+#define NFP_ARM_GCSR_EXPL_POST_EXPECT_B(sig_ref, is_push, is_required) \
+       (NFP_ARM_GCSR_EXPL_POST_SIG_B(sig_ref) | \
+        ((is_push) ? NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PUSH : \
+                     NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PULL) | \
+        ((is_required) ? NFP_ARM_GCSR_EXPL_POST_SIG_B_VALID : 0))
+
+#define NFP_ARM_GCSR_EXPA_CSR(mode, target, token, is_64, action, address) \
+       (((mode) ? NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPL : \
+                  NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPA) | \
+        NFP_ARM_GCSR_EXPA_BAR_TGT(target) | \
+        NFP_ARM_GCSR_EXPA_BAR_TOK(token) | \
+        ((is_64) ? NFP_ARM_GCSR_EXPA_BAR_LEN_64BIT : \
+                   NFP_ARM_GCSR_EXPA_BAR_LEN_32BIT) | \
+        NFP_ARM_GCSR_EXPA_BAR_ACT(action) | \
+        NFP_ARM_GCSR_EXPA_BAR_ADDR((address) >> NFP_ARM_GCSR_EXPA_SHIFT))
+
+#define NFP_ARM_GCSR_BULK_CSR(mode, target, token, is_64, address) \
+       (((mode) ? NFP_ARM_GCSR_BULK_BAR_TYPE_EXPA : \
+                  NFP_ARM_GCSR_BULK_BAR_TYPE_BULK) | \
+        NFP_ARM_GCSR_BULK_BAR_TGT(target) | \
+        NFP_ARM_GCSR_BULK_BAR_TOK(token) | \
+        ((is_64) ? NFP_ARM_GCSR_BULK_BAR_LEN_64BIT : \
+                   NFP_ARM_GCSR_BULK_BAR_LEN_32BIT) | \
+        NFP_ARM_GCSR_BULK_BAR_ADDR((address) >> NFP_ARM_GCSR_BULK_SHIFT))
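By way of illustration (all operand values below are made up for the example), a 64-bit bulk window onto CPP target 7, token 0, based at 0x8000000000 would be composed as:

	u32 bar = NFP_ARM_GCSR_BULK_CSR(0 /* bulk mode */, 7, 0,
					1 /* 64-bit */, 0x8000000000ULL);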
+
+/* MP Core CSRs */
+#define NFP_ARM_MPCORE_SIZE    SZ_128K
+
+/* PL320 CSRs */
+#define NFP_ARM_PCSR_SIZE      SZ_64K
+
+#endif /* NFP_ARM_H */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h
new file mode 100644 (file)
index 0000000..edecc0a
--- /dev/null
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_cpp.h
+ * Interface for low-level NFP CPP access.
+ * Authors: Jason McMullan <jason.mcmullan@netronome.com>
+ *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
+ */
+#ifndef __NFP_CPP_H__
+#define __NFP_CPP_H__
+
+#include <linux/ctype.h>
+#include <linux/types.h>
+
+#ifndef NFP_SUBSYS
+#define NFP_SUBSYS "nfp"
+#endif
+
+#define nfp_err(cpp, fmt, args...) \
+       dev_err(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
+#define nfp_warn(cpp, fmt, args...) \
+       dev_warn(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
+#define nfp_info(cpp, fmt, args...) \
+       dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
+#define nfp_dbg(cpp, fmt, args...) \
+       dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args)
+
+#define PCI_64BIT_BAR_COUNT             3
+
+#define NFP_CPP_NUM_TARGETS             16
+
+struct device;
+
+struct nfp_cpp_area;
+struct nfp_cpp;
+struct resource;
+
+/* Wildcard indicating a CPP read or write action
+ *
+ * The action used will be either read or write depending on whether a
+ * read or write instruction/call is performed on the NFP_CPP_ID.  It
+ * is recomended that the RW action is used even if all actions to be
+ * performed on a NFP_CPP_ID are known to be only reads or writes.
+ * Doing so will in many cases save NFP CPP internal software
+ * resources.
+ */
+#define NFP_CPP_ACTION_RW               32
+
+#define NFP_CPP_TARGET_ID_MASK          0x1f
+
+/**
+ * NFP_CPP_ID() - pack target, token, and action into a CPP ID.
+ * @target:     NFP CPP target id
+ * @action:     NFP CPP action id
+ * @token:      NFP CPP token id
+ *
+ * Create a 32-bit CPP identifier representing the access to be made.
+ * These identifiers are used as parameters to other NFP CPP
+ * functions.  Some CPP devices may allow wildcard identifiers to be
+ * specified.
+ *
+ * Return:      NFP CPP ID
+ */
+#define NFP_CPP_ID(target, action, token)                       \
+       ((((target) & 0x7f) << 24) | (((token)  & 0xff) << 16) | \
+        (((action) & 0xff) <<  8))
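A quick round trip through this macro and the accessors defined just below (the target and token values are arbitrary):

	u32 id = NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 2);

	WARN_ON(NFP_CPP_ID_TARGET_of(id) != 7 ||
		NFP_CPP_ID_ACTION_of(id) != NFP_CPP_ACTION_RW ||
		NFP_CPP_ID_TOKEN_of(id) != 2);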
+
+/**
+ * NFP_CPP_ISLAND_ID() - pack target, token, action, and island into a CPP ID.
+ * @target:     NFP CPP target id
+ * @action:     NFP CPP action id
+ * @token:      NFP CPP token id
+ * @island:     NFP CPP island id
+ *
+ * Create a 32-bit CPP identifier representing the access to be made.
+ * These identifiers are used as parameters to other NFP CPP
+ * functions.  Some CPP devices may allow wildcard identifiers to be
+ * specified.
+ *
+ * Return:      NFP CPP ID
+ */
+#define NFP_CPP_ISLAND_ID(target, action, token, island)        \
+       ((((target) & 0x7f) << 24) | (((token)  & 0xff) << 16) | \
+        (((action) & 0xff) <<  8) | (((island) & 0xff) << 0))
+
+/**
+ * NFP_CPP_ID_TARGET_of() - Return the NFP CPP target of a NFP CPP ID
+ * @id:         NFP CPP ID
+ *
+ * Return:      NFP CPP target
+ */
+static inline u8 NFP_CPP_ID_TARGET_of(u32 id)
+{
+       return (id >> 24) & NFP_CPP_TARGET_ID_MASK;
+}
+
+/**
+ * NFP_CPP_ID_TOKEN_of() - Return the NFP CPP token of a NFP CPP ID
+ * @id:         NFP CPP ID
+ * Return:      NFP CPP token
+ */
+static inline u8 NFP_CPP_ID_TOKEN_of(u32 id)
+{
+       return (id >> 16) & 0xff;
+}
+
+/**
+ * NFP_CPP_ID_ACTION_of() - Return the NFP CPP action of a NFP CPP ID
+ * @id:         NFP CPP ID
+ *
+ * Return:      NFP CPP action
+ */
+static inline u8 NFP_CPP_ID_ACTION_of(u32 id)
+{
+       return (id >> 8) & 0xff;
+}
+
+/**
+ * NFP_CPP_ID_ISLAND_of() - Return the NFP CPP island of a NFP CPP ID
+ * @id: NFP CPP ID
+ *
+ * Return:      NFP CPP island
+ */
+static inline u8 NFP_CPP_ID_ISLAND_of(u32 id)
+{
+       return (id >> 0) & 0xff;
+}
+
+/* NFP Interface types - logical interface for this CPP connection
+ * 4 bits are reserved for interface type.
+ */
+#define NFP_CPP_INTERFACE_TYPE_INVALID      0x0
+#define NFP_CPP_INTERFACE_TYPE_PCI          0x1
+#define NFP_CPP_INTERFACE_TYPE_ARM          0x2
+#define NFP_CPP_INTERFACE_TYPE_RPC          0x3
+#define NFP_CPP_INTERFACE_TYPE_ILA          0x4
+
+/**
+ * NFP_CPP_INTERFACE() - Construct a 16-bit NFP Interface ID
+ * @type:       NFP Interface Type
+ * @unit:       Unit identifier for the interface type
+ * @channel:    Channel identifier for the interface unit
+ *
+ * Interface IDs consist of 4 bits of interface type,
+ * 4 bits of unit identifier, and 8 bits of channel identifier.
+ *
+ * The NFP Interface ID is used in the implementation of
+ * NFP CPP API mutexes, which use the MU Atomic CompareAndWrite
+ * operation - hence the limit to 16 bits to be able to
+ * use the NFP Interface ID as a lock owner.
+ *
+ * Return:      Interface ID
+ */
+#define NFP_CPP_INTERFACE(type, unit, channel) \
+       ((((type) & 0xf) << 12) |               \
+        (((unit) & 0xf) <<  8) |               \
+        (((channel) & 0xff) << 0))
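For example, the PCIe transport's fallback ID seen earlier in nfp6000_get_interface() packs as:

	u16 ifc = NFP_CPP_INTERFACE(NFP_CPP_INTERFACE_TYPE_PCI, 0, 0xff);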
+
+/**
+ * NFP_CPP_INTERFACE_TYPE_of() - Get the interface type
+ * @interface:  NFP Interface ID
+ * Return:      NFP Interface ID's type
+ */
+#define NFP_CPP_INTERFACE_TYPE_of(interface)   (((interface) >> 12) & 0xf)
+
+/**
+ * NFP_CPP_INTERFACE_UNIT_of() - Get the interface unit
+ * @interface:  NFP Interface ID
+ * Return:      NFP Interface ID's unit
+ */
+#define NFP_CPP_INTERFACE_UNIT_of(interface)   (((interface) >>  8) & 0xf)
+
+/**
+ * NFP_CPP_INTERFACE_CHANNEL_of() - Get the interface channel
+ * @interface:  NFP Interface ID
+ * Return:      NFP Interface ID's channel
+ */
+#define NFP_CPP_INTERFACE_CHANNEL_of(interface)   (((interface) >>  0) & 0xff)
+
+/* Implemented in nfp_cppcore.c */
+void nfp_cpp_free(struct nfp_cpp *cpp);
+u32 nfp_cpp_model(struct nfp_cpp *cpp);
+u16 nfp_cpp_interface(struct nfp_cpp *cpp);
+int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial);
+
+void *nfp_hwinfo_cache(struct nfp_cpp *cpp);
+void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val);
+void *nfp_rtsym_cache(struct nfp_cpp *cpp);
+void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val);
+
+void nfp_nffw_cache_flush(struct nfp_cpp *cpp);
+
+struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp,
+                                                 u32 cpp_id,
+                                                 const char *name,
+                                                 unsigned long long address,
+                                                 unsigned long size);
+struct nfp_cpp_area *nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 cpp_id,
+                                       unsigned long long address,
+                                       unsigned long size);
+void nfp_cpp_area_free(struct nfp_cpp_area *area);
+int nfp_cpp_area_acquire(struct nfp_cpp_area *area);
+int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area);
+void nfp_cpp_area_release(struct nfp_cpp_area *area);
+void nfp_cpp_area_release_free(struct nfp_cpp_area *area);
+int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset,
+                     void *buffer, size_t length);
+int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned long offset,
+                      const void *buffer, size_t length);
+int nfp_cpp_area_check_range(struct nfp_cpp_area *area,
+                            unsigned long long offset, unsigned long size);
+const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area);
+void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area);
+struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area);
+struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area);
+phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area);
+void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area);
+
+int nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset,
+                      u32 *value);
+int nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset,
+                       u32 value);
+int nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset,
+                      u64 *value);
+int nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset,
+                       u64 value);
+int nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset,
+                     u32 value, size_t length);
+
+int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_tgt, u32 *value);
+int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_tgt, u32 value);
+int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt, u32 mask, u32 value);
+
+/* Implemented in nfp_cpplib.c */
+int nfp_cpp_read(struct nfp_cpp *cpp, u32 cpp_id,
+                unsigned long long address, void *kernel_vaddr, size_t length);
+int nfp_cpp_write(struct nfp_cpp *cpp, u32 cpp_id,
+                 unsigned long long address, const void *kernel_vaddr,
+                 size_t length);
+int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id,
+                 unsigned long long address, u32 *value);
+int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id,
+                  unsigned long long address, u32 value);
+int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
+                 unsigned long long address, u64 *value);
+int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id,
+                  unsigned long long address, u64 value);
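A minimal sketch of the word-sized helpers above (the MU target constant comes from nfp6000/nfp6000.h, and the address is illustrative):

	u32 val;
	int err;

	err = nfp_cpp_readl(cpp,
			    NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0),
			    0x10000, &val);
	if (!err)
		err = nfp_cpp_writel(cpp,
				     NFP_CPP_ID(NFP_CPP_TARGET_MU,
						NFP_CPP_ACTION_RW, 0),
				     0x10000, val);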
+
+struct nfp_cpp_mutex;
+
+int nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target,
+                      unsigned long long address, u32 key_id);
+struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
+                                         unsigned long long address,
+                                         u32 key_id);
+void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex);
+int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex);
+int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex);
+int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex);
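A sketch of the intended locking flow against other CPP masters — target, address, and key_id are illustrative placeholders, and 0 is assumed to mean success for lock/unlock:

	struct nfp_cpp_mutex *m;

	m = nfp_cpp_mutex_alloc(cpp, target, address, key_id);
	if (m) {
		if (!nfp_cpp_mutex_lock(m)) {
			/* ... exclusive access to the shared resource ... */
			nfp_cpp_mutex_unlock(m);
		}
		nfp_cpp_mutex_free(m);
	}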
+
+struct nfp_cpp_explicit;
+
+struct nfp_cpp_explicit_command {
+       u32 cpp_id;
+       u16 data_ref;
+       u8  data_master;
+       u8  len;
+       u8  byte_mask;
+       u8  signal_master;
+       u8  signal_ref;
+       u8  posted;
+       u8  siga;
+       u8  sigb;
+       s8   siga_mode;
+       s8   sigb_mode;
+};
+
+#define NFP_SERIAL_LEN         6
+
+/**
+ * struct nfp_cpp_operations - NFP CPP operations structure
+ * @area_priv_size:     Size of the nfp_cpp_area private data
+ * @owner:              Owner module
+ * @init:               Initialize the NFP CPP bus
+ * @free:               Free the bus
+ * @read_serial:       Read serial number to memory provided
+ * @get_interface:     Return CPP interface
+ * @area_init:          Initialize a new NFP CPP area (not serialized)
+ * @area_cleanup:       Clean up a NFP CPP area (not serialized)
+ * @area_acquire:       Acquire the NFP CPP area (serialized)
+ * @area_release:       Release area (serialized)
+ * @area_resource:      Get resource range of area (not serialized)
+ * @area_phys:          Get physical address of area (not serialized)
+ * @area_iomem:         Get iomem of area (not serialized)
+ * @area_read:          Perform a read from a NFP CPP area (serialized)
+ * @area_write:         Perform a write to a NFP CPP area (serialized)
+ * @explicit_priv_size: Size of an explicit's private area
+ * @explicit_acquire:   Acquire an explicit area
+ * @explicit_release:   Release an explicit area
+ * @explicit_put:       Write data to send
+ * @explicit_get:       Read data received
+ * @explicit_do:        Perform the transaction
+ */
+struct nfp_cpp_operations {
+       size_t area_priv_size;
+       struct module *owner;
+
+       int (*init)(struct nfp_cpp *cpp);
+       void (*free)(struct nfp_cpp *cpp);
+
+       void (*read_serial)(struct device *dev, u8 *serial);
+       u16 (*get_interface)(struct device *dev);
+
+       int (*area_init)(struct nfp_cpp_area *area,
+                        u32 dest, unsigned long long address,
+                        unsigned long size);
+       void (*area_cleanup)(struct nfp_cpp_area *area);
+       int (*area_acquire)(struct nfp_cpp_area *area);
+       void (*area_release)(struct nfp_cpp_area *area);
+       struct resource *(*area_resource)(struct nfp_cpp_area *area);
+       phys_addr_t (*area_phys)(struct nfp_cpp_area *area);
+       void __iomem *(*area_iomem)(struct nfp_cpp_area *area);
+       int (*area_read)(struct nfp_cpp_area *area, void *kernel_vaddr,
+                        unsigned long offset, unsigned int length);
+       int (*area_write)(struct nfp_cpp_area *area, const void *kernel_vaddr,
+                         unsigned long offset, unsigned int length);
+
+       size_t explicit_priv_size;
+       int (*explicit_acquire)(struct nfp_cpp_explicit *expl);
+       void (*explicit_release)(struct nfp_cpp_explicit *expl);
+       int (*explicit_put)(struct nfp_cpp_explicit *expl,
+                           const void *buff, size_t len);
+       int (*explicit_get)(struct nfp_cpp_explicit *expl,
+                           void *buff, size_t len);
+       int (*explicit_do)(struct nfp_cpp_explicit *expl,
+                          const struct nfp_cpp_explicit_command *cmd,
+                          u64 address);
+};
+
+struct nfp_cpp *
+nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
+                       struct device *parent, void *priv);
+void *nfp_cpp_priv(struct nfp_cpp *priv);
+
+int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size);
+
+/* The following section contains extensions to the
+ * NFP CPP API, to be used in a Linux kernel-space context.
+ */
+
+/* Use this channel ID for multiple virtual channel interfaces
+ * (ie ARM and PCIe) when setting up the interface field.
+ */
+#define NFP_CPP_INTERFACE_CHANNEL_PEROPENER    255
+struct device *nfp_cpp_device(struct nfp_cpp *cpp);
+
+/* Return code masks for nfp_cpp_explicit_do()
+ */
+#define NFP_SIGNAL_MASK_A      BIT(0)  /* Signal A fired */
+#define NFP_SIGNAL_MASK_B      BIT(1)  /* Signal B fired */
+
+enum nfp_cpp_explicit_signal_mode {
+       NFP_SIGNAL_NONE = 0,
+       NFP_SIGNAL_PUSH = 1,
+       NFP_SIGNAL_PUSH_OPTIONAL = -1,
+       NFP_SIGNAL_PULL = 2,
+       NFP_SIGNAL_PULL_OPTIONAL = -2,
+};
+
+struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp);
+int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl, u32 cpp_id,
+                               u8 len, u8 mask);
+int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl,
+                             u8 data_master, u16 data_ref);
+int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl,
+                               u8 signal_master, u8 signal_ref);
+int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted,
+                               u8 siga,
+                               enum nfp_cpp_explicit_signal_mode siga_mode,
+                               u8 sigb,
+                               enum nfp_cpp_explicit_signal_mode sigb_mode);
+int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl,
+                        const void *buff, size_t len);
+int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address);
+int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len);
+void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl);
+struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *expl);
+void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit);
+
+/* Implemented in nfp_cpplib.c */
+
+int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model);
+
+int nfp_cpp_explicit_read(struct nfp_cpp *cpp, u32 cpp_id,
+                         u64 addr, void *buff, size_t len,
+                         int width_read);
+
+int nfp_cpp_explicit_write(struct nfp_cpp *cpp, u32 cpp_id,
+                          u64 addr, const void *buff, size_t len,
+                          int width_write);
+
+#endif /* !__NFP_CPP_H__ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
new file mode 100644 (file)
index 0000000..40108e6
--- /dev/null
@@ -0,0 +1,1746 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_cppcore.c
+ * Provides low-level access to the NFP's internal CPP bus
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ *          Jason McMullan <jason.mcmullan@netronome.com>
+ *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include "nfp_arm.h"
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+
+#define NFP_ARM_GCSR_SOFTMODEL2                              0x0000014c
+#define NFP_ARM_GCSR_SOFTMODEL3                              0x00000150
+
+struct nfp_cpp_resource {
+       struct list_head list;
+       const char *name;
+       u32 cpp_id;
+       u64 start;
+       u64 end;
+};
+
+struct nfp_cpp_mutex {
+       struct list_head list;
+       struct nfp_cpp *cpp;
+       int target;
+       u16 usage;
+       u16 depth;
+       unsigned long long address;
+       u32 key;
+};
+
+struct nfp_cpp {
+       struct device dev;
+
+       void *priv; /* Private data of the low-level implementation */
+
+       u32 model;
+       u16 interface;
+       u8 serial[NFP_SERIAL_LEN];
+
+       const struct nfp_cpp_operations *op;
+       struct list_head resource_list; /* NFP CPP resource list */
+       struct list_head mutex_cache;   /* Mutex cache */
+       rwlock_t resource_lock;
+       wait_queue_head_t waitq;
+
+       /* NFP6000 CPP Mapping Table */
+       u32 imb_cat_table[16];
+
+       /* Cached areas for cpp/xpb readl/writel speedups */
+       struct mutex area_cache_mutex;  /* Lock for the area cache */
+       struct list_head area_cache_list;
+
+       /* Cached information */
+       void *hwinfo;
+       void *rtsym;
+};
+
+/* Element of the area_cache_list */
+struct nfp_cpp_area_cache {
+       struct list_head entry;
+       u32 id;
+       u64 addr;
+       u32 size;
+       struct nfp_cpp_area *area;
+};
+
+struct nfp_cpp_area {
+       struct nfp_cpp *cpp;
+       struct kref kref;
+       atomic_t refcount;
+       struct mutex mutex;     /* Lock for the area's refcount */
+       unsigned long long offset;
+       unsigned long size;
+       struct nfp_cpp_resource resource;
+       void __iomem *iomem;
+       /* Here follows the 'priv' part of nfp_cpp_area. */
+};
+
+struct nfp_cpp_explicit {
+       struct nfp_cpp *cpp;
+       struct nfp_cpp_explicit_command cmd;
+       /* Here follows the 'priv' part of nfp_cpp_explicit. */
+};
+
+static void __resource_add(struct list_head *head, struct nfp_cpp_resource *res)
+{
+       struct nfp_cpp_resource *tmp;
+       struct list_head *pos;
+
+       list_for_each(pos, head) {
+               tmp = container_of(pos, struct nfp_cpp_resource, list);
+
+               if (tmp->cpp_id > res->cpp_id)
+                       break;
+
+               if (tmp->cpp_id == res->cpp_id && tmp->start > res->start)
+                       break;
+       }
+
+       list_add_tail(&res->list, pos);
+}
+
+static void __resource_del(struct nfp_cpp_resource *res)
+{
+       list_del_init(&res->list);
+}
+
+static void __release_cpp_area(struct kref *kref)
+{
+       struct nfp_cpp_area *area =
+               container_of(kref, struct nfp_cpp_area, kref);
+       struct nfp_cpp *cpp = nfp_cpp_area_cpp(area);
+
+       if (area->cpp->op->area_cleanup)
+               area->cpp->op->area_cleanup(area);
+
+       write_lock(&cpp->resource_lock);
+       __resource_del(&area->resource);
+       write_unlock(&cpp->resource_lock);
+       kfree(area);
+}
+
+static void nfp_cpp_area_put(struct nfp_cpp_area *area)
+{
+       kref_put(&area->kref, __release_cpp_area);
+}
+
+static struct nfp_cpp_area *nfp_cpp_area_get(struct nfp_cpp_area *area)
+{
+       kref_get(&area->kref);
+
+       return area;
+}
+
+/**
+ * nfp_cpp_free() - free the CPP handle
+ * @cpp:       CPP handle
+ */
+void nfp_cpp_free(struct nfp_cpp *cpp)
+{
+       struct nfp_cpp_area_cache *cache, *ctmp;
+       struct nfp_cpp_resource *res, *rtmp;
+       struct nfp_cpp_mutex *mutex, *mtmp;
+
+       /* There should be no mutexes in the cache at this point. */
+       WARN_ON(!list_empty(&cpp->mutex_cache));
+       /* .. but if there are, unlock them and complain. */
+       list_for_each_entry_safe(mutex, mtmp, &cpp->mutex_cache, list) {
+               dev_err(cpp->dev.parent, "Dangling mutex: @%d::0x%llx, %d locks held by %d owners\n",
+                       mutex->target, (unsigned long long)mutex->address,
+                       mutex->depth, mutex->usage);
+
+               /* Forcing an unlock */
+               mutex->depth = 1;
+               nfp_cpp_mutex_unlock(mutex);
+
+               /* Forcing a free */
+               mutex->usage = 1;
+               nfp_cpp_mutex_free(mutex);
+       }
+
+       /* Remove all caches */
+       list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) {
+               list_del(&cache->entry);
+               if (cache->id)
+                       nfp_cpp_area_release(cache->area);
+               nfp_cpp_area_free(cache->area);
+               kfree(cache);
+       }
+
+       /* There should be no dangling areas at this point */
+       WARN_ON(!list_empty(&cpp->resource_list));
+
+       /* .. but if there are, try to clean up. */
+       list_for_each_entry_safe(res, rtmp, &cpp->resource_list, list) {
+               struct nfp_cpp_area *area = container_of(res,
+                                                        struct nfp_cpp_area,
+                                                        resource);
+
+               dev_err(cpp->dev.parent, "Dangling area: %d:%d:%d:0x%0llx-0x%0llx%s%s\n",
+                       NFP_CPP_ID_TARGET_of(res->cpp_id),
+                       NFP_CPP_ID_ACTION_of(res->cpp_id),
+                       NFP_CPP_ID_TOKEN_of(res->cpp_id),
+                       res->start, res->end,
+                       res->name ? " " : "",
+                       res->name ? res->name : "");
+
+               if (area->cpp->op->area_release)
+                       area->cpp->op->area_release(area);
+
+               __release_cpp_area(&area->kref);
+       }
+
+       if (cpp->op->free)
+               cpp->op->free(cpp);
+
+       kfree(cpp->hwinfo);
+       kfree(cpp->rtsym);
+
+       device_unregister(&cpp->dev);
+
+       kfree(cpp);
+}
+
+/**
+ * nfp_cpp_model() - Retrieve the Model ID of the NFP
+ * @cpp:       NFP CPP handle
+ *
+ * Return: NFP CPP Model ID
+ */
+u32 nfp_cpp_model(struct nfp_cpp *cpp)
+{
+       return cpp->model;
+}
+
+/**
+ * nfp_cpp_interface() - Retrieve the Interface ID of the NFP
+ * @cpp:       NFP CPP handle
+ *
+ * Return: NFP CPP Interface ID
+ */
+u16 nfp_cpp_interface(struct nfp_cpp *cpp)
+{
+       return cpp->interface;
+}
+
+/**
+ * nfp_cpp_serial() - Retrieve the Serial ID of the NFP
+ * @cpp:       NFP CPP handle
+ * @serial:    Pointer to NFP serial number
+ *
+ * Return:  Length of NFP serial number
+ */
+int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial)
+{
+       *serial = &cpp->serial[0];
+       return sizeof(cpp->serial);
+}
+
+void *nfp_hwinfo_cache(struct nfp_cpp *cpp)
+{
+       return cpp->hwinfo;
+}
+
+void nfp_hwinfo_cache_set(struct nfp_cpp *cpp, void *val)
+{
+       cpp->hwinfo = val;
+}
+
+void *nfp_rtsym_cache(struct nfp_cpp *cpp)
+{
+       return cpp->rtsym;
+}
+
+void nfp_rtsym_cache_set(struct nfp_cpp *cpp, void *val)
+{
+       cpp->rtsym = val;
+}
+
+/**
+ * nfp_nffw_cache_flush() - Flush cached firmware information
+ * @cpp:       NFP CPP handle
+ *
+ * Flush cached firmware information.  This function should be called
+ * every time firmware is loaded or unloaded.
+ */
+void nfp_nffw_cache_flush(struct nfp_cpp *cpp)
+{
+       kfree(nfp_rtsym_cache(cpp));
+       nfp_rtsym_cache_set(cpp, NULL);
+}
+
+/**
+ * nfp_cpp_area_alloc_with_name() - allocate a new CPP area
+ * @cpp:       CPP device handle
+ * @dest:      NFP CPP ID
+ * @name:      Name of region
+ * @address:   Address of region
+ * @size:      Size of region
+ *
+ * Allocate and initialize a CPP area structure.  The area must later
+ * be locked down with an 'acquire' before it can be safely accessed.
+ *
+ * NOTE: @address and @size must be 32-bit aligned values.
+ *
+ * Return: NFP CPP area handle, or NULL
+ */
+struct nfp_cpp_area *
+nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 dest, const char *name,
+                            unsigned long long address, unsigned long size)
+{
+       struct nfp_cpp_area *area;
+       u64 tmp64 = address;
+       int err, name_len;
+
+       /* Remap from cpp_island to cpp_target */
+       err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table);
+       if (err < 0)
+               return NULL;
+
+       address = tmp64;
+
+       if (!name)
+               name = "(reserved)";
+
+       name_len = strlen(name) + 1;
+       area = kzalloc(sizeof(*area) + cpp->op->area_priv_size + name_len,
+                      GFP_KERNEL);
+       if (!area)
+               return NULL;
+
+       area->cpp = cpp;
+       area->resource.name = (void *)area + sizeof(*area) +
+               cpp->op->area_priv_size;
+       memcpy((char *)area->resource.name, name, name_len);
+
+       area->resource.cpp_id = dest;
+       area->resource.start = address;
+       area->resource.end = area->resource.start + size - 1;
+       INIT_LIST_HEAD(&area->resource.list);
+
+       atomic_set(&area->refcount, 0);
+       kref_init(&area->kref);
+       mutex_init(&area->mutex);
+
+       if (cpp->op->area_init) {
+               int err;
+
+               err = cpp->op->area_init(area, dest, address, size);
+               if (err < 0) {
+                       kfree(area);
+                       return NULL;
+               }
+       }
+
+       write_lock(&cpp->resource_lock);
+       __resource_add(&cpp->resource_list, &area->resource);
+       write_unlock(&cpp->resource_lock);
+
+       area->offset = address;
+       area->size = size;
+
+       return area;
+}
+
+/**
+ * nfp_cpp_area_alloc() - allocate a new CPP area
+ * @cpp:       CPP handle
+ * @dest:      CPP id
+ * @address:   Start address on CPP target
+ * @size:      Size of area in bytes
+ *
+ * Allocate and initialize a CPP area structure.  The area must later
+ * be locked down with an 'acquire' before it can be safely accessed.
+ *
+ * NOTE: @address and @size must be 32-bit aligned values.
+ *
+ * Return: NFP CPP Area handle, or NULL
+ */
+struct nfp_cpp_area *
+nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
+                  unsigned long long address, unsigned long size)
+{
+       return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size);
+}
+
+/**
+ * nfp_cpp_area_free() - free up the CPP area
+ * @area:      CPP area handle
+ *
+ * Frees up memory resources held by the CPP area.
+ */
+void nfp_cpp_area_free(struct nfp_cpp_area *area)
+{
+       nfp_cpp_area_put(area);
+}
+
+/**
+ * nfp_cpp_area_acquire() - lock down a CPP area for access
+ * @area:      CPP area handle
+ *
+ * Locks down the CPP area for a potential long term activity.  Area
+ * must always be locked down before being accessed.
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_area_acquire(struct nfp_cpp_area *area)
+{
+       mutex_lock(&area->mutex);
+       if (atomic_inc_return(&area->refcount) == 1) {
+               int (*a_a)(struct nfp_cpp_area *);
+
+               a_a = area->cpp->op->area_acquire;
+               if (a_a) {
+                       int err;
+
+                       wait_event_interruptible(area->cpp->waitq,
+                                                (err = a_a(area)) != -EAGAIN);
+                       if (err < 0) {
+                               atomic_dec(&area->refcount);
+                               mutex_unlock(&area->mutex);
+                               return err;
+                       }
+               }
+       }
+       mutex_unlock(&area->mutex);
+
+       nfp_cpp_area_get(area);
+       return 0;
+}
+
+/**
+ * nfp_cpp_area_acquire_nonblocking() - lock down a CPP area for access
+ * @area:      CPP area handle
+ *
+ * Locks down the CPP area for a potential long term activity.  Area
+ * must always be locked down before being accessed.
+ *
+ * NOTE: Returns -EAGAIN if no area is available
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area)
+{
+       mutex_lock(&area->mutex);
+       if (atomic_inc_return(&area->refcount) == 1) {
+               if (area->cpp->op->area_acquire) {
+                       int err;
+
+                       err = area->cpp->op->area_acquire(area);
+                       if (err < 0) {
+                               atomic_dec(&area->refcount);
+                               mutex_unlock(&area->mutex);
+                               return err;
+                       }
+               }
+       }
+       mutex_unlock(&area->mutex);
+
+       nfp_cpp_area_get(area);
+       return 0;
+}
+
+/**
+ * nfp_cpp_area_release() - release a locked down CPP area
+ * @area:      CPP area handle
+ *
+ * Releases a previously locked down CPP area.
+ */
+void nfp_cpp_area_release(struct nfp_cpp_area *area)
+{
+       mutex_lock(&area->mutex);
+       /* Only call the release on refcount == 0 */
+       if (atomic_dec_and_test(&area->refcount)) {
+               if (area->cpp->op->area_release) {
+                       area->cpp->op->area_release(area);
+                       /* Let anyone waiting for a BAR try to get one.. */
+                       wake_up_interruptible_all(&area->cpp->waitq);
+               }
+       }
+       mutex_unlock(&area->mutex);
+
+       nfp_cpp_area_put(area);
+}
+
+/**
+ * nfp_cpp_area_release_free() - release CPP area and free it
+ * @area:      CPP area handle
+ *
+ * Releases the CPP area and frees up memory resources held by it.
+ */
+void nfp_cpp_area_release_free(struct nfp_cpp_area *area)
+{
+       nfp_cpp_area_release(area);
+       nfp_cpp_area_free(area);
+}
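+
+/* A minimal sketch of the expected area lifecycle, assuming a valid @cpp
+ * handle; the MU address used below is hypothetical:
+ *
+ *	struct nfp_cpp_area *area;
+ *	u32 val;
+ *	int err;
+ *
+ *	area = nfp_cpp_area_alloc(cpp, NFP_CPP_ID(NFP_CPP_TARGET_MU,
+ *						   NFP_CPP_ACTION_RW, 0),
+ *				  0x1000, sizeof(val));
+ *	if (!area)
+ *		return -ENOMEM;
+ *	err = nfp_cpp_area_acquire(area);
+ *	if (err) {
+ *		nfp_cpp_area_free(area);
+ *		return err;
+ *	}
+ *	err = nfp_cpp_area_readl(area, 0, &val);
+ *	nfp_cpp_area_release_free(area);
+ */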
+
+/**
+ * nfp_cpp_area_read() - read data from CPP area
+ * @area:        CPP area handle
+ * @offset:      offset into CPP area
+ * @kernel_vaddr: kernel address to put data into
+ * @length:      number of bytes to read
+ *
+ * Read data from indicated CPP region.
+ *
+ * NOTE: @offset and @length must be 32-bit aligned values.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: length of io, or -ERRNO
+ */
+int nfp_cpp_area_read(struct nfp_cpp_area *area,
+                     unsigned long offset, void *kernel_vaddr,
+                     size_t length)
+{
+       return area->cpp->op->area_read(area, kernel_vaddr, offset, length);
+}
+
+/**
+ * nfp_cpp_area_write() - write data to CPP area
+ * @area:      CPP area handle
+ * @offset:    offset into CPP area
+ * @kernel_vaddr: kernel address to read data from
+ * @length:    number of bytes to write
+ *
+ * Write data to indicated CPP region.
+ *
+ * NOTE: @offset and @length must be 32-bit aligned values.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: length of io, or -ERRNO
+ */
+int nfp_cpp_area_write(struct nfp_cpp_area *area,
+                      unsigned long offset, const void *kernel_vaddr,
+                      size_t length)
+{
+       return area->cpp->op->area_write(area, kernel_vaddr, offset, length);
+}
+
+/**
+ * nfp_cpp_area_check_range() - check if address range fits in CPP area
+ * @area:      CPP area handle
+ * @offset:    offset into CPP target
+ * @length:    size of address range in bytes
+ *
+ * Check if address range fits within CPP area.  Return 0 if the range
+ * fits, or -EFAULT if it does not.
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_area_check_range(struct nfp_cpp_area *area,
+                            unsigned long long offset, unsigned long length)
+{
+       if (offset < area->offset ||
+           offset + length > area->offset + area->size)
+               return -EFAULT;
+
+       return 0;
+}
+
+/**
+ * nfp_cpp_area_name() - return name of a CPP area
+ * @cpp_area:  CPP area handle
+ *
+ * Return: Name of the area, or NULL
+ */
+const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area)
+{
+       return cpp_area->resource.name;
+}
+
+/**
+ * nfp_cpp_area_priv() - return private struct for CPP area
+ * @cpp_area:  CPP area handle
+ *
+ * Return: Private data for the CPP area
+ */
+void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area)
+{
+       return &cpp_area[1];
+}
+
+/**
+ * nfp_cpp_area_cpp() - return CPP handle for CPP area
+ * @cpp_area:  CPP area handle
+ *
+ * Return: NFP CPP handle
+ */
+struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area)
+{
+       return cpp_area->cpp;
+}
+
+/**
+ * nfp_cpp_area_resource() - get resource
+ * @area:      CPP area handle
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: struct resource pointer, or NULL
+ */
+struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area)
+{
+       struct resource *res = NULL;
+
+       if (area->cpp->op->area_resource)
+               res = area->cpp->op->area_resource(area);
+
+       return res;
+}
+
+/**
+ * nfp_cpp_area_phys() - get physical address of CPP area
+ * @area:      CPP area handle
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: phys_addr_t of the area, or ~0 on error
+ */
+phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area)
+{
+       phys_addr_t addr = ~0;
+
+       if (area->cpp->op->area_phys)
+               addr = area->cpp->op->area_phys(area);
+
+       return addr;
+}
+
+/**
+ * nfp_cpp_area_iomem() - get IOMEM region for CPP area
+ * @area:      CPP area handle
+ *
+ * Returns an iomem pointer for use with readl()/writel() style
+ * operations.
+ *
+ * NOTE: Area must have been locked down with an 'acquire'.
+ *
+ * Return: __iomem pointer to the area, or NULL
+ */
+void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area)
+{
+       void __iomem *iomem = NULL;
+
+       if (area->cpp->op->area_iomem)
+               iomem = area->cpp->op->area_iomem(area);
+
+       return iomem;
+}
+
+/**
+ * nfp_cpp_area_readl() - Read a u32 word from an area
+ * @area:      CPP Area handle
+ * @offset:    Offset into area
+ * @value:     Pointer to read buffer
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_cpp_area_readl(struct nfp_cpp_area *area,
+                      unsigned long offset, u32 *value)
+{
+       u8 tmp[4];
+       int err;
+
+       err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+       *value = get_unaligned_le32(tmp);
+
+       return err;
+}
+
+/**
+ * nfp_cpp_area_writel() - Write a u32 word to an area
+ * @area:      CPP Area handle
+ * @offset:    Offset into area
+ * @value:     Value to write
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_cpp_area_writel(struct nfp_cpp_area *area,
+                       unsigned long offset, u32 value)
+{
+       u8 tmp[4];
+
+       put_unaligned_le32(value, tmp);
+
+       return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
+}
+
+/**
+ * nfp_cpp_area_readq() - Read a u64 word from an area
+ * @area:      CPP Area handle
+ * @offset:    Offset into area
+ * @value:     Pointer to read buffer
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_cpp_area_readq(struct nfp_cpp_area *area,
+                      unsigned long offset, u64 *value)
+{
+       u8 tmp[8];
+       int err;
+
+       err = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
+       *value = get_unaligned_le64(tmp);
+
+       return err;
+}
+
+/**
+ * nfp_cpp_area_writeq() - Write a u64 word to an area
+ * @area:      CPP Area handle
+ * @offset:    Offset into area
+ * @value:     Value to write
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_cpp_area_writeq(struct nfp_cpp_area *area,
+                       unsigned long offset, u64 value)
+{
+       u8 tmp[8];
+
+       put_unaligned_le64(value, tmp);
+
+       return nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
+}
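+
+/* A short read-modify-write sketch using the word helpers above; @area is
+ * assumed to be acquired already, and CTRL_OFFSET/CTRL_ENABLE are
+ * hypothetical names, not part of this driver:
+ *
+ *	err = nfp_cpp_area_readl(area, CTRL_OFFSET, &ctrl);
+ *	if (err < 0)
+ *		return err;
+ *	ctrl |= CTRL_ENABLE;
+ *	err = nfp_cpp_area_writel(area, CTRL_OFFSET, ctrl);
+ */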
+
+/**
+ * nfp_cpp_area_fill() - fill a CPP area with a value
+ * @area:      CPP area
+ * @offset:    offset into CPP area
+ * @value:     value to fill with
+ * @length:    length of area to fill
+ *
+ * Fill indicated area with given value.
+ *
+ * Return: length of io, or -ERRNO
+ */
+int nfp_cpp_area_fill(struct nfp_cpp_area *area,
+                     unsigned long offset, u32 value, size_t length)
+{
+       u8 tmp[4];
+       size_t i;
+       int k;
+
+       put_unaligned_le32(value, tmp);
+
+       if (offset % sizeof(tmp) || length % sizeof(tmp))
+               return -EINVAL;
+
+       for (i = 0; i < length; i += sizeof(tmp)) {
+               k = nfp_cpp_area_write(area, offset + i, &tmp, sizeof(tmp));
+               if (k < 0)
+                       return k;
+       }
+
+       return i;
+}
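+
+/* For example, zeroing the first 256 bytes of an acquired area (illustrative;
+ * both offset and length are multiples of four, as required):
+ *
+ *	err = nfp_cpp_area_fill(area, 0, 0, 256);
+ */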
+
+/**
+ * nfp_cpp_area_cache_add() - Permanently reserve an area for the hot cache
+ * @cpp:       NFP CPP handle
+ * @size:      Size of the area - MUST BE A POWER OF 2.
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
+{
+       struct nfp_cpp_area_cache *cache;
+       struct nfp_cpp_area *area;
+
+       /* Allocate an area - we use the MU target's base as a placeholder,
+        * as all supported chips have a MU.
+        */
+       area = nfp_cpp_area_alloc(cpp, NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0),
+                                 0, size);
+       if (!area)
+               return -ENOMEM;
+
+       cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+       if (!cache) {
+               /* Don't leak the placeholder area on allocation failure */
+               nfp_cpp_area_free(area);
+               return -ENOMEM;
+       }
+
+       cache->id = 0;
+       cache->addr = 0;
+       cache->size = size;
+       cache->area = area;
+       mutex_lock(&cpp->area_cache_mutex);
+       list_add_tail(&cache->entry, &cpp->area_cache_list);
+       mutex_unlock(&cpp->area_cache_mutex);
+
+       return 0;
+}
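+
+/* A transport layer would typically reserve the hot cache once, at probe
+ * time; a hedged example, assuming a 64KiB window is appropriate:
+ *
+ *	err = nfp_cpp_area_cache_add(cpp, SZ_64K);
+ */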
+
+static struct nfp_cpp_area_cache *
+area_cache_get(struct nfp_cpp *cpp, u32 id,
+              u64 addr, unsigned long *offset, size_t length)
+{
+       struct nfp_cpp_area_cache *cache;
+       int err;
+
+       /* Early exit when length == 0, which prevents
+        * the need for special case code below when
+        * checking against available cache size.
+        */
+       if (length == 0)
+               return NULL;
+
+       if (list_empty(&cpp->area_cache_list) || id == 0)
+               return NULL;
+
+       /* Remap from cpp_island to cpp_target */
+       err = nfp_target_cpp(id, addr, &id, &addr, cpp->imb_cat_table);
+       if (err < 0)
+               return NULL;
+
+       addr += *offset;
+
+       mutex_lock(&cpp->area_cache_mutex);
+
+       /* See if we have a match */
+       list_for_each_entry(cache, &cpp->area_cache_list, entry) {
+               if (id == cache->id &&
+                   addr >= cache->addr &&
+                   addr + length <= cache->addr + cache->size)
+                       goto exit;
+       }
+
+       /* No matches - inspect the tail of the LRU */
+       cache = list_entry(cpp->area_cache_list.prev,
+                          struct nfp_cpp_area_cache, entry);
+
+       /* Can we fit in the cache entry? */
+       if (round_down(addr + length - 1, cache->size) !=
+           round_down(addr, cache->size)) {
+               mutex_unlock(&cpp->area_cache_mutex);
+               return NULL;
+       }
+
+       /* If id != 0, we will need to release it */
+       if (cache->id) {
+               nfp_cpp_area_release(cache->area);
+               cache->id = 0;
+               cache->addr = 0;
+       }
+
+       /* Adjust the start address to be cache size aligned */
+       cache->id = id;
+       cache->addr = addr & ~(u64)(cache->size - 1);
+
+       /* Re-init to the new ID and address */
+       if (cpp->op->area_init) {
+               err = cpp->op->area_init(cache->area,
+                                        id, cache->addr, cache->size);
+               if (err < 0) {
+                       mutex_unlock(&cpp->area_cache_mutex);
+                       return NULL;
+               }
+       }
+
+       /* Attempt to acquire */
+       err = nfp_cpp_area_acquire(cache->area);
+       if (err < 0) {
+               mutex_unlock(&cpp->area_cache_mutex);
+               return NULL;
+       }
+
+exit:
+       /* Adjust offset */
+       *offset = addr - cache->addr;
+       return cache;
+}
+
+static void
+area_cache_put(struct nfp_cpp *cpp, struct nfp_cpp_area_cache *cache)
+{
+       if (!cache)
+               return;
+
+       /* Move to front of LRU */
+       list_del(&cache->entry);
+       list_add(&cache->entry, &cpp->area_cache_list);
+
+       mutex_unlock(&cpp->area_cache_mutex);
+}
+
+/**
+ * nfp_cpp_read() - read from CPP target
+ * @cpp:               CPP handle
+ * @destination:       CPP id
+ * @address:           offset into CPP target
+ * @kernel_vaddr:      kernel buffer for result
+ * @length:            number of bytes to read
+ *
+ * Return: length of io, or -ERRNO
+ */
+int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
+                unsigned long long address, void *kernel_vaddr, size_t length)
+{
+       struct nfp_cpp_area_cache *cache;
+       struct nfp_cpp_area *area;
+       unsigned long offset = 0;
+       int err;
+
+       cache = area_cache_get(cpp, destination, address, &offset, length);
+       if (cache) {
+               area = cache->area;
+       } else {
+               area = nfp_cpp_area_alloc(cpp, destination, address, length);
+               if (!area)
+                       return -ENOMEM;
+
+               err = nfp_cpp_area_acquire(area);
+               if (err)
+                       goto out;
+       }
+
+       err = nfp_cpp_area_read(area, offset, kernel_vaddr, length);
+out:
+       if (cache)
+               area_cache_put(cpp, cache);
+       else
+               nfp_cpp_area_release_free(area);
+
+       return err;
+}
+
+/**
+ * nfp_cpp_write() - write to CPP target
+ * @cpp:               CPP handle
+ * @destination:       CPP id
+ * @address:           offset into CPP target
+ * @kernel_vaddr:      kernel buffer to read from
+ * @length:            number of bytes to write
+ *
+ * Return: length of io, or -ERRNO
+ */
+int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
+                 unsigned long long address,
+                 const void *kernel_vaddr, size_t length)
+{
+       struct nfp_cpp_area_cache *cache;
+       struct nfp_cpp_area *area;
+       unsigned long offset = 0;
+       int err;
+
+       cache = area_cache_get(cpp, destination, address, &offset, length);
+       if (cache) {
+               area = cache->area;
+       } else {
+               area = nfp_cpp_area_alloc(cpp, destination, address, length);
+               if (!area)
+                       return -ENOMEM;
+
+               err = nfp_cpp_area_acquire(area);
+               if (err)
+                       goto out;
+       }
+
+       err = nfp_cpp_area_write(area, offset, kernel_vaddr, length);
+
+out:
+       if (cache)
+               area_cache_put(cpp, cache);
+       else
+               nfp_cpp_area_release_free(area);
+
+       return err;
+}
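+
+/* For one-off transfers, these entry points can be used directly instead of
+ * managing an area by hand; a sketch, with a hypothetical MU buffer address:
+ *
+ *	const u32 mu = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0);
+ *	u8 buf[64];
+ *
+ *	err = nfp_cpp_read(cpp, mu, 0x8000, buf, sizeof(buf));
+ */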
+
+/* Return the correct CPP address, and fixup xpb_addr as needed. */
+static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr)
+{
+       int island;
+       u32 xpb;
+
+       xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0);
+       /* Ensure that non-local XPB accesses go
+        * out through the global XPBM bus.
+        */
+       island = (*xpb_addr >> 24) & 0x3f;
+       if (!island)
+               return xpb;
+
+       if (island != 1) {
+               *xpb_addr |= 1 << 30;
+               return xpb;
+       }
+
+       /* Accesses to the ARM Island overlay use Island 0 / Global Bit */
+       *xpb_addr &= ~0x7f000000;
+       if (*xpb_addr < 0x60000) {
+               *xpb_addr |= 1 << 30;
+       } else {
+               /* And only non-ARM interfaces use the island id = 1 */
+               if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp))
+                   != NFP_CPP_INTERFACE_TYPE_ARM)
+                       *xpb_addr |= 1 << 24;
+       }
+
+       return xpb;
+}
+
+/**
+ * nfp_xpb_readl() - Read a u32 word from a XPB location
+ * @cpp:       CPP device handle
+ * @xpb_addr:  Address for operation
+ * @value:     Pointer to read buffer
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
+{
+       u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
+
+       return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value);
+}
+
+/**
+ * nfp_xpb_writel() - Write a u32 word to a XPB location
+ * @cpp:       CPP device handle
+ * @xpb_addr:  Address for operation
+ * @value:     Value to write
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
+{
+       u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
+
+       return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value);
+}
+
+/**
+ * nfp_xpb_writelm() - Modify bits of a 32-bit value from the XPB bus
+ * @cpp:       NFP CPP device handle
+ * @xpb_tgt:   XPB target and address
+ * @mask:      mask of bits to alter
+ * @value:     value to modify
+ *
+ * NOTE: This is a read-modify-write sequence which may sleep, so it must
+ * not be called from interrupt or softirq context.
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt,
+                   u32 mask, u32 value)
+{
+       int err;
+       u32 tmp;
+
+       err = nfp_xpb_readl(cpp, xpb_tgt, &tmp);
+       if (err < 0)
+               return err;
+
+       tmp &= ~mask;
+       tmp |= mask & value;
+       return nfp_xpb_writel(cpp, xpb_tgt, tmp);
+}
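+
+/* Setting a single bit therefore reduces to
+ * nfp_xpb_writelm(cpp, xpb_tgt, BIT(n), BIT(n)), and clearing it to
+ * nfp_xpb_writelm(cpp, xpb_tgt, BIT(n), 0), where BIT(n) is an arbitrary
+ * illustrative mask.
+ */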
+
+/* Lockdep markers */
+static struct lock_class_key nfp_cpp_resource_lock_key;
+
+static void nfp_cpp_dev_release(struct device *dev)
+{
+       /* Nothing to do here - it just makes the kernel happy */
+}
+
+/**
+ * nfp_cpp_from_operations() - Create a NFP CPP handle
+ *                             from an operations structure
+ * @ops:       NFP CPP operations structure
+ * @parent:    Parent device
+ * @priv:      Private data of low-level implementation
+ *
+ * NOTE: On failure, cpp_ops->free will be called!
+ *
+ * Return: NFP CPP handle on success, ERR_PTR on failure
+ */
+struct nfp_cpp *
+nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
+                       struct device *parent, void *priv)
+{
+       const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0);
+       struct nfp_cpp *cpp;
+       u32 mask[2];
+       u32 xpbaddr;
+       size_t tgt;
+       int err;
+
+       cpp = kzalloc(sizeof(*cpp), GFP_KERNEL);
+       if (!cpp) {
+               err = -ENOMEM;
+               goto err_malloc;
+       }
+
+       cpp->op = ops;
+       cpp->priv = priv;
+       cpp->interface = ops->get_interface(parent);
+       if (ops->read_serial)
+               ops->read_serial(parent, cpp->serial);
+       rwlock_init(&cpp->resource_lock);
+       init_waitqueue_head(&cpp->waitq);
+       lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
+       INIT_LIST_HEAD(&cpp->mutex_cache);
+       INIT_LIST_HEAD(&cpp->resource_list);
+       INIT_LIST_HEAD(&cpp->area_cache_list);
+       mutex_init(&cpp->area_cache_mutex);
+       cpp->dev.init_name = "cpp";
+       cpp->dev.parent = parent;
+       cpp->dev.release = nfp_cpp_dev_release;
+       err = device_register(&cpp->dev);
+       if (err < 0) {
+               put_device(&cpp->dev);
+               goto err_dev;
+       }
+
+       dev_set_drvdata(&cpp->dev, cpp);
+
+       /* NOTE: cpp_lock is NOT locked for op->init,
+        * since it may call NFP CPP API operations
+        */
+       if (cpp->op->init) {
+               err = cpp->op->init(cpp);
+               if (err < 0) {
+                       dev_err(parent,
+                               "NFP interface initialization failed\n");
+                       goto err_out;
+               }
+       }
+
+       err = nfp_cpp_model_autodetect(cpp, &cpp->model);
+       if (err < 0) {
+               dev_err(parent, "NFP model detection failed\n");
+               goto err_out;
+       }
+
+       for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) {
+               /* Hardcoded XPB IMB Base, island 0 */
+               xpbaddr = 0x000a0000 + (tgt * 4);
+               err = nfp_xpb_readl(cpp, xpbaddr,
+                                   &cpp->imb_cat_table[tgt]);
+               if (err < 0) {
+                       dev_err(parent,
+                               "Can't read CPP mapping from device\n");
+                       goto err_out;
+               }
+       }
+
+       nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL2,
+                     &mask[0]);
+       nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL3,
+                     &mask[1]);
+
+       dev_info(cpp->dev.parent, "Model: 0x%08x, SN: %pM, Ifc: 0x%04x\n",
+                nfp_cpp_model(cpp), cpp->serial, nfp_cpp_interface(cpp));
+
+       return cpp;
+
+err_out:
+       device_unregister(&cpp->dev);
+err_dev:
+       kfree(cpp);
+err_malloc:
+       return ERR_PTR(err);
+}
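+
+/* A low-level transport (e.g. a PCIe layer) is expected to call this from
+ * its probe path; a hedged sketch, where nfp6000_pcie_ops and priv are
+ * hypothetical stand-ins for that implementation's operations and state:
+ *
+ *	cpp = nfp_cpp_from_operations(&nfp6000_pcie_ops, &pdev->dev, priv);
+ *	if (IS_ERR(cpp))
+ *		return PTR_ERR(cpp);
+ */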
+
+/**
+ * nfp_cpp_priv() - Get the operations private data of a CPP handle
+ * @cpp:       CPP handle
+ *
+ * Return: Private data for the NFP CPP handle
+ */
+void *nfp_cpp_priv(struct nfp_cpp *cpp)
+{
+       return cpp->priv;
+}
+
+/**
+ * nfp_cpp_device() - Get the Linux device handle of a CPP handle
+ * @cpp:       CPP handle
+ *
+ * Return: Device for the NFP CPP bus
+ */
+struct device *nfp_cpp_device(struct nfp_cpp *cpp)
+{
+       return &cpp->dev;
+}
+
+#define NFP_EXPL_OP(func, expl, args...)                         \
+       ({                                                        \
+               struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \
+               int err = -ENODEV;                                \
+                                                                 \
+               if (cpp->op->func)                                \
+                       err = cpp->op->func(expl, ##args);        \
+               err;                                              \
+       })
+
+#define NFP_EXPL_OP_NR(func, expl, args...)                      \
+       ({                                                        \
+               struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \
+                                                                 \
+               if (cpp->op->func)                                \
+                       cpp->op->func(expl, ##args);              \
+                                                                 \
+       })
+
+/**
+ * nfp_cpp_explicit_acquire() - Acquire explicit access handle
+ * @cpp:       NFP CPP handle
+ *
+ * The 'data_ref' and 'signal_ref' values are useful when
+ * constructing the NFP_EXPL_CSR1 and NFP_EXPL_POST values.
+ *
+ * Return: NFP CPP explicit handle
+ */
+struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp)
+{
+       struct nfp_cpp_explicit *expl;
+       int err;
+
+       expl = kzalloc(sizeof(*expl) + cpp->op->explicit_priv_size, GFP_KERNEL);
+       if (!expl)
+               return NULL;
+
+       expl->cpp = cpp;
+       err = NFP_EXPL_OP(explicit_acquire, expl);
+       if (err < 0) {
+               kfree(expl);
+               return NULL;
+       }
+
+       return expl;
+}
+
+/**
+ * nfp_cpp_explicit_set_target() - Set target fields for explicit
+ * @expl:      Explicit handle
+ * @cpp_id:    CPP ID field
+ * @len:       CPP Length field
+ * @mask:      CPP Mask field
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl,
+                               u32 cpp_id, u8 len, u8 mask)
+{
+       expl->cmd.cpp_id = cpp_id;
+       expl->cmd.len = len;
+       expl->cmd.byte_mask = mask;
+
+       return 0;
+}
+
+/**
+ * nfp_cpp_explicit_set_data() - Set data fields for explicit
+ * @expl:      Explicit handle
+ * @data_master: CPP Data Master field
+ * @data_ref:  CPP Data Ref field
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl,
+                             u8 data_master, u16 data_ref)
+{
+       expl->cmd.data_master = data_master;
+       expl->cmd.data_ref = data_ref;
+
+       return 0;
+}
+
+/**
+ * nfp_cpp_explicit_set_signal() - Set signal fields for explicit
+ * @expl:      Explicit handle
+ * @signal_master: CPP Signal Master field
+ * @signal_ref:        CPP Signal Ref field
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl,
+                               u8 signal_master, u8 signal_ref)
+{
+       expl->cmd.signal_master = signal_master;
+       expl->cmd.signal_ref = signal_ref;
+
+       return 0;
+}
+
+/**
+ * nfp_cpp_explicit_set_posted() - Set completion fields for explicit
+ * @expl:      Explicit handle
+ * @posted:    True for signaled completion, false otherwise
+ * @siga:      CPP Signal A field
+ * @siga_mode: CPP Signal A Mode field
+ * @sigb:      CPP Signal B field
+ * @sigb_mode: CPP Signal B Mode field
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted,
+                               u8 siga,
+                               enum nfp_cpp_explicit_signal_mode siga_mode,
+                               u8 sigb,
+                               enum nfp_cpp_explicit_signal_mode sigb_mode)
+{
+       expl->cmd.posted = posted;
+       expl->cmd.siga = siga;
+       expl->cmd.sigb = sigb;
+       expl->cmd.siga_mode = siga_mode;
+       expl->cmd.sigb_mode = sigb_mode;
+
+       return 0;
+}
+
+/**
+ * nfp_cpp_explicit_put() - Set up the write (pull) data for an explicit access
+ * @expl:      NFP CPP Explicit handle
+ * @buff:      Data to have the target pull in the transaction
+ * @len:       Length of data, in bytes
+ *
+ * The 'len' parameter must be less than or equal to 128 bytes.
+ *
+ * If this function is called before the configuration
+ * registers are set, it will return -EINVAL.
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl,
+                        const void *buff, size_t len)
+{
+       return NFP_EXPL_OP(explicit_put, expl, buff, len);
+}
+
+/**
+ * nfp_cpp_explicit_do() - Execute a transaction, and wait for it to complete
+ * @expl:      NFP CPP Explicit handle
+ * @address:   Address to send in the explicit transaction
+ *
+ * If this function is called before the configuration
+ * registers are set, it will return -EINVAL.
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address)
+{
+       return NFP_EXPL_OP(explicit_do, expl, &expl->cmd, address);
+}
+
+/**
+ * nfp_cpp_explicit_get() - Get the 'push' (read) data from an explicit access
+ * @expl:      NFP CPP Explicit handle
+ * @buff:      Data that the target pushed in the transaction
+ * @len:       Length of data, in bytes
+ *
+ * The 'len' parameter must be less than or equal to 128 bytes.
+ *
+ * If this function is called before all three configuration
+ * registers are set, it will return -EINVAL.
+ *
+ * If this function is called before nfp_cpp_explicit_do()
+ * has completed, it will return -EBUSY.
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len)
+{
+       return NFP_EXPL_OP(explicit_get, expl, buff, len);
+}
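+
+/* Putting the explicit API together, a posted read follows roughly this
+ * sequence (placeholder arguments; see nfp_cpp_explicit_read() in
+ * nfp_cpplib.c for a complete user):
+ *
+ *	expl = nfp_cpp_explicit_acquire(cpp);
+ *	nfp_cpp_explicit_set_target(expl, cpp_id, len_field, byte_mask);
+ *	nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PUSH,
+ *				    0, NFP_SIGNAL_NONE);
+ *	err = nfp_cpp_explicit_do(expl, address);
+ *	if (err >= 0)
+ *		err = nfp_cpp_explicit_get(expl, buff, length);
+ *	nfp_cpp_explicit_release(expl);
+ */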
+
+/**
+ * nfp_cpp_explicit_release() - Release explicit access handle
+ * @expl:      NFP CPP Explicit handle
+ *
+ */
+void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl)
+{
+       NFP_EXPL_OP_NR(explicit_release, expl);
+       kfree(expl);
+}
+
+/**
+ * nfp_cpp_explicit_cpp() - return CPP handle for CPP explicit
+ * @cpp_explicit:      CPP explicit handle
+ *
+ * Return: NFP CPP handle of the explicit
+ */
+struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *cpp_explicit)
+{
+       return cpp_explicit->cpp;
+}
+
+/**
+ * nfp_cpp_explicit_priv() - return private struct for CPP explicit
+ * @cpp_explicit:      CPP explicit handle
+ *
+ * Return: private data of the explicit, or NULL
+ */
+void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit)
+{
+       return &cpp_explicit[1];
+}
+
+/* THIS FUNCTION IS NOT EXPORTED */
+static u32 nfp_mutex_locked(u16 interface)
+{
+       return (u32)interface << 16 | 0x000f;
+}
+
+static u32 nfp_mutex_unlocked(u16 interface)
+{
+       return (u32)interface << 16 | 0x0000;
+}
+
+static bool nfp_mutex_is_locked(u32 val)
+{
+       return (val & 0xffff) == 0x000f;
+}
+
+static bool nfp_mutex_is_unlocked(u32 val)
+{
+       return (val & 0xffff) == 0x0000;
+}
+
+/* If you need more than 65536 recursive locks, please rethink your code. */
+#define MUTEX_DEPTH_MAX         0xffff
+
+static int
+nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address)
+{
+       /* Not permitted on invalid interfaces */
+       if (NFP_CPP_INTERFACE_TYPE_of(interface) ==
+           NFP_CPP_INTERFACE_TYPE_INVALID)
+               return -EINVAL;
+
+       /* Address must be 64-bit aligned */
+       if (address & 7)
+               return -EINVAL;
+
+       if (*target != NFP_CPP_TARGET_MU)
+               return -EINVAL;
+
+       return 0;
+}
+
+/**
+ * nfp_cpp_mutex_init() - Initialize a mutex location
+ * @cpp:       NFP CPP handle
+ * @target:    NFP CPP target ID (only NFP_CPP_TARGET_MU is supported)
+ * @address:   Offset into the address space of the NFP CPP target ID
+ * @key:       Unique 32-bit value for this mutex
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and
+ * will initialize 64 bits of data at the location.
+ *
+ * This creates the initial mutex state, as locked by this
+ * nfp_cpp_interface().
+ *
+ * This function should only be called when setting up
+ * the initial lock state upon boot-up of the system.
+ *
+ * Return: 0 on success, or -errno on failure
+ */
+int nfp_cpp_mutex_init(struct nfp_cpp *cpp,
+                      int target, unsigned long long address, u32 key)
+{
+       const u32 muw = NFP_CPP_ID(target, 4, 0);    /* atomic_write */
+       u16 interface = nfp_cpp_interface(cpp);
+       int err;
+
+       err = nfp_cpp_mutex_validate(interface, &target, address);
+       if (err)
+               return err;
+
+       err = nfp_cpp_writel(cpp, muw, address + 4, key);
+       if (err)
+               return err;
+
+       err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface));
+       if (err)
+               return err;
+
+       return 0;
+}
+
+/**
+ * nfp_cpp_mutex_alloc() - Create a mutex handle
+ * @cpp:       NFP CPP handle
+ * @target:    NFP CPP target ID (only NFP_CPP_TARGET_MU is supported)
+ * @address:   Offset into the address space of the NFP CPP target ID
+ * @key:       32-bit unique key (must match the key at this location)
+ *
+ * The CPP target:address must point to a 64-bit aligned location, and
+ * reserve 64 bits of data at the location for use by the handle.
+ *
+ * Only target/address pairs that point to entities that support the
+ * MU Atomic Engine's CmpAndSwap32 command are supported.
+ *
+ * Return:     A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
+ */
+struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target,
+                                         unsigned long long address, u32 key)
+{
+       const u32 mur = NFP_CPP_ID(target, 3, 0);    /* atomic_read */
+       u16 interface = nfp_cpp_interface(cpp);
+       struct nfp_cpp_mutex *mutex;
+       int err;
+       u32 tmp;
+
+       err = nfp_cpp_mutex_validate(interface, &target, address);
+       if (err)
+               return NULL;
+
+       /* Look for mutex on cache list */
+       list_for_each_entry(mutex, &cpp->mutex_cache, list) {
+               if (mutex->target == target && mutex->address == address) {
+                       mutex->usage++;
+                       return mutex;
+               }
+       }
+
+       err = nfp_cpp_readl(cpp, mur, address + 4, &tmp);
+       if (err < 0)
+               return NULL;
+
+       if (tmp != key)
+               return NULL;
+
+       mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
+       if (!mutex)
+               return NULL;
+
+       mutex->cpp = cpp;
+       mutex->target = target;
+       mutex->address = address;
+       mutex->key = key;
+       mutex->depth = 0;
+       mutex->usage = 1;
+
+       /* Add mutex to cache list */
+       list_add(&mutex->list, &cpp->mutex_cache);
+
+       return mutex;
+}
+
+/**
+ * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state
+ * @mutex:     NFP CPP Mutex handle
+ */
+void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex)
+{
+       if (--mutex->usage)
+               return;
+
+       /* Remove mutex from cache */
+       list_del(&mutex->list);
+       kfree(mutex);
+}
+
+/**
+ * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine
+ * @mutex:     NFP CPP Mutex handle
+ *
+ * Return: 0 on success, or -errno on failure
+ */
+int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex)
+{
+       unsigned long warn_at = jiffies + 15 * HZ;
+       unsigned int timeout_ms = 1;
+       int err;
+
+       /* We can't use a waitqueue here, because the unlocker
+        * might be on a separate CPU.
+        *
+        * So just wait for now.
+        */
+       for (;;) {
+               err = nfp_cpp_mutex_trylock(mutex);
+               if (err != -EBUSY)
+                       break;
+
+               err = msleep_interruptible(timeout_ms);
+               if (err != 0)
+                       return -ERESTARTSYS;
+
+               if (time_is_before_eq_jiffies(warn_at)) {
+                       warn_at = jiffies + 60 * HZ;
+                       dev_warn(mutex->cpp->dev.parent,
+                                "Warning: waiting for NFP mutex [usage:%hd depth:%hd target:%d addr:%llx key:%08x]\n",
+                                mutex->usage, mutex->depth,
+                                mutex->target, mutex->address, mutex->key);
+               }
+       }
+
+       return err;
+}
+
+/**
+ * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine
+ * @mutex:     NFP CPP Mutex handle
+ *
+ * Return: 0 on success, or -errno on failure
+ */
+int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex)
+{
+       const u32 muw = NFP_CPP_ID(mutex->target, 4, 0);    /* atomic_write */
+       const u32 mur = NFP_CPP_ID(mutex->target, 3, 0);    /* atomic_read */
+       struct nfp_cpp *cpp = mutex->cpp;
+       u32 key, value;
+       u16 interface;
+       int err;
+
+       interface = nfp_cpp_interface(cpp);
+
+       if (mutex->depth > 1) {
+               mutex->depth--;
+               return 0;
+       }
+
+       err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key);
+       if (err < 0)
+               return err;
+
+       if (key != mutex->key)
+               return -EPERM;
+
+       err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value);
+       if (err < 0)
+               return err;
+
+       if (value != nfp_mutex_locked(interface))
+               return -EACCES;
+
+       err = nfp_cpp_writel(cpp, muw, mutex->address,
+                            nfp_mutex_unlocked(interface));
+       if (err < 0)
+               return err;
+
+       mutex->depth = 0;
+       return 0;
+}
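+
+/* Typical host-side use of the hardware mutex, as a sketch; target, address
+ * and key are assumed to match an earlier nfp_cpp_mutex_init():
+ *
+ *	mutex = nfp_cpp_mutex_alloc(cpp, NFP_CPP_TARGET_MU, address, key);
+ *	if (!mutex)
+ *		return -ENOENT;
+ *	err = nfp_cpp_mutex_lock(mutex);
+ *	if (!err) {
+ *		... critical section ...
+ *		nfp_cpp_mutex_unlock(mutex);
+ *	}
+ *	nfp_cpp_mutex_free(mutex);
+ */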
+
+/**
+ * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle
+ * @mutex:     NFP CPP Mutex handle
+ *
+ * Return:      0 if the lock succeeded, -errno on failure
+ */
+int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex)
+{
+       const u32 muw = NFP_CPP_ID(mutex->target, 4, 0);    /* atomic_write */
+       const u32 mus = NFP_CPP_ID(mutex->target, 5, 3);    /* test_set_imm */
+       const u32 mur = NFP_CPP_ID(mutex->target, 3, 0);    /* atomic_read */
+       struct nfp_cpp *cpp = mutex->cpp;
+       u32 key, value, tmp;
+       int err;
+
+       if (mutex->depth > 0) {
+               if (mutex->depth == MUTEX_DEPTH_MAX)
+                       return -E2BIG;
+               mutex->depth++;
+               return 0;
+       }
+
+       /* Verify that the lock marker is not damaged */
+       err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key);
+       if (err < 0)
+               return err;
+
+       if (key != mutex->key)
+               return -EPERM;
+
+       /* Compare against the unlocked state, and if true,
+        * write the interface id into the top 16 bits, and
+        * mark as locked.
+        */
+       value = nfp_mutex_locked(nfp_cpp_interface(cpp));
+
+       /* We use test_set_imm here, as it implies a read
+        * of the current state, and sets the bits in the
+        * bytemask of the command to 1s. Since the mutex
+        * is guaranteed to be 64-bit aligned, the bytemask
+        * of this 32-bit command is ensured to be 8'b00001111,
+        * which implies that the lower 4 bits will be set to
+        * ones regardless of the initial state.
+        *
+        * Since this is a 'Readback' operation, with no Pull
+        * data, we can treat this as a normal Push (read)
+        * atomic, which returns the original value.
+        */
+       err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp);
+       if (err < 0)
+               return err;
+
+       /* Was it unlocked? */
+       if (nfp_mutex_is_unlocked(tmp)) {
+               /* The read value can only be 0x....0000 in the unlocked state.
+                * If there was another contending for this lock, then
+                * the lock state would be 0x....000f
+                */
+
+               /* Write our owner ID into the lock
+                * While not strictly necessary, this helps with
+                * debug and bookkeeping.
+                */
+               err = nfp_cpp_writel(cpp, muw, mutex->address, value);
+               if (err < 0)
+                       return err;
+
+               mutex->depth = 1;
+               return 0;
+       }
+
+       /* Already locked by us? Success! */
+       if (tmp == value) {
+               mutex->depth = 1;
+               return 0;
+       }
+
+       return nfp_mutex_is_locked(tmp) ? -EBUSY : -EINVAL;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c
new file mode 100644 (file)
index 0000000..0ba0379
--- /dev/null
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_cpplib.c
+ * Library of functions to access the NFP's CPP bus
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ *          Jason McMullan <jason.mcmullan@netronome.com>
+ *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+#include "nfp6000/nfp_xpb.h"
+
+/* NFP6000 PL */
+#define NFP_PL_DEVICE_ID                       0x00000004
+#define   NFP_PL_DEVICE_ID_MASK                        GENMASK(7, 0)
+
+#define NFP6000_ARM_GCSR_SOFTMODEL0            0x00400144
+
+/**
+ * nfp_cpp_readl() - Read a u32 word from a CPP location
+ * @cpp:       CPP device handle
+ * @cpp_id:    CPP ID for operation
+ * @address:   Address for operation
+ * @value:     Pointer to read buffer
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id,
+                 unsigned long long address, u32 *value)
+{
+       u8 tmp[4];
+       int err;
+
+       err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
+       *value = get_unaligned_le32(tmp);
+
+       return err;
+}
+
+/**
+ * nfp_cpp_writel() - Write a u32 word to a CPP location
+ * @cpp:       CPP device handle
+ * @cpp_id:    CPP ID for operation
+ * @address:   Address for operation
+ * @value:     Value to write
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id,
+                  unsigned long long address, u32 value)
+{
+       u8 tmp[4];
+
+       put_unaligned_le32(value, tmp);
+       return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+}
+
+/**
+ * nfp_cpp_readq() - Read a u64 word from a CPP location
+ * @cpp:       CPP device handle
+ * @cpp_id:    CPP ID for operation
+ * @address:   Address for operation
+ * @value:     Pointer to read buffer
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id,
+                 unsigned long long address, u64 *value)
+{
+       u8 tmp[8];
+       int err;
+
+       err = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp));
+       *value = get_unaligned_le64(tmp);
+
+       return err;
+}
+
+/**
+ * nfp_cpp_writeq() - Write a u64 word to a CPP location
+ * @cpp:       CPP device handle
+ * @cpp_id:    CPP ID for operation
+ * @address:   Address for operation
+ * @value:     Value to write
+ *
+ * Return: length of the io, or -ERRNO
+ */
+int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id,
+                  unsigned long long address, u64 value)
+{
+       u8 tmp[8];
+
+       put_unaligned_le64(value, tmp);
+       return nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp));
+}
+
+/* NOTE: This code should not use nfp_xpb_* functions,
+ * as those are model-specific
+ */
+int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model)
+{
+       const u32 arm_id = NFP_CPP_ID(NFP_CPP_TARGET_ARM, 0, 0);
+       u32 reg;
+       int err;
+
+       err = nfp_cpp_readl(cpp, arm_id, NFP6000_ARM_GCSR_SOFTMODEL0, model);
+       if (err < 0)
+               return err;
+
+       /* The PL's PluDeviceID revision code is authoritative */
+       *model &= ~0xff;
+       err = nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) + NFP_PL_DEVICE_ID,
+                           &reg);
+       if (err < 0)
+               return err;
+
+       *model |= (NFP_PL_DEVICE_ID_MASK & reg) - 0x10;
+
+       return 0;
+}
+
+static u8 nfp_bytemask(int width, u64 addr)
+{
+       if (width == 8)
+               return 0xff;
+       else if (width == 4)
+               return 0x0f << (addr & 4);
+       else if (width == 2)
+               return 0x03 << (addr & 6);
+       else if (width == 1)
+               return 0x01 << (addr & 7);
+       else
+               return 0;
+}
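+
+/* Worked example: for a 4-byte access at an address with bit 2 set,
+ * nfp_bytemask(4, 0x1004) evaluates to 0x0f << 4 == 0xf0, selecting the
+ * upper half of the 64-bit word; an 8-byte access always enables all lanes.
+ */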
+
+int nfp_cpp_explicit_read(struct nfp_cpp *cpp, u32 cpp_id,
+                         u64 addr, void *buff, size_t len, int width_read)
+{
+       struct nfp_cpp_explicit *expl;
+       char *tmp = buff;
+       int err, i, incr;
+       u8 byte_mask;
+
+       if (len & (width_read - 1))
+               return -EINVAL;
+
+       expl = nfp_cpp_explicit_acquire(cpp);
+       if (!expl)
+               return -EBUSY;
+
+       incr = min_t(int, 16 * width_read, 128);
+       incr = min_t(int, incr, len);
+
+       /* Translate a NFP_CPP_ACTION_RW to action 0 */
+       if (NFP_CPP_ID_ACTION_of(cpp_id) == NFP_CPP_ACTION_RW)
+               cpp_id = NFP_CPP_ID(NFP_CPP_ID_TARGET_of(cpp_id), 0,
+                                   NFP_CPP_ID_TOKEN_of(cpp_id));
+
+       byte_mask = nfp_bytemask(width_read, addr);
+
+       nfp_cpp_explicit_set_target(expl, cpp_id,
+                                   incr / width_read - 1, byte_mask);
+       nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PUSH,
+                                   0, NFP_SIGNAL_NONE);
+
+       for (i = 0; i < len; i += incr, addr += incr, tmp += incr) {
+               if (i + incr > len) {
+                       incr = len - i;
+                       nfp_cpp_explicit_set_target(expl, cpp_id,
+                                                   incr / width_read - 1,
+                                                   0xff);
+               }
+
+               err = nfp_cpp_explicit_do(expl, addr);
+               if (err < 0)
+                       goto exit_release;
+
+               err = nfp_cpp_explicit_get(expl, tmp, incr);
+               if (err < 0)
+                       goto exit_release;
+       }
+       err = len;
+exit_release:
+       nfp_cpp_explicit_release(expl);
+
+       return err;
+}
+
+int nfp_cpp_explicit_write(struct nfp_cpp *cpp, u32 cpp_id, u64 addr,
+                          const void *buff, size_t len, int width_write)
+{
+       struct nfp_cpp_explicit *expl;
+       const char *tmp = buff;
+       int err, i, incr;
+       u8 byte_mask;
+
+       if (len & (width_write - 1))
+               return -EINVAL;
+
+       expl = nfp_cpp_explicit_acquire(cpp);
+       if (!expl)
+               return -EBUSY;
+
+       incr = min_t(int, 16 * width_write, 128);
+       incr = min_t(int, incr, len);
+
+       /* Translate a NFP_CPP_ACTION_RW to action 1 */
+       if (NFP_CPP_ID_ACTION_of(cpp_id) == NFP_CPP_ACTION_RW)
+               cpp_id = NFP_CPP_ID(NFP_CPP_ID_TARGET_of(cpp_id), 1,
+                                   NFP_CPP_ID_TOKEN_of(cpp_id));
+
+       byte_mask = nfp_bytemask(width_write, addr);
+
+       nfp_cpp_explicit_set_target(expl, cpp_id,
+                                   incr / width_write - 1, byte_mask);
+       nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PULL,
+                                   0, NFP_SIGNAL_NONE);
+
+       for (i = 0; i < len; i += incr, addr += incr, tmp += incr) {
+               if (i + incr > len) {
+                       incr = len - i;
+                       nfp_cpp_explicit_set_target(expl, cpp_id,
+                                                   incr / width_write - 1,
+                                                   0xff);
+               }
+
+               err = nfp_cpp_explicit_put(expl, tmp, incr);
+               if (err < 0)
+                       goto exit_release;
+
+               err = nfp_cpp_explicit_do(expl, addr);
+               if (err < 0)
+                       goto exit_release;
+       }
+       err = len;
+exit_release:
+       nfp_cpp_explicit_release(expl);
+
+       return err;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c
new file mode 100644 (file)
index 0000000..8d8f311
--- /dev/null
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License
+ * Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM
+ * after chip reset.
+ *
+ * Examples of the fields:
+ *   me.count = 40
+ *   me.mask = 0x7f_ffff_ffff
+ *
+ *   me.count is the total number of MEs on the system.
+ *   me.mask is the bitmask of MEs that are available for application usage.
+ *
+ *   (ie, in this example, ME 39 has been reserved by boardconfig.)
+ */
+
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define NFP_SUBSYS "nfp_hwinfo"
+
+#include "crc32.h"
+#include "nfp.h"
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+
+#define HWINFO_SIZE_MIN        0x100
+#define HWINFO_WAIT    20      /* seconds */
+
+/* The Hardware Info Table defines the properties of the system.
+ *
+ * HWInfo v1 Table (fixed size)
+ *
+ * 0x0000: u32 version         Hardware Info Table version (1.0)
+ * 0x0004: u32 size            Total size of the table, including
+ *                             the CRC32 (IEEE 802.3)
+ * 0x0008: u32 jumptab         Offset of key/value table
+ * 0x000c: u32 keys            Total number of keys in the key/value table
+ * NNNNNN:                     Key/value jump table and string data
+ * (size - 4): u32 crc32       CRC32 (same as IEEE 802.3, POSIX csum, etc)
+ *                             CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE
+ *
+ * HWInfo v2 Table (variable size)
+ *
+ * 0x0000: u32 version         Hardware Info Table version (2.0)
+ * 0x0004: u32 size            Current size of the data area, excluding CRC32
+ * 0x0008: u32 limit           Maximum size of the table
+ * 0x000c: u32 reserved                Unused, set to zero
+ * NNNNNN:                     Key/value data
+ * (size - 4): u32 crc32       CRC32 (same as IEEE 802.3, POSIX csum, etc)
+ *                             CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE
+ *
+ * If the HWInfo table is in the process of being updated, the low bit
+ * of version will be set.
+ *
+ * HWInfo v1 Key/Value Table
+ * -------------------------
+ *
+ *  The key/value table is a set of offsets to ASCIIZ strings which have
+ *  been strcmp(3) sorted (yes, please use bsearch(3) on the table).
+ *
+ *  All keys are guaranteed to be unique.
+ *
+ * N+0:        u32 key_1               Offset to the first key
+ * N+4:        u32 val_1               Offset to the first value
+ * N+8: u32 key_2              Offset to the second key
+ * N+c: u32 val_2              Offset to the second value
+ * ...
+ *
+ * HWInfo v2 Key/Value Table
+ * -------------------------
+ *
+ * Packed UTF8Z strings, ie 'key1\000value1\000key2\000value2\000'
+ *
+ * Unsorted.
+ */
+
+#define NFP_HWINFO_VERSION_1 ('H' << 24 | 'I' << 16 | 1 << 8 | 0 << 1 | 0)
+#define NFP_HWINFO_VERSION_2 ('H' << 24 | 'I' << 16 | 2 << 8 | 0 << 1 | 0)
+#define NFP_HWINFO_VERSION_UPDATING    BIT(0)
+
+struct nfp_hwinfo {
+       u8 start[0];
+
+       __le32 version;
+       __le32 size;
+
+       /* v2 specific fields */
+       __le32 limit;
+       __le32 resv;
+
+       char data[];
+};
+
+static bool nfp_hwinfo_is_updating(struct nfp_hwinfo *hwinfo)
+{
+       return le32_to_cpu(hwinfo->version) & NFP_HWINFO_VERSION_UPDATING;
+}
+
+static int
+hwinfo_db_walk(struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo, u32 size)
+{
+       const char *key, *val, *end = hwinfo->data + size;
+
+       for (key = hwinfo->data; *key && key < end;
+            key = val + strlen(val) + 1) {
+
+               val = key + strlen(key) + 1;
+               if (val >= end) {
+                       nfp_warn(cpp, "Bad HWINFO - overflowing key\n");
+                       return -EINVAL;
+               }
+
+               if (val + strlen(val) + 1 > end) {
+                       nfp_warn(cpp, "Bad HWINFO - overflowing value\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static int
+hwinfo_db_validate(struct nfp_cpp *cpp, struct nfp_hwinfo *db, u32 len)
+{
+       u32 size, crc;
+
+       size = le32_to_cpu(db->size);
+       if (size > len) {
+               nfp_err(cpp, "Unsupported hwinfo size %u > %u\n", size, len);
+               return -EINVAL;
+       }
+
+       size -= sizeof(u32);
+       crc = crc32_posix(db, size);
+       if (crc != get_unaligned_le32(db->start + size)) {
+               nfp_err(cpp, "Corrupt hwinfo table (CRC mismatch), calculated 0x%x, expected 0x%x\n",
+                       crc, get_unaligned_le32(db->start + size));
+
+               return -EINVAL;
+       }
+
+       return hwinfo_db_walk(cpp, db, size);
+}
+
+static int hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size)
+{
+       struct nfp_hwinfo *header;
+       struct nfp_resource *res;
+       u64 cpp_addr;
+       u32 cpp_id;
+       int err;
+       u8 *db;
+
+       res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO);
+       if (!IS_ERR(res)) {
+               cpp_id = nfp_resource_cpp_id(res);
+               cpp_addr = nfp_resource_address(res);
+               *cpp_size = nfp_resource_size(res);
+
+               nfp_resource_release(res);
+
+               if (*cpp_size < HWINFO_SIZE_MIN)
+                       return -ENOENT;
+       } else if (PTR_ERR(res) == -ENOENT) {
+               /* Try getting the HWInfo table from the 'classic' location */
+               cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU,
+                                          NFP_CPP_ACTION_RW, 0, 1);
+               cpp_addr = 0x30000;
+               *cpp_size = 0x0e000;
+       } else {
+               return PTR_ERR(res);
+       }
+
+       db = kmalloc(*cpp_size + 1, GFP_KERNEL);
+       if (!db)
+               return -ENOMEM;
+
+       err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size);
+       if (err != *cpp_size) {
+               kfree(db);
+               return err < 0 ? err : -EIO;
+       }
+
+       header = (void *)db;
+       if (nfp_hwinfo_is_updating(header)) {
+               kfree(db);
+               return -EBUSY;
+       }
+
+       if (le32_to_cpu(header->version) != NFP_HWINFO_VERSION_2) {
+               nfp_err(cpp, "Unknown HWInfo version: 0x%08x\n",
+                       le32_to_cpu(header->version));
+               kfree(db);
+               return -EINVAL;
+       }
+
+       /* NULL-terminate for safety */
+       db[*cpp_size] = '\0';
+
+       nfp_hwinfo_cache_set(cpp, db);
+
+       return 0;
+}
+
+static int hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size)
+{
+       const unsigned long wait_until = jiffies + HWINFO_WAIT * HZ;
+       int err;
+
+       for (;;) {
+               const unsigned long start_time = jiffies;
+
+               err = hwinfo_try_fetch(cpp, hwdb_size);
+               if (!err)
+                       return 0;
+
+               err = msleep_interruptible(100);
+               if (err || time_after(start_time, wait_until)) {
+                       nfp_err(cpp, "NFP access error\n");
+                       return -EIO;
+               }
+       }
+}
+
+static int nfp_hwinfo_load(struct nfp_cpp *cpp)
+{
+       struct nfp_hwinfo *db;
+       size_t hwdb_size = 0;
+       int err;
+
+       err = hwinfo_fetch(cpp, &hwdb_size);
+       if (err)
+               return err;
+
+       db = nfp_hwinfo_cache(cpp);
+       err = hwinfo_db_validate(cpp, db, hwdb_size);
+       if (err) {
+               kfree(db);
+               nfp_hwinfo_cache_set(cpp, NULL);
+               return err;
+       }
+
+       return 0;
+}
+
+/**
+ * nfp_hwinfo_lookup() - Find a value in the HWInfo table by name
+ * @cpp:       NFP CPP handle
+ * @lookup:    HWInfo name to search for
+ *
+ * Return: Value of the HWInfo name, or NULL
+ */
+const char *nfp_hwinfo_lookup(struct nfp_cpp *cpp, const char *lookup)
+{
+       const char *key, *val, *end;
+       struct nfp_hwinfo *hwinfo;
+       int err;
+
+       hwinfo = nfp_hwinfo_cache(cpp);
+       if (!hwinfo) {
+               err = nfp_hwinfo_load(cpp);
+               if (err)
+                       return NULL;
+               hwinfo = nfp_hwinfo_cache(cpp);
+       }
+
+       if (!hwinfo || !lookup)
+               return NULL;
+
+       end = hwinfo->data + le32_to_cpu(hwinfo->size) - sizeof(u32);
+
+       for (key = hwinfo->data; *key && key < end;
+            key = val + strlen(val) + 1) {
+
+               val = key + strlen(key) + 1;
+
+               if (strcmp(key, lookup) == 0)
+                       return val;
+       }
+
+       return NULL;
+}
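
A standalone sketch of the v2 packed key/value walk described at the top of this file; the demo_* names and the sample keys are invented for illustration, but the bounds checks mirror hwinfo_db_walk() and the match loop mirrors nfp_hwinfo_lookup():

    #include <stdio.h>
    #include <string.h>

    /* Walk a packed 'key\0value\0...' blob; return the value for @name,
     * or NULL if the name is absent or an entry is truncated.
     */
    static const char *demo_kv_lookup(const char *data, size_t len,
                                      const char *name)
    {
            const char *key, *val, *end = data + len;

            for (key = data; key < end && *key; key = val + strlen(val) + 1) {
                    val = key + strlen(key) + 1;
                    if (val >= end || val + strlen(val) + 1 > end)
                            return NULL;    /* overflowing key or value */
                    if (!strcmp(key, name))
                            return val;
            }
            return NULL;
    }

    int main(void)
    {
            static const char blob[] =
                    "assembly.model\0hydrogen\0core.speed\0800\0";

            printf("core.speed = %s\n",
                   demo_kv_lookup(blob, sizeof(blob), "core.speed"));
            return 0;
    }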
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c
new file mode 100644 (file)
index 0000000..3d15dd0
--- /dev/null
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_mip.c
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ *          Jason McMullan <jason.mcmullan@netronome.com>
+ *          Espen Skoglund <espen.skoglund@netronome.com>
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "nfp.h"
+#include "nfp_cpp.h"
+#include "nfp_nffw.h"
+
+#define NFP_MIP_SIGNATURE      cpu_to_le32(0x0050494d)  /* "MIP\0" */
+#define NFP_MIP_VERSION                cpu_to_le32(1)
+#define NFP_MIP_MAX_OFFSET     (256 * 1024)
+
+struct nfp_mip {
+       __le32 signature;
+       __le32 mip_version;
+       __le32 mip_size;
+       __le32 first_entry;
+
+       __le32 version;
+       __le32 buildnum;
+       __le32 buildtime;
+       __le32 loadtime;
+
+       __le32 symtab_addr;
+       __le32 symtab_size;
+       __le32 strtab_addr;
+       __le32 strtab_size;
+
+       char name[16];
+       char toolchain[32];
+};
+
+/* Read memory and check if it could be a valid MIP */
+static int
+nfp_mip_try_read(struct nfp_cpp *cpp, u32 cpp_id, u64 addr, struct nfp_mip *mip)
+{
+       int ret;
+
+       ret = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip));
+       if (ret != sizeof(*mip)) {
+               nfp_err(cpp, "Failed to read MIP data (%d, %zu)\n",
+                       ret, sizeof(*mip));
+               return -EIO;
+       }
+       if (mip->signature != NFP_MIP_SIGNATURE) {
+               nfp_warn(cpp, "Incorrect MIP signature (0x%08x)\n",
+                        le32_to_cpu(mip->signature));
+               return -EINVAL;
+       }
+       if (mip->mip_version != NFP_MIP_VERSION) {
+               nfp_warn(cpp, "Unsupported MIP version (%d)\n",
+                        le32_to_cpu(mip->mip_version));
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Try to locate MIP using the resource table */
+static int nfp_mip_read_resource(struct nfp_cpp *cpp, struct nfp_mip *mip)
+{
+       struct nfp_nffw_info *nffw_info;
+       u32 cpp_id;
+       u64 addr;
+       int err;
+
+       nffw_info = nfp_nffw_info_open(cpp);
+       if (IS_ERR(nffw_info))
+               return PTR_ERR(nffw_info);
+
+       err = nfp_nffw_info_mip_first(nffw_info, &cpp_id, &addr);
+       if (err)
+               goto exit_close_nffw;
+
+       err = nfp_mip_try_read(cpp, cpp_id, addr, mip);
+exit_close_nffw:
+       nfp_nffw_info_close(nffw_info);
+       return err;
+}
+
+/**
+ * nfp_mip_open() - Get device MIP structure
+ * @cpp:       NFP CPP Handle
+ *
+ * Copy MIP structure from NFP device and return it.  The returned
+ * structure is handled internally by the library and should be
+ * freed by calling nfp_mip_close().
+ *
+ * Return: pointer to mip, NULL on failure.
+ */
+const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp)
+{
+       struct nfp_mip *mip;
+       int err;
+
+       mip = kmalloc(sizeof(*mip), GFP_KERNEL);
+       if (!mip)
+               return NULL;
+
+       err = nfp_mip_read_resource(cpp, mip);
+       if (err) {
+               kfree(mip);
+               return NULL;
+       }
+
+       return mip;
+}
+
+void nfp_mip_close(const struct nfp_mip *mip)
+{
+       kfree(mip);
+}
+
+/**
+ * nfp_mip_symtab() - Get the address and size of the MIP symbol table
+ * @mip:       MIP handle
+ * @addr:      Location for NFP DDR address of MIP symbol table
+ * @size:      Location for size of MIP symbol table
+ */
+void nfp_mip_symtab(const struct nfp_mip *mip, u32 *addr, u32 *size)
+{
+       *addr = le32_to_cpu(mip->symtab_addr);
+       *size = le32_to_cpu(mip->symtab_size);
+}
+
+/**
+ * nfp_mip_strtab() - Get the address and size of the MIP symbol name table
+ * @mip:       MIP handle
+ * @addr:      Location for NFP DDR address of MIP symbol name table
+ * @size:      Location for size of MIP symbol name table
+ */
+void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size)
+{
+       *addr = le32_to_cpu(mip->strtab_addr);
+       *size = le32_to_cpu(mip->strtab_size);
+}
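
Hedged usage sketch for the API above: open the MIP, report the symbol and string table locations, close it. demo_mip_dump() is a made-up caller; it uses only functions defined in this file plus the nfp_dbg() logging helper used elsewhere in the patch:

    static int demo_mip_dump(struct nfp_cpp *cpp)
    {
            const struct nfp_mip *mip;
            u32 addr, size;

            mip = nfp_mip_open(cpp);
            if (!mip)
                    return -ENODEV;

            nfp_mip_symtab(mip, &addr, &size);
            nfp_dbg(cpp, "MIP symtab: addr 0x%08x size %u\n", addr, size);

            nfp_mip_strtab(mip, &addr, &size);
            nfp_dbg(cpp, "MIP strtab: addr 0x%08x size %u\n", addr, size);

            nfp_mip_close(mip);
            return 0;
    }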
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
new file mode 100644 (file)
index 0000000..cd34097
--- /dev/null
@@ -0,0 +1,323 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_nffw.c
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ *          Jason McMullan <jason.mcmullan@netronome.com>
+ *          Francois H. Theron <francois.theron@netronome.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "nfp.h"
+#include "nfp_cpp.h"
+#include "nfp_nffw.h"
+#include "nfp6000/nfp6000.h"
+
+/* Init-CSR owner IDs for firmware map to firmware IDs which start at 4.
+ * Lower IDs are reserved for target and loader IDs.
+ */
+#define NFFW_FWID_EXT   3 /* For active MEs that we didn't load. */
+#define NFFW_FWID_BASE  4
+
+#define NFFW_FWID_ALL   255
+
+/**
+ * NFFW_INFO_VERSION history:
+ * 0: This was never actually used (before versioning), but it refers to
+ *    the previous struct which had FWINFO_CNT = MEINFO_CNT = 120 that later
+ *    changed to 200.
+ * 1: First versioned struct, with
+ *     FWINFO_CNT = 120
+ *     MEINFO_CNT = 120
+ * 2:  FWINFO_CNT = 200
+ *     MEINFO_CNT = 200
+ */
+#define NFFW_INFO_VERSION_CURRENT 2
+
+/* Enough for all current chip families */
+#define NFFW_MEINFO_CNT_V1 120
+#define NFFW_FWINFO_CNT_V1 120
+#define NFFW_MEINFO_CNT_V2 200
+#define NFFW_FWINFO_CNT_V2 200
+
+/* Work in 32-bit words to make cross-platform endianness easier to handle */
+
+/** nfp.nffw meinfo **/
+struct nffw_meinfo {
+       __le32 ctxmask__fwid__meid;
+};
+
+struct nffw_fwinfo {
+       __le32 loaded__mu_da__mip_off_hi;
+       __le32 mip_cppid; /* 0 means no MIP */
+       __le32 mip_offset_lo;
+};
+
+struct nfp_nffw_info_v1 {
+       struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V1];
+       struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V1];
+};
+
+struct nfp_nffw_info_v2 {
+       struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V2];
+       struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V2];
+};
+
+/** Resource: nfp.nffw main **/
+struct nfp_nffw_info_data {
+       __le32 flags[2];
+       union {
+               struct nfp_nffw_info_v1 v1;
+               struct nfp_nffw_info_v2 v2;
+       } info;
+};
+
+struct nfp_nffw_info {
+       struct nfp_cpp *cpp;
+       struct nfp_resource *res;
+
+       struct nfp_nffw_info_data fwinf;
+};
+
+/* flg_info_version = flags[0]<27:16>
+ * This is a small version counter intended only to detect if the current
+ * implementation can read the current struct. Struct changes should be very
+ * rare and as such a 12-bit counter should cover large spans of time. By the
+ * time it wraps around, we don't expect to have 4096 versions of this struct
+ * to be in use at the same time.
+ */
+static u32 nffw_res_info_version_get(const struct nfp_nffw_info_data *res)
+{
+       return (le32_to_cpu(res->flags[0]) >> 16) & 0xfff;
+}
+
+/* flg_init = flags[0]<0> */
+static u32 nffw_res_flg_init_get(const struct nfp_nffw_info_data *res)
+{
+       return (le32_to_cpu(res->flags[0]) >> 0) & 1;
+}
+
+/* loaded = loaded__mu_da__mip_off_hi<31:31> */
+static u32 nffw_fwinfo_loaded_get(const struct nffw_fwinfo *fi)
+{
+       return (le32_to_cpu(fi->loaded__mu_da__mip_off_hi) >> 31) & 1;
+}
+
+/* mip_cppid = mip_cppid */
+static u32 nffw_fwinfo_mip_cppid_get(const struct nffw_fwinfo *fi)
+{
+       return le32_to_cpu(fi->mip_cppid);
+}
+
+/* mip_mu_da = loaded__mu_da__mip_off_hi<8:8> */
+static u32 nffw_fwinfo_mip_mu_da_get(const struct nffw_fwinfo *fi)
+{
+       return (le32_to_cpu(fi->loaded__mu_da__mip_off_hi) >> 8) & 1;
+}
+
+/* mip_offset = (loaded__mu_da__mip_off_hi<7:0> << 32) | mip_offset_lo */
+static u64 nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi)
+{
+       u64 mip_off_hi = le32_to_cpu(fi->loaded__mu_da__mip_off_hi);
+
+       return (mip_off_hi & 0xFF) << 32 | le32_to_cpu(fi->mip_offset_lo);
+}
+
+#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x)          (((_x) >> 13) & 0x7)
+#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE             BIT(12)
+#define   NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT    0
+#define   NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT    BIT(12)
+
+static int nfp_mip_mu_locality_lsb(struct nfp_cpp *cpp)
+{
+       unsigned int mode, addr40;
+       u32 xpbaddr, imbcppat;
+       int err;
+
+       /* Hardcoded XPB IMB Base, island 0 */
+       xpbaddr = 0x000a0000 + NFP_CPP_TARGET_MU * 4;
+       err = nfp_xpb_readl(cpp, xpbaddr, &imbcppat);
+       if (err < 0)
+               return err;
+
+       mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat);
+       addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE);
+
+       return nfp_cppat_mu_locality_lsb(mode, addr40);
+}
+
+static unsigned int
+nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr)
+{
+       /* For this code, version 0 is most likely to actually be
+        * version 1. Since the kernel driver does not take
+        * responsibility for initialising the nfp.nffw resource,
+        * any previous code (CA firmware or userspace) that left
+        * the version at 0 but did set the init flag is going to
+        * be version 1.
+        */
+       switch (nffw_res_info_version_get(fwinf)) {
+       case 0:
+       case 1:
+               *arr = &fwinf->info.v1.fwinfo[0];
+               return NFFW_FWINFO_CNT_V1;
+       case 2:
+               *arr = &fwinf->info.v2.fwinfo[0];
+               return NFFW_FWINFO_CNT_V2;
+       default:
+               *arr = NULL;
+               return 0;
+       }
+}
+
+/**
+ * nfp_nffw_info_open() - Acquire the lock on the NFFW table
+ * @cpp:       NFP CPP handle
+ *
+ * Return: pointer to the NFFW info state, or ERR_PTR()
+ */
+struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
+{
+       struct nfp_nffw_info_data *fwinf;
+       struct nfp_nffw_info *state;
+       u32 info_ver;
+       int err;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return ERR_PTR(-ENOMEM);
+
+       state->res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW);
+       if (IS_ERR(state->res))
+               goto err_free;
+
+       fwinf = &state->fwinf;
+
+       if (sizeof(*fwinf) > nfp_resource_size(state->res))
+               goto err_release;
+
+       err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
+                          nfp_resource_address(state->res),
+                          fwinf, sizeof(*fwinf));
+       if (err < sizeof(*fwinf))
+               goto err_release;
+
+       if (!nffw_res_flg_init_get(fwinf))
+               goto err_release;
+
+       info_ver = nffw_res_info_version_get(fwinf);
+       if (info_ver > NFFW_INFO_VERSION_CURRENT)
+               goto err_release;
+
+       state->cpp = cpp;
+       return state;
+
+err_release:
+       nfp_resource_release(state->res);
+err_free:
+       kfree(state);
+       return ERR_PTR(-EIO);
+}
+
+/**
+ * nfp_nffw_info_close() - Release the lock on the NFFW table
+ * @state:     NFP FW info state
+ */
+void nfp_nffw_info_close(struct nfp_nffw_info *state)
+{
+       nfp_resource_release(state->res);
+       kfree(state);
+}
+
+/**
+ * nfp_nffw_info_fwid_first() - Return the first loaded firmware entry in the NFFW
+ * @state:     NFP FW info state
+ *
+ * Return: First NFFW firmware info, NULL on failure
+ */
+static struct nffw_fwinfo *nfp_nffw_info_fwid_first(struct nfp_nffw_info *state)
+{
+       struct nffw_fwinfo *fwinfo;
+       unsigned int cnt, i;
+
+       cnt = nffw_res_fwinfos(&state->fwinf, &fwinfo);
+       if (!cnt)
+               return NULL;
+
+       for (i = 0; i < cnt; i++)
+               if (nffw_fwinfo_loaded_get(&fwinfo[i]))
+                       return &fwinfo[i];
+
+       return NULL;
+}
+
+/**
+ * nfp_nffw_info_mip_first() - Retrieve the location of the first FW's MIP
+ * @state:     NFP FW info state
+ * @cpp_id:    Pointer to the CPP ID of the MIP
+ * @off:       Pointer to the CPP Address of the MIP
+ *
+ * Return: 0, or -ERRNO
+ */
+int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off)
+{
+       struct nffw_fwinfo *fwinfo;
+
+       fwinfo = nfp_nffw_info_fwid_first(state);
+       if (!fwinfo)
+               return -EINVAL;
+
+       *cpp_id = nffw_fwinfo_mip_cppid_get(fwinfo);
+       *off = nffw_fwinfo_mip_offset_get(fwinfo);
+
+       if (nffw_fwinfo_mip_mu_da_get(fwinfo)) {
+               int locality_off;
+
+               if (NFP_CPP_ID_TARGET_of(*cpp_id) != NFP_CPP_TARGET_MU)
+                       return 0;
+
+               locality_off = nfp_mip_mu_locality_lsb(state->cpp);
+               if (locality_off < 0)
+                       return locality_off;
+
+               *off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off);
+               *off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off;
+       }
+
+       return 0;
+}
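
A runnable sketch of how the packed loaded__mu_da__mip_off_hi word decomposes (loaded in bit 31, mu_da in bit 8, the high MIP offset byte in bits 7:0); the sample values are invented:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t w = 0x80000112;        /* loaded=1, mu_da=1, off_hi=0x12 */
            uint32_t off_lo = 0x00345678;
            uint64_t off = (uint64_t)(w & 0xFF) << 32 | off_lo;

            printf("loaded=%u mu_da=%u mip_offset=0x%llx\n",
                   (w >> 31) & 1, (w >> 8) & 1,
                   (unsigned long long)off);
            return 0;
    }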
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
new file mode 100644 (file)
index 0000000..988badd
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_nffw.h
+ * Authors: Jason McMullan <jason.mcmullan@netronome.com>
+ *          Francois H. Theron <francois.theron@netronome.com>
+ */
+
+#ifndef NFP_NFFW_H
+#define NFP_NFFW_H
+
+/* Implemented in nfp_nffw.c */
+
+struct nfp_nffw_info;
+
+struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp);
+void nfp_nffw_info_close(struct nfp_nffw_info *state);
+int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off);
+
+/* Implemented in nfp_mip.c */
+
+struct nfp_mip;
+
+const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp);
+void nfp_mip_close(const struct nfp_mip *mip);
+
+void nfp_mip_symtab(const struct nfp_mip *mip, u32 *addr, u32 *size);
+void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size);
+
+/* Implemented in nfp_rtsym.c */
+
+#define NFP_RTSYM_TYPE_NONE            0
+#define NFP_RTSYM_TYPE_OBJECT          1
+#define NFP_RTSYM_TYPE_FUNCTION                2
+#define NFP_RTSYM_TYPE_ABS             3
+
+#define NFP_RTSYM_TARGET_NONE          0
+#define NFP_RTSYM_TARGET_LMEM          -1
+#define NFP_RTSYM_TARGET_EMU_CACHE     -7
+
+/**
+ * struct nfp_rtsym - RTSYM descriptor
+ * @name:      Symbol name
+ * @addr:      Address in the domain/target's address space
+ * @size:      Size (in bytes) of the symbol
+ * @type:      NFP_RTSYM_TYPE_* of the symbol
+ * @target:    CPP Target identifier, or NFP_RTSYM_TARGET_*
+ * @domain:    CPP Target Domain (island)
+ */
+struct nfp_rtsym {
+       const char *name;
+       u64 addr;
+       u64 size;
+       int type;
+       int target;
+       int domain;
+};
+
+int nfp_rtsym_count(struct nfp_cpp *cpp);
+const struct nfp_rtsym *nfp_rtsym_get(struct nfp_cpp *cpp, int idx);
+const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name);
+u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error);
+
+#endif /* NFP_NFFW_H */
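
Hedged sketch of the nfp_rtsym API declared above: resolve a firmware run-time symbol by name and log where it lives. The caller and the "_fw_version" symbol name are hypothetical:

    static void demo_rtsym(struct nfp_cpp *cpp)
    {
            const struct nfp_rtsym *sym;

            sym = nfp_rtsym_lookup(cpp, "_fw_version");
            if (!sym)
                    return;

            nfp_dbg(cpp, "%s: target %d island %d addr 0x%llx size %llu\n",
                    sym->name, sym->target, sym->domain,
                    (unsigned long long)sym->addr,
                    (unsigned long long)sym->size);
    }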
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
new file mode 100644 (file)
index 0000000..f07f2fc
--- /dev/null
@@ -0,0 +1,417 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_nsp.c
+ * Author: Jakub Kicinski <jakub.kicinski@netronome.com>
+ *         Jason McMullan <jason.mcmullan@netronome.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#define NFP_SUBSYS "nfp_nsp"
+
+#include "nfp.h"
+#include "nfp_cpp.h"
+
+/* Offsets relative to the CSR base */
+#define NSP_STATUS             0x00
+#define   NSP_STATUS_MAGIC     GENMASK_ULL(63, 48)
+#define   NSP_STATUS_MAJOR     GENMASK_ULL(47, 44)
+#define   NSP_STATUS_MINOR     GENMASK_ULL(43, 32)
+#define   NSP_STATUS_CODE      GENMASK_ULL(31, 16)
+#define   NSP_STATUS_RESULT    GENMASK_ULL(15, 8)
+#define   NSP_STATUS_BUSY      BIT_ULL(0)
+
+#define NSP_COMMAND            0x08
+#define   NSP_COMMAND_OPTION   GENMASK_ULL(63, 32)
+#define   NSP_COMMAND_CODE     GENMASK_ULL(31, 16)
+#define   NSP_COMMAND_START    BIT_ULL(0)
+
+/* CPP address to retrieve the data from */
+#define NSP_BUFFER             0x10
+#define   NSP_BUFFER_CPP       GENMASK_ULL(63, 40)
+#define   NSP_BUFFER_PCIE      GENMASK_ULL(39, 38)
+#define   NSP_BUFFER_ADDRESS   GENMASK_ULL(37, 0)
+
+#define NSP_DFLT_BUFFER                0x18
+
+#define NSP_DFLT_BUFFER_CONFIG 0x20
+#define   NSP_DFLT_BUFFER_SIZE_MB      GENMASK_ULL(7, 0)
+
+#define NSP_MAGIC              0xab10
+#define NSP_MAJOR              0
+#define NSP_MINOR              (__MAX_SPCODE - 1)
+
+#define NSP_CODE_MAJOR         GENMASK(15, 12)
+#define NSP_CODE_MINOR         GENMASK(11, 0)
+
+enum nfp_nsp_cmd {
+       SPCODE_NOOP             = 0, /* No operation */
+       SPCODE_SOFT_RESET       = 1, /* Soft reset the NFP */
+       SPCODE_FW_DEFAULT       = 2, /* Load default (UNDI) FW */
+       SPCODE_PHY_INIT         = 3, /* Initialize the PHY */
+       SPCODE_MAC_INIT         = 4, /* Initialize the MAC */
+       SPCODE_PHY_RXADAPT      = 5, /* Re-run PHY RX Adaptation */
+       SPCODE_FW_LOAD          = 6, /* Load fw from buffer, len in option */
+       SPCODE_ETH_RESCAN       = 7, /* Rescan ETHs, write ETH_TABLE to buf */
+       SPCODE_ETH_CONTROL      = 8, /* Update media config from buffer */
+
+       __MAX_SPCODE,
+};
+
+struct nfp_nsp {
+       struct nfp_cpp *cpp;
+       struct nfp_resource *res;
+};
+
+static int nfp_nsp_check(struct nfp_nsp *state)
+{
+       struct nfp_cpp *cpp = state->cpp;
+       u64 nsp_status, reg;
+       u32 nsp_cpp;
+       int err;
+
+       nsp_cpp = nfp_resource_cpp_id(state->res);
+       nsp_status = nfp_resource_address(state->res) + NSP_STATUS;
+
+       err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, &reg);
+       if (err < 0)
+               return err;
+
+       if (FIELD_GET(NSP_STATUS_MAGIC, reg) != NSP_MAGIC) {
+               nfp_err(cpp, "Cannot detect NFP Service Processor\n");
+               return -ENODEV;
+       }
+
+       if (FIELD_GET(NSP_STATUS_MAJOR, reg) != NSP_MAJOR ||
+           FIELD_GET(NSP_STATUS_MINOR, reg) < NSP_MINOR) {
+               nfp_err(cpp, "Unsupported ABI %lld.%lld\n",
+                       FIELD_GET(NSP_STATUS_MAJOR, reg),
+                       FIELD_GET(NSP_STATUS_MINOR, reg));
+               return -EINVAL;
+       }
+
+       if (reg & NSP_STATUS_BUSY) {
+               nfp_err(cpp, "Service processor busy!\n");
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
+/**
+ * nfp_nsp_open() - Prepare for communication and lock the NSP resource.
+ * @cpp:       NFP CPP Handle
+ *
+ * Return: NFP NSP handle, or ERR_PTR()
+ */
+struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp)
+{
+       struct nfp_resource *res;
+       struct nfp_nsp *state;
+       int err;
+
+       res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP);
+       if (IS_ERR(res))
+               return (void *)res;
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state) {
+               nfp_resource_release(res);
+               return ERR_PTR(-ENOMEM);
+       }
+       state->cpp = cpp;
+       state->res = res;
+
+       err = nfp_nsp_check(state);
+       if (err) {
+               nfp_nsp_close(state);
+               return ERR_PTR(err);
+       }
+
+       return state;
+}
+
+/**
+ * nfp_nsp_close() - Clean up and unlock the NSP resource.
+ * @state:     NFP SP state
+ */
+void nfp_nsp_close(struct nfp_nsp *state)
+{
+       nfp_resource_release(state->res);
+       kfree(state);
+}
+
+static int
+nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg,
+                u32 nsp_cpp, u64 addr, u64 mask, u64 val)
+{
+       const unsigned long wait_until = jiffies + 30 * HZ;
+       int err;
+
+       for (;;) {
+               const unsigned long start_time = jiffies;
+
+               err = nfp_cpp_readq(cpp, nsp_cpp, addr, reg);
+               if (err < 0)
+                       return err;
+
+               if ((*reg & mask) == val)
+                       return 0;
+
+               err = msleep_interruptible(100);
+               if (err)
+                       return err;
+
+               if (time_after(start_time, wait_until))
+                       return -ETIMEDOUT;
+       }
+}
+
+/**
+ * nfp_nsp_command() - Execute a command on the NFP Service Processor
+ * @state:     NFP SP state
+ * @code:      NFP SP Command Code
+ * @option:    NFP SP Command Argument
+ * @buff_cpp:  NFP SP Buffer CPP Address info
+ * @buff_addr: NFP SP Buffer Host address
+ *
+ * Return: 0 for success with no result
+ *
+ *      1..255 for NSP completion with a result code
+ *
+ *     -EAGAIN if the NSP is not yet present
+ *     -ENODEV if the NSP is not a supported model
+ *     -EBUSY if the NSP is stuck
+ *     -EINTR if interrupted while waiting for completion
+ *     -ETIMEDOUT if the NSP took longer than 30 seconds to complete
+ */
+static int nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option,
+                          u32 buff_cpp, u64 buff_addr)
+{
+       u64 reg, nsp_base, nsp_buffer, nsp_status, nsp_command;
+       struct nfp_cpp *cpp = state->cpp;
+       u32 nsp_cpp;
+       int err;
+
+       nsp_cpp = nfp_resource_cpp_id(state->res);
+       nsp_base = nfp_resource_address(state->res);
+       nsp_status = nsp_base + NSP_STATUS;
+       nsp_command = nsp_base + NSP_COMMAND;
+       nsp_buffer = nsp_base + NSP_BUFFER;
+
+       err = nfp_nsp_check(state);
+       if (err)
+               return err;
+
+       if (!FIELD_FIT(NSP_BUFFER_CPP, buff_cpp >> 8) ||
+           !FIELD_FIT(NSP_BUFFER_ADDRESS, buff_addr)) {
+               nfp_err(cpp, "Host buffer out of reach %08x %016llx\n",
+                       buff_cpp, buff_addr);
+               return -EINVAL;
+       }
+
+       err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer,
+                            FIELD_PREP(NSP_BUFFER_CPP, buff_cpp >> 8) |
+                            FIELD_PREP(NSP_BUFFER_ADDRESS, buff_addr));
+       if (err < 0)
+               return err;
+
+       err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command,
+                            FIELD_PREP(NSP_COMMAND_OPTION, option) |
+                            FIELD_PREP(NSP_COMMAND_CODE, code) |
+                            FIELD_PREP(NSP_COMMAND_START, 1));
+       if (err < 0)
+               return err;
+
+       /* Wait for NSP_COMMAND_START to go to 0 */
+       err = nfp_nsp_wait_reg(cpp, &reg,
+                              nsp_cpp, nsp_command, NSP_COMMAND_START, 0);
+       if (err) {
+               nfp_err(cpp, "Error %d waiting for code 0x%04x to start\n",
+                       err, code);
+               return err;
+       }
+
+       /* Wait for NSP_STATUS_BUSY to go to 0 */
+       err = nfp_nsp_wait_reg(cpp, &reg,
+                              nsp_cpp, nsp_status, NSP_STATUS_BUSY, 0);
+       if (err) {
+               nfp_err(cpp, "Error %d waiting for code 0x%04x to complete\n",
+                       err, code);
+               return err;
+       }
+
+       err = FIELD_GET(NSP_STATUS_RESULT, reg);
+       if (err) {
+               nfp_warn(cpp, "Result (error) code set: %d command: %d\n",
+                        -err, code);
+               return -err;
+       }
+
+       err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &reg);
+       if (err < 0)
+               return err;
+
+       return FIELD_GET(NSP_COMMAND_OPTION, reg);
+}
+
+static int nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option,
+                              const void *in_buf, unsigned int in_size,
+                              void *out_buf, unsigned int out_size)
+{
+       struct nfp_cpp *cpp = nsp->cpp;
+       unsigned int max_size;
+       u64 reg, cpp_buf;
+       int ret, err;
+       u32 cpp_id;
+
+       err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
+                           nfp_resource_address(nsp->res) + NSP_STATUS, &reg);
+       if (err < 0)
+               return err;
+
+       if (FIELD_GET(NSP_STATUS_MINOR, reg) < 13) {
+               nfp_err(cpp, "NSP: Code 0x%04x with buffer not supported (ABI %lld.%lld)\n",
+                       code, FIELD_GET(NSP_STATUS_MAJOR, reg),
+                       FIELD_GET(NSP_STATUS_MINOR, reg));
+               return -EOPNOTSUPP;
+       }
+
+       err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
+                           nfp_resource_address(nsp->res) +
+                           NSP_DFLT_BUFFER_CONFIG,
+                           &reg);
+       if (err < 0)
+               return err;
+
+       max_size = max(in_size, out_size);
+       if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) {
+               nfp_err(cpp, "NSP: default buffer too small for command 0x%04x (%llu < %u)\n",
+                       code, FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M,
+                       max_size);
+               return -EINVAL;
+       }
+
+       err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
+                           nfp_resource_address(nsp->res) +
+                           NSP_DFLT_BUFFER,
+                           &reg);
+       if (err < 0)
+               return err;
+
+       cpp_id = FIELD_GET(NSP_BUFFER_CPP, reg) << 8;
+       cpp_buf = FIELD_GET(NSP_BUFFER_ADDRESS, reg);
+
+       if (in_buf && in_size) {
+               err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size);
+               if (err < 0)
+                       return err;
+       }
+
+       ret = nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf);
+       if (ret < 0)
+               return ret;
+
+       if (out_buf && out_size) {
+               err = nfp_cpp_read(cpp, cpp_id, cpp_buf, out_buf, out_size);
+               if (err < 0)
+                       return err;
+       }
+
+       return ret;
+}
+
+int nfp_nsp_wait(struct nfp_nsp *state)
+{
+       const unsigned long wait_until = jiffies + 30 * HZ;
+       int err;
+
+       nfp_dbg(state->cpp, "Waiting for NSP to respond (30 sec max).\n");
+
+       for (;;) {
+               const unsigned long start_time = jiffies;
+
+               err = nfp_nsp_command(state, SPCODE_NOOP, 0, 0, 0);
+               if (err != -EAGAIN)
+                       break;
+
+               err = msleep_interruptible(100);
+               if (err)
+                       break;
+
+               if (time_after(start_time, wait_until)) {
+                       err = -ETIMEDOUT;
+                       break;
+               }
+       }
+       if (err)
+               nfp_err(state->cpp, "NSP failed to respond %d\n", err);
+
+       return err;
+}
+
+int nfp_nsp_device_soft_reset(struct nfp_nsp *state)
+{
+       int err;
+
+       err = nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0);
+
+       nfp_nffw_cache_flush(state->cpp);
+
+       return err;
+}
+
+int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw)
+{
+       return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, fw->size, fw->data,
+                                  fw->size, NULL, 0);
+}
+
+int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+       return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0,
+                                  buf, size);
+}
+
+int nfp_nsp_write_eth_table(struct nfp_nsp *state,
+                           const void *buf, unsigned int size)
+{
+       return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size,
+                                  NULL, 0);
+}
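
Hedged sketch of a typical NSP session built from the functions above: acquire the service processor, wait for it to answer a NOOP, load firmware, release it. demo_nsp_load() is an invented caller; error paths are trimmed to the essentials:

    static int demo_nsp_load(struct nfp_cpp *cpp, const struct firmware *fw)
    {
            struct nfp_nsp *nsp;
            int err;

            nsp = nfp_nsp_open(cpp);
            if (IS_ERR(nsp))
                    return PTR_ERR(nsp);

            err = nfp_nsp_wait(nsp);
            if (!err)
                    err = nfp_nsp_load_fw(nsp, fw);

            nfp_nsp_close(nsp);
            return err;
    }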
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
new file mode 100644 (file)
index 0000000..1ece1f8
--- /dev/null
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/* Authors: David Brunecz <david.brunecz@netronome.com>
+ *          Jakub Kicinski <jakub.kicinski@netronome.com>
+ *          Jason Mcmullan <jason.mcmullan@netronome.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "nfp.h"
+#include "nfp_nsp_eth.h"
+#include "nfp6000/nfp6000.h"
+
+#define NSP_ETH_NBI_PORT_COUNT         24
+#define NSP_ETH_MAX_COUNT              (2 * NSP_ETH_NBI_PORT_COUNT)
+#define NSP_ETH_TABLE_SIZE             (NSP_ETH_MAX_COUNT *            \
+                                        sizeof(struct eth_table_entry))
+
+#define NSP_ETH_PORT_LANES             GENMASK_ULL(3, 0)
+#define NSP_ETH_PORT_INDEX             GENMASK_ULL(15, 8)
+#define NSP_ETH_PORT_LABEL             GENMASK_ULL(53, 48)
+#define NSP_ETH_PORT_PHYLABEL          GENMASK_ULL(59, 54)
+
+#define NSP_ETH_PORT_LANES_MASK                cpu_to_le64(NSP_ETH_PORT_LANES)
+
+#define NSP_ETH_STATE_ENABLED          BIT_ULL(1)
+#define NSP_ETH_STATE_TX_ENABLED       BIT_ULL(2)
+#define NSP_ETH_STATE_RX_ENABLED       BIT_ULL(3)
+#define NSP_ETH_STATE_RATE             GENMASK_ULL(11, 8)
+
+#define NSP_ETH_CTRL_ENABLED           BIT_ULL(1)
+#define NSP_ETH_CTRL_TX_ENABLED                BIT_ULL(2)
+#define NSP_ETH_CTRL_RX_ENABLED                BIT_ULL(3)
+
+enum nfp_eth_rate {
+       RATE_INVALID = 0,
+       RATE_10M,
+       RATE_100M,
+       RATE_1G,
+       RATE_10G,
+       RATE_25G,
+};
+
+struct eth_table_entry {
+       __le64 port;
+       __le64 state;
+       u8 mac_addr[6];
+       u8 resv[2];
+       __le64 control;
+};
+
+static unsigned int nfp_eth_rate(enum nfp_eth_rate rate)
+{
+       unsigned int rate_xlate[] = {
+               [RATE_INVALID]          = 0,
+               [RATE_10M]              = SPEED_10,
+               [RATE_100M]             = SPEED_100,
+               [RATE_1G]               = SPEED_1000,
+               [RATE_10G]              = SPEED_10000,
+               [RATE_25G]              = SPEED_25000,
+       };
+
+       if (rate >= ARRAY_SIZE(rate_xlate))
+               return 0;
+
+       return rate_xlate[rate];
+}
+
+static void nfp_eth_copy_mac_reverse(u8 *dst, const u8 *src)
+{
+       int i;
+
+       for (i = 0; i < ETH_ALEN; i++)
+               dst[ETH_ALEN - i - 1] = src[i];
+}
+
+static void
+nfp_eth_port_translate(const struct eth_table_entry *src, unsigned int index,
+                      struct nfp_eth_table_port *dst)
+{
+       unsigned int rate;
+       u64 port, state;
+
+       port = le64_to_cpu(src->port);
+       state = le64_to_cpu(src->state);
+
+       dst->eth_index = FIELD_GET(NSP_ETH_PORT_INDEX, port);
+       dst->index = index;
+       dst->nbi = index / NSP_ETH_NBI_PORT_COUNT;
+       dst->base = index % NSP_ETH_NBI_PORT_COUNT;
+       dst->lanes = FIELD_GET(NSP_ETH_PORT_LANES, port);
+
+       dst->enabled = FIELD_GET(NSP_ETH_STATE_ENABLED, state);
+       dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state);
+       dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state);
+
+       rate = nfp_eth_rate(FIELD_GET(NSP_ETH_STATE_RATE, state));
+       dst->speed = dst->lanes * rate;
+
+       nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr);
+
+       snprintf(dst->label, sizeof(dst->label) - 1, "%llu.%llu",
+                FIELD_GET(NSP_ETH_PORT_PHYLABEL, port),
+                FIELD_GET(NSP_ETH_PORT_LABEL, port));
+}
+
+/**
+ * nfp_eth_read_ports() - retrieve port information
+ * @cpp:       NFP CPP handle
+ *
+ * Read the port information from the device.  Returned structure should
+ * be freed with kfree() once no longer needed.
+ *
+ * Return: populated ETH table or NULL on error.
+ */
+struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp)
+{
+       struct nfp_eth_table *ret;
+       struct nfp_nsp *nsp;
+
+       nsp = nfp_nsp_open(cpp);
+       if (IS_ERR(nsp))
+               return NULL;
+
+       ret = __nfp_eth_read_ports(cpp, nsp);
+       nfp_nsp_close(nsp);
+
+       return ret;
+}
+
+struct nfp_eth_table *
+__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp)
+{
+       struct eth_table_entry *entries;
+       struct nfp_eth_table *table;
+       unsigned int cnt;
+       int i, j, ret;
+
+       entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);
+       if (!entries)
+               return NULL;
+
+       ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+       if (ret < 0) {
+               nfp_err(cpp, "reading port table failed %d\n", ret);
+               kfree(entries);
+               return NULL;
+       }
+
+       /* Some versions of flash will give us 0 instead of port count */
+       cnt = ret;
+       if (!cnt) {
+               for (i = 0; i < NSP_ETH_MAX_COUNT; i++)
+                       if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
+                               cnt++;
+       }
+
+       table = kzalloc(sizeof(*table) +
+                       sizeof(struct nfp_eth_table_port) * cnt, GFP_KERNEL);
+       if (!table) {
+               kfree(entries);
+               return NULL;
+       }
+
+       table->count = cnt;
+       for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++)
+               if (entries[i].port & NSP_ETH_PORT_LANES_MASK)
+                       nfp_eth_port_translate(&entries[i], i,
+                                              &table->ports[j++]);
+
+       kfree(entries);
+
+       return table;
+}
+
+/**
+ * nfp_eth_set_mod_enable() - set PHY module enable control bit
+ * @cpp:       NFP CPP handle
+ * @idx:       NFP chip-wide port index
+ * @enable:    Desired state
+ *
+ * Enable or disable PHY module (this usually means setting the TX lanes
+ * disable bits).
+ *
+ * Return: 0 or -ERRNO.
+ */
+int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable)
+{
+       struct eth_table_entry *entries;
+       struct nfp_nsp *nsp;
+       u64 reg;
+       int ret;
+
+       entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL);
+       if (!entries)
+               return -ENOMEM;
+
+       nsp = nfp_nsp_open(cpp);
+       if (IS_ERR(nsp)) {
+               kfree(entries);
+               return PTR_ERR(nsp);
+       }
+
+       ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+       if (ret < 0) {
+               nfp_err(cpp, "reading port table failed %d\n", ret);
+               goto exit_close_nsp;
+       }
+
+       if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) {
+               nfp_warn(cpp, "trying to set port state on disabled port %d\n",
+                        idx);
+               ret = -EINVAL;
+               goto exit_close_nsp;
+       }
+
+       /* Check if we are already in requested state */
+       reg = le64_to_cpu(entries[idx].state);
+       if (enable == FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) {
+               ret = 0;
+               goto exit_close_nsp;
+       }
+
+       reg = le64_to_cpu(entries[idx].control);
+       reg &= ~NSP_ETH_CTRL_ENABLED;
+       reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable);
+       entries[idx].control = cpu_to_le64(reg);
+
+       ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE);
+exit_close_nsp:
+       nfp_nsp_close(nsp);
+       kfree(entries);
+
+       return ret < 0 ? ret : 0;
+}
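
Hedged usage sketch for the two entry points above: enumerate the port table, log each port, then disable the module on chip-wide port 0. demo_eth() is an invented caller:

    static int demo_eth(struct nfp_cpp *cpp)
    {
            struct nfp_eth_table *eth_tbl;
            unsigned int i;

            eth_tbl = nfp_eth_read_ports(cpp);
            if (!eth_tbl)
                    return -EIO;

            for (i = 0; i < eth_tbl->count; i++)
                    nfp_dbg(cpp, "port %s: %u Mbps, %sabled\n",
                            eth_tbl->ports[i].label,
                            eth_tbl->ports[i].speed,
                            eth_tbl->ports[i].enabled ? "en" : "dis");

            kfree(eth_tbl);

            return nfp_eth_set_mod_enable(cpp, 0, false);
    }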
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.h
new file mode 100644 (file)
index 0000000..edf703d
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef NSP_NSP_ETH_H
+#define NSP_NSP_ETH_H 1
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/**
+ * struct nfp_eth_table - ETH table information
+ * @count:     number of table entries
+ * @ports:     table of ports
+ *
+ * @eth_index: port index according to legacy ethX numbering
+ * @index:     chip-wide first channel index
+ * @nbi:       NBI index
+ * @base:      first channel index (within NBI)
+ * @lanes:     number of channels
+ * @speed:     interface speed (in Mbps)
+ * @mac_addr:  interface MAC address
+ * @label:     interface id string
+ * @enabled:   is enabled?
+ * @tx_enabled:        is TX enabled?
+ * @rx_enabled:        is RX enabled?
+ */
+struct nfp_eth_table {
+       unsigned int count;
+       struct nfp_eth_table_port {
+               unsigned int eth_index;
+               unsigned int index;
+               unsigned int nbi;
+               unsigned int base;
+               unsigned int lanes;
+               unsigned int speed;
+
+               u8 mac_addr[ETH_ALEN];
+               char label[8];
+
+               bool enabled;
+               bool tx_enabled;
+               bool rx_enabled;
+       } ports[0];
+};
+
+struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
+struct nfp_eth_table *
+__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp);
+int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable);
+
+#endif
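
The zero-length ports[0] member means a table with N ports lives in a single allocation; a minimal sketch of the sizing arithmetic __nfp_eth_read_ports() uses:

    /* One allocation covers the header plus 'cnt' port entries. */
    static struct nfp_eth_table *demo_alloc_eth_table(unsigned int cnt)
    {
            return kzalloc(sizeof(struct nfp_eth_table) +
                           cnt * sizeof(struct nfp_eth_table_port),
                           GFP_KERNEL);
    }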
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
new file mode 100644 (file)
index 0000000..a285034
--- /dev/null
@@ -0,0 +1,279 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_resource.c
+ * Author: Jakub Kicinski <jakub.kicinski@netronome.com>
+ *         Jason McMullan <jason.mcmullan@netronome.com>
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "crc32.h"
+#include "nfp.h"
+#include "nfp_cpp.h"
+#include "nfp6000/nfp6000.h"
+
+#define NFP_RESOURCE_ENTRY_NAME_SZ     8
+
+/**
+ * struct nfp_resource_entry - Resource table entry
+ * @owner:             NFP CPP Lock, interface owner
+ * @key:               NFP CPP Lock, posix_crc32(name, 8)
+ * @region:            Memory region descriptor
+ * @name:              ASCII, zero padded name
+ * @reserved:          reserved field
+ * @cpp_action:                CPP Action
+ * @cpp_token:         CPP Token
+ * @cpp_target:                CPP Target ID
+ * @page_offset:       256-byte page offset into target's CPP address
+ * @page_size:         size, in 256-byte pages
+ */
+struct nfp_resource_entry {
+       struct nfp_resource_entry_mutex {
+               u32 owner;
+               u32 key;
+       } mutex;
+       struct nfp_resource_entry_region {
+               u8  name[NFP_RESOURCE_ENTRY_NAME_SZ];
+               u8  reserved[5];
+               u8  cpp_action;
+               u8  cpp_token;
+               u8  cpp_target;
+               u32 page_offset;
+               u32 page_size;
+       } region;
+};
+
+#define NFP_RESOURCE_TBL_SIZE          4096
+#define NFP_RESOURCE_TBL_ENTRIES       (NFP_RESOURCE_TBL_SIZE /        \
+                                        sizeof(struct nfp_resource_entry))
+
+struct nfp_resource {
+       char name[NFP_RESOURCE_ENTRY_NAME_SZ + 1];
+       u32 cpp_id;
+       u64 addr;
+       u64 size;
+       struct nfp_cpp_mutex *mutex;
+};
+
+static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res)
+{
+       char name_pad[NFP_RESOURCE_ENTRY_NAME_SZ] = {};
+       struct nfp_resource_entry entry;
+       u32 cpp_id, key;
+       int ret, i;
+
+       cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0);  /* Atomic read */
+
+       strncpy(name_pad, res->name, sizeof(name_pad));
+
+       /* Search for a matching entry */
+       key = NFP_RESOURCE_TBL_KEY;
+       if (memcmp(name_pad, NFP_RESOURCE_TBL_NAME "\0\0\0\0\0\0\0\0", 8))
+               key = crc32_posix(name_pad, sizeof(name_pad));
+
+       for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) {
+               u64 addr = NFP_RESOURCE_TBL_BASE +
+                       sizeof(struct nfp_resource_entry) * i;
+
+               ret = nfp_cpp_read(cpp, cpp_id, addr, &entry, sizeof(entry));
+               if (ret != sizeof(entry))
+                       return -EIO;
+
+               if (entry.mutex.key != key)
+                       continue;
+
+               /* Found key! */
+               res->mutex =
+                       nfp_cpp_mutex_alloc(cpp,
+                                           NFP_RESOURCE_TBL_TARGET, addr, key);
+               res->cpp_id = NFP_CPP_ID(entry.region.cpp_target,
+                                        entry.region.cpp_action,
+                                        entry.region.cpp_token);
+               res->addr = (u64)entry.region.page_offset << 8;
+               res->size = (u64)entry.region.page_size << 8;
+
+               return 0;
+       }
+
+       return -ENOENT;
+}
+
+static int
+nfp_resource_try_acquire(struct nfp_cpp *cpp, struct nfp_resource *res,
+                        struct nfp_cpp_mutex *dev_mutex)
+{
+       int err;
+
+       if (nfp_cpp_mutex_lock(dev_mutex))
+               return -EINVAL;
+
+       err = nfp_cpp_resource_find(cpp, res);
+       if (err)
+               goto err_unlock_dev;
+
+       err = nfp_cpp_mutex_trylock(res->mutex);
+       if (err)
+               goto err_res_mutex_free;
+
+       nfp_cpp_mutex_unlock(dev_mutex);
+
+       return 0;
+
+err_res_mutex_free:
+       nfp_cpp_mutex_free(res->mutex);
+err_unlock_dev:
+       nfp_cpp_mutex_unlock(dev_mutex);
+
+       return err;
+}
+
+/**
+ * nfp_resource_acquire() - Acquire a resource handle
+ * @cpp:       NFP CPP handle
+ * @name:      Name of the resource
+ *
+ * NOTE: This function locks the acquired resource
+ *
+ * Return: NFP Resource handle, or ERR_PTR()
+ */
+struct nfp_resource *
+nfp_resource_acquire(struct nfp_cpp *cpp, const char *name)
+{
+       unsigned long warn_at = jiffies + 15 * HZ;
+       struct nfp_cpp_mutex *dev_mutex;
+       struct nfp_resource *res;
+       int err;
+
+       res = kzalloc(sizeof(*res), GFP_KERNEL);
+       if (!res)
+               return ERR_PTR(-ENOMEM);
+
+       strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ);
+
+       dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET,
+                                       NFP_RESOURCE_TBL_BASE,
+                                       NFP_RESOURCE_TBL_KEY);
+       if (!dev_mutex) {
+               kfree(res);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       for (;;) {
+               err = nfp_resource_try_acquire(cpp, res, dev_mutex);
+               if (!err)
+                       break;
+               if (err != -EBUSY)
+                       goto err_free;
+
+               err = msleep_interruptible(1);
+               if (err != 0) {
+                       err = -ERESTARTSYS;
+                       goto err_free;
+               }
+
+               if (time_is_before_eq_jiffies(warn_at)) {
+                       warn_at = jiffies + 60 * HZ;
+                       nfp_warn(cpp, "Warning: waiting for NFP resource %s\n",
+                                name);
+               }
+       }
+
+       nfp_cpp_mutex_free(dev_mutex);
+
+       return res;
+
+err_free:
+       nfp_cpp_mutex_free(dev_mutex);
+       kfree(res);
+       return ERR_PTR(err);
+}
+
+/**
+ * nfp_resource_release() - Release a NFP Resource handle
+ * @res:       NFP Resource handle
+ *
+ * NOTE: This function implicitly unlocks the resource handle
+ */
+void nfp_resource_release(struct nfp_resource *res)
+{
+       nfp_cpp_mutex_unlock(res->mutex);
+       nfp_cpp_mutex_free(res->mutex);
+       kfree(res);
+}
+
+/**
+ * nfp_resource_cpp_id() - Return the cpp_id of a resource handle
+ * @res:       NFP Resource handle
+ *
+ * Return: NFP CPP ID
+ */
+u32 nfp_resource_cpp_id(struct nfp_resource *res)
+{
+       return res->cpp_id;
+}
+
+/**
+ * nfp_resource_name() - Return the name of a resource handle
+ * @res:       NFP Resource handle
+ *
+ * Return: const char pointer to the name of the resource
+ */
+const char *nfp_resource_name(struct nfp_resource *res)
+{
+       return res->name;
+}
+
+/**
+ * nfp_resource_address() - Return the address of a resource handle
+ * @res:       NFP Resource handle
+ *
+ * Return: Address of the resource
+ */
+u64 nfp_resource_address(struct nfp_resource *res)
+{
+       return res->addr;
+}
+
+/**
+ * nfp_resource_size() - Return the size in bytes of a resource handle
+ * @res:       NFP Resource handle
+ *
+ * Return: Size of the resource in bytes
+ */
+u64 nfp_resource_size(struct nfp_resource *res)
+{
+       return res->size;
+}
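
The accessors above complete a small acquire/use/release API around the
host resource table. A minimal consumer might look like the following
sketch (hypothetical caller; the resource name is illustrative and error
handling is trimmed):

	/* Hypothetical consumer of the resource API; "nfp.dummy" is a
	 * made-up resource name.
	 */
	static int example_resource_user(struct nfp_cpp *cpp)
	{
		struct nfp_resource *res;

		res = nfp_resource_acquire(cpp, "nfp.dummy");
		if (IS_ERR(res))
			return PTR_ERR(res);

		/* The handle holds the resource mutex until released. */
		pr_info("resource %s: cpp_id 0x%08x addr 0x%llx size %llu\n",
			nfp_resource_name(res), nfp_resource_cpp_id(res),
			nfp_resource_address(res), nfp_resource_size(res));

		nfp_resource_release(res);
		return 0;
	}
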
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c
new file mode 100644 (file)
index 0000000..c659b1d
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_rtsym.c
+ * Interface for accessing run-time symbol table
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ *          Jason McMullan <jason.mcmullan@netronome.com>
+ *          Espen Skoglund <espen.skoglund@netronome.com>
+ *          Francois H. Theron <francois.theron@netronome.com>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+
+#include "nfp.h"
+#include "nfp_cpp.h"
+#include "nfp_nffw.h"
+#include "nfp6000/nfp6000.h"
+
+/* These need to match the linker */
+#define SYM_TGT_LMEM           0
+#define SYM_TGT_EMU_CACHE      0x17
+
+struct nfp_rtsym_entry {
+       u8      type;
+       u8      target;
+       u8      island;
+       u8      addr_hi;
+       __le32  addr_lo;
+       __le16  name;
+       u8      menum;
+       u8      size_hi;
+       __le32  size_lo;
+};
+
+struct nfp_rtsym_cache {
+       int num;
+       char *strtab;
+       struct nfp_rtsym symtab[];
+};
+
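+/* Pack an island ID and ME number into a MEID: the island lands in bits
+ * <9:4> and the ME number plus 4 in bits <3:0>; e.g. island 32, ME 0
+ * gives (32 << 4) | 4 = 0x204. Out-of-range inputs yield -1.
+ */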
+static int nfp_meid(u8 island_id, u8 menum)
+{
+       return (island_id & 0x3F) == island_id && menum < 12 ?
+               (island_id << 4) | (menum + 4) : -1;
+}
+
+static void
+nfp_rtsym_sw_entry_init(struct nfp_rtsym_cache *cache, u32 strtab_size,
+                       struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw)
+{
+       sw->type = fw->type;
+       sw->name = cache->strtab + le16_to_cpu(fw->name) % strtab_size;
+       sw->addr = ((u64)fw->addr_hi << 32) | le32_to_cpu(fw->addr_lo);
+       sw->size = ((u64)fw->size_hi << 32) | le32_to_cpu(fw->size_lo);
+
+       switch (fw->target) {
+       case SYM_TGT_LMEM:
+               sw->target = NFP_RTSYM_TARGET_LMEM;
+               break;
+       case SYM_TGT_EMU_CACHE:
+               sw->target = NFP_RTSYM_TARGET_EMU_CACHE;
+               break;
+       default:
+               sw->target = fw->target;
+               break;
+       }
+
+       if (fw->menum != 0xff)
+               sw->domain = nfp_meid(fw->island, fw->menum);
+       else if (fw->island != 0xff)
+               sw->domain = fw->island;
+       else
+               sw->domain = -1;
+}
+
+static int nfp_rtsymtab_probe(struct nfp_cpp *cpp)
+{
+       const u32 dram = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) |
+               NFP_ISL_EMEM0;
+       u32 strtab_addr, symtab_addr, strtab_size, symtab_size;
+       struct nfp_rtsym_entry *rtsymtab;
+       struct nfp_rtsym_cache *cache;
+       const struct nfp_mip *mip;
+       int err, n, size;
+
+       mip = nfp_mip_open(cpp);
+       if (!mip)
+               return -EIO;
+
+       nfp_mip_strtab(mip, &strtab_addr, &strtab_size);
+       nfp_mip_symtab(mip, &symtab_addr, &symtab_size);
+       nfp_mip_close(mip);
+
+       if (!symtab_size || !strtab_size || symtab_size % sizeof(*rtsymtab))
+               return -ENXIO;
+
+       /* Align to 64 bits */
+       symtab_size = round_up(symtab_size, 8);
+       strtab_size = round_up(strtab_size, 8);
+
+       rtsymtab = kmalloc(symtab_size, GFP_KERNEL);
+       if (!rtsymtab)
+               return -ENOMEM;
+
+       size = sizeof(*cache);
+       size += symtab_size / sizeof(*rtsymtab) * sizeof(struct nfp_rtsym);
+       size += strtab_size + 1;
+       cache = kmalloc(size, GFP_KERNEL);
+       if (!cache) {
+               err = -ENOMEM;
+               goto err_free_rtsym_raw;
+       }
+
+       cache->num = symtab_size / sizeof(*rtsymtab);
+       cache->strtab = (void *)&cache->symtab[cache->num];
+
+       err = nfp_cpp_read(cpp, dram, symtab_addr, rtsymtab, symtab_size);
+       if (err != symtab_size) {
+               /* Don't leak a positive short-read count as an error code */
+               err = err < 0 ? err : -EIO;
+               goto err_free_cache;
+       }
+
+       err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size);
+       if (err != strtab_size) {
+               err = err < 0 ? err : -EIO;
+               goto err_free_cache;
+       }
+       cache->strtab[strtab_size] = '\0';
+
+       for (n = 0; n < cache->num; n++)
+               nfp_rtsym_sw_entry_init(cache, strtab_size,
+                                       &cache->symtab[n], &rtsymtab[n]);
+
+       kfree(rtsymtab);
+       nfp_rtsym_cache_set(cpp, cache);
+       return 0;
+
+err_free_cache:
+       kfree(cache);
+err_free_rtsym_raw:
+       kfree(rtsymtab);
+       return err;
+}
+
+static struct nfp_rtsym_cache *nfp_rtsym(struct nfp_cpp *cpp)
+{
+       struct nfp_rtsym_cache *cache;
+       int err;
+
+       cache = nfp_rtsym_cache(cpp);
+       if (cache)
+               return cache;
+
+       err = nfp_rtsymtab_probe(cpp);
+       if (err < 0)
+               return ERR_PTR(err);
+
+       return nfp_rtsym_cache(cpp);
+}
+
+/**
+ * nfp_rtsym_count() - Get the number of RTSYM descriptors
+ * @cpp:       NFP CPP handle
+ *
+ * Return: Number of RTSYM descriptors, or -ERRNO
+ */
+int nfp_rtsym_count(struct nfp_cpp *cpp)
+{
+       struct nfp_rtsym_cache *cache;
+
+       cache = nfp_rtsym(cpp);
+       if (IS_ERR(cache))
+               return PTR_ERR(cache);
+
+       return cache->num;
+}
+
+/**
+ * nfp_rtsym_get() - Get the Nth RTSYM descriptor
+ * @cpp:       NFP CPP handle
+ * @idx:       Index (0-based) of the RTSYM descriptor
+ *
+ * Return: const pointer to a struct nfp_rtsym descriptor, or NULL
+ */
+const struct nfp_rtsym *nfp_rtsym_get(struct nfp_cpp *cpp, int idx)
+{
+       struct nfp_rtsym_cache *cache;
+
+       cache = nfp_rtsym(cpp);
+       if (IS_ERR(cache))
+               return NULL;
+
+       if (idx >= cache->num)
+               return NULL;
+
+       return &cache->symtab[idx];
+}
+
+/**
+ * nfp_rtsym_lookup() - Return the RTSYM descriptor for a symbol name
+ * @cpp:       NFP CPP handle
+ * @name:      Symbol name
+ *
+ * Return: const pointer to a struct nfp_rtsym descriptor, or NULL
+ */
+const struct nfp_rtsym *nfp_rtsym_lookup(struct nfp_cpp *cpp, const char *name)
+{
+       struct nfp_rtsym_cache *cache;
+       int n;
+
+       cache = nfp_rtsym(cpp);
+       if (IS_ERR(cache))
+               return NULL;
+
+       for (n = 0; n < cache->num; n++) {
+               if (strcmp(name, cache->symtab[n].name) == 0)
+                       return &cache->symtab[n];
+       }
+
+       return NULL;
+}
+
+/**
+ * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
+ * @cpp:       NFP CPP handle
+ * @name:      Symbol name
+ * @error:     Pointer to error code (optional)
+ *
+ * Look up a symbol, map it, read it and return its value. The value of the
+ * symbol is interpreted as a simple little-endian unsigned value. The symbol
+ * can be 4 or 8 bytes in size.
+ *
+ * Return: value read; on error sets @error and returns ~0ULL.
+ */
+u64 nfp_rtsym_read_le(struct nfp_cpp *cpp, const char *name, int *error)
+{
+       const struct nfp_rtsym *sym;
+       u32 val32, id;
+       u64 val;
+       int err;
+
+       sym = nfp_rtsym_lookup(cpp, name);
+       if (!sym) {
+               /* Match the documented contract: report the error via
+                * @error and return ~0ULL, not an errno cast to u64.
+                */
+               if (error)
+                       *error = -ENOENT;
+               return ~0ULL;
+       }
+
+       id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain);
+
+       switch (sym->size) {
+       case 4:
+               err = nfp_cpp_readl(cpp, id, sym->addr, &val32);
+               val = val32;
+               break;
+       case 8:
+               err = nfp_cpp_readq(cpp, id, sym->addr, &val);
+               break;
+       default:
+               nfp_err(cpp,
+                       "rtsym '%s' unsupported or non-scalar size: %lld\n",
+                       name, sym->size);
+               err = -EINVAL;
+               break;
+       }
+
+       if (err == sym->size)
+               err = 0;
+       else if (err >= 0)
+               err = -EIO;
+
+       if (error)
+               *error = err;
+
+       if (err)
+               return ~0ULL;
+       return val;
+}
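
A typical run-time symbol consumer pairs nfp_rtsym_lookup() or
nfp_rtsym_read_le() with a firmware-exported name; a sketch (the symbol
name below is hypothetical):

	/* Hypothetical: read a firmware-exported scalar; "_fw_version"
	 * is a made-up symbol name.
	 */
	static int example_rtsym_user(struct nfp_cpp *cpp)
	{
		u64 val;
		int err;

		val = nfp_rtsym_read_le(cpp, "_fw_version", &err);
		if (err)
			return err;

		pr_info("fw version word: 0x%llx\n", val);
		return 0;
	}
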
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c
new file mode 100644 (file)
index 0000000..4ea1e58
--- /dev/null
@@ -0,0 +1,764 @@
+/*
+ * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below.  You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      1. Redistributions of source code must retain the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer.
+ *
+ *      2. Redistributions in binary form must reproduce the above
+ *         copyright notice, this list of conditions and the following
+ *         disclaimer in the documentation and/or other materials
+ *         provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/*
+ * nfp_target.c
+ * CPP Access Width Decoder
+ * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
+ *          Jason McMullan <jason.mcmullan@netronome.com>
+ *          Francois H. Theron <francois.theron@netronome.com>
+ */
+
+#include <linux/bitops.h>
+
+#include "nfp_cpp.h"
+
+#include "nfp6000/nfp6000.h"
+
+#define P32 1
+#define P64 2
+
+/* This structure ONLY includes items that can be done with a read or write of
+ * 32-bit or 64-bit words. All others are not listed.
+ */
+
+#define AT(_action, _token, _pull, _push)                              \
+       case NFP_CPP_ID(0, (_action), (_token)):                        \
+               return PUSHPULL((_pull), (_push))
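+
+/* AT() maps an (action, token) pair in a CPP ID to the pull/push data
+ * widths packed by PUSHPULL() (from nfp_cpp.h). By CPP convention, reads
+ * push data to the requester and writes pull data from it - hence e.g.
+ * AT(0, 0, 0, pp) for read and AT(1, 0, pp, 0) for write below.
+ */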
+
+static int target_rw(u32 cpp_id, int pp, int start, int len)
+{
+       switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+       AT(0, 0,  0, pp);
+       AT(1, 0, pp,  0);
+       AT(NFP_CPP_ACTION_RW, 0, pp, pp);
+       default:
+               return -EINVAL;
+       }
+}
+
+static int nfp6000_nbi_dma(u32 cpp_id)
+{
+       switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+       AT(0, 0,   0, P64);     /* ReadNbiDma */
+       AT(1, 0,   P64, 0);     /* WriteNbiDma */
+       AT(NFP_CPP_ACTION_RW, 0, P64, P64);
+       default:
+               return -EINVAL;
+       }
+}
+
+static int nfp6000_nbi_stats(u32 cpp_id)
+{
+       switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+       AT(0, 0,   0, P32);     /* ReadNbiStats */
+       AT(1, 0,   P32, 0);     /* WriteNbiStats */
+       AT(NFP_CPP_ACTION_RW, 0, P32, P32);
+       default:
+               return -EINVAL;
+       }
+}
+
+static int nfp6000_nbi_tm(u32 cpp_id)
+{
+       switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+       AT(0, 0,   0, P64);     /* ReadNbiTM */
+       AT(1, 0,   P64, 0);     /* WriteNbiTM */
+       AT(NFP_CPP_ACTION_RW, 0, P64, P64);
+       default:
+               return -EINVAL;
+       }
+}
+
+static int nfp6000_nbi_ppc(u32 cpp_id)
+{
+       switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+       AT(0, 0,   0, P64);     /* ReadNbiPreclassifier */
+       AT(1, 0,   P64, 0);     /* WriteNbiPreclassifier */
+       AT(NFP_CPP_ACTION_RW, 0, P64, P64);
+       default:
+               return -EINVAL;
+       }
+}
+
+static int nfp6000_nbi(u32 cpp_id, u64 address)
+{
+       u64 rel_addr = address & 0x3fffff;
+
+       if (rel_addr < (1 << 20))
+               return nfp6000_nbi_dma(cpp_id);
+       if (rel_addr < (2 << 20))
+               return nfp6000_nbi_stats(cpp_id);
+       if (rel_addr < (3 << 20))
+               return nfp6000_nbi_tm(cpp_id);
+       return nfp6000_nbi_ppc(cpp_id);
+}
+
+/* This structure ONLY includes items that can be done with a read or write of
+ * 32-bit or 64-bit words. All others are not listed.
+ */
+static int nfp6000_mu_common(u32 cpp_id)
+{
+       switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+       AT(NFP_CPP_ACTION_RW, 0, P64, P64);     /* read_be/write_be */
+       AT(NFP_CPP_ACTION_RW, 1, P64, P64);     /* read_le/write_le */
+       AT(NFP_CPP_ACTION_RW, 2, P64, P64);     /* read_swap_be/write_swap_be */
+       AT(NFP_CPP_ACTION_RW, 3, P64, P64);     /* read_swap_le/write_swap_le */
+       AT(0, 0,   0, P64);     /* read_be */
+       AT(0, 1,   0, P64);     /* read_le */
+       AT(0, 2,   0, P64);     /* read_swap_be */
+       AT(0, 3,   0, P64);     /* read_swap_le */
+       AT(1, 0, P64,   0);     /* write_be */
+       AT(1, 1, P64,   0);     /* write_le */
+       AT(1, 2, P64,   0);     /* write_swap_be */
+       AT(1, 3, P64,   0);     /* write_swap_le */
+       AT(3, 0,   0, P32);     /* atomic_read */
+       AT(3, 2, P32,   0);     /* mask_compare_write */
+       AT(4, 0, P32,   0);     /* atomic_write */
+       AT(4, 2,   0,   0);     /* atomic_write_imm */
+       AT(4, 3,   0, P32);     /* swap_imm */
+       AT(5, 0, P32,   0);     /* set */
+       AT(5, 3,   0, P32);     /* test_set_imm */
+       AT(6, 0, P32,   0);     /* clr */
+       AT(6, 3,   0, P32);     /* test_clr_imm */
+       AT(7, 0, P32,   0);     /* add */
+       AT(7, 3,   0, P32);     /* test_add_imm */
+       AT(8, 0, P32,   0);     /* addsat */
+       AT(8, 3,   0, P32);     /* test_addsat_imm */
+       AT(9, 0, P32,   0);     /* sub */
+       AT(9, 3,   0, P32);     /* test_sub_imm */
+       AT(10, 0, P32,   0);    /* subsat */
+       AT(10, 3,   0, P32);    /* test_subsat_imm */
+       AT(13, 0,   0, P32);    /* microq128_get */
+       AT(13, 1,   0, P32);    /* microq128_pop */
+       AT(13, 2, P32,   0);    /* microq128_put */
+       AT(15, 0, P32,   0);    /* xor */
+       AT(15, 3,   0, P32);    /* test_xor_imm */
+       AT(28, 0,   0, P32);    /* read32_be */
+       AT(28, 1,   0, P32);    /* read32_le */
+       AT(28, 2,   0, P32);    /* read32_swap_be */
+       AT(28, 3,   0, P32);    /* read32_swap_le */
+       AT(31, 0, P32,   0);    /* write32_be */
+       AT(31, 1, P32,   0);    /* write32_le */
+       AT(31, 2, P32,   0);    /* write32_swap_be */
+       AT(31, 3, P32,   0);    /* write32_swap_le */
+       default:
+               return -EINVAL;
+       }
+}
+
+static int nfp6000_mu_ctm(u32 cpp_id)
+{
+       switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+       AT(16, 1,   0, P32);    /* packet_read_packet_status */
+       AT(17, 1,   0, P32);    /* packet_credit_get */
+       AT(17, 3,   0, P64);    /* packet_add_thread */
+       AT(18, 2,   0, P64);    /* packet_free_and_return_pointer */
+       AT(18, 3,   0, P64);    /* packet_return_pointer */
+       AT(21, 0,   0, P64);    /* pe_dma_to_memory_indirect */
+       AT(21, 1,   0, P64);    /* pe_dma_to_memory_indirect_swap */
+       AT(21, 2,   0, P64);    /* pe_dma_to_memory_indirect_free */
+       AT(21, 3,   0, P64);    /* pe_dma_to_memory_indirect_free_swap */
+       default:
+               return nfp6000_mu_common(cpp_id);
+       }
+}
+
+static int nfp6000_mu_emu(u32 cpp_id)
+{
+       switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+       AT(18, 0,   0, P32);    /* read_queue */
+       AT(18, 1,   0, P32);    /* read_queue_ring */
+       AT(18, 2, P32,   0);    /* write_queue */
+       AT(18, 3, P32,   0);    /* write_queue_ring */
+       AT(20, 2, P32,   0);    /* journal */
+       AT(21, 0,   0, P32);    /* get */
+       AT(21, 1,   0, P32);    /* get_eop */
+       AT(21, 2,   0, P32);    /* get_freely */
+       AT(22, 0,   0, P32);    /* pop */
+       AT(22, 1,   0, P32);    /* pop_eop */
+       AT(22, 2,   0, P32);    /* pop_freely */
+       default:
+               return nfp6000_mu_common(cpp_id);
+       }
+}
+
+static int nfp6000_mu_imu(u32 cpp_id)
+{
+       return nfp6000_mu_common(cpp_id);
+}
+
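+/* Memory Unit accesses are routed by address: CTM, EMU and IMU each own
+ * fixed windows of the 40-bit MU address space, as encoded below.
+ */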
+static int nfp6000_mu(u32 cpp_id, u64 address)
+{
+       int pp;
+
+       if (address < 0x2000000000ULL)
+               pp = nfp6000_mu_ctm(cpp_id);
+       else if (address < 0x8000000000ULL)
+               pp = nfp6000_mu_emu(cpp_id);
+       else if (address < 0x9800000000ULL)
+               pp = nfp6000_mu_ctm(cpp_id);
+       else if (address < 0x9C00000000ULL)
+               pp = nfp6000_mu_emu(cpp_id);
+       else if (address < 0xA000000000ULL)
+               pp = nfp6000_mu_imu(cpp_id);
+       else
+               pp = nfp6000_mu_ctm(cpp_id);
+
+       return pp;
+}
+
+static int nfp6000_ila(u32 cpp_id)
+{
+       switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+       AT(0, 1,   0, P32);     /* read_check_error */
+       AT(2, 0,   0, P32);     /* read_int */
+       AT(3, 0, P32,   0);     /* write_int */
+       default:
+               return target_rw(cpp_id, P32, 48, 4);
+       }
+}
+
+static int nfp6000_pci(u32 cpp_id)
+{
+       switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+       AT(2, 0,   0, P32);
+       AT(3, 0, P32,   0);
+       default:
+               return target_rw(cpp_id, P32, 4, 4);
+       }
+}
+
+static int nfp6000_crypto(u32 cpp_id)
+{
+       switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+       AT(2, 0, P64,   0);
+       default:
+               return target_rw(cpp_id, P64, 12, 4);
+       }
+}
+
+static int nfp6000_cap_xpb(u32 cpp_id)
+{
+       switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+       AT(0, 1,   0, P32); /* RingGet */
+       AT(0, 2, P32,   0); /* Interthread Signal */
+       AT(1, 1, P32,   0); /* RingPut */
+       AT(1, 2, P32,   0); /* CTNNWr */
+       AT(2, 0,   0, P32); /* ReflectRd, signal none */
+       AT(2, 1,   0, P32); /* ReflectRd, signal self */
+       AT(2, 2,   0, P32); /* ReflectRd, signal remote */
+       AT(2, 3,   0, P32); /* ReflectRd, signal both */
+       AT(3, 0, P32,   0); /* ReflectWr, signal none */
+       AT(3, 1, P32,   0); /* ReflectWr, signal self */
+       AT(3, 2, P32,   0); /* ReflectWr, signal remote */
+       AT(3, 3, P32,   0); /* ReflectWr, signal both */
+       AT(NFP_CPP_ACTION_RW, 1, P32, P32);
+       default:
+               return target_rw(cpp_id, P32, 1, 63);
+       }
+}
+
+static int nfp6000_cls(u32 cpp_id)
+{
+       switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) {
+       AT(0, 3, P32,  0); /* xor */
+       AT(2, 0, P32,  0); /* set */
+       AT(2, 1, P32,  0); /* clr */
+       AT(4, 0, P32,  0); /* add */
+       AT(4, 1, P32,  0); /* add64 */
+       AT(6, 0, P32,  0); /* sub */
+       AT(6, 1, P32,  0); /* sub64 */
+       AT(6, 2, P32,  0); /* subsat */
+       AT(8, 2, P32,  0); /* hash_mask */
+       AT(8, 3, P32,  0); /* hash_clear */
+       AT(9, 0,  0, P32); /* ring_get */
+       AT(9, 1,  0, P32); /* ring_pop */
+       AT(9, 2,  0, P32); /* ring_get_freely */
+       AT(9, 3,  0, P32); /* ring_pop_freely */
+       AT(10, 0, P32,  0); /* ring_put */
+       AT(10, 2, P32,  0); /* ring_journal */
+       AT(14, 0,  P32, 0); /* reflect_write_sig_local */
+       AT(15, 1,  0, P32); /* reflect_read_sig_local */
+       AT(17, 2, P32,  0); /* statistic */
+       AT(24, 0,  0, P32); /* ring_read */
+       AT(24, 1, P32,  0); /* ring_write */
+       AT(25, 0,  0, P32); /* ring_workq_add_thread */
+       AT(25, 1, P32,  0); /* ring_workq_add_work */
+       default:
+               return target_rw(cpp_id, P32, 0, 64);
+       }
+}
+
+int nfp_target_pushpull(u32 cpp_id, u64 address)
+{
+       switch (NFP_CPP_ID_TARGET_of(cpp_id)) {
+       case NFP_CPP_TARGET_NBI:
+               return nfp6000_nbi(cpp_id, address);
+       case NFP_CPP_TARGET_QDR:
+               return target_rw(cpp_id, P32, 24, 4);
+       case NFP_CPP_TARGET_ILA:
+               return nfp6000_ila(cpp_id);
+       case NFP_CPP_TARGET_MU:
+               return nfp6000_mu(cpp_id, address);
+       case NFP_CPP_TARGET_PCIE:
+               return nfp6000_pci(cpp_id);
+       case NFP_CPP_TARGET_ARM:
+               if (address < 0x10000)
+                       return target_rw(cpp_id, P64, 1, 1);
+               else
+                       return target_rw(cpp_id, P32, 1, 1);
+       case NFP_CPP_TARGET_CRYPTO:
+               return nfp6000_crypto(cpp_id);
+       case NFP_CPP_TARGET_CT_XPB:
+               return nfp6000_cap_xpb(cpp_id);
+       case NFP_CPP_TARGET_CLS:
+               return nfp6000_cls(cpp_id);
+       case 0:
+               return target_rw(cpp_id, P32, 4, 4);
+       default:
+               return -EINVAL;
+       }
+}
+
+#undef AT
+#undef P32
+#undef P64
+
+/* All magic NFP-6xxx IMB 'mode' numbers here are from:
+ * Databook (1 August 2013)
+ * - System Overview and Connectivity
+ * -- Internal Connectivity
+ * --- Distributed Switch Fabric - Command Push/Pull (DSF-CPP) Bus
+ * ---- CPP addressing
+ * ----- Table 3.6. CPP Address Translation Mode Commands
+ */
+
+#define _NIC_NFP6000_MU_LOCALITY_DIRECT     2
+
+static int nfp_decode_basic(u64 addr, int *dest_island, int cpp_tgt,
+                           int mode, bool addr40, int isld1, int isld0)
+{
+       int iid_lsb, idx_lsb;
+
+       /* This function doesn't handle MU or CT_XPB */
+       if (cpp_tgt == NFP_CPP_TARGET_MU || cpp_tgt == NFP_CPP_TARGET_CT_XPB)
+               return -EINVAL;
+
+       switch (mode) {
+       case 0:
+               /* For VQDR, in this mode for 32-bit addressing
+                * it would be islands 0, 16, 32 and 48 depending on channel
+                * and upper address bits.
+                * Since those are not all valid islands, most decode
+                * cases would result in bad island IDs, but we do them
+                * anyway since this is decoding an address that is already
+                * assumed to be used as-is to get to sram.
+                */
+               iid_lsb = addr40 ? 34 : 26;
+               *dest_island = (addr >> iid_lsb) & 0x3F;
+               return 0;
+       case 1:
+               /* For VQDR 32-bit, this would decode as:
+                * Channel 0: island#0
+                * Channel 1: island#0
+                * Channel 2: island#1
+                * Channel 3: island#1
+                * That would be valid as long as both islands
+                * have VQDR. Let's allow this.
+                */
+               idx_lsb = addr40 ? 39 : 31;
+               if (addr & BIT_ULL(idx_lsb))
+                       *dest_island = isld1;
+               else
+                       *dest_island = isld0;
+
+               return 0;
+       case 2:
+               /* For VQDR 32-bit:
+                * Channel 0: (island#0 | 0)
+                * Channel 1: (island#0 | 1)
+                * Channel 2: (island#1 | 0)
+                * Channel 3: (island#1 | 1)
+                *
+                * Make sure we compare against isldN values
+                * by clearing the LSB.
+                * This is what the silicon does.
+                */
+               isld0 &= ~1;
+               isld1 &= ~1;
+
+               idx_lsb = addr40 ? 39 : 31;
+               iid_lsb = idx_lsb - 1;
+
+               if (addr & BIT_ULL(idx_lsb))
+                       *dest_island = isld1 | (int)((addr >> iid_lsb) & 1);
+               else
+                       *dest_island = isld0 | (int)((addr >> iid_lsb) & 1);
+
+               return 0;
+       case 3:
+               /* In this mode the data address starts to affect the island ID
+                * so we'd rather not allow it. In some really specific cases
+                * one could use this to send the upper half of the
+                * VQDR channel to another MU, but this is getting very
+                * specific.
+                * However, as above for mode 0, this is the decoder
+                * and the caller should validate the resulting IID.
+                * This blindly does what the silicon would do.
+                */
+               isld0 &= ~3;
+               isld1 &= ~3;
+
+               idx_lsb = addr40 ? 39 : 31;
+               iid_lsb = idx_lsb - 2;
+
+               if (addr & BIT_ULL(idx_lsb))
+                       *dest_island = isld1 | (int)((addr >> iid_lsb) & 3);
+               else
+                       *dest_island = isld0 | (int)((addr >> iid_lsb) & 3);
+
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
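+
+/* Worked example for mode 2 with addr40: idx_lsb = 39 and iid_lsb = 38,
+ * so address bits <39:38> = 0b10 decode to (isld1 | 0) and bits
+ * <39:38> = 0b01 decode to (isld0 | 1).
+ */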
+
+static int nfp_encode_basic_qdr(u64 addr, int cpp_tgt, int dest_island,
+                               int mode, bool addr40, int isld1, int isld0)
+{
+       int v, ret;
+
+       /* Full Island ID and channel bits overlap? */
+       ret = nfp_decode_basic(addr, &v, cpp_tgt, mode, addr40, isld1, isld0);
+       if (ret)
+               return ret;
+
+       /* The current address won't go where expected? */
+       if (dest_island != -1 && dest_island != v)
+               return -EINVAL;
+
+       /* If dest_island was -1, we don't care where it goes. */
+       return 0;
+}
+
+/* Try each option and take the first one that fits.
+ * It is unclear whether smarter searching, preferring
+ * 0 or non-0 island IDs, would be worthwhile.
+ */
+static int nfp_encode_basic_search(u64 *addr, int dest_island, int *isld,
+                                  int iid_lsb, int idx_lsb, int v_max)
+{
+       int i, v;
+
+       for (i = 0; i < 2; i++)
+               for (v = 0; v < v_max; v++) {
+                       if (dest_island != (isld[i] | v))
+                               continue;
+
+                       *addr &= ~GENMASK_ULL(idx_lsb, iid_lsb);
+                       *addr |= ((u64)i << idx_lsb);
+                       *addr |= ((u64)v << iid_lsb);
+                       return 0;
+               }
+
+       return -ENODEV;
+}
+
+/* For VQDR, we may not modify the Channel bits, which might overlap
+ * with the Index bit. When they do, we need to ensure that isld0 == isld1.
+ */
+static int nfp_encode_basic(u64 *addr, int dest_island, int cpp_tgt,
+                           int mode, bool addr40, int isld1, int isld0)
+{
+       int iid_lsb, idx_lsb;
+       int isld[2];
+       u64 v64;
+
+       isld[0] = isld0;
+       isld[1] = isld1;
+
+       /* This function doesn't handle MU or CT_XPB */
+       if (cpp_tgt == NFP_CPP_TARGET_MU || cpp_tgt == NFP_CPP_TARGET_CT_XPB)
+               return -EINVAL;
+
+       switch (mode) {
+       case 0:
+               if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40)
+                       /* In this specific mode we'd rather not modify
+                        * the address but we can verify if the existing
+                        * contents will point to a valid island.
+                        */
+                       return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island,
+                                                   mode, addr40, isld1, isld0);
+
+               iid_lsb = addr40 ? 34 : 26;
+               /* <39:34> or <31:26> */
+               v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb);
+               *addr &= ~v64;
+               *addr |= ((u64)dest_island << iid_lsb) & v64;
+               return 0;
+       case 1:
+               if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40)
+                       return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island,
+                                                   mode, addr40, isld1, isld0);
+
+               idx_lsb = addr40 ? 39 : 31;
+               if (dest_island == isld0) {
+                       /* Only need to clear the Index bit */
+                       *addr &= ~BIT_ULL(idx_lsb);
+                       return 0;
+               }
+
+               if (dest_island == isld1) {
+                       /* Only need to set the Index bit */
+                       *addr |= BIT_ULL(idx_lsb);
+                       return 0;
+               }
+
+               return -ENODEV;
+       case 2:
+               /* iid<0> = addr<30> = channel<0>
+                * channel<1> = addr<31> = Index
+                */
+               if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40)
+                       /* Special case where we allow channel bits to
+                        * be set beforehand and with them select an island.
+                        * So we need to confirm that it's at least plausible.
+                        */
+                       return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island,
+                                                   mode, addr40, isld1, isld0);
+
+               /* Make sure we compare against isldN values
+                * by clearing the LSB.
+                * This is what the silicon does.
+                */
+               isld[0] &= ~1;
+               isld[1] &= ~1;
+
+               idx_lsb = addr40 ? 39 : 31;
+               iid_lsb = idx_lsb - 1;
+
+               return nfp_encode_basic_search(addr, dest_island, isld,
+                                              iid_lsb, idx_lsb, 2);
+       case 3:
+               if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40)
+                       /* iid<0> = addr<29> = data
+                        * iid<1> = addr<30> = channel<0>
+                        * channel<1> = addr<31> = Index
+                        */
+                       return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island,
+                                                   mode, addr40, isld1, isld0);
+
+               isld[0] &= ~3;
+               isld[1] &= ~3;
+
+               idx_lsb = addr40 ? 39 : 31;
+               iid_lsb = idx_lsb - 2;
+
+               return nfp_encode_basic_search(addr, dest_island, isld,
+                                              iid_lsb, idx_lsb, 4);
+       default:
+               return -EINVAL;
+       }
+}
+
+static int nfp_encode_mu(u64 *addr, int dest_island, int mode,
+                        bool addr40, int isld1, int isld0)
+{
+       int iid_lsb, idx_lsb, locality_lsb;
+       int isld[2];
+       u64 v64;
+       int da;
+
+       isld[0] = isld0;
+       isld[1] = isld1;
+       locality_lsb = nfp_cppat_mu_locality_lsb(mode, addr40);
+
+       if (((*addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT)
+               da = 1;
+       else
+               da = 0;
+
+       switch (mode) {
+       case 0:
+               iid_lsb = addr40 ? 32 : 24;
+               v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb);
+               *addr &= ~v64;
+               *addr |= (((u64)dest_island) << iid_lsb) & v64;
+               return 0;
+       case 1:
+               if (da) {
+                       iid_lsb = addr40 ? 32 : 24;
+                       v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb);
+                       *addr &= ~v64;
+                       *addr |= (((u64)dest_island) << iid_lsb) & v64;
+                       return 0;
+               }
+
+               idx_lsb = addr40 ? 37 : 29;
+               if (dest_island == isld0) {
+                       *addr &= ~BIT_ULL(idx_lsb);
+                       return 0;
+               }
+
+               if (dest_island == isld1) {
+                       *addr |= BIT_ULL(idx_lsb);
+                       return 0;
+               }
+
+               return -ENODEV;
+       case 2:
+               if (da) {
+                       iid_lsb = addr40 ? 32 : 24;
+                       v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb);
+                       *addr &= ~v64;
+                       *addr |= (((u64)dest_island) << iid_lsb) & v64;
+                       return 0;
+               }
+
+               /* Make sure we compare against isldN values
+                * by clearing the LSB.
+                * This is what the silicon does.
+                */
+               isld[0] &= ~1;
+               isld[1] &= ~1;
+
+               idx_lsb = addr40 ? 37 : 29;
+               iid_lsb = idx_lsb - 1;
+
+               return nfp_encode_basic_search(addr, dest_island, isld,
+                                              iid_lsb, idx_lsb, 2);
+       case 3:
+               /* Only the EMU will use 40 bit addressing. Silently
+                * set the direct locality bit for everyone else.
+                * The SDK toolchain uses dest_island <= 0 to test
+                * for atypical address encodings to support access
+                * to local-island CTM with a 32-bit address (high locality
+                * is effectively ignored and just used for
+                * routing to island #0).
+                */
+               if (dest_island > 0 && (dest_island < 24 || dest_island > 26)) {
+                       *addr |= ((u64)_NIC_NFP6000_MU_LOCALITY_DIRECT)
+                                                       << locality_lsb;
+                       da = 1;
+               }
+
+               if (da) {
+                       iid_lsb = addr40 ? 32 : 24;
+                       v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb);
+                       *addr &= ~v64;
+                       *addr |= (((u64)dest_island) << iid_lsb) & v64;
+                       return 0;
+               }
+
+               isld[0] &= ~3;
+               isld[1] &= ~3;
+
+               idx_lsb = addr40 ? 37 : 29;
+               iid_lsb = idx_lsb - 2;
+
+               return nfp_encode_basic_search(addr, dest_island, isld,
+                                              iid_lsb, idx_lsb, 4);
+       default:
+               return -EINVAL;
+       }
+}
+
+static int nfp_cppat_addr_encode(u64 *addr, int dest_island, int cpp_tgt,
+                                int mode, bool addr40, int isld1, int isld0)
+{
+       switch (cpp_tgt) {
+       case NFP_CPP_TARGET_NBI:
+       case NFP_CPP_TARGET_QDR:
+       case NFP_CPP_TARGET_ILA:
+       case NFP_CPP_TARGET_PCIE:
+       case NFP_CPP_TARGET_ARM:
+       case NFP_CPP_TARGET_CRYPTO:
+       case NFP_CPP_TARGET_CLS:
+               return nfp_encode_basic(addr, dest_island, cpp_tgt, mode,
+                                       addr40, isld1, isld0);
+
+       case NFP_CPP_TARGET_MU:
+               return nfp_encode_mu(addr, dest_island, mode,
+                                    addr40, isld1, isld0);
+
+       case NFP_CPP_TARGET_CT_XPB:
+               if (mode != 1 || addr40)
+                       return -EINVAL;
+               *addr &= ~GENMASK_ULL(29, 24);
+               *addr |= ((u64)dest_island << 24) & GENMASK_ULL(29, 24);
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address,
+                  u32 *cpp_target_id, u64 *cpp_target_address,
+                  const u32 *imb_table)
+{
+       const int island = NFP_CPP_ID_ISLAND_of(cpp_island_id);
+       const int target = NFP_CPP_ID_TARGET_of(cpp_island_id);
+       u32 imb;
+       int err;
+
+       if (target < 0 || target >= 16)
+               return -EINVAL;
+
+       if (island == 0) {
+               /* Already translated */
+               *cpp_target_id = cpp_island_id;
+               *cpp_target_address = cpp_island_address;
+               return 0;
+       }
+
+       /* CPP + Island only allowed on systems with IMB tables */
+       if (!imb_table)
+               return -EINVAL;
+
+       imb = imb_table[target];
+
+       *cpp_target_address = cpp_island_address;
+       err = nfp_cppat_addr_encode(cpp_target_address, island, target,
+                                   ((imb >> 13) & 7), ((imb >> 12) & 1),
+                                   ((imb >> 6)  & 0x3f), ((imb >> 0)  & 0x3f));
+       if (err)
+               return err;
+
+       *cpp_target_id = NFP_CPP_ID(target,
+                                   NFP_CPP_ID_ACTION_of(cpp_island_id),
+                                   NFP_CPP_ID_TOKEN_of(cpp_island_id));
+
+       return 0;
+}
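
In short, nfp_target_pushpull() reports which access widths a CPP ID
supports, while nfp_target_cpp() rewrites an island-local ID/address pair
into a target-relative one via the IMB table. A usage sketch (the island
number is made up; a real IMB table is read from the chip at probe time):

	/* Hypothetical: translate an island-encoded MU access. */
	static int example_translate(const u32 *imb_table)
	{
		u32 id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU,
					   NFP_CPP_ACTION_RW, 0, 24);
		u64 tgt_addr;
		u32 tgt_id;
		int err;

		err = nfp_target_cpp(id, 0x1000, &tgt_id, &tgt_addr,
				     imb_table);
		if (err)
			return err;

		/* tgt_id now carries island 0; the island is encoded
		 * into the address bits selected by the IMB mode.
		 */
		return 0;
	}
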
index 119f6dca71f051883a01f4ebb6d151c0b784df62..9709c8ca0774dcf70f226d29d63b4eaf70cc578c 100644 (file)
@@ -874,16 +874,18 @@ static void w90p910_get_drvinfo(struct net_device *dev,
        strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
 }
 
-static int w90p910_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int w90p910_get_link_ksettings(struct net_device *dev,
+                                     struct ethtool_link_ksettings *cmd)
 {
        struct w90p910_ether *ether = netdev_priv(dev);
-       return mii_ethtool_gset(&ether->mii, cmd);
+       return mii_ethtool_get_link_ksettings(&ether->mii, cmd);
 }
 
-static int w90p910_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int w90p910_set_link_ksettings(struct net_device *dev,
+                                     const struct ethtool_link_ksettings *cmd)
 {
        struct w90p910_ether *ether = netdev_priv(dev);
-       return mii_ethtool_sset(&ether->mii, cmd);
+       return mii_ethtool_set_link_ksettings(&ether->mii, cmd);
 }
 
 static int w90p910_nway_reset(struct net_device *dev)
@@ -899,11 +901,11 @@ static u32 w90p910_get_link(struct net_device *dev)
 }
 
 static const struct ethtool_ops w90p910_ether_ethtool_ops = {
-       .get_settings   = w90p910_get_settings,
-       .set_settings   = w90p910_set_settings,
        .get_drvinfo    = w90p910_get_drvinfo,
        .nway_reset     = w90p910_nway_reset,
        .get_link       = w90p910_get_link,
+       .get_link_ksettings = w90p910_get_link_ksettings,
+       .set_link_ksettings = w90p910_set_link_ksettings,
 };
 
 static const struct net_device_ops w90p910_ether_netdev_ops = {
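
The hunks above show the ethtool migration this merge repeats across many
drivers: the legacy get_settings/set_settings pair (struct ethtool_cmd
with u32 link-mode masks) becomes get_link_ksettings/set_link_ksettings
(struct ethtool_link_ksettings with wide link-mode bitmaps). MII-backed
drivers convert almost mechanically via
mii_ethtool_get/set_link_ksettings(); drivers that build masks by hand
bridge the representations with the conversion helpers, roughly as in
this sketch (hypothetical driver, fixed 100/Full for brevity):

	/* Sketch of the recurring conversion for a hand-rolled driver. */
	static int example_get_link_ksettings(struct net_device *dev,
					      struct ethtool_link_ksettings *cmd)
	{
		u32 supported = SUPPORTED_100baseT_Full | SUPPORTED_MII;
		u32 advertising = ADVERTISED_100baseT_Full;

		cmd->base.speed = SPEED_100;
		cmd->base.duplex = DUPLEX_FULL;
		cmd->base.port = PORT_MII;
		cmd->base.autoneg = AUTONEG_DISABLE;

		/* Legacy u32 masks map into the new bitmaps. */
		ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
							supported);
		ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
							advertising);
		return 0;
	}
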
index 3913f07279d2baef43db0a89cc4c594bcab26dcd..92367a06491a074e7e7557f5ddc15e506ffd8ab5 100644 (file)
@@ -1733,7 +1733,7 @@ static void nv_update_stats(struct net_device *dev)
  * Called with read_lock(&dev_base_lock) held for read -
  * only synchronized against unregister_netdevice.
  */
-static struct rtnl_link_stats64*
+static void
 nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
        __acquires(&netdev_priv(dev)->hwstats_lock)
        __releases(&netdev_priv(dev)->hwstats_lock)
@@ -1793,8 +1793,6 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
 
                spin_unlock_bh(&np->hwstats_lock);
        }
-
-       return storage;
 }
 
 /*
@@ -3751,7 +3749,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
        if (rx_work < budget) {
                /* re-enable interrupts
                   (msix not enabled in napi) */
-               napi_complete(napi);
+               napi_complete_done(napi, rx_work);
 
                writel(np->irqmask, base + NvRegIrqMask);
        }
@@ -4239,14 +4237,15 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
        return 0;
 }
 
-static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int nv_get_link_ksettings(struct net_device *dev,
+                                struct ethtool_link_ksettings *cmd)
 {
        struct fe_priv *np = netdev_priv(dev);
-       u32 speed;
+       u32 speed, supported, advertising;
        int adv;
 
        spin_lock_irq(&np->lock);
-       ecmd->port = PORT_MII;
+       cmd->base.port = PORT_MII;
        if (!netif_running(dev)) {
                /* We do not track link speed / duplex setting if the
                 * interface is disabled. Force a link check */
@@ -4274,64 +4273,71 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
                        speed = -1;
                        break;
                }
-               ecmd->duplex = DUPLEX_HALF;
+               cmd->base.duplex = DUPLEX_HALF;
                if (np->duplex)
-                       ecmd->duplex = DUPLEX_FULL;
+                       cmd->base.duplex = DUPLEX_FULL;
        } else {
                speed = SPEED_UNKNOWN;
-               ecmd->duplex = DUPLEX_UNKNOWN;
+               cmd->base.duplex = DUPLEX_UNKNOWN;
        }
-       ethtool_cmd_speed_set(ecmd, speed);
-       ecmd->autoneg = np->autoneg;
+       cmd->base.speed = speed;
+       cmd->base.autoneg = np->autoneg;
 
-       ecmd->advertising = ADVERTISED_MII;
+       advertising = ADVERTISED_MII;
        if (np->autoneg) {
-               ecmd->advertising |= ADVERTISED_Autoneg;
+               advertising |= ADVERTISED_Autoneg;
                adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
                if (adv & ADVERTISE_10HALF)
-                       ecmd->advertising |= ADVERTISED_10baseT_Half;
+                       advertising |= ADVERTISED_10baseT_Half;
                if (adv & ADVERTISE_10FULL)
-                       ecmd->advertising |= ADVERTISED_10baseT_Full;
+                       advertising |= ADVERTISED_10baseT_Full;
                if (adv & ADVERTISE_100HALF)
-                       ecmd->advertising |= ADVERTISED_100baseT_Half;
+                       advertising |= ADVERTISED_100baseT_Half;
                if (adv & ADVERTISE_100FULL)
-                       ecmd->advertising |= ADVERTISED_100baseT_Full;
+                       advertising |= ADVERTISED_100baseT_Full;
                if (np->gigabit == PHY_GIGABIT) {
                        adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
                        if (adv & ADVERTISE_1000FULL)
-                               ecmd->advertising |= ADVERTISED_1000baseT_Full;
+                               advertising |= ADVERTISED_1000baseT_Full;
                }
        }
-       ecmd->supported = (SUPPORTED_Autoneg |
+       supported = (SUPPORTED_Autoneg |
                SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
                SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
                SUPPORTED_MII);
        if (np->gigabit == PHY_GIGABIT)
-               ecmd->supported |= SUPPORTED_1000baseT_Full;
+               supported |= SUPPORTED_1000baseT_Full;
+
+       cmd->base.phy_address = np->phyaddr;
 
-       ecmd->phy_address = np->phyaddr;
-       ecmd->transceiver = XCVR_EXTERNAL;
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
 
        /* ignore maxtxpkt, maxrxpkt for now */
        spin_unlock_irq(&np->lock);
        return 0;
 }
 
-static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int nv_set_link_ksettings(struct net_device *dev,
+                                const struct ethtool_link_ksettings *cmd)
 {
        struct fe_priv *np = netdev_priv(dev);
-       u32 speed = ethtool_cmd_speed(ecmd);
+       u32 speed = cmd->base.speed;
+       u32 advertising;
 
-       if (ecmd->port != PORT_MII)
-               return -EINVAL;
-       if (ecmd->transceiver != XCVR_EXTERNAL)
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
+
+       if (cmd->base.port != PORT_MII)
                return -EINVAL;
-       if (ecmd->phy_address != np->phyaddr) {
+       if (cmd->base.phy_address != np->phyaddr) {
                /* TODO: support switching between multiple phys. Should be
                 * trivial, but not enabled due to lack of test hardware. */
                return -EINVAL;
        }
-       if (ecmd->autoneg == AUTONEG_ENABLE) {
+       if (cmd->base.autoneg == AUTONEG_ENABLE) {
                u32 mask;
 
                mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
@@ -4339,16 +4345,17 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
                if (np->gigabit == PHY_GIGABIT)
                        mask |= ADVERTISED_1000baseT_Full;
 
-               if ((ecmd->advertising & mask) == 0)
+               if ((advertising & mask) == 0)
                        return -EINVAL;
 
-       } else if (ecmd->autoneg == AUTONEG_DISABLE) {
+       } else if (cmd->base.autoneg == AUTONEG_DISABLE) {
                /* Note: autonegotiation disable, speed 1000 intentionally
                 * forbidden - no one should need that. */
 
                if (speed != SPEED_10 && speed != SPEED_100)
                        return -EINVAL;
-               if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
+               if (cmd->base.duplex != DUPLEX_HALF &&
+                   cmd->base.duplex != DUPLEX_FULL)
                        return -EINVAL;
        } else {
                return -EINVAL;
@@ -4378,7 +4385,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
                netif_tx_unlock_bh(dev);
        }
 
-       if (ecmd->autoneg == AUTONEG_ENABLE) {
+       if (cmd->base.autoneg == AUTONEG_ENABLE) {
                int adv, bmcr;
 
                np->autoneg = 1;
@@ -4386,13 +4393,13 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
                /* advertise only what has been requested */
                adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
                adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
-               if (ecmd->advertising & ADVERTISED_10baseT_Half)
+               if (advertising & ADVERTISED_10baseT_Half)
                        adv |= ADVERTISE_10HALF;
-               if (ecmd->advertising & ADVERTISED_10baseT_Full)
+               if (advertising & ADVERTISED_10baseT_Full)
                        adv |= ADVERTISE_10FULL;
-               if (ecmd->advertising & ADVERTISED_100baseT_Half)
+               if (advertising & ADVERTISED_100baseT_Half)
                        adv |= ADVERTISE_100HALF;
-               if (ecmd->advertising & ADVERTISED_100baseT_Full)
+               if (advertising & ADVERTISED_100baseT_Full)
                        adv |= ADVERTISE_100FULL;
                if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisements but disable tx pause */
                        adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
@@ -4403,7 +4410,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
                if (np->gigabit == PHY_GIGABIT) {
                        adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
                        adv &= ~ADVERTISE_1000FULL;
-                       if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+                       if (advertising & ADVERTISED_1000baseT_Full)
                                adv |= ADVERTISE_1000FULL;
                        mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
                }
@@ -4430,13 +4437,13 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 
                adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
                adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
-               if (speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
+               if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_HALF)
                        adv |= ADVERTISE_10HALF;
-               if (speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
+               if (speed == SPEED_10 && cmd->base.duplex == DUPLEX_FULL)
                        adv |= ADVERTISE_10FULL;
-               if (speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
+               if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_HALF)
                        adv |= ADVERTISE_100HALF;
-               if (speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
+               if (speed == SPEED_100 && cmd->base.duplex == DUPLEX_FULL)
                        adv |= ADVERTISE_100FULL;
                np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
                if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
@@ -5243,8 +5250,6 @@ static const struct ethtool_ops ops = {
        .get_link = ethtool_op_get_link,
        .get_wol = nv_get_wol,
        .set_wol = nv_set_wol,
-       .get_settings = nv_get_settings,
-       .set_settings = nv_set_settings,
        .get_regs_len = nv_get_regs_len,
        .get_regs = nv_get_regs,
        .nway_reset = nv_nway_reset,
@@ -5257,6 +5262,8 @@ static const struct ethtool_ops ops = {
        .get_sset_count = nv_get_sset_count,
        .self_test = nv_self_test,
        .get_ts_info = ethtool_op_get_ts_info,
+       .get_link_ksettings = nv_get_link_ksettings,
+       .set_link_ksettings = nv_set_link_ksettings,
 };
 
 /* The mgmt unit and driver use a semaphore to access the phy during init */
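
The napi_complete() to napi_complete_done() conversions in this driver
(and in several drivers below) report how much work the poll handler
actually did, so the core can make better interrupt-moderation and
busy-polling decisions. The resulting poll-handler shape, as a generic
sketch:

	/* Generic NAPI poll shape after the conversion. */
	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = 0;

		/* ... process up to 'budget' packets, counting work_done ... */

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			/* re-enable the device's interrupts here */
		}

		return work_done;
	}
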
index dd6b0d0f7fa504e33cbb873546137bf1d89e1979..9c7ffd649e9a460a4519a0cecd7a3e9367154117 100644 (file)
@@ -999,7 +999,7 @@ static int lpc_eth_poll(struct napi_struct *napi, int budget)
        rx_done = __lpc_handle_recv(ndev, budget);
 
        if (rx_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rx_done);
                lpc_eth_enable_int(pldat->net_base);
        }
 
index b19be7c6c1f41efe0d7210493ec572d2812066e0..21093276d2b7519681435739f19ed475dc0814f7 100644 (file)
@@ -73,62 +73,80 @@ static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = {
 #define PCH_GBE_MAC_REGS_LEN    (sizeof(struct pch_gbe_regs) / 4)
 #define PCH_GBE_REGS_LEN        (PCH_GBE_MAC_REGS_LEN + PCH_GBE_PHY_REGS_LEN)
 /**
- * pch_gbe_get_settings - Get device-specific settings
+ * pch_gbe_get_link_ksettings - Get device-specific settings
  * @netdev: Network interface device structure
  * @ecmd:   Ethtool command
  * Returns:
  *     0:                      Successful.
  *     Negative value:         Failed.
  */
-static int pch_gbe_get_settings(struct net_device *netdev,
-                                struct ethtool_cmd *ecmd)
+static int pch_gbe_get_link_ksettings(struct net_device *netdev,
+                                     struct ethtool_link_ksettings *ecmd)
 {
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+       u32 supported, advertising;
        int ret;
 
-       ret = mii_ethtool_gset(&adapter->mii, ecmd);
-       ecmd->supported &= ~(SUPPORTED_TP | SUPPORTED_1000baseT_Half);
-       ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half);
+       ret = mii_ethtool_get_link_ksettings(&adapter->mii, ecmd);
+
+       ethtool_convert_link_mode_to_legacy_u32(&supported,
+                                               ecmd->link_modes.supported);
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               ecmd->link_modes.advertising);
+
+       supported &= ~(SUPPORTED_TP | SUPPORTED_1000baseT_Half);
+       advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half);
+
+       ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
+                                               advertising);
 
        if (!netif_carrier_ok(adapter->netdev))
-               ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+               ecmd->base.speed = SPEED_UNKNOWN;
        return ret;
 }
 
 /**
- * pch_gbe_set_settings - Set device-specific settings
+ * pch_gbe_set_link_ksettings - Set device-specific settings
  * @netdev: Network interface device structure
  * @ecmd:   Ethtool command
  * Returns:
  *     0:                      Successful.
  *     Negative value:         Failed.
  */
-static int pch_gbe_set_settings(struct net_device *netdev,
-                                struct ethtool_cmd *ecmd)
+static int pch_gbe_set_link_ksettings(struct net_device *netdev,
+                                     const struct ethtool_link_ksettings *ecmd)
 {
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
        struct pch_gbe_hw *hw = &adapter->hw;
-       u32 speed = ethtool_cmd_speed(ecmd);
+       struct ethtool_link_ksettings copy_ecmd;
+       u32 speed = ecmd->base.speed;
+       u32 advertising;
        int ret;
 
        pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET);
 
+       memcpy(&copy_ecmd, ecmd, sizeof(*ecmd));
+
        /* when set_link_ksettings() is called with settings previously
         * filled by get_link_ksettings() on a down link, speed is -1: */
        if (speed == UINT_MAX) {
                speed = SPEED_1000;
-               ethtool_cmd_speed_set(ecmd, speed);
-               ecmd->duplex = DUPLEX_FULL;
+               copy_ecmd.base.speed = speed;
+               copy_ecmd.base.duplex = DUPLEX_FULL;
        }
-       ret = mii_ethtool_sset(&adapter->mii, ecmd);
+       ret = mii_ethtool_set_link_ksettings(&adapter->mii, &copy_ecmd);
        if (ret) {
-               netdev_err(netdev, "Error: mii_ethtool_sset\n");
+               netdev_err(netdev, "Error: mii_ethtool_set_link_ksettings\n");
                return ret;
        }
        hw->mac.link_speed = speed;
-       hw->mac.link_duplex = ecmd->duplex;
-       hw->phy.autoneg_advertised = ecmd->advertising;
-       hw->mac.autoneg = ecmd->autoneg;
+       hw->mac.link_duplex = copy_ecmd.base.duplex;
+       ethtool_convert_link_mode_to_legacy_u32(
+               &advertising, copy_ecmd.link_modes.advertising);
+       hw->phy.autoneg_advertised = advertising;
+       hw->mac.autoneg = copy_ecmd.base.autoneg;
 
        /* reset the link */
        if (netif_running(adapter->netdev)) {
@@ -487,8 +505,6 @@ static int pch_gbe_get_sset_count(struct net_device *netdev, int sset)
 }
 
 static const struct ethtool_ops pch_gbe_ethtool_ops = {
-       .get_settings = pch_gbe_get_settings,
-       .set_settings = pch_gbe_set_settings,
        .get_drvinfo = pch_gbe_get_drvinfo,
        .get_regs_len = pch_gbe_get_regs_len,
        .get_regs = pch_gbe_get_regs,
@@ -503,6 +519,8 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
        .get_strings = pch_gbe_get_strings,
        .get_ethtool_stats = pch_gbe_get_ethtool_stats,
        .get_sset_count = pch_gbe_get_sset_count,
+       .get_link_ksettings = pch_gbe_get_link_ksettings,
+       .set_link_ksettings = pch_gbe_set_link_ksettings,
 };
 
 void pch_gbe_set_ethtool_ops(struct net_device *netdev)
index d461f419948ea4179c161786047050a44ba457e4..5ae9681a2da7394748b0a1bf383fbbe393b6ea3d 100644 (file)
@@ -2148,17 +2148,6 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        return NETDEV_TX_OK;
 }
 
-/**
- * pch_gbe_get_stats - Get System Network Statistics
- * @netdev:  Network interface device structure
- * Returns:  The current stats
- */
-static struct net_device_stats *pch_gbe_get_stats(struct net_device *netdev)
-{
-       /* only return the current stats */
-       return &netdev->stats;
-}
-
 /**
  * pch_gbe_set_multi - Multicast and Promiscuous mode set
  * @netdev:   Network interface device structure
@@ -2385,7 +2374,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
                poll_end_flag = true;
 
        if (poll_end_flag) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                pch_gbe_irq_enable(adapter);
        }
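napi_complete() tells the core nothing about how much of the budget was consumed; napi_complete_done() passes the actual work_done so features such as gro_flush_timeout and busy polling can make informed decisions. The post-conversion poll routine has this shape (hedged sketch; the foo_* helpers are hypothetical stand-ins):

#include <linux/netdevice.h>

static int foo_clean_rx(struct napi_struct *napi, int budget)
{
	return 0;	/* stand-in for RX ring processing */
}

static void foo_irq_enable(struct napi_struct *napi)
{
	/* stand-in for re-enabling device interrupts */
}

static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = foo_clean_rx(napi, budget);

	if (work_done < budget) {
		/* report real work done, not just "finished" */
		napi_complete_done(napi, work_done);
		foo_irq_enable(napi);
	}
	return work_done;
}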
 
@@ -2420,7 +2409,6 @@ static const struct net_device_ops pch_gbe_netdev_ops = {
        .ndo_open = pch_gbe_open,
        .ndo_stop = pch_gbe_stop,
        .ndo_start_xmit = pch_gbe_xmit_frame,
-       .ndo_get_stats = pch_gbe_get_stats,
        .ndo_set_mac_address = pch_gbe_set_mac,
        .ndo_tx_timeout = pch_gbe_tx_timeout,
        .ndo_change_mtu = pch_gbe_change_mtu,
index baff744b560e4115755f6a88c8a563e540d6cff7..8b026dbf0d8dccbbeee916c4cc94e8d26e43c944 100644 (file)
@@ -1811,21 +1811,23 @@ static void hamachi_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
        strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
 }
 
-static int hamachi_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int hamachi_get_link_ksettings(struct net_device *dev,
+                                     struct ethtool_link_ksettings *cmd)
 {
        struct hamachi_private *np = netdev_priv(dev);
        spin_lock_irq(&np->lock);
-       mii_ethtool_gset(&np->mii_if, ecmd);
+       mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
        spin_unlock_irq(&np->lock);
        return 0;
 }
 
-static int hamachi_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int hamachi_set_link_ksettings(struct net_device *dev,
+                                     const struct ethtool_link_ksettings *cmd)
 {
        struct hamachi_private *np = netdev_priv(dev);
        int res;
        spin_lock_irq(&np->lock);
-       res = mii_ethtool_sset(&np->mii_if, ecmd);
+       res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
        spin_unlock_irq(&np->lock);
        return res;
 }
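For NICs whose link handling already goes through the shared MII library, the conversion is mechanical: mii_ethtool_gset()/mii_ethtool_sset() have one-for-one replacements in mii_ethtool_get_link_ksettings()/mii_ethtool_set_link_ksettings(), and the driver keeps only its own locking around them. Note too that the ethtool core prefers the ksettings callbacks when both generations are wired up, so deleting .get_settings/.set_settings outright, as this hunk does, leaves no dead path. A condensed sketch (hypothetical foo driver, layout assumed):

#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct foo_private {			/* layout hypothetical */
	struct mii_if_info mii_if;
	spinlock_t lock;
};

static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct foo_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);
	return 0;
}

static int foo_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct foo_private *np = netdev_priv(dev);
	int res;

	spin_lock_irq(&np->lock);
	res = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);
	return res;
}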
@@ -1845,10 +1847,10 @@ static u32 hamachi_get_link(struct net_device *dev)
 static const struct ethtool_ops ethtool_ops = {
        .begin = check_if_running,
        .get_drvinfo = hamachi_get_drvinfo,
-       .get_settings = hamachi_get_settings,
-       .set_settings = hamachi_set_settings,
        .nway_reset = hamachi_nway_reset,
        .get_link = hamachi_get_link,
+       .get_link_ksettings = hamachi_get_link_ksettings,
+       .set_link_ksettings = hamachi_set_link_ksettings,
 };
 
 static const struct ethtool_ops ethtool_ops_no_mii = {
index badfa1d562a41e9860ccd425addf936f78b2a04b..49591d9c2e1b9f4bde7217e750396d99f032f302 100644 (file)
@@ -1575,7 +1575,7 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget)
        pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
        if (pkts < budget) {
                /* all done, no more packets present */
-               napi_complete(napi);
+               napi_complete_done(napi, pkts);
 
                pasemi_mac_restart_rx_intr(mac);
                pasemi_mac_restart_tx_intr(mac);
index 3cfd105034463f5d503b8748a6dec5f720ab902b..aaa1e8517348178cccfe7ddf8d9b8247545bf8c6 100644 (file)
@@ -104,6 +104,7 @@ config QED_SRIOV
 config QEDE
        tristate "QLogic QED 25/40/100Gb Ethernet NIC"
        depends on QED
+       imply PTP_1588_CLOCK
        ---help---
          This enables the support for ...
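"imply PTP_1588_CLOCK" is the weak-dependency form in Kconfig for exactly this situation: it defaults PTP_1588_CLOCK on when QEDE is enabled but still lets the user turn it off, and, unlike "depends on", it permits QEDE=y with PTP_1588_CLOCK=m. That combination stays linkable because the PTP header stubs the API out whenever the clock core is unreachable, so driver code (the new qed_ptp.o added below) can call it unconditionally. A hedged sketch of the usual pattern (hypothetical foo driver):

#include <linux/err.h>
#include <linux/ptp_clock_kernel.h>

static struct ptp_clock *foo_ptp_register(struct device *parent,
					  struct ptp_clock_info *info)
{
	struct ptp_clock *clock;

	/* with CONFIG_PTP_1588_CLOCK unreachable, ptp_clock_register()
	 * is a static-inline stub returning NULL; degrade gracefully */
	clock = ptp_clock_register(info, parent);
	if (IS_ERR_OR_NULL(clock))
		return NULL;

	return clock;
}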
 
index f9034467736c3bb578b97661eebdee37d71054b7..3157f97dd782b8357cc79cc18f809b07deda1cb2 100644 (file)
@@ -96,69 +96,70 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
 }
 
 static int
-netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+netxen_nic_get_link_ksettings(struct net_device *dev,
+                             struct ethtool_link_ksettings *cmd)
 {
        struct netxen_adapter *adapter = netdev_priv(dev);
        int check_sfp_module = 0;
+       u32 supported, advertising;
 
        /* read which mode */
        if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
-               ecmd->supported = (SUPPORTED_10baseT_Half |
+               supported = (SUPPORTED_10baseT_Half |
                                   SUPPORTED_10baseT_Full |
                                   SUPPORTED_100baseT_Half |
                                   SUPPORTED_100baseT_Full |
                                   SUPPORTED_1000baseT_Half |
                                   SUPPORTED_1000baseT_Full);
 
-               ecmd->advertising = (ADVERTISED_100baseT_Half |
+               advertising = (ADVERTISED_100baseT_Half |
                                     ADVERTISED_100baseT_Full |
                                     ADVERTISED_1000baseT_Half |
                                     ADVERTISED_1000baseT_Full);
 
-               ecmd->port = PORT_TP;
+               cmd->base.port = PORT_TP;
 
-               ethtool_cmd_speed_set(ecmd, adapter->link_speed);
-               ecmd->duplex = adapter->link_duplex;
-               ecmd->autoneg = adapter->link_autoneg;
+               cmd->base.speed = adapter->link_speed;
+               cmd->base.duplex = adapter->link_duplex;
+               cmd->base.autoneg = adapter->link_autoneg;
 
        } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
                u32 val;
 
                val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR);
                if (val == NETXEN_PORT_MODE_802_3_AP) {
-                       ecmd->supported = SUPPORTED_1000baseT_Full;
-                       ecmd->advertising = ADVERTISED_1000baseT_Full;
+                       supported = SUPPORTED_1000baseT_Full;
+                       advertising = ADVERTISED_1000baseT_Full;
                } else {
-                       ecmd->supported = SUPPORTED_10000baseT_Full;
-                       ecmd->advertising = ADVERTISED_10000baseT_Full;
+                       supported = SUPPORTED_10000baseT_Full;
+                       advertising = ADVERTISED_10000baseT_Full;
                }
 
                if (netif_running(dev) && adapter->has_link_events) {
-                       ethtool_cmd_speed_set(ecmd, adapter->link_speed);
-                       ecmd->autoneg = adapter->link_autoneg;
-                       ecmd->duplex = adapter->link_duplex;
+                       cmd->base.speed = adapter->link_speed;
+                       cmd->base.autoneg = adapter->link_autoneg;
+                       cmd->base.duplex = adapter->link_duplex;
                        goto skip;
                }
 
-               ecmd->port = PORT_TP;
+               cmd->base.port = PORT_TP;
 
                if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
                        u16 pcifn = adapter->ahw.pci_func;
 
                        val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn));
-                       ethtool_cmd_speed_set(ecmd, P3_LINK_SPEED_MHZ *
-                                             P3_LINK_SPEED_VAL(pcifn, val));
+                       cmd->base.speed = P3_LINK_SPEED_MHZ *
+                               P3_LINK_SPEED_VAL(pcifn, val);
                } else
-                       ethtool_cmd_speed_set(ecmd, SPEED_10000);
+                       cmd->base.speed = SPEED_10000;
 
-               ecmd->duplex = DUPLEX_FULL;
-               ecmd->autoneg = AUTONEG_DISABLE;
+               cmd->base.duplex = DUPLEX_FULL;
+               cmd->base.autoneg = AUTONEG_DISABLE;
        } else
                return -EIO;
 
 skip:
-       ecmd->phy_address = adapter->physical_port;
-       ecmd->transceiver = XCVR_EXTERNAL;
+       cmd->base.phy_address = adapter->physical_port;
 
        switch (adapter->ahw.board_type) {
        case NETXEN_BRDTYPE_P2_SB35_4G:
@@ -167,16 +168,16 @@ skip:
        case NETXEN_BRDTYPE_P3_4_GB:
        case NETXEN_BRDTYPE_P3_4_GB_MM:
 
-               ecmd->supported |= SUPPORTED_Autoneg;
-               ecmd->advertising |= ADVERTISED_Autoneg;
+               supported |= SUPPORTED_Autoneg;
+               advertising |= ADVERTISED_Autoneg;
        case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
        case NETXEN_BRDTYPE_P3_10G_CX4:
        case NETXEN_BRDTYPE_P3_10G_CX4_LP:
        case NETXEN_BRDTYPE_P3_10000_BASE_T:
-               ecmd->supported |= SUPPORTED_TP;
-               ecmd->advertising |= ADVERTISED_TP;
-               ecmd->port = PORT_TP;
-               ecmd->autoneg = (adapter->ahw.board_type ==
+               supported |= SUPPORTED_TP;
+               advertising |= ADVERTISED_TP;
+               cmd->base.port = PORT_TP;
+               cmd->base.autoneg = (adapter->ahw.board_type ==
                                 NETXEN_BRDTYPE_P2_SB31_10G_CX4) ?
                    (AUTONEG_DISABLE) : (adapter->link_autoneg);
                break;
@@ -185,39 +186,39 @@ skip:
        case NETXEN_BRDTYPE_P3_IMEZ:
        case NETXEN_BRDTYPE_P3_XG_LOM:
        case NETXEN_BRDTYPE_P3_HMEZ:
-               ecmd->supported |= SUPPORTED_MII;
-               ecmd->advertising |= ADVERTISED_MII;
-               ecmd->port = PORT_MII;
-               ecmd->autoneg = AUTONEG_DISABLE;
+               supported |= SUPPORTED_MII;
+               advertising |= ADVERTISED_MII;
+               cmd->base.port = PORT_MII;
+               cmd->base.autoneg = AUTONEG_DISABLE;
                break;
        case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
        case NETXEN_BRDTYPE_P3_10G_SFP_CT:
        case NETXEN_BRDTYPE_P3_10G_SFP_QT:
-               ecmd->advertising |= ADVERTISED_TP;
-               ecmd->supported |= SUPPORTED_TP;
+               advertising |= ADVERTISED_TP;
+               supported |= SUPPORTED_TP;
                check_sfp_module = netif_running(dev) &&
                        adapter->has_link_events;
        case NETXEN_BRDTYPE_P2_SB31_10G:
        case NETXEN_BRDTYPE_P3_10G_XFP:
-               ecmd->supported |= SUPPORTED_FIBRE;
-               ecmd->advertising |= ADVERTISED_FIBRE;
-               ecmd->port = PORT_FIBRE;
-               ecmd->autoneg = AUTONEG_DISABLE;
+               supported |= SUPPORTED_FIBRE;
+               advertising |= ADVERTISED_FIBRE;
+               cmd->base.port = PORT_FIBRE;
+               cmd->base.autoneg = AUTONEG_DISABLE;
                break;
        case NETXEN_BRDTYPE_P3_10G_TP:
                if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
-                       ecmd->autoneg = AUTONEG_DISABLE;
-                       ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
-                       ecmd->advertising |=
+                       cmd->base.autoneg = AUTONEG_DISABLE;
+                       supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+                       advertising |=
                                (ADVERTISED_FIBRE | ADVERTISED_TP);
-                       ecmd->port = PORT_FIBRE;
+                       cmd->base.port = PORT_FIBRE;
                        check_sfp_module = netif_running(dev) &&
                                adapter->has_link_events;
                } else {
-                       ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
-                       ecmd->advertising |=
+                       supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+                       advertising |=
                                (ADVERTISED_TP | ADVERTISED_Autoneg);
-                       ecmd->port = PORT_TP;
+                       cmd->base.port = PORT_TP;
                }
                break;
        default:
@@ -232,31 +233,37 @@ skip:
                case LINKEVENT_MODULE_OPTICAL_SRLR:
                case LINKEVENT_MODULE_OPTICAL_LRM:
                case LINKEVENT_MODULE_OPTICAL_SFP_1G:
-                       ecmd->port = PORT_FIBRE;
+                       cmd->base.port = PORT_FIBRE;
                        break;
                case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
                case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
                case LINKEVENT_MODULE_TWINAX:
-                       ecmd->port = PORT_TP;
+                       cmd->base.port = PORT_TP;
                        break;
                default:
-                       ecmd->port = -1;
+                       cmd->base.port = -1;
                }
        }
 
        if (!netif_running(dev) || !adapter->ahw.linkup) {
-               ecmd->duplex = DUPLEX_UNKNOWN;
-               ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+               cmd->base.duplex = DUPLEX_UNKNOWN;
+               cmd->base.speed = SPEED_UNKNOWN;
        }
 
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
+
        return 0;
 }
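The get side is the mirror image of the set side: drivers keep computing legacy SUPPORTED_*/ADVERTISED_* masks in u32 locals, then translate them into the new link-mode bitmaps once at the end, exactly as the two ethtool_convert_legacy_u32_to_link_mode() calls above do (the old ecmd->transceiver assignment is simply dropped in the conversion). A reduced sketch with made-up fibre defaults:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	/* legacy u32 masks, computed as the pre-ksettings code did */
	u32 supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE;
	u32 advertising = ADVERTISED_1000baseT_Full | ADVERTISED_FIBRE;

	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.autoneg = AUTONEG_DISABLE;

	/* translate into the bitmap representation in one step each */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	return 0;
}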
 
 static int
-netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+netxen_nic_set_link_ksettings(struct net_device *dev,
+                             const struct ethtool_link_ksettings *cmd)
 {
        struct netxen_adapter *adapter = netdev_priv(dev);
-       u32 speed = ethtool_cmd_speed(ecmd);
+       u32 speed = cmd->base.speed;
        int ret;
 
        if (adapter->ahw.port_type != NETXEN_NIC_GBE)
@@ -265,16 +272,16 @@ netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
        if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG))
                return -EOPNOTSUPP;
 
-       ret = nx_fw_cmd_set_gbe_port(adapter, speed, ecmd->duplex,
-                                    ecmd->autoneg);
+       ret = nx_fw_cmd_set_gbe_port(adapter, speed, cmd->base.duplex,
+                                    cmd->base.autoneg);
        if (ret == NX_RCODE_NOT_SUPPORTED)
                return -EOPNOTSUPP;
        else if (ret)
                return -EIO;
 
        adapter->link_speed = speed;
-       adapter->link_duplex = ecmd->duplex;
-       adapter->link_autoneg = ecmd->autoneg;
+       adapter->link_duplex = cmd->base.duplex;
+       adapter->link_autoneg = cmd->base.autoneg;
 
        if (!netif_running(dev))
                return 0;
@@ -931,8 +938,6 @@ netxen_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
 }
 
 const struct ethtool_ops netxen_nic_ethtool_ops = {
-       .get_settings = netxen_nic_get_settings,
-       .set_settings = netxen_nic_set_settings,
        .get_drvinfo = netxen_nic_get_drvinfo,
        .get_regs_len = netxen_nic_get_regs_len,
        .get_regs = netxen_nic_get_regs,
@@ -954,4 +959,6 @@ const struct ethtool_ops netxen_nic_ethtool_ops = {
        .get_dump_flag = netxen_get_dump_flag,
        .get_dump_data = netxen_get_dump_data,
        .set_dump = netxen_set_dump,
+       .get_link_ksettings = netxen_nic_get_link_ksettings,
+       .set_link_ksettings = netxen_nic_set_link_ksettings,
 };
index 561fb94c72670ecf51db5eb51a963de4d8030eb6..3b5d7cfa2321bbab436beb0affdc89376429892b 100644 (file)
@@ -90,8 +90,8 @@ static irqreturn_t netxen_msix_intr(int irq, void *data);
 
 static void netxen_free_ip_list(struct netxen_adapter *, bool);
 static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
-static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
-                                                     struct rtnl_link_stats64 *stats);
+static void netxen_nic_get_stats(struct net_device *dev,
+                                struct rtnl_link_stats64 *stats);
 static int netxen_nic_set_mac(struct net_device *netdev, void *p);
 
 /*  PCI Device ID Table  */
@@ -2302,8 +2302,8 @@ request_reset:
        clear_bit(__NX_RESETTING, &adapter->state);
 }
 
-static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev,
-                                                     struct rtnl_link_stats64 *stats)
+static void netxen_nic_get_stats(struct net_device *netdev,
+                                struct rtnl_link_stats64 *stats)
 {
        struct netxen_adapter *adapter = netdev_priv(netdev);
 
@@ -2313,8 +2313,6 @@ static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *netdev,
        stats->tx_bytes = adapter->stats.txbytes;
        stats->rx_dropped = adapter->stats.rxdropped;
        stats->tx_dropped = adapter->stats.txdropped;
-
-       return stats;
 }
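The signature change follows the recent switch of .ndo_get_stats64 to a void return: dev_get_stats() zeroes the rtnl_link_stats64 buffer, passes it in, and always consumes that same buffer afterwards, so returning it was redundant. The post-conversion shape, as a sketch (foo_adapter and its counters are hypothetical):

#include <linux/netdevice.h>

struct foo_adapter {
	u64 hw_rx_packets;
	u64 hw_tx_packets;
};

static void foo_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	struct foo_adapter *adapter = netdev_priv(dev);

	/* stats arrives pre-zeroed from dev_get_stats(); only fill in
	 * what the device actually tracks */
	stats->rx_packets = adapter->hw_rx_packets;
	stats->tx_packets = adapter->hw_tx_packets;
}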
 
 static irqreturn_t netxen_intr(int irq, void *data)
@@ -2398,7 +2396,7 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
                work_done = budget;
 
        if (work_done < budget) {
-               napi_complete(&sds_ring->napi);
+               napi_complete_done(&sds_ring->napi, work_done);
                if (test_bit(__NX_DEV_UP, &adapter->state))
                        netxen_nic_enable_int(sds_ring);
        }
@@ -3266,7 +3264,7 @@ netxen_list_config_ip(struct netxen_adapter *adapter,
                cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC);
                if (cur == NULL)
                        goto out;
-               if (dev->priv_flags & IFF_802_1Q_VLAN)
+               if (is_vlan_dev(dev))
                        dev = vlan_dev_real_dev(dev);
                cur->master = !!netif_is_bond_master(dev);
                cur->ip_addr = ifa->ifa_address;
@@ -3376,7 +3374,7 @@ static void netxen_config_master(struct net_device *dev, unsigned long event)
            !netif_is_bond_slave(dev)) {
                netxen_config_indev_addr(adapter, master, event);
                for_each_netdev_rcu(&init_net, slave)
-                       if (slave->priv_flags & IFF_802_1Q_VLAN &&
+                       if (is_vlan_dev(slave) &&
                            vlan_dev_real_dev(slave) == master)
                                netxen_config_indev_addr(adapter, slave, event);
        }
@@ -3402,7 +3400,7 @@ recheck:
        if (dev == NULL)
                goto done;
 
-       if (dev->priv_flags & IFF_802_1Q_VLAN) {
+       if (is_vlan_dev(dev)) {
                dev = vlan_dev_real_dev(dev);
                goto recheck;
        }
@@ -3447,7 +3445,7 @@ recheck:
        if (dev == NULL)
                goto done;
 
-       if (dev->priv_flags & IFF_802_1Q_VLAN) {
+       if (is_vlan_dev(dev)) {
                dev = vlan_dev_real_dev(dev);
                goto recheck;
        }
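The open-coded IFF_802_1Q_VLAN tests give way to is_vlan_dev() from <linux/if_vlan.h>, which wraps the same priv_flags check behind a readable name. The recheck loops above are instances of the usual "walk down to the real device" idiom, which a short sketch makes explicit:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

static struct net_device *foo_lowest_real_dev(struct net_device *dev)
{
	/* peel stacked 802.1Q devices until the physical one remains */
	while (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);
	return dev;
}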
index 729e43768e99d48ca57561df3233bb9d3a33b5df..1a7300f72cab43d11ae73008d033d17564f1b25a 100644 (file)
@@ -2,7 +2,7 @@ obj-$(CONFIG_QED) := qed.o
 
 qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
         qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
-        qed_selftest.o qed_dcbx.o qed_debug.o
+        qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o
 qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
 qed-$(CONFIG_QED_LL2) += qed_ll2.o
 qed-$(CONFIG_QED_RDMA) += qed_roce.o
index 44c184ebe3b0da5f9438fc5568c1bdd105e17f81..6557f94b92ed9f33189528b56a168a997ffb7c19 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_H
@@ -27,7 +51,7 @@
 #include "qed_hsi.h"
 
 extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.10.9.20"
+#define DRV_MODULE_VERSION "8.10.10.20"
 
 #define MAX_HWFNS_PER_DEVICE    (4)
 #define NAME_SIZE 16
@@ -432,6 +456,8 @@ struct qed_hwfn {
        u8 dcbx_no_edpm;
        u8 db_bar_no_edpm;
 
+       /* p_ptp_ptt is valid for leading HWFN only */
+       struct qed_ptt *p_ptp_ptt;
        struct qed_simd_fp_handler      simd_proto_handler[64];
 
 #ifdef CONFIG_QED_SRIOV
index 0c42c240b5cfdff66dc8ef021b2401812a0fec17..dcb8fc185df748d695b3a4617475dcb856bd16c5 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
index 2b8bdaa7780073286002a86bf9b1f590421be55a..98f4973cac9dc18f42563fde8af7416bd2ca48e0 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_CXT_H
index a4789a93b69267cd749b92962355083cd007afa4..dc0d2c9ad6b5cecf191ecb45942b1bc9acf84a57 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
index 9ba681643d058a3ab8510f933d388b9190796eb6..d70300fda020db00e7d34e7ff2e0acc0ee651ce1 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_DCBX_H
index 3b2250021c5f29813eb3e62257f0daa1d33ab393..33e720143b8d752c628568299c08636724aae477 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
index b6711c106597bc53654457c8c45c68e7085f0016..5d37ba24da40a75c1af5b0d0a3f9eeaa0b960bcc 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_DEV_API_H
index 785ab03683ebc4e89d2d3fc349aa2bd8112c6b3e..5d31189288e892bab55bf2a29e539a4bf678d6e1 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_HSI_H
index 6e4fae9b1430379d35313ac0a0d8d07a9f2a962a..1f606516b6aadb9290f8693bc31ba098fbad5203 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
index d01557092868ec34018b80a27f14377d586135bd..9277264d2e6552a92a9ca88853501b80763a5dbb 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_HW_H
index 23e455f22adc54747953b574e91808c6b6924aa5..d891a68526950609f9efbe75ecacfb40ce49b97a 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
index d567ba94c8d1703f46f590b49de89cd392c12403..243b64e0d4dc3ed36f92e570022aa68c7af58901 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
index 1e832049983d4263705223c0a94c2a407720af37..555dd086796df0660b21f29408e0a923ca72d9c5 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_INIT_OPS_H
index c68dbf7092b14cc6265a09e749b0e5bd1bec8e59..84310b60849b4881557cfd62761549a1a182f2d9 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
index 0948be64dc782f926f5a2c8317c22552e5bf87d3..0ae0bb4593effc45a12895a43c49c8a1fdcfb636 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_INT_H
index 17a70122df05c93bec01f13dc48e3924b4868acf..3a44d6b395fac9500841f5ac1bc73b4c11d4d188 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
index 67c25f3db4d5a841ace5a391c7612dad361d48e9..20c187f4ed0b8aa8ff2125633f81aa19a1ca546b 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_ISCSI_H
index 6a3727c4c0c61a9a17aa1d5851c263f7a5eb2296..df932be5a4e5aa1e47b16530d51a5dcf78f15706 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
@@ -74,6 +98,7 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
        p_cid->cid = cid;
        p_cid->vf_qid = vf_qid;
        p_cid->rel = *p_params;
+       p_cid->p_owner = p_hwfn;
 
        /* Don't try calculating the absolute indices for VFs */
        if (IS_VF(p_hwfn->cdev)) {
@@ -189,6 +214,7 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
        p_ramrod->vport_id      = abs_vport_id;
 
        p_ramrod->mtu                   = cpu_to_le16(p_params->mtu);
+       p_ramrod->handle_ptp_pkts       = p_params->handle_ptp_pkts;
        p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
        p_ramrod->drop_ttl0_en          = p_params->drop_ttl0;
        p_ramrod->untagged              = p_params->only_untagged;
@@ -248,76 +274,103 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
 static int
 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
                        struct vport_update_ramrod_data *p_ramrod,
-                       struct qed_rss_params *p_params)
+                       struct qed_rss_params *p_rss)
 {
-       struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
-       u16 abs_l2_queue = 0, capabilities = 0;
-       int rc = 0, i;
+       struct eth_vport_rss_config *p_config;
+       u16 capabilities = 0;
+       int i, table_size;
+       int rc = 0;
 
-       if (!p_params) {
+       if (!p_rss) {
                p_ramrod->common.update_rss_flg = 0;
                return rc;
        }
+       p_config = &p_ramrod->rss_config;
 
-       BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
-                    ETH_RSS_IND_TABLE_ENTRIES_NUM);
+       BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);
 
-       rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
+       rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
        if (rc)
                return rc;
 
-       p_ramrod->common.update_rss_flg = p_params->update_rss_config;
-       rss->update_rss_capabilities = p_params->update_rss_capabilities;
-       rss->update_rss_ind_table = p_params->update_rss_ind_table;
-       rss->update_rss_key = p_params->update_rss_key;
+       p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
+       p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
+       p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
+       p_config->update_rss_key = p_rss->update_rss_key;
 
-       rss->rss_mode = p_params->rss_enable ?
-                       ETH_VPORT_RSS_MODE_REGULAR :
-                       ETH_VPORT_RSS_MODE_DISABLED;
+       p_config->rss_mode = p_rss->rss_enable ?
+                            ETH_VPORT_RSS_MODE_REGULAR :
+                            ETH_VPORT_RSS_MODE_DISABLED;
 
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
-                 !!(p_params->rss_caps & QED_RSS_IPV4));
+                 !!(p_rss->rss_caps & QED_RSS_IPV4));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
-                 !!(p_params->rss_caps & QED_RSS_IPV6));
+                 !!(p_rss->rss_caps & QED_RSS_IPV6));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
-                 !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
+                 !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
-                 !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
+                 !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
-                 !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
+                 !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
-                 !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
-       rss->tbl_size = p_params->rss_table_size_log;
+                 !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
+       p_config->tbl_size = p_rss->rss_table_size_log;
 
-       rss->capabilities = cpu_to_le16(capabilities);
+       p_config->capabilities = cpu_to_le16(capabilities);
 
        DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
                   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
                   p_ramrod->common.update_rss_flg,
-                  rss->rss_mode, rss->update_rss_capabilities,
-                  capabilities, rss->update_rss_ind_table,
-                  rss->update_rss_key);
+                  p_config->rss_mode,
+                  p_config->update_rss_capabilities,
+                  p_config->capabilities,
+                  p_config->update_rss_ind_table, p_config->update_rss_key);
 
-       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
-               rc = qed_fw_l2_queue(p_hwfn,
-                                    (u8)p_params->rss_ind_table[i],
-                                    &abs_l2_queue);
-               if (rc)
-                       return rc;
+       table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
+                          1 << p_config->tbl_size);
+       for (i = 0; i < table_size; i++) {
+               struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];
+
+               if (!p_queue)
+                       return -EINVAL;
 
-               rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
-               DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
-                          i, rss->indirection_table[i]);
+               p_config->indirection_table[i] =
+                   cpu_to_le16(p_queue->abs.queue_id);
+       }
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+                  "Configured RSS indirection table [%d entries]:\n",
+                  table_size);
+       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
+               DP_VERBOSE(p_hwfn,
+                          NETIF_MSG_IFUP,
+                          "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+                          le16_to_cpu(p_config->indirection_table[i]),
+                          le16_to_cpu(p_config->indirection_table[i + 1]),
+                          le16_to_cpu(p_config->indirection_table[i + 2]),
+                          le16_to_cpu(p_config->indirection_table[i + 3]),
+                          le16_to_cpu(p_config->indirection_table[i + 4]),
+                          le16_to_cpu(p_config->indirection_table[i + 5]),
+                          le16_to_cpu(p_config->indirection_table[i + 6]),
+                          le16_to_cpu(p_config->indirection_table[i + 7]),
+                          le16_to_cpu(p_config->indirection_table[i + 8]),
+                          le16_to_cpu(p_config->indirection_table[i + 9]),
+                          le16_to_cpu(p_config->indirection_table[i + 10]),
+                          le16_to_cpu(p_config->indirection_table[i + 11]),
+                          le16_to_cpu(p_config->indirection_table[i + 12]),
+                          le16_to_cpu(p_config->indirection_table[i + 13]),
+                          le16_to_cpu(p_config->indirection_table[i + 14]),
+                          le16_to_cpu(p_config->indirection_table[i + 15]));
        }
 
        for (i = 0; i < 10; i++)
-               rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
+               p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);
 
        return rc;
 }
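
The rewritten loop above walks only the first 1 << tbl_size entries of the indirection table (clamped to the full table size) and fails with -EINVAL on a missing queue handle. A standalone sketch of that bound, with illustrative names and values:

#include <stdint.h>
#include <stdio.h>

#define IND_TABLE_SIZE 128            /* stands in for QED_RSS_IND_TABLE_SIZE */

int main(void)
{
        uint8_t tbl_size_log = 6;              /* log2 of entries in use */
        int table_size = 1 << tbl_size_log;

        if (table_size > IND_TABLE_SIZE)
                table_size = IND_TABLE_SIZE;   /* the min_t() clamp */

        printf("walking %d of %d entries\n", table_size, IND_TABLE_SIZE);
        return 0;
}
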
@@ -1729,13 +1782,31 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
                int max_vf_mac_filters = 0;
 
                if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
-                       for_each_hwfn(cdev, i)
-                           info->num_queues +=
-                           FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
-                       if (cdev->int_params.fp_msix_cnt)
-                               info->num_queues =
-                                   min_t(u8, info->num_queues,
-                                         cdev->int_params.fp_msix_cnt);
+                       u16 num_queues = 0;
+
+                       /* Since the feature controls only queue-zones,
+                        * make sure we have the contexts [rx, tx, xdp] to
+                        * match.
+                        */
+                       for_each_hwfn(cdev, i) {
+                               struct qed_hwfn *hwfn = &cdev->hwfns[i];
+                               u16 l2_queues = (u16)FEAT_NUM(hwfn,
+                                                             QED_PF_L2_QUE);
+                               u16 cids;
+
+                               cids = hwfn->pf_params.eth_pf_params.num_cons;
+                               num_queues += min_t(u16, l2_queues, cids / 3);
+                       }
+
+                       /* Queues might theoretically be >256, but the
+                        * interrupts' upper limit guarantees they fit in a u8.
+                        */
+                       if (cdev->int_params.fp_msix_cnt) {
+                               u8 irqs = cdev->int_params.fp_msix_cnt;
+
+                               info->num_queues = (u8)min_t(u16,
+                                                            num_queues, irqs);
+                       }
                } else {
                        info->num_queues = cdev->num_hwfns;
                }
@@ -1776,7 +1847,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
        qed_fill_dev_info(cdev, &info->common);
 
        if (IS_VF(cdev))
-               memset(info->common.hw_mac, 0, ETH_ALEN);
+               eth_zero_addr(info->common.hw_mac);
 
        return 0;
 }
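
The MSI-X branch above applies two bounds: each L2 queue needs three contexts (rx, tx, xdp), so a hwfn contributes min(queue-zones, num_cons / 3) queues, and the device total is then clamped to the interrupt budget and narrowed to u8. A toy model of that computation, with all numbers hypothetical:

#include <stdint.h>
#include <stdio.h>

static uint16_t engine_queues(uint16_t l2_queue_zones, uint16_t num_cons)
{
        uint16_t by_cids = num_cons / 3;   /* rx + tx + xdp per queue */

        return l2_queue_zones < by_cids ? l2_queue_zones : by_cids;
}

int main(void)
{
        uint16_t total = engine_queues(104, 192) + engine_queues(104, 192);
        uint16_t msix = 48;                /* hypothetical fp_msix_cnt */
        uint8_t num_queues = (uint8_t)(total < msix ? total : msix);

        printf("num_queues = %u\n", num_queues);   /* 48 here */
        return 0;
}
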
@@ -1816,6 +1887,7 @@ static int qed_start_vport(struct qed_dev *cdev,
                start.drop_ttl0 = params->drop_ttl0;
                start.opaque_fid = p_hwfn->hw_info.opaque_fid;
                start.concrete_fid = p_hwfn->hw_info.concrete_fid;
+               start.handle_ptp_pkts = params->handle_ptp_pkts;
                start.vport_id = params->vport_id;
                start.max_buffers_per_cqe = 16;
                start.mtu = params->mtu;
@@ -1857,18 +1929,84 @@ static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
        return 0;
 }
 
+static int qed_update_vport_rss(struct qed_dev *cdev,
+                               struct qed_update_vport_rss_params *input,
+                               struct qed_rss_params *rss)
+{
+       int i, fn;
+
+       /* Update configuration with what's correct regardless of CMT */
+       rss->update_rss_config = 1;
+       rss->rss_enable = 1;
+       rss->update_rss_capabilities = 1;
+       rss->update_rss_ind_table = 1;
+       rss->update_rss_key = 1;
+       rss->rss_caps = input->rss_caps;
+       memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));
+
+       /* In the regular scenario, we'd simply take the input handlers.
+        * But in CMT we have to split the handlers according to the
+        * engine they were configured on, and then determine whether
+        * RSS is really required, since two queues on CMT don't
+        * require RSS.
+        */
+       if (cdev->num_hwfns == 1) {
+               memcpy(rss->rss_ind_table,
+                      input->rss_ind_table,
+                      QED_RSS_IND_TABLE_SIZE * sizeof(void *));
+               rss->rss_table_size_log = 7;
+               return 0;
+       }
+
+       /* Start by copying the non-specific information to the 2nd copy */
+       memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));
+
+       /* CMT should be round-robin */
+       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+               struct qed_queue_cid *cid = input->rss_ind_table[i];
+               struct qed_rss_params *t_rss;
+
+               if (cid->p_owner == QED_LEADING_HWFN(cdev))
+                       t_rss = &rss[0];
+               else
+                       t_rss = &rss[1];
+
+               t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
+       }
+
+       /* Make sure RSS is actually required */
+       for_each_hwfn(cdev, fn) {
+               for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
+                       if (rss[fn].rss_ind_table[i] !=
+                           rss[fn].rss_ind_table[0])
+                               break;
+               }
+               if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
+                       DP_VERBOSE(cdev, NETIF_MSG_IFUP,
+                                  "CMT - 1 queue per-hwfn; Disabling RSS\n");
+                       return -EINVAL;
+               }
+               rss[fn].rss_table_size_log = 6;
+       }
+
+       return 0;
+}
+
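
qed_update_vport_rss() above deals one caller-supplied indirection table into a per-engine table for CMT devices. A self-contained model of that round-robin split, with ints standing in for the qed_queue_cid handles and parity standing in for cid->p_owner:

#include <stdio.h>

#define TBL     128
#define ENGINES 2

int main(void)
{
        int input[TBL], split[ENGINES][TBL / ENGINES];
        int i;

        for (i = 0; i < TBL; i++)
                input[i] = i % 8;              /* 8 queues, round-robin */

        for (i = 0; i < TBL; i++) {
                int eng = input[i] % ENGINES;  /* the owning engine */

                split[eng][i / ENGINES] = input[i];
        }

        printf("engine0: %d %d ...  engine1: %d %d ...\n",
               split[0][0], split[0][1], split[1][0], split[1][1]);
        return 0;
}
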
 static int qed_update_vport(struct qed_dev *cdev,
                            struct qed_update_vport_params *params)
 {
        struct qed_sp_vport_update_params sp_params;
-       struct qed_rss_params sp_rss_params;
-       int rc, i;
+       struct qed_rss_params *rss;
+       int rc = 0, i;
 
        if (!cdev)
                return -ENODEV;
 
+       rss = vzalloc(sizeof(*rss) * cdev->num_hwfns);
+       if (!rss)
+               return -ENOMEM;
+
        memset(&sp_params, 0, sizeof(sp_params));
-       memset(&sp_rss_params, 0, sizeof(sp_rss_params));
 
        /* Translate protocol params into sp params */
        sp_params.vport_id = params->vport_id;
@@ -1882,66 +2020,24 @@ static int qed_update_vport(struct qed_dev *cdev,
        sp_params.update_accept_any_vlan_flg =
                params->update_accept_any_vlan_flg;
 
-       /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
-        * We need to re-fix the rss values per engine for CMT.
-        */
-       if (cdev->num_hwfns > 1 && params->update_rss_flg) {
-               struct qed_update_vport_rss_params *rss = &params->rss_params;
-               int k, max = 0;
-
-               /* Find largest entry, since it's possible RSS needs to
-                * be disabled [in case only 1 queue per-hwfn]
-                */
-               for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
-                       max = (max > rss->rss_ind_table[k]) ?
-                               max : rss->rss_ind_table[k];
-
-               /* Either fix RSS values or disable RSS */
-               if (cdev->num_hwfns < max + 1) {
-                       int divisor = (max + cdev->num_hwfns - 1) /
-                               cdev->num_hwfns;
-
-                       DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
-                                  "CMT - fixing RSS values (modulo %02x)\n",
-                                  divisor);
-
-                       for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
-                               rss->rss_ind_table[k] =
-                                       rss->rss_ind_table[k] % divisor;
-               } else {
-                       DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
-                                  "CMT - 1 queue per-hwfn; Disabling RSS\n");
+       /* Prepare the RSS configuration */
+       if (params->update_rss_flg)
+               if (qed_update_vport_rss(cdev, &params->rss_params, rss))
                        params->update_rss_flg = 0;
-               }
-       }
-
-       /* Now, update the RSS configuration for actual configuration */
-       if (params->update_rss_flg) {
-               sp_rss_params.update_rss_config = 1;
-               sp_rss_params.rss_enable = 1;
-               sp_rss_params.update_rss_capabilities = 1;
-               sp_rss_params.update_rss_ind_table = 1;
-               sp_rss_params.update_rss_key = 1;
-               sp_rss_params.rss_caps = params->rss_params.rss_caps;
-               sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
-               memcpy(sp_rss_params.rss_ind_table,
-                      params->rss_params.rss_ind_table,
-                      QED_RSS_IND_TABLE_SIZE * sizeof(u16));
-               memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
-                      QED_RSS_KEY_SIZE * sizeof(u32));
-               sp_params.rss_params = &sp_rss_params;
-       }
 
        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
+               if (params->update_rss_flg)
+                       sp_params.rss_params = &rss[i];
+
                sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
                rc = qed_sp_vport_update(p_hwfn, &sp_params,
                                         QED_SPQ_MODE_EBLOCK,
                                         NULL);
                if (rc) {
                        DP_ERR(cdev, "Failed to update VPORT\n");
-                       return rc;
+                       goto out;
                }
 
                DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
@@ -1950,7 +2046,9 @@ static int qed_update_vport(struct qed_dev *cdev,
                           params->update_vport_active_flg);
        }
 
-       return 0;
+out:
+       vfree(rss);
+       return rc;
 }
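
The rework above allocates one qed_rss_params per engine with vzalloc() and routes every exit through the single out: label, so the buffer is freed on success and failure alike. The same shape in miniature (plain calloc/free standing in for vzalloc/vfree, do_one() a placeholder for the per-hwfn update):

#include <stdlib.h>

struct params { int engine; };

static int do_one(struct params *p) { (void)p; return 0; }

static int update_all(int num_engines)
{
        struct params *arr = calloc(num_engines, sizeof(*arr));
        int rc = 0, i;

        if (!arr)
                return -1;                 /* -ENOMEM in the driver */

        for (i = 0; i < num_engines; i++) {
                arr[i].engine = i;
                rc = do_one(&arr[i]);
                if (rc)
                        goto out;          /* no early return leaks arr */
        }
out:
        free(arr);
        return rc;
}

int main(void) { return update_all(2); }
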
 
 static int qed_start_rxq(struct qed_dev *cdev,
@@ -2114,11 +2212,14 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
                                        QED_ACCEPT_MCAST_MATCHED |
                                        QED_ACCEPT_BCAST;
 
-       if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
+       if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
                accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
                                                 QED_ACCEPT_MCAST_UNMATCHED;
-       else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
+               accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+       } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
                accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+               accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+       }
 
        return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
                                     QED_SPQ_MODE_CB, NULL);
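
The rx-mode change above makes multicast promiscuity symmetric by adding QED_ACCEPT_MCAST_UNMATCHED to the tx filter as well as the rx filter. A bitmask sketch of the flag composition, with made-up flag values:

#include <stdio.h>

#define ACCEPT_UCAST_MATCHED    (1 << 0)
#define ACCEPT_UCAST_UNMATCHED  (1 << 1)
#define ACCEPT_MCAST_MATCHED    (1 << 2)
#define ACCEPT_MCAST_UNMATCHED  (1 << 3)
#define ACCEPT_BCAST            (1 << 4)

int main(void)
{
        unsigned rx = ACCEPT_UCAST_MATCHED | ACCEPT_MCAST_MATCHED |
                      ACCEPT_BCAST;
        unsigned tx = rx;

        /* multi-promisc: accept unmatched multicast in both directions */
        rx |= ACCEPT_MCAST_UNMATCHED;
        tx |= ACCEPT_MCAST_UNMATCHED;

        printf("rx = %#x, tx = %#x\n", rx, tx);
        return 0;
}
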
@@ -2229,6 +2330,8 @@ extern const struct qed_iov_hv_ops qed_iov_ops_pass;
 extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
 #endif
 
+extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;
+
 static const struct qed_eth_ops qed_eth_ops_pass = {
        .common = &qed_common_ops_pass,
 #ifdef CONFIG_QED_SRIOV
@@ -2237,6 +2340,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
 #ifdef CONFIG_DCB
        .dcb = &qed_dcbnl_ops_pass,
 #endif
+       .ptp = &qed_ptp_ops_pass,
        .fill_dev_info = &qed_fill_eth_dev_info,
        .register_ops = &qed_register_eth_ops,
        .check_mac = &qed_check_mac,
index 48c9bfc2814082ecb68b07884b1c772ca8a3bf33..e763abd334f64e08d3df948d6b78e7993f6ca251 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 #ifndef _QED_L2_H
 #define _QED_L2_H
 #include "qed.h"
 #include "qed_hw.h"
 #include "qed_sp.h"
+struct qed_rss_params {
+       u8 update_rss_config;
+       u8 rss_enable;
+       u8 rss_eng_id;
+       u8 update_rss_capabilities;
+       u8 update_rss_ind_table;
+       u8 update_rss_key;
+       u8 rss_caps;
+       u8 rss_table_size_log;
+
+       /* Indirection table consists of rx queue handles */
+       void *rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+       u32 rss_key[QED_RSS_KEY_SIZE];
+};
 
 struct qed_sge_tpa_params {
        u8 max_buffers_per_cqe;
@@ -118,6 +156,7 @@ struct qed_sp_vport_start_params {
        enum qed_tpa_mode tpa_mode;
        bool remove_inner_vlan;
        bool tx_switching;
+       bool handle_ptp_pkts;
        bool only_untagged;
        bool drop_ttl0;
        u8 max_buffers_per_cqe;
@@ -132,18 +171,6 @@ struct qed_sp_vport_start_params {
 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
                           struct qed_sp_vport_start_params *p_params);
 
-struct qed_rss_params {
-       u8      update_rss_config;
-       u8      rss_enable;
-       u8      rss_eng_id;
-       u8      update_rss_capabilities;
-       u8      update_rss_ind_table;
-       u8      update_rss_key;
-       u8      rss_caps;
-       u8      rss_table_size_log;
-       u16     rss_ind_table[QED_RSS_IND_TABLE_SIZE];
-       u32     rss_key[QED_RSS_KEY_SIZE];
-};
 
 struct qed_filter_accept_flags {
        u8      update_rx_mode_config;
@@ -263,6 +290,8 @@ struct qed_queue_cid {
 
        /* Legacy VFs might have Rx producer located elsewhere */
        bool b_legacy_vf;
+
+       struct qed_hwfn *p_owner;
 };
 
 void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
index 873ce2cd76ba0540e501cf7455e8b562fb00c0e3..02c5d47cfc6d4d1369c82868dc4b9f34143e1342 100644 (file)
@@ -1,10 +1,33 @@
 /* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * Copyright (c) 2015 QLogic Corporation
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
index 31417928b6354f66d7f55c28d9e52565f9f3773d..db3e4fc78e090f223d01a2780102c66293168b33 100644 (file)
@@ -1,10 +1,33 @@
 /* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * Copyright (c) 2015 QLogic Corporation
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_LL2_H
index aeb98d8c56264c53109bbad46b8d9b42add1d36d..592e104687a7e550859204220e7c068dcfc0fa99 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/stddef.h>
@@ -853,6 +877,17 @@ static void qed_update_pf_params(struct qed_dev *cdev,
                params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
        }
 
+       /* Since RDMA might be supported, don't let qede be greedy with
+        * the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
+        */
+       if (QED_LEADING_HWFN(cdev)->hw_info.personality ==
+           QED_PCI_ETH_ROCE) {
+               u16 *num_cons;
+
+               num_cons = &params->eth_pf_params.num_cons;
+               *num_cons = min_t(u16, *num_cons, 192);
+       }
+
        for (i = 0; i < cdev->num_hwfns; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
 
@@ -867,6 +902,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
        struct qed_mcp_drv_version drv_version;
        const u8 *data = NULL;
        struct qed_hwfn *hwfn;
+       struct qed_ptt *p_ptt;
        int rc = -EINVAL;
 
        if (qed_iov_wq_start(cdev))
@@ -881,6 +917,14 @@ static int qed_slowpath_start(struct qed_dev *cdev,
                                  QED_FW_FILE_NAME);
                        goto err;
                }
+
+               p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+               if (p_ptt) {
+                       QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt;
+               } else {
+                       DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n");
+                       goto err;
+               }
        }
 
        cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
@@ -968,6 +1012,10 @@ err:
        if (IS_PF(cdev))
                release_firmware(cdev->firmware);
 
+       if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt)
+               qed_ptt_release(QED_LEADING_HWFN(cdev),
+                               QED_LEADING_HWFN(cdev)->p_ptp_ptt);
+
        qed_iov_wq_stop(cdev, false);
 
        return rc;
@@ -981,6 +1029,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
        qed_ll2_dealloc_if(cdev);
 
        if (IS_PF(cdev)) {
+               qed_ptt_release(QED_LEADING_HWFN(cdev),
+                               QED_LEADING_HWFN(cdev)->p_ptp_ptt);
                qed_free_stream_mem(cdev);
                if (IS_QED_ETH_IF(cdev))
                        qed_sriov_disable(cdev, true);
index 6dd3ce443484b5f75cb0a1b7d3cfcdece593fc96..c8a87759403225400176f36ba32261f6712777e3 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
@@ -1098,7 +1122,9 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
 
        switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
        case FUNC_MF_CFG_PROTOCOL_ETHERNET:
-               if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
+               if (!IS_ENABLED(CONFIG_QED_RDMA))
+                       *p_proto = QED_PCI_ETH;
+               else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
                        qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
                break;
        case FUNC_MF_CFG_PROTOCOL_ISCSI:
index 407a2c1830fb6f480a2523942c9730b4d1b50f58..363dce0f16b1e7a777954111871966e8ab5bd152 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_MCP_H
index 155abcb507fd4945cb7a78f1dac698d1f183f15b..7d731c6cb8923dd927a7bbaafa3a3a97237ba652 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
index 7a0670a9a07486502c493cafa8d16779db71e733..4f138fb5f533e0ac68cbdf9acab9a41aa4d33ff7 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_OOO_H
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
new file mode 100644 (file)
index 0000000..d27aa85
--- /dev/null
@@ -0,0 +1,323 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/types.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_hw.h"
+#include "qed_l2.h"
+#include "qed_ptp.h"
+#include "qed_reg_addr.h"
+
+/* Number of 16-nanosecond time quanta to wait before making a drift adjustment */
+#define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT       0
+/* Nanoseconds to add/subtract when making a drift adjustment */
+#define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT                28
+/* Add/subtract the Adjustment_Value when making a drift adjustment */
+#define QED_DRIFT_CNTR_DIRECTION_SHIFT         31
+#define QED_TIMESTAMP_MASK                     BIT(16)
+
+/* Read Rx timestamp */
+static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+       u32 val;
+
+       *timestamp = 0;
+       val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID);
+       if (!(val & QED_TIMESTAMP_MASK)) {
+               DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val);
+               return -EINVAL;
+       }
+
+       val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB);
+       *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB);
+       *timestamp <<= 32;
+       *timestamp |= val;
+
+       /* Reset timestamp register to allow new timestamp */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
+              QED_TIMESTAMP_MASK);
+
+       return 0;
+}
+
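
Both timestamp readers here assemble a 64-bit value from two 32-bit register halves, reading the LSB first and then shifting the MSB into place. The composition in isolation, with the register reads mocked:

#include <stdint.h>
#include <stdio.h>

static uint32_t read_ts_lsb(void) { return 0x89abcdefu; }   /* mock */
static uint32_t read_ts_msb(void) { return 0x01234567u; }   /* mock */

int main(void)
{
        uint32_t lsb = read_ts_lsb();      /* LSB read first, as above */
        uint64_t ts = read_ts_msb();

        ts <<= 32;
        ts |= lsb;

        printf("ts = %#llx\n", (unsigned long long)ts);
        return 0;
}
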
+/* Read Tx timestamp */
+static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+       u32 val;
+
+       *timestamp = 0;
+       val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
+       if (!(val & QED_TIMESTAMP_MASK)) {
+               DP_INFO(p_hwfn, "Invalid Tx timestamp, buf_seqid = %d\n", val);
+               return -EINVAL;
+       }
+
+       val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB);
+       *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB);
+       *timestamp <<= 32;
+       *timestamp |= val;
+
+       /* Reset timestamp register to allow new timestamp */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);
+
+       return 0;
+}
+
+/* Read Phy Hardware Clock */
+static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+       u32 temp = 0;
+
+       temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB);
+       *phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB);
+       *phc_cycles <<= 32;
+       *phc_cycles |= temp;
+
+       return 0;
+}
+
+/* Filter PTP protocol packets that need to be timestamped */
+static int qed_ptp_hw_cfg_rx_filters(struct qed_dev *cdev,
+                                    enum qed_ptp_filter_type type)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+       u32 rule_mask, parm_mask;
+
+       switch (type) {
+       case QED_PTP_FILTER_L2_IPV4_IPV6:
+               parm_mask = 0x6AA;
+               rule_mask = 0x3EEE;
+               break;
+       case QED_PTP_FILTER_L2:
+               parm_mask = 0x6BF;
+               rule_mask = 0x3EFF;
+               break;
+       case QED_PTP_FILTER_IPV4_IPV6:
+               parm_mask = 0x7EA;
+               rule_mask = 0x3FFE;
+               break;
+       case QED_PTP_FILTER_IPV4:
+               parm_mask = 0x7EE;
+               rule_mask = 0x3FFE;
+               break;
+       default:
+               DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", type);
+               return -EINVAL;
+       }
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, parm_mask);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_TO_HOST, 0x1);
+
+       /* Reset possibly old timestamps */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
+              QED_TIMESTAMP_MASK);
+
+       return 0;
+}
+
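
The switch above pairs each filter class with fixed (param, rule) mask values. A table-driven equivalent in miniature, with the mask values copied from the hunk and the enum names illustrative:

#include <stdio.h>

enum ptp_filter { F_L2_V4_V6, F_L2, F_V4_V6, F_V4 };

static const struct { unsigned parm, rule; } masks[] = {
        [F_L2_V4_V6] = { 0x6AA, 0x3EEE },
        [F_L2]       = { 0x6BF, 0x3EFF },
        [F_V4_V6]    = { 0x7EA, 0x3FFE },
        [F_V4]       = { 0x7EE, 0x3FFE },
};

int main(void)
{
        enum ptp_filter type = F_V4;

        printf("parm = 0x%03X, rule = 0x%04X\n",
               masks[type].parm, masks[type].rule);
        return 0;
}
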
+/* Adjust the HW clock by a rate given in parts-per-billion (ppb) units.
+ * FW/HW accepts the adjustment value in terms of 3 parameters:
+ *   Drift period - adjustment happens once in a certain number of nanoseconds.
+ *   Drift value - time is adjusted by a certain value, for example by 5 ns.
+ *   Drift direction - add or subtract the adjustment value.
+ * The routine translates ppb into the adjustment triplet in an optimal manner.
+ */
+static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
+{
+       s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2;
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+       u32 drift_ctr_cfg = 0, drift_state;
+       int drift_dir = 1;
+
+       if (ppb < 0) {
+               ppb = -ppb;
+               drift_dir = 0;
+       }
+
+       if (ppb > 1) {
+               s64 best_dif = ppb, best_approx_dev = 1;
+
+               /* Adjustment value is up to +/-7 ns; find an optimal value
+                * in this range.
+                */
+               for (val = 7; val > 0; val--) {
+                       period = div_s64(val * 1000000000, ppb);
+                       period -= 8;
+                       period >>= 4;
+                       if (period < 1)
+                               period = 1;
+                       if (period > 0xFFFFFFE)
+                               period = 0xFFFFFFE;
+
+                       /* Check both rounding ends for approximation error */
+                       approx_dev = period * 16 + 8;
+                       dif = ppb * approx_dev - val * 1000000000;
+                       dif2 = dif + 16 * ppb;
+
+                       if (dif < 0)
+                               dif = -dif;
+                       if (dif2 < 0)
+                               dif2 = -dif2;
+
+                       /* Determine which end gives better approximation */
+                       if (dif * (approx_dev + 16) > dif2 * approx_dev) {
+                               period++;
+                               approx_dev += 16;
+                               dif = dif2;
+                       }
+
+                       /* Track best approximation found so far */
+                       if (best_dif * approx_dev > dif * best_approx_dev) {
+                               best_dif = dif;
+                               best_val = val;
+                               best_period = period;
+                               best_approx_dev = approx_dev;
+                       }
+               }
+       } else if (ppb == 1) {
+               /* This is a special case as it's the only value which
+                * wouldn't fit in an s64 variable. To avoid casts, simply
+                * handle it separately.
+                */
+               best_val = 4;
+               best_period = 0xee6b27f;
+       } else {
+               best_val = 0;
+               best_period = 0xFFFFFFF;
+       }
+
+       drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) |
+                       (((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) |
+                       (((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT);
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1);
+
+       drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR);
+       if (drift_state & 1) {
+               qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF,
+                      drift_ctr_cfg);
+       } else {
+               DP_INFO(p_hwfn, "Drift counter is not reset\n");
+               return -EINVAL;
+       }
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);
+
+       return 0;
+}
+
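
The search above is easier to follow in isolation: for each candidate adjustment value (7 ns down to 1 ns) it derives the drift period, in 16 ns quanta, that approximates the requested ppb; the rounding-error comparison is omitted here for brevity:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int32_t ppb = 5000;                /* requested drift */
        int64_t val;

        for (val = 7; val > 0; val--) {
                int64_t period_ns = (val * 1000000000LL) / ppb;
                int64_t period = (period_ns - 8) >> 4;  /* ns -> 16 ns quanta */

                if (period < 1)
                        period = 1;
                if (period > 0xFFFFFFE)
                        period = 0xFFFFFFE;

                printf("adjust %lld ns every %lld quanta\n",
                       (long long)val, (long long)period);
        }
        return 0;
}
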
+static int qed_ptp_hw_enable(struct qed_dev *cdev)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+
+       /* Reset PTP event detection rules - will be configured in the IOCTL */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7);
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);
+
+       /* Pause free running counter */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
+       /* Resume free running counter */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
+
+       /* Disable drift register */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);
+
+       /* Reset possibly old timestamps */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
+              QED_TIMESTAMP_MASK);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);
+
+       return 0;
+}
+
+static int qed_ptp_hw_hwtstamp_tx_on(struct qed_dev *cdev)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x6AA);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3EEE);
+
+       return 0;
+}
+
+static int qed_ptp_hw_disable(struct qed_dev *cdev)
+{
+       struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+       struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+
+       /* Reset PTP event detection rules */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
+
+       /* Disable the PTP feature */
+       qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
+
+       return 0;
+}
+
+const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
+       .hwtstamp_tx_on = qed_ptp_hw_hwtstamp_tx_on,
+       .cfg_rx_filters = qed_ptp_hw_cfg_rx_filters,
+       .read_rx_ts = qed_ptp_hw_read_rx_ts,
+       .read_tx_ts = qed_ptp_hw_read_tx_ts,
+       .read_cc = qed_ptp_hw_read_cc,
+       .adjfreq = qed_ptp_hw_adjfreq,
+       .disable = qed_ptp_hw_disable,
+       .enable = qed_ptp_hw_enable,
+};
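
A generic sketch of how an exported ops table like qed_eth_ptp_ops is consumed: the upper driver keeps a pointer to the const struct and dispatches through it. The names below are illustrative, not qede's actual call sites:

#include <stdio.h>

struct ptp_ops {
        int (*enable)(void *dev);
        int (*adjfreq)(void *dev, int ppb);
};

static int my_enable(void *dev)           { (void)dev; return 0; }
static int my_adjfreq(void *dev, int ppb) { (void)dev; (void)ppb; return 0; }

static const struct ptp_ops ops = {
        .enable  = my_enable,
        .adjfreq = my_adjfreq,
};

int main(void)
{
        void *dev = NULL;                  /* stand-in for struct qed_dev */

        if (!ops.enable(dev) && !ops.adjfreq(dev, 5000))
                printf("ptp configured\n");
        return 0;
}
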
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.h b/drivers/net/ethernet/qlogic/qed/qed_ptp.h
new file mode 100644 (file)
index 0000000..63c666d
--- /dev/null
@@ -0,0 +1,47 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_PTP_H
+#define _QED_PTP_H
+#include <linux/types.h>
+
+int qed_ptp_hwtstamp_tx_on(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_ptp_cfg_rx_filters(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                          enum qed_ptp_filter_type type);
+int qed_ptp_read_rx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts);
+int qed_ptp_read_tx_ts(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u64 *ts);
+int qed_ptp_read_cc(struct qed_hwfn *p_hwfn,
+                   struct qed_ptt *p_ptt, u64 *cycles);
+int qed_ptp_adjfreq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, s32 ppb);
+int qed_ptp_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_ptp_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+#endif
index 97544205a8c193540a2e8a40b640cfad4034e68a..3b7edf6ff234196732f9edf9fbfe4c53202bbec1 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef REG_ADDR_H
 #define DORQ_REG_PF_ICID_BIT_SHIFT_NORM        0x100448UL
 #define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL
 #define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL
+#define NIG_REG_RX_PTP_EN 0x501900UL
+#define NIG_REG_TX_PTP_EN 0x501904UL
+#define NIG_REG_LLH_PTP_TO_HOST        0x501908UL
+#define NIG_REG_LLH_PTP_TO_MCP 0x50190cUL
+#define NIG_REG_PTP_SW_TXTSEN 0x501910UL
+#define NIG_REG_LLH_PTP_ETHERTYPE_1 0x501914UL
+#define NIG_REG_LLH_PTP_MAC_DA_2_LSB 0x501918UL
+#define NIG_REG_LLH_PTP_MAC_DA_2_MSB 0x50191cUL
+#define NIG_REG_LLH_PTP_PARAM_MASK 0x501920UL
+#define NIG_REG_LLH_PTP_RULE_MASK 0x501924UL
+#define NIG_REG_TX_LLH_PTP_PARAM_MASK 0x501928UL
+#define NIG_REG_TX_LLH_PTP_RULE_MASK 0x50192cUL
+#define NIG_REG_LLH_PTP_HOST_BUF_SEQID 0x501930UL
+#define NIG_REG_LLH_PTP_HOST_BUF_TS_LSB 0x501934UL
+#define NIG_REG_LLH_PTP_HOST_BUF_TS_MSB        0x501938UL
+#define NIG_REG_LLH_PTP_MCP_BUF_SEQID 0x50193cUL
+#define NIG_REG_LLH_PTP_MCP_BUF_TS_LSB 0x501940UL
+#define NIG_REG_LLH_PTP_MCP_BUF_TS_MSB 0x501944UL
+#define NIG_REG_TX_LLH_PTP_BUF_SEQID 0x501948UL
+#define NIG_REG_TX_LLH_PTP_BUF_TS_LSB 0x50194cUL
+#define NIG_REG_TX_LLH_PTP_BUF_TS_MSB 0x501950UL
+#define NIG_REG_RX_PTP_TS_MSB_ERR 0x501954UL
+#define NIG_REG_TX_PTP_TS_MSB_ERR 0x501958UL
+#define NIG_REG_TSGEN_SYNC_TIME_LSB 0x5088c0UL
+#define NIG_REG_TSGEN_SYNC_TIME_MSB 0x5088c4UL
+#define NIG_REG_TSGEN_RST_DRIFT_CNTR 0x5088d8UL
+#define NIG_REG_TSGEN_DRIFT_CNTR_CONF 0x5088dcUL
+#define NIG_REG_TS_OUTPUT_ENABLE_PDA 0x508870UL
+#define NIG_REG_TIMESYNC_GEN_REG_BB 0x500d00UL
+#define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL
+#define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL
 #endif
index 2dbdb329899187d021b1f74b63a9948835ec523d..c3c8c5018e9397a43c8f3404b2c9568315ac7c25 100644 (file)
@@ -1,5 +1,5 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015-2016  QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 279f342af8db1c91272fa89bf80fbde565f5e135..36cf4b2ab7faf0afcd17ec3347bf0d9d05567901 100644 (file)
@@ -1,5 +1,5 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015-2016  QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 48bfaecaf6dca30fc3113c92d739dfd8187f3ca8..1bafc05db2b89fd6ccf58cafb6c394b6c6b00d11 100644 (file)
@@ -1,3 +1,35 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2016  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
 #include <linux/crc32.h>
 #include "qed.h"
 #include "qed_dev_api.h"
index 9c897bc68d05545a88cfddf582ef19616042ce89..043882959606c3610f9fc210b8d3c1a533a8b6c7 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_SP_H
index a39ef2e7a9a62410fb744130ed153a9fcff77abf..097a7298757284108f47e706062a0a298f0e1d04 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
index f022469bdcf82bdfce2a9b21fe6566daad384949..645328a9f0cfb6b4040c8d6402ad5684d79adab9 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/types.h>
index 85b09dd1787acaa78ca72c1013eb466c6e1f70b7..3f4bf31f45e0acc4846d0d6e02b7397f18cf88f0 100644 (file)
@@ -1,13 +1,38 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/etherdevice.h>
 #include <linux/crc32.h>
+#include <linux/vmalloc.h>
 #include <linux/qed/qed_iov_if.h>
 #include "qed_cxt.h"
 #include "qed_hsi.h"
@@ -1199,7 +1224,10 @@ static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
                return;
 
        /* Clear the VF mac */
-       memset(vf_info->mac, 0, ETH_ALEN);
+       eth_zero_addr(vf_info->mac);
+
+       vf_info->rx_accept_mode = 0;
+       vf_info->tx_accept_mode = 0;
 }
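
A quick aside on the helper swapped in above: eth_zero_addr() from <linux/etherdevice.h> is just a typed wrapper around memset() for MAC addresses, which states the intent directly. A minimal user-space sketch of the equivalent operation:

#include <string.h>

#define ETH_ALEN 6      /* length of an Ethernet MAC address */

/* Equivalent of the kernel's eth_zero_addr() helper. */
static void zero_mac(unsigned char *addr)
{
        memset(addr, 0, ETH_ALEN);
}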
 
 static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
@@ -2294,12 +2322,14 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
                            struct qed_vf_info *vf,
                            struct qed_sp_vport_update_params *p_data,
                            struct qed_rss_params *p_rss,
-                           struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+                           struct qed_iov_vf_mbx *p_mbx,
+                           u16 *tlvs_mask, u16 *tlvs_accepted)
 {
        struct vfpf_vport_update_rss_tlv *p_rss_tlv;
        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
-       u16 i, q_idx, max_q_idx;
+       bool b_reject = false;
        u16 table_size;
+       u16 i, q_idx;
 
        p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
                    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
@@ -2323,34 +2353,39 @@ qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
        p_rss->rss_eng_id = vf->relative_vf_id + 1;
        p_rss->rss_caps = p_rss_tlv->rss_caps;
        p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
-       memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
-              sizeof(p_rss->rss_ind_table));
        memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
 
        table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
                           (1 << p_rss_tlv->rss_table_size_log));
 
-       max_q_idx = ARRAY_SIZE(vf->vf_queues);
-
        for (i = 0; i < table_size; i++) {
-               u16 index = vf->vf_queues[0].fw_rx_qid;
+               q_idx = p_rss_tlv->rss_ind_table[i];
+               if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx)) {
+                       DP_VERBOSE(p_hwfn,
+                                  QED_MSG_IOV,
+                                  "VF[%d]: Omitting RSS due to wrong queue %04x\n",
+                                  vf->relative_vf_id, q_idx);
+                       b_reject = true;
+                       goto out;
+               }
 
-               q_idx = p_rss->rss_ind_table[i];
-               if (q_idx >= max_q_idx)
-                       DP_NOTICE(p_hwfn,
-                                 "rss_ind_table[%d] = %d, rxq is out of range\n",
-                                 i, q_idx);
-               else if (!vf->vf_queues[q_idx].p_rx_cid)
-                       DP_NOTICE(p_hwfn,
-                                 "rss_ind_table[%d] = %d, rxq is not active\n",
-                                 i, q_idx);
-               else
-                       index = vf->vf_queues[q_idx].fw_rx_qid;
-               p_rss->rss_ind_table[i] = index;
+               if (!vf->vf_queues[q_idx].p_rx_cid) {
+                       DP_VERBOSE(p_hwfn,
+                                  QED_MSG_IOV,
+                                  "VF[%d]: Omitting RSS due to inactive queue %08x\n",
+                                  vf->relative_vf_id, q_idx);
+                       b_reject = true;
+                       goto out;
+               }
+
+               p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
        }
 
        p_data->rss_params = p_rss;
+out:
        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
+       if (!b_reject)
+               *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
 }
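
Worth noting about the hunk above: the old loop silently substituted the first queue's id whenever an indirection entry was out of range or inactive, while the new loop rejects the entire RSS TLV on the first bad entry (b_reject), so the PF can report the rejection back through tlvs_accepted. A minimal sketch of that validate-or-reject pattern, with hypothetical names:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical: accept the table only if every entry references a
 * valid queue; the caller drops the whole request otherwise.
 */
static bool ind_table_is_valid(const unsigned short *table, size_t entries,
                               unsigned short num_queues)
{
        for (size_t i = 0; i < entries; i++)
                if (table[i] >= num_queues)
                        return false;
        return true;
}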
 
 static void
@@ -2401,16 +2436,49 @@ qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
        *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
 }
 
+static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
+                                   u8 vfid,
+                                   struct qed_sp_vport_update_params *params,
+                                   u16 *tlvs)
+{
+       u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
+       struct qed_filter_accept_flags *flags = &params->accept_flags;
+       struct qed_public_vf_info *vf_info;
+
+       /* Untrusted VFs can't even be trusted to know that fact.
+        * Simply indicate everything is configured fine, and trace
+        * configuration 'behind their back'.
+        */
+       if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
+               return 0;
+
+       vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+
+       if (flags->update_rx_mode_config) {
+               vf_info->rx_accept_mode = flags->rx_accept_filter;
+               if (!vf_info->is_trusted_configured)
+                       flags->rx_accept_filter &= ~mask;
+       }
+
+       if (flags->update_tx_mode_config) {
+               vf_info->tx_accept_mode = flags->tx_accept_filter;
+               if (!vf_info->is_trusted_configured)
+                       flags->tx_accept_filter &= ~mask;
+       }
+
+       return 0;
+}
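
qed_iov_pre_update_vport() records what the VF asked for (rx/tx_accept_mode) but strips the unmatched-unicast/multicast bits unless the VF is trusted, so an untrusted VF never gets promiscuous behaviour even though its request is acknowledged. A distilled sketch of that policy (flag values hypothetical):

/* Hypothetical flag values standing in for QED_ACCEPT_*_UNMATCHED. */
#define ACCEPT_UCAST_UNMATCHED  0x01
#define ACCEPT_MCAST_UNMATCHED  0x02

/* Remember the request as-is, but apply only what trust allows. */
static unsigned char filter_by_trust(unsigned char requested, int is_trusted,
                                     unsigned char *shadow)
{
        unsigned char promisc = ACCEPT_UCAST_UNMATCHED | ACCEPT_MCAST_UNMATCHED;

        *shadow = requested;
        return is_trusted ? requested : (unsigned char)(requested & ~promisc);
}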
+
 static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
                                        struct qed_ptt *p_ptt,
                                        struct qed_vf_info *vf)
 {
+       struct qed_rss_params *p_rss_params = NULL;
        struct qed_sp_vport_update_params params;
        struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct qed_sge_tpa_params sge_tpa_params;
-       struct qed_rss_params rss_params;
+       u16 tlvs_mask = 0, tlvs_accepted = 0;
        u8 status = PFVF_STATUS_SUCCESS;
-       u16 tlvs_mask = 0;
        u16 length;
        int rc;
 
@@ -2423,6 +2491,11 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
                status = PFVF_STATUS_FAILURE;
                goto out;
        }
+       p_rss_params = vzalloc(sizeof(*p_rss_params));
+       if (p_rss_params == NULL) {
+               status = PFVF_STATUS_FAILURE;
+               goto out;
+       }
 
        memset(&params, 0, sizeof(params));
        params.opaque_fid = vf->opaque_fid;
@@ -2437,20 +2510,33 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
        qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
        qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
        qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
-       qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
-                                   mbx, &tlvs_mask);
        qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
        qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
                                        &sge_tpa_params, mbx, &tlvs_mask);
 
-       /* Just log a message if there is no single extended tlv in buffer.
-        * When all features of vport update ramrod would be requested by VF
-        * as extended TLVs in buffer then an error can be returned in response
-        * if there is no extended TLV present in buffer.
+       tlvs_accepted = tlvs_mask;
+
+       /* Some of the extended TLVs need to be validated first; in that
+        * case they can update the mask without updating the accepted bits,
+        * so that the PF can communicate to the VF that it has rejected the
+        * request.
         */
-       if (!tlvs_mask) {
-               DP_NOTICE(p_hwfn,
-                         "No feature tlvs found for vport update\n");
+       qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
+                                   mbx, &tlvs_mask, &tlvs_accepted);
+
+       if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
+                                    &params, &tlvs_accepted)) {
+               tlvs_accepted = 0;
+               status = PFVF_STATUS_NOT_SUPPORTED;
+               goto out;
+       }
+
+       if (!tlvs_accepted) {
+               if (tlvs_mask)
+                       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                                  "Upper-layer prevents VF vport configuration\n");
+               else
+                       DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+                                  "No feature tlvs found for vport update\n");
                status = PFVF_STATUS_NOT_SUPPORTED;
                goto out;
        }
@@ -2461,8 +2547,9 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
                status = PFVF_STATUS_FAILURE;
 
 out:
+       vfree(p_rss_params);
        length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
-                                                 tlvs_mask, tlvs_mask);
+                                                 tlvs_mask, tlvs_accepted);
        qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
 }
 
@@ -2539,8 +2626,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
                for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
                        if (ether_addr_equal(p_vf->shadow_config.macs[i],
                                             p_params->mac)) {
-                               memset(p_vf->shadow_config.macs[i], 0,
-                                      ETH_ALEN);
+                               eth_zero_addr(p_vf->shadow_config.macs[i]);
                                break;
                        }
                }
@@ -2553,7 +2639,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
        } else if (p_params->opcode == QED_FILTER_REPLACE ||
                   p_params->opcode == QED_FILTER_FLUSH) {
                for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
-                       memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN);
+                       eth_zero_addr(p_vf->shadow_config.macs[i]);
        }
 
        /* List the new MAC address */
@@ -3892,6 +3978,32 @@ static int qed_set_vf_rate(struct qed_dev *cdev,
        return 0;
 }
 
+static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
+{
+       int i;
+
+       for_each_hwfn(cdev, i) {
+               struct qed_hwfn *hwfn = &cdev->hwfns[i];
+               struct qed_public_vf_info *vf;
+
+               if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
+                       DP_NOTICE(hwfn,
+                                 "SR-IOV sanity check failed, can't set trust\n");
+                       return -EINVAL;
+               }
+
+               vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
+
+               if (vf->is_trusted_request == trust)
+                       return 0;
+               vf->is_trusted_request = trust;
+
+               qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
+       }
+
+       return 0;
+}
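
qed_set_vf_trust() only latches the admin's request and defers the actual device reconfiguration to the IOV workqueue; keeping is_trusted_request separate from is_trusted_configured lets the worker tell whether anything still needs doing. A sketch of that latch-and-kick shape (worker hook hypothetical):

#include <stdbool.h>

struct trust_state {
        bool requested;         /* what the admin asked for */
        bool configured;        /* what the device currently runs with */
};

/* Latch the request and kick the worker only when it differs. */
static void set_trust(struct trust_state *s, bool trust,
                      void (*schedule_worker)(void))
{
        if (s->requested == trust)
                return;
        s->requested = trust;
        schedule_worker();      /* the worker reconciles the two fields */
}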
+
 static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
 {
        u64 events[QED_VF_ARRAY_LENGTH];
@@ -3996,6 +4108,61 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
        qed_ptt_release(hwfn, ptt);
 }
 
+static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
+{
+       struct qed_sp_vport_update_params params;
+       struct qed_filter_accept_flags *flags;
+       struct qed_public_vf_info *vf_info;
+       struct qed_vf_info *vf;
+       u8 mask;
+       int i;
+
+       mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
+       flags = &params.accept_flags;
+
+       qed_for_each_vf(hwfn, i) {
+               /* Make sure the currently requested configuration didn't
+                * flip, so that we don't end up configuring something
+                * that's no longer needed.
+                */
+               vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
+               if (vf_info->is_trusted_configured ==
+                   vf_info->is_trusted_request)
+                       continue;
+               vf_info->is_trusted_configured = vf_info->is_trusted_request;
+
+               /* Validate that the VF has a configured vport */
+               vf = qed_iov_get_vf_info(hwfn, i, true);
+               if (!vf->vport_instance)
+                       continue;
+
+               memset(&params, 0, sizeof(params));
+               params.opaque_fid = vf->opaque_fid;
+               params.vport_id = vf->vport_id;
+
+               if (vf_info->rx_accept_mode & mask) {
+                       flags->update_rx_mode_config = 1;
+                       flags->rx_accept_filter = vf_info->rx_accept_mode;
+               }
+
+               if (vf_info->tx_accept_mode & mask) {
+                       flags->update_tx_mode_config = 1;
+                       flags->tx_accept_filter = vf_info->tx_accept_mode;
+               }
+
+               /* Remove if needed; otherwise this would set the mask */
+               if (!vf_info->is_trusted_configured) {
+                       flags->rx_accept_filter &= ~mask;
+                       flags->tx_accept_filter &= ~mask;
+               }
+
+               if (flags->update_rx_mode_config ||
+                   flags->update_tx_mode_config)
+                       qed_sp_vport_update(hwfn, &params,
+                                           QED_SPQ_MODE_EBLOCK, NULL);
+       }
+}
+
 static void qed_iov_pf_task(struct work_struct *work)
 {
@@ -4031,6 +4198,9 @@ static void qed_iov_pf_task(struct work_struct *work)
        if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
                               &hwfn->iov_task_flags))
                qed_handle_bulletin_post(hwfn);
+
+       if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
+               qed_iov_handle_trust_change(hwfn);
 }
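
qed_iov_pf_task() drains its event bits with test_and_clear_bit(), so each flag raised before the worker runs is handled exactly once, no matter how many times it was set. A user-space sketch of the same consume-once semantics using C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true only for the caller that actually clears the bit. */
static bool consume_event(_Atomic unsigned long *flags, unsigned int bit)
{
        unsigned long mask = 1UL << bit;

        return atomic_fetch_and(flags, ~mask) & mask;
}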
 
 void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
@@ -4093,4 +4263,5 @@ const struct qed_iov_hv_ops qed_iov_ops_pass = {
        .set_link_state = &qed_set_vf_link_state,
        .set_spoof = &qed_spoof_configure,
        .set_rate = &qed_set_vf_rate,
+       .set_trust = &qed_set_vf_trust,
 };
index 509c02b4772e087f3a655111f3ea8c687e375faf..0a2e3a36d2cf6341d2a8dddc10c1ba2199fca119 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_SRIOV_H
@@ -56,6 +80,14 @@ struct qed_public_vf_info {
 
        /* Currently configured Tx rate in MB/sec. 0 if unconfigured */
        int tx_rate;
+
+       /* Trusted VFs can configure promiscuous mode.
+        * Also store the shadow promisc configuration if needed.
+        */
+       bool is_trusted_configured;
+       bool is_trusted_request;
+       u8 rx_accept_mode;
+       u8 tx_accept_mode;
 };
 
 struct qed_iov_vf_init_params {
@@ -221,6 +253,7 @@ enum qed_iov_wq_flag {
        QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
        QED_IOV_WQ_STOP_WQ_FLAG,
        QED_IOV_WQ_FLR_FLAG,
+       QED_IOV_WQ_TRUST_FLAG,
 };
 
 #ifdef CONFIG_QED_SRIOV
index 60b31a8ede73f81c76e9091362b92e2b800d71f5..9667059b15bd82659c9ce568ed5154483d80f03a 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #include <linux/crc32.h>
@@ -814,6 +838,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
        if (p_params->rss_params) {
                struct qed_rss_params *rss_params = p_params->rss_params;
                struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+               int i, table_size;
 
                size = sizeof(struct vfpf_vport_update_rss_tlv);
                p_rss_tlv = qed_add_tlv(p_hwfn,
@@ -836,8 +861,15 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
                p_rss_tlv->rss_enable = rss_params->rss_enable;
                p_rss_tlv->rss_caps = rss_params->rss_caps;
                p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
-               memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
-                      sizeof(rss_params->rss_ind_table));
+
+               table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE,
+                                  1 << p_rss_tlv->rss_table_size_log);
+               for (i = 0; i < table_size; i++) {
+                       struct qed_queue_cid *p_queue;
+
+                       p_queue = rss_params->rss_ind_table[i];
+                       p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
+               }
                memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
                       sizeof(rss_params->rss_key));
        }
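
On the VF side the indirection table now holds queue-cid handles rather than raw ids, so the TLV is filled by translating each handle to its on-wire queue id, and only for the 2^rss_table_size_log entries the PF will actually read. A sketch of that bounded translation (types hypothetical):

#include <stddef.h>

struct queue_cid { unsigned short queue_id; };  /* hypothetical stand-in */

/* Fill only the entries the peer will actually consume. */
static void fill_wire_table(unsigned short *wire, size_t wire_entries,
                            struct queue_cid *const *handles,
                            unsigned int table_size_log)
{
        size_t n = (size_t)1 << table_size_log;

        if (n > wire_entries)
                n = wire_entries;
        for (size_t i = 0; i < n; i++)
                wire[i] = handles[i]->queue_id;
}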
index 11eb3854e6f293ee0de6305dc46b94e9d23b9b36..7da0b165d8bc2718d28fbbd108040cc4d948a03a 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_VF_H
index 048a230c3ce0c5bc807bc52a7484f6b53a2438b2..bc5f7c3b277de27a7f682170f26946070d45ca9d 100644 (file)
@@ -1,5 +1,5 @@
 obj-$(CONFIG_QEDE) := qede.o
 
-qede-y := qede_main.o qede_ethtool.o
+qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o qede_ptp.o
 qede-$(CONFIG_DCB) += qede_dcbnl.o
 qede-$(CONFIG_QED_RDMA) += qede_roce.o
index c79dc78746fcbf27e25530c4c94fe0e4f588eef8..f2aaef2cfb86d7a31c5fdc6f5d5940e1c9459a70 100644 (file)
@@ -1,11 +1,34 @@
 /* QLogic qede NIC Driver
-* Copyright (c) 2015 QLogic Corporation
-*
-* This software is available under the terms of the GNU General Public License
-* (GPL) Version 2, available from the file COPYING in the main directory of
-* this source tree.
-*/
-
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
 #ifndef _QEDE_H_
 #define _QEDE_H_
 #include <linux/compiler.h>
@@ -26,7 +49,7 @@
 
 #define QEDE_MAJOR_VERSION             8
 #define QEDE_MINOR_VERSION             10
-#define QEDE_REVISION_VERSION          9
+#define QEDE_REVISION_VERSION          10
 #define QEDE_ENGINEERING_VERSION       20
 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
                __stringify(QEDE_MINOR_VERSION) "."             \
@@ -114,6 +137,8 @@ struct qede_rdma_dev {
        struct workqueue_struct *roce_wq;
 };
 
+struct qede_ptp;
+
 struct qede_dev {
        struct qed_dev                  *cdev;
        struct net_device               *ndev;
@@ -125,8 +150,10 @@ struct qede_dev {
        u32 flags;
 #define QEDE_FLAG_IS_VF        BIT(0)
 #define IS_VF(edev)    (!!((edev)->flags & QEDE_FLAG_IS_VF))
+#define QEDE_TX_TIMESTAMPING_EN                BIT(1)
 
        const struct qed_eth_ops        *ops;
+       struct qede_ptp                 *ptp;
 
        struct qed_dev_eth_info dev_info;
 #define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
@@ -141,6 +168,7 @@ struct qede_dev {
        u16                             num_queues;
 #define QEDE_QUEUE_CNT(edev)   ((edev)->num_queues)
 #define QEDE_RSS_COUNT(edev)   ((edev)->num_queues - (edev)->fp_num_tx)
+#define QEDE_RX_QUEUE_IDX(edev, i)     (i)
 #define QEDE_TSS_COUNT(edev)   ((edev)->num_queues - (edev)->fp_num_rx)
 
        struct qed_int_info             int_info;
@@ -171,7 +199,10 @@ struct qede_dev {
 #define QEDE_RSS_KEY_INITED    BIT(1)
 #define QEDE_RSS_CAPS_INITED   BIT(2)
        u32 rss_params_inited; /* bit-field to track initialized rss params */
-       struct qed_update_vport_rss_params      rss_params;
+       u16 rss_ind_table[128];
+       u32 rss_key[10];
+       u8 rss_caps;
+
        u16                     q_num_rx_buffers; /* Must be a power of two */
        u16                     q_num_tx_buffers; /* Must be a power of two */
 
@@ -257,7 +288,7 @@ struct qede_rx_queue {
        u16 sw_rx_cons;
        u16 sw_rx_prod;
 
-       u16 num_rx_buffers; /* Slowpath */
+       u16 filled_buffers;
        u8 data_direction;
        u8 rxq_id;
 
@@ -270,6 +301,9 @@ struct qede_rx_queue {
        struct qed_chain rx_bd_ring;
        struct qed_chain rx_comp_ring ____cacheline_aligned;
 
+       /* Used once per NAPI run */
+       u16 num_rx_buffers;
+
        /* GRO */
        struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
 
@@ -385,9 +419,42 @@ struct qede_reload_args {
        } u;
 };
 
+/* Datapath functions definition */
+netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+netdev_features_t qede_features_check(struct sk_buff *skb,
+                                     struct net_device *dev,
+                                     netdev_features_t features);
+void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp);
+int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
+int qede_free_tx_pkt(struct qede_dev *edev,
+                    struct qede_tx_queue *txq, int *len);
+int qede_poll(struct napi_struct *napi, int budget);
+irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie);
+
+/* Filtering function definitions */
+void qede_force_mac(void *dev, u8 *mac, bool forced);
+int qede_set_mac_addr(struct net_device *ndev, void *p);
+
+int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
+int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
+void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
+int qede_configure_vlan_filters(struct qede_dev *edev);
+
+int qede_set_features(struct net_device *dev, netdev_features_t features);
+void qede_set_rx_mode(struct net_device *ndev);
+void qede_config_rx_mode(struct net_device *ndev);
+void qede_fill_rss_params(struct qede_dev *edev,
+                         struct qed_update_vport_rss_params *rss, u8 *update);
+
+void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti);
+void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti);
+
+int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp);
+
 #ifdef CONFIG_DCB
 void qede_set_dcbnl_ops(struct net_device *ndev);
 #endif
+
 void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
 void qede_set_ethtool_ops(struct net_device *netdev);
 void qede_reload(struct qede_dev *edev,
index 1c48f445c93bd2b10f261e3cf05ac90a96cf2bba..c02754ddc0e9183293e1a84a5b4ceaad068897f5 100644 (file)
@@ -1,11 +1,34 @@
 /* QLogic qede NIC Driver
-* Copyright (c) 2015 QLogic Corporation
-*
-* This software is available under the terms of the GNU General Public License
-* (GPL) Version 2, available from the file COPYING in the main directory of
-* this source tree.
-*/
-
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
 #include <linux/version.h>
 #include <linux/types.h>
 #include <linux/netdevice.h>
@@ -14,7 +37,9 @@
 #include <linux/string.h>
 #include <linux/pci.h>
 #include <linux/capability.h>
+#include <linux/vmalloc.h>
 #include "qede.h"
+#include "qede_ptp.h"
 
 #define QEDE_RQSTAT_OFFSET(stat_name) \
         (offsetof(struct qede_rx_queue, stat_name))
@@ -908,8 +933,7 @@ static int qede_set_channels(struct net_device *dev,
        /* Reset the indirection table if rx queue count is updated */
        if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
                edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
-               memset(&edev->rss_params.rss_ind_table, 0,
-                      sizeof(edev->rss_params.rss_ind_table));
+               memset(edev->rss_ind_table, 0, sizeof(edev->rss_ind_table));
        }
 
        qede_reload(edev, NULL, false);
@@ -917,6 +941,14 @@ static int qede_set_channels(struct net_device *dev,
        return 0;
 }
 
+static int qede_get_ts_info(struct net_device *dev,
+                           struct ethtool_ts_info *info)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       return qede_ptp_get_ts_info(edev, info);
+}
+
 static int qede_set_phys_id(struct net_device *dev,
                            enum ethtool_phys_id_state state)
 {
@@ -955,11 +987,11 @@ static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
                info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
        case UDP_V4_FLOW:
-               if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP)
+               if (edev->rss_caps & QED_RSS_IPV4_UDP)
                        info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
        case UDP_V6_FLOW:
-               if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP)
+               if (edev->rss_caps & QED_RSS_IPV6_UDP)
                        info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                break;
        case IPV4_FLOW:
@@ -992,8 +1024,9 @@ static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
 
 static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
 {
-       struct qed_update_vport_params vport_update_params;
+       struct qed_update_vport_params *vport_update_params;
        u8 set_caps = 0, clr_caps = 0;
+       int rc = 0;
 
        DP_VERBOSE(edev, QED_MSG_DEBUG,
                   "Set rss flags command parameters: flow type = %d, data = %llu\n",
@@ -1068,27 +1101,29 @@ static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
        }
 
        /* No action is needed if there is no change in the rss capability */
-       if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps &
-                                          ~clr_caps) | set_caps))
+       if (edev->rss_caps == ((edev->rss_caps & ~clr_caps) | set_caps))
                return 0;
 
        /* Update internal configuration */
-       edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) |
-                                   set_caps;
+       edev->rss_caps = ((edev->rss_caps & ~clr_caps) | set_caps);
        edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
 
        /* Re-configure if possible */
-       if (netif_running(edev->ndev)) {
-               memset(&vport_update_params, 0, sizeof(vport_update_params));
-               vport_update_params.update_rss_flg = 1;
-               vport_update_params.vport_id = 0;
-               memcpy(&vport_update_params.rss_params, &edev->rss_params,
-                      sizeof(vport_update_params.rss_params));
-               return edev->ops->vport_update(edev->cdev,
-                                              &vport_update_params);
+       __qede_lock(edev);
+       if (edev->state == QEDE_STATE_OPEN) {
+               vport_update_params = vzalloc(sizeof(*vport_update_params));
+               if (!vport_update_params) {
+                       __qede_unlock(edev);
+                       return -ENOMEM;
+               }
+               qede_fill_rss_params(edev, &vport_update_params->rss_params,
+                                    &vport_update_params->update_rss_flg);
+               rc = edev->ops->vport_update(edev->cdev, vport_update_params);
+               vfree(vport_update_params);
        }
+       __qede_unlock(edev);
 
-       return 0;
+       return rc;
 }
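
Two things changed shape here: the vport-update block is now heap-allocated with vzalloc() instead of living on the kernel stack (it is a large struct once the RSS parameters are carried inside it), and the running-state check moved from netif_running() to edev->state under __qede_lock(), so the update cannot race a reload. vzalloc() may sleep and returns zeroed, virtually contiguous memory; the discipline to note is that every exit path pairs it with vfree(). A generic distillation of the pattern (all names hypothetical):

#include <stdlib.h>

/* Heap-allocate a large parameter block for one locked update and
 * free it on every path; calloc()/free() stand in for vzalloc()/vfree().
 */
static int locked_update(void *dev, size_t params_size,
                         int (*apply)(void *dev, void *params),
                         void (*lock)(void *), void (*unlock)(void *))
{
        void *params;
        int rc;

        lock(dev);
        params = calloc(1, params_size);
        if (!params) {
                unlock(dev);
                return -1;      /* -ENOMEM analogue */
        }
        rc = apply(dev, params);
        free(params);
        unlock(dev);
        return rc;
}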
 
 static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
@@ -1113,7 +1148,7 @@ static u32 qede_get_rxfh_key_size(struct net_device *dev)
 {
        struct qede_dev *edev = netdev_priv(dev);
 
-       return sizeof(edev->rss_params.rss_key);
+       return sizeof(edev->rss_key);
 }
 
 static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
@@ -1128,11 +1163,10 @@ static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
                return 0;
 
        for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
-               indir[i] = edev->rss_params.rss_ind_table[i];
+               indir[i] = edev->rss_ind_table[i];
 
        if (key)
-               memcpy(key, edev->rss_params.rss_key,
-                      qede_get_rxfh_key_size(dev));
+               memcpy(key, edev->rss_key, qede_get_rxfh_key_size(dev));
 
        return 0;
 }
@@ -1140,9 +1174,9 @@ static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
 static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
                         const u8 *key, const u8 hfunc)
 {
-       struct qed_update_vport_params vport_update_params;
+       struct qed_update_vport_params *vport_update_params;
        struct qede_dev *edev = netdev_priv(dev);
-       int i;
+       int i, rc = 0;
 
        if (edev->dev_info.common.num_hwfns > 1) {
                DP_INFO(edev,
@@ -1158,27 +1192,30 @@ static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
 
        if (indir) {
                for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
-                       edev->rss_params.rss_ind_table[i] = indir[i];
+                       edev->rss_ind_table[i] = indir[i];
                edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
        }
 
        if (key) {
-               memcpy(&edev->rss_params.rss_key, key,
-                      qede_get_rxfh_key_size(dev));
+               memcpy(&edev->rss_key, key, qede_get_rxfh_key_size(dev));
                edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
        }
 
-       if (netif_running(edev->ndev)) {
-               memset(&vport_update_params, 0, sizeof(vport_update_params));
-               vport_update_params.update_rss_flg = 1;
-               vport_update_params.vport_id = 0;
-               memcpy(&vport_update_params.rss_params, &edev->rss_params,
-                      sizeof(vport_update_params.rss_params));
-               return edev->ops->vport_update(edev->cdev,
-                                              &vport_update_params);
+       __qede_lock(edev);
+       if (edev->state == QEDE_STATE_OPEN) {
+               vport_update_params = vzalloc(sizeof(*vport_update_params));
+               if (!vport_update_params) {
+                       __qede_unlock(edev);
+                       return -ENOMEM;
+               }
+               qede_fill_rss_params(edev, &vport_update_params->rss_params,
+                                    &vport_update_params->update_rss_flg);
+               rc = edev->ops->vport_update(edev->cdev, vport_update_params);
+               vfree(vport_update_params);
        }
+       __qede_unlock(edev);
 
-       return 0;
+       return rc;
 }
 
 /* This function enables the interrupt generation and the NAPI on the device */
@@ -1558,6 +1595,7 @@ static const struct ethtool_ops qede_ethtool_ops = {
        .get_rxfh_key_size = qede_get_rxfh_key_size,
        .get_rxfh = qede_get_rxfh,
        .set_rxfh = qede_set_rxfh,
+       .get_ts_info = qede_get_ts_info,
        .get_channels = qede_get_channels,
        .set_channels = qede_set_channels,
        .self_test = qede_self_test,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
new file mode 100644 (file)
index 0000000..107c3fd
--- /dev/null
@@ -0,0 +1,759 @@
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <net/udp_tunnel.h>
+#include <linux/bitops.h>
+#include <linux/vmalloc.h>
+
+#include <linux/qed/qed_if.h>
+#include "qede.h"
+
+void qede_force_mac(void *dev, u8 *mac, bool forced)
+{
+       struct qede_dev *edev = dev;
+
+       /* MAC hints take effect only if we haven't set one already */
+       if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
+               return;
+
+       ether_addr_copy(edev->ndev->dev_addr, mac);
+       ether_addr_copy(edev->primary_mac, mac);
+}
+
+void qede_fill_rss_params(struct qede_dev *edev,
+                         struct qed_update_vport_rss_params *rss, u8 *update)
+{
+       bool need_reset = false;
+       int i;
+
+       if (QEDE_RSS_COUNT(edev) <= 1) {
+               memset(rss, 0, sizeof(*rss));
+               *update = 0;
+               return;
+       }
+
+       /* Need to validate current RSS config uses valid entries */
+       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+               if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
+                       need_reset = true;
+                       break;
+               }
+       }
+
+       if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
+               for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+                       u16 indir_val, val;
+
+                       val = QEDE_RSS_COUNT(edev);
+                       indir_val = ethtool_rxfh_indir_default(i, val);
+                       edev->rss_ind_table[i] = indir_val;
+               }
+               edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+       }
+
+       /* Now that we have the queue-indirection, prepare the handles */
+       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+               u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);
+
+               rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
+       }
+
+       if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
+               netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
+               edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+       }
+       memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));
+
+       if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
+               edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
+                   QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+               edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+       }
+       rss->rss_caps = edev->rss_caps;
+
+       *update = 1;
+}
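
When no indirection table was ever configured, or the stored one references queues that no longer exist after a channel change, qede_fill_rss_params() falls back to ethtool's default spread. To the best of my knowledge the kernel helper ethtool_rxfh_indir_default() is a plain modulo over the RX ring count; a user-space sketch:

/* Default RSS spread: entries cycle round-robin over the RX rings. */
static unsigned int rxfh_indir_default(unsigned int index,
                                       unsigned int n_rx_rings)
{
        return index % n_rx_rings;
}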
+
+static int qede_set_ucast_rx_mac(struct qede_dev *edev,
+                                enum qed_filter_xcast_params_type opcode,
+                                unsigned char mac[ETH_ALEN])
+{
+       struct qed_filter_params filter_cmd;
+
+       memset(&filter_cmd, 0, sizeof(filter_cmd));
+       filter_cmd.type = QED_FILTER_TYPE_UCAST;
+       filter_cmd.filter.ucast.type = opcode;
+       filter_cmd.filter.ucast.mac_valid = 1;
+       ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
+
+       return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
+                                 enum qed_filter_xcast_params_type opcode,
+                                 u16 vid)
+{
+       struct qed_filter_params filter_cmd;
+
+       memset(&filter_cmd, 0, sizeof(filter_cmd));
+       filter_cmd.type = QED_FILTER_TYPE_UCAST;
+       filter_cmd.filter.ucast.type = opcode;
+       filter_cmd.filter.ucast.vlan_valid = 1;
+       filter_cmd.filter.ucast.vlan = vid;
+
+       return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
+{
+       struct qed_update_vport_params *params;
+       int rc;
+
+       /* Proceed only if action actually needs to be performed */
+       if (edev->accept_any_vlan == action)
+               return 0;
+
+       params = vzalloc(sizeof(*params));
+       if (!params)
+               return -ENOMEM;
+
+       params->vport_id = 0;
+       params->accept_any_vlan = action;
+       params->update_accept_any_vlan_flg = 1;
+
+       rc = edev->ops->vport_update(edev->cdev, params);
+       if (rc) {
+               DP_ERR(edev, "Failed to %s accept-any-vlan\n",
+                      action ? "enable" : "disable");
+       } else {
+               DP_INFO(edev, "%s accept-any-vlan\n",
+                       action ? "enabled" : "disabled");
+               edev->accept_any_vlan = action;
+       }
+
+       vfree(params);
+       return 0;
+}
+
+int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qede_vlan *vlan, *tmp;
+       int rc = 0;
+
+       DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
+
+       vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+       if (!vlan) {
+               DP_INFO(edev, "Failed to allocate struct for vlan\n");
+               return -ENOMEM;
+       }
+       INIT_LIST_HEAD(&vlan->list);
+       vlan->vid = vid;
+       vlan->configured = false;
+
+       /* Verify vlan isn't already configured */
+       list_for_each_entry(tmp, &edev->vlan_list, list) {
+               if (tmp->vid == vlan->vid) {
+                       DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+                                  "vlan already configured\n");
+                       kfree(vlan);
+                       return -EEXIST;
+               }
+       }
+
+       /* If interface is down, cache this VLAN ID and return */
+       __qede_lock(edev);
+       if (edev->state != QEDE_STATE_OPEN) {
+               DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+                          "Interface is down, VLAN %d will be configured when interface is up\n",
+                          vid);
+               if (vid != 0)
+                       edev->non_configured_vlans++;
+               list_add(&vlan->list, &edev->vlan_list);
+               goto out;
+       }
+
+       /* Check for the filter limit.
+        * Note - vlan0 has a reserved filter and can be added without
+        * worrying about quota.
+        */
+       if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
+           (vlan->vid == 0)) {
+               rc = qede_set_ucast_rx_vlan(edev,
+                                           QED_FILTER_XCAST_TYPE_ADD,
+                                           vlan->vid);
+               if (rc) {
+                       DP_ERR(edev, "Failed to configure VLAN %d\n",
+                              vlan->vid);
+                       kfree(vlan);
+                       goto out;
+               }
+               vlan->configured = true;
+
+               /* The vlan0 filter doesn't consume from our quota */
+               if (vlan->vid != 0)
+                       edev->configured_vlans++;
+       } else {
+               /* Out of quota; Activate accept-any-VLAN mode */
+               if (!edev->non_configured_vlans) {
+                       rc = qede_config_accept_any_vlan(edev, true);
+                       if (rc) {
+                               kfree(vlan);
+                               goto out;
+                       }
+               }
+
+               edev->non_configured_vlans++;
+       }
+
+       list_add(&vlan->list, &edev->vlan_list);
+
+out:
+       __qede_unlock(edev);
+       return rc;
+}
+
+static void qede_del_vlan_from_list(struct qede_dev *edev,
+                                   struct qede_vlan *vlan)
+{
+       /* The vlan0 filter doesn't consume from our quota */
+       if (vlan->vid != 0) {
+               if (vlan->configured)
+                       edev->configured_vlans--;
+               else
+                       edev->non_configured_vlans--;
+       }
+
+       list_del(&vlan->list);
+       kfree(vlan);
+}
+
+int qede_configure_vlan_filters(struct qede_dev *edev)
+{
+       int rc = 0, real_rc = 0, accept_any_vlan = 0;
+       struct qed_dev_eth_info *dev_info;
+       struct qede_vlan *vlan = NULL;
+
+       if (list_empty(&edev->vlan_list))
+               return 0;
+
+       dev_info = &edev->dev_info;
+
+       /* Configure non-configured vlans */
+       list_for_each_entry(vlan, &edev->vlan_list, list) {
+               if (vlan->configured)
+                       continue;
+
+               /* We have used all our credits, now enable accept_any_vlan */
+               if ((vlan->vid != 0) &&
+                   (edev->configured_vlans == dev_info->num_vlan_filters)) {
+                       accept_any_vlan = 1;
+                       continue;
+               }
+
+               DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
+
+               rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
+                                           vlan->vid);
+               if (rc) {
+                       DP_ERR(edev, "Failed to configure VLAN %u\n",
+                              vlan->vid);
+                       real_rc = rc;
+                       continue;
+               }
+
+               vlan->configured = true;
+               /* The vlan0 filter doesn't consume from our quota */
+               if (vlan->vid != 0) {
+                       edev->non_configured_vlans--;
+                       edev->configured_vlans++;
+               }
+       }
+
+       /* enable accept_any_vlan mode if we have more VLANs than credits,
+        * or remove accept_any_vlan mode if we've actually removed
+        * a non-configured vlan, and all remaining vlans are truly configured.
+        */
+
+       if (accept_any_vlan)
+               rc = qede_config_accept_any_vlan(edev, true);
+       else if (!edev->non_configured_vlans)
+               rc = qede_config_accept_any_vlan(edev, false);
+
+       if (rc && !real_rc)
+               real_rc = rc;
+
+       return real_rc;
+}
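
All of the vlan paths above share one quota rule: vid 0 always has a reserved hardware filter, any other vid consumes a filter credit, and once credits run out the device falls back to accept-any-vlan mode. A minimal sketch of the admission check (names hypothetical):

#include <stdbool.h>

/* vid 0 is always admitted; other vids need a free hardware credit. */
static bool vlan_fits_in_hw(unsigned short vid,
                            unsigned int configured_vlans,
                            unsigned int num_vlan_filters)
{
        return vid == 0 || configured_vlans < num_vlan_filters;
}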
+
+int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qede_vlan *vlan = NULL;
+       int rc = 0;
+
+       DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
+
+       /* Find whether entry exists */
+       __qede_lock(edev);
+       list_for_each_entry(vlan, &edev->vlan_list, list)
+               if (vlan->vid == vid)
+                       break;
+
+       if (!vlan || (vlan->vid != vid)) {
+               DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+                          "Vlan isn't configured\n");
+               goto out;
+       }
+
+       if (edev->state != QEDE_STATE_OPEN) {
+               /* As the interface is already down, there is no vport
+                * instance from which to remove the vlan filter; just
+                * update the vlan list.
+                */
+               DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+                          "Interface is down, removing VLAN from list only\n");
+               qede_del_vlan_from_list(edev, vlan);
+               goto out;
+       }
+
+       /* Remove vlan */
+       if (vlan->configured) {
+               rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
+                                           vid);
+               if (rc) {
+                       DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
+                       goto out;
+               }
+       }
+
+       qede_del_vlan_from_list(edev, vlan);
+
+       /* We have removed a VLAN; see if we can now configure a
+        * non-configured VLAN from the list.
+        */
+       rc = qede_configure_vlan_filters(edev);
+
+out:
+       __qede_unlock(edev);
+       return rc;
+}
+
+void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
+{
+       struct qede_vlan *vlan = NULL;
+
+       if (list_empty(&edev->vlan_list))
+               return;
+
+       list_for_each_entry(vlan, &edev->vlan_list, list) {
+               if (!vlan->configured)
+                       continue;
+
+               vlan->configured = false;
+
+               /* The vlan0 filter doesn't consume from our quota */
+               if (vlan->vid != 0) {
+                       edev->non_configured_vlans++;
+                       edev->configured_vlans--;
+               }
+
+               DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+                          "marked vlan %d as non-configured\n", vlan->vid);
+       }
+
+       edev->accept_any_vlan = false;
+}
+
+static void qede_set_features_reload(struct qede_dev *edev,
+                                    struct qede_reload_args *args)
+{
+       edev->ndev->features = args->u.features;
+}
+
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       netdev_features_t changes = features ^ dev->features;
+       bool need_reload = false;
+
+       /* No action needed if hardware GRO is disabled during driver load */
+       if (changes & NETIF_F_GRO) {
+               if (dev->features & NETIF_F_GRO)
+                       need_reload = !edev->gro_disable;
+               else
+                       need_reload = edev->gro_disable;
+       }
+
+       if (need_reload) {
+               struct qede_reload_args args;
+
+               args.u.features = features;
+               args.func = &qede_set_features_reload;
+
+               /* Make sure that we definitely need to reload.
+                * In case of an eBPF attached program, there will be no FW
+                * aggregations, so no need to actually reload.
+                */
+               __qede_lock(edev);
+               if (edev->xdp_prog)
+                       args.func(edev, &args);
+               else
+                       qede_reload(edev, &args, true);
+               __qede_unlock(edev);
+
+               return 1;
+       }
+
+       return 0;
+}
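+
+/* [Editorial aside - illustrative sketch only, not part of this commit.]
+ * XOR-ing the requested feature word against the current one leaves
+ * exactly the changed bits set, which is how the "changes" mask above
+ * detects a GRO flip. Standalone C illustration with made-up flags:
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+#define F_GRO  (1u << 0)
+#define F_RXCS (1u << 1)
+
+int main(void)
+{
+       uint32_t cur = F_GRO | F_RXCS;  /* currently enabled features */
+       uint32_t req = F_RXCS;          /* user request: GRO off */
+       uint32_t changes = cur ^ req;   /* only changed bits survive */
+
+       if (changes & F_GRO)
+               printf("GRO changed; now %s\n", (req & F_GRO) ? "on" : "off");
+       return 0;
+}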
+
+void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u16 t_port = ntohs(ti->port);
+
+       switch (ti->type) {
+       case UDP_TUNNEL_TYPE_VXLAN:
+               if (edev->vxlan_dst_port)
+                       return;
+
+               edev->vxlan_dst_port = t_port;
+
+               DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
+                          t_port);
+
+               set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+               break;
+       case UDP_TUNNEL_TYPE_GENEVE:
+               if (edev->geneve_dst_port)
+                       return;
+
+               edev->geneve_dst_port = t_port;
+
+               DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
+                          t_port);
+               set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+               break;
+       default:
+               return;
+       }
+
+       schedule_delayed_work(&edev->sp_task, 0);
+}
+
+void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       u16 t_port = ntohs(ti->port);
+
+       switch (ti->type) {
+       case UDP_TUNNEL_TYPE_VXLAN:
+               if (t_port != edev->vxlan_dst_port)
+                       return;
+
+               edev->vxlan_dst_port = 0;
+
+               DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
+                          t_port);
+
+               set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+               break;
+       case UDP_TUNNEL_TYPE_GENEVE:
+               if (t_port != edev->geneve_dst_port)
+                       return;
+
+               edev->geneve_dst_port = 0;
+
+               DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
+                          t_port);
+               set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+               break;
+       default:
+               return;
+       }
+
+       schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_xdp_reload_func(struct qede_dev *edev,
+                                struct qede_reload_args *args)
+{
+       struct bpf_prog *old;
+
+       old = xchg(&edev->xdp_prog, args->u.new_prog);
+       if (old)
+               bpf_prog_put(old);
+}
+
+static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
+{
+       struct qede_reload_args args;
+
+       if (prog && prog->xdp_adjust_head) {
+               DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n");
+               return -EOPNOTSUPP;
+       }
+
+       /* If we're called, there was already a bpf reference increment */
+       args.func = &qede_xdp_reload_func;
+       args.u.new_prog = prog;
+       qede_reload(edev, &args, false);
+
+       return 0;
+}
+
+int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+
+       switch (xdp->command) {
+       case XDP_SETUP_PROG:
+               return qede_xdp_set(edev, xdp->prog);
+       case XDP_QUERY_PROG:
+               xdp->prog_attached = !!edev->xdp_prog;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int qede_set_mcast_rx_mac(struct qede_dev *edev,
+                                enum qed_filter_xcast_params_type opcode,
+                                unsigned char *mac, int num_macs)
+{
+       struct qed_filter_params filter_cmd;
+       int i;
+
+       memset(&filter_cmd, 0, sizeof(filter_cmd));
+       filter_cmd.type = QED_FILTER_TYPE_MCAST;
+       filter_cmd.filter.mcast.type = opcode;
+       filter_cmd.filter.mcast.num = num_macs;
+
+       for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
+               ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
+
+       return edev->ops->filter_config(edev->cdev, &filter_cmd);
+}
+
+int qede_set_mac_addr(struct net_device *ndev, void *p)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       struct sockaddr *addr = p;
+       int rc;
+
+       ASSERT_RTNL(); /* @@@TBD To be removed */
+
+       DP_INFO(edev, "Set_mac_addr called\n");
+
+       if (!is_valid_ether_addr(addr->sa_data)) {
+               DP_NOTICE(edev, "The MAC address is not valid\n");
+               return -EFAULT;
+       }
+
+       if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
+               DP_NOTICE(edev, "qed prevents setting MAC\n");
+               return -EINVAL;
+       }
+
+       ether_addr_copy(ndev->dev_addr, addr->sa_data);
+
+       if (!netif_running(ndev))  {
+               DP_NOTICE(edev, "The device is currently down\n");
+               return 0;
+       }
+
+       /* Remove the previous primary mac */
+       rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+                                  edev->primary_mac);
+       if (rc)
+               return rc;
+
+       edev->ops->common->update_mac(edev->cdev, addr->sa_data);
+
+       /* Add MAC filter according to the new unicast HW MAC address */
+       ether_addr_copy(edev->primary_mac, ndev->dev_addr);
+       return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+                                     edev->primary_mac);
+}
+
+static int
+qede_configure_mcast_filtering(struct net_device *ndev,
+                              enum qed_filter_rx_mode_type *accept_flags)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       unsigned char *mc_macs, *temp;
+       struct netdev_hw_addr *ha;
+       int rc = 0, mc_count;
+       size_t size;
+
+       size = 64 * ETH_ALEN;
+
+       mc_macs = kzalloc(size, GFP_KERNEL);
+       if (!mc_macs) {
+               DP_NOTICE(edev,
+                         "Failed to allocate memory for multicast MACs\n");
+               rc = -ENOMEM;
+               goto exit;
+       }
+
+       temp = mc_macs;
+
+       /* Remove all previously configured MAC filters */
+       rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+                                  mc_macs, 1);
+       if (rc)
+               goto exit;
+
+       netif_addr_lock_bh(ndev);
+
+       mc_count = netdev_mc_count(ndev);
+       if (mc_count < 64) {
+               netdev_for_each_mc_addr(ha, ndev) {
+                       ether_addr_copy(temp, ha->addr);
+                       temp += ETH_ALEN;
+               }
+       }
+
+       netif_addr_unlock_bh(ndev);
+
+       /* Check for all multicast @@@TBD resource allocation */
+       if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
+               if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
+                       *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+       } else {
+               /* Add all multicast MAC filters */
+               rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+                                          mc_macs, mc_count);
+       }
+
+exit:
+       kfree(mc_macs);
+       return rc;
+}
+
+void qede_set_rx_mode(struct net_device *ndev)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+
+       set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
+       schedule_delayed_work(&edev->sp_task, 0);
+}
+
+/* Must be called with qede_lock held */
+void qede_config_rx_mode(struct net_device *ndev)
+{
+       enum qed_filter_rx_mode_type accept_flags;
+       struct qede_dev *edev = netdev_priv(ndev);
+       struct qed_filter_params rx_mode;
+       unsigned char *uc_macs, *temp;
+       struct netdev_hw_addr *ha;
+       int rc, uc_count;
+       size_t size;
+
+       netif_addr_lock_bh(ndev);
+
+       uc_count = netdev_uc_count(ndev);
+       size = uc_count * ETH_ALEN;
+
+       uc_macs = kzalloc(size, GFP_ATOMIC);
+       if (!uc_macs) {
+               DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
+               netif_addr_unlock_bh(ndev);
+               return;
+       }
+
+       temp = uc_macs;
+       netdev_for_each_uc_addr(ha, ndev) {
+               ether_addr_copy(temp, ha->addr);
+               temp += ETH_ALEN;
+       }
+
+       netif_addr_unlock_bh(ndev);
+
+       /* Configure the struct for the Rx mode */
+       memset(&rx_mode, 0, sizeof(struct qed_filter_params));
+       rx_mode.type = QED_FILTER_TYPE_RX_MODE;
+
+       /* Remove all previously configured unicast secondary macs and
+        * multicast macs (configure / leave the primary mac).
+        */
+       rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
+                                  edev->primary_mac);
+       if (rc)
+               goto out;
+
+       /* Check for promiscuous */
+       if (ndev->flags & IFF_PROMISC)
+               accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+       else
+               accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
+
+       /* Configure all filters regardless, in case promisc is rejected */
+       if (uc_count < edev->dev_info.num_mac_filters) {
+               int i;
+
+               temp = uc_macs;
+               for (i = 0; i < uc_count; i++) {
+                       rc = qede_set_ucast_rx_mac(edev,
+                                                  QED_FILTER_XCAST_TYPE_ADD,
+                                                  temp);
+                       if (rc)
+                               goto out;
+
+                       temp += ETH_ALEN;
+               }
+       } else {
+               accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+       }
+
+       rc = qede_configure_mcast_filtering(ndev, &accept_flags);
+       if (rc)
+               goto out;
+
+       /* take care of VLAN mode */
+       if (ndev->flags & IFF_PROMISC) {
+               qede_config_accept_any_vlan(edev, true);
+       } else if (!edev->non_configured_vlans) {
+               /* It's possible that accept_any_vlan mode is set due to a
+                * previous setting of IFF_PROMISC. If vlan credits are
+                * sufficient, disable accept_any_vlan.
+                */
+               qede_config_accept_any_vlan(edev, false);
+       }
+
+       rx_mode.filter.accept_flags = accept_flags;
+       edev->ops->filter_config(edev->cdev, &rx_mode);
+out:
+       kfree(uc_macs);
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
new file mode 100644 (file)
index 0000000..1e65038
--- /dev/null
@@ -0,0 +1,1700 @@
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bpf_trace.h>
+#include <net/udp_tunnel.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/ip6_checksum.h>
+#include "qede_ptp.h"
+
+#include <linux/qed/qed_if.h>
+#include "qede.h"
+/*********************************
+ * Content also used by slowpath *
+ *********************************/
+
+int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
+{
+       struct sw_rx_data *sw_rx_data;
+       struct eth_rx_bd *rx_bd;
+       dma_addr_t mapping;
+       struct page *data;
+
+       /* In case lazy-allocation is allowed, postpone allocation until the
+        * end of the NAPI run. We'd still need to make sure the Rx ring has
+        * sufficient buffers to guarantee an additional Rx interrupt.
+        */
+       if (allow_lazy && likely(rxq->filled_buffers > 12)) {
+               rxq->filled_buffers--;
+               return 0;
+       }
+
+       data = alloc_pages(GFP_ATOMIC, 0);
+       if (unlikely(!data))
+               return -ENOMEM;
+
+       /* Map the entire page, as it will be used for multiple
+        * Rx buffer segments.
+        */
+       mapping = dma_map_page(rxq->dev, data, 0,
+                              PAGE_SIZE, rxq->data_direction);
+       if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
+               __free_page(data);
+               return -ENOMEM;
+       }
+
+       sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+       sw_rx_data->page_offset = 0;
+       sw_rx_data->data = data;
+       sw_rx_data->mapping = mapping;
+
+       /* Advance PROD and get BD pointer */
+       rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
+       WARN_ON(!rx_bd);
+       rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+       rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+
+       rxq->sw_rx_prod++;
+       rxq->filled_buffers++;
+
+       return 0;
+}
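+
+/* [Editorial aside - illustrative sketch only, not part of this commit.]
+ * The BD above stores the 64-bit DMA address as two little-endian
+ * 32-bit words; upper_32_bits()/lower_32_bits() just shift and
+ * truncate. A standalone C sketch with local stand-in helpers:
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }
+static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
+
+int main(void)
+{
+       uint64_t mapping = 0x0000001234abcd00ULL;       /* fake DMA address */
+       uint32_t hi = upper_32(mapping);
+       uint32_t lo = lower_32(mapping);
+
+       /* Recombining the two halves must give the original address. */
+       printf("hi=0x%08x lo=0x%08x ok=%d\n", hi, lo,
+              (((uint64_t)hi << 32) | lo) == mapping);
+       return 0;
+}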
+
+/* Unmap the data and free skb */
+int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
+{
+       u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
+       struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
+       struct eth_tx_1st_bd *first_bd;
+       struct eth_tx_bd *tx_data_bd;
+       int bds_consumed = 0;
+       int nbds;
+       bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
+       int i, split_bd_len = 0;
+
+       if (unlikely(!skb)) {
+               DP_ERR(edev,
+                      "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
+                      idx, txq->sw_tx_cons, txq->sw_tx_prod);
+               return -1;
+       }
+
+       *len = skb->len;
+
+       first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+       bds_consumed++;
+
+       nbds = first_bd->data.nbds;
+
+       if (data_split) {
+               struct eth_tx_bd *split = (struct eth_tx_bd *)
+                       qed_chain_consume(&txq->tx_pbl);
+               split_bd_len = BD_UNMAP_LEN(split);
+               bds_consumed++;
+       }
+       dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+                        BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+       /* Unmap the data of the skb frags */
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
+               tx_data_bd = (struct eth_tx_bd *)
+                       qed_chain_consume(&txq->tx_pbl);
+               dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+                              BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+       }
+
+       while (bds_consumed++ < nbds)
+               qed_chain_consume(&txq->tx_pbl);
+
+       /* Free skb */
+       dev_kfree_skb_any(skb);
+       txq->sw_tx_ring.skbs[idx].skb = NULL;
+       txq->sw_tx_ring.skbs[idx].flags = 0;
+
+       return 0;
+}
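+
+/* [Editorial aside - illustrative sketch only, not part of this commit.]
+ * sw_tx_prod/sw_tx_cons are free-running u16 counters; masking with a
+ * power-of-two-minus-one value (the role NUM_TX_BDS_MAX plays above)
+ * turns them into ring indices, and u16 subtraction still yields the
+ * fill level across wraparound. Standalone C sketch:
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+#define RING_SIZE 8u                    /* must be a power of two */
+#define RING_MASK (RING_SIZE - 1u)
+
+int main(void)
+{
+       uint16_t prod = 2, cons = 65533; /* producer already wrapped */
+       unsigned int used = (uint16_t)(prod - cons); /* 5, wrap-safe */
+
+       printf("prod idx=%u cons idx=%u used=%u\n",
+              prod & RING_MASK, cons & RING_MASK, used);
+       return 0;
+}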
+
+/* Unmap the data and free skb when mapping failed during start_xmit */
+static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
+                                   struct eth_tx_1st_bd *first_bd,
+                                   int nbd, bool data_split)
+{
+       u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+       struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
+       struct eth_tx_bd *tx_data_bd;
+       int i, split_bd_len = 0;
+
+       /* Return prod to its position before this skb was handled */
+       qed_chain_set_prod(&txq->tx_pbl,
+                          le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
+
+       first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+       if (data_split) {
+               struct eth_tx_bd *split = (struct eth_tx_bd *)
+                                         qed_chain_produce(&txq->tx_pbl);
+               split_bd_len = BD_UNMAP_LEN(split);
+               nbd--;
+       }
+
+       dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
+                        BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+       /* Unmap the data of the skb frags */
+       for (i = 0; i < nbd; i++) {
+               tx_data_bd = (struct eth_tx_bd *)
+                       qed_chain_produce(&txq->tx_pbl);
+               if (tx_data_bd->nbytes)
+                       dma_unmap_page(txq->dev,
+                                      BD_UNMAP_ADDR(tx_data_bd),
+                                      BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+       }
+
+       /* Return again prod to its position before this skb was handled */
+       qed_chain_set_prod(&txq->tx_pbl,
+                          le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
+
+       /* Free skb */
+       dev_kfree_skb_any(skb);
+       txq->sw_tx_ring.skbs[idx].skb = NULL;
+       txq->sw_tx_ring.skbs[idx].flags = 0;
+}
+
+static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
+{
+       u32 rc = XMIT_L4_CSUM;
+       __be16 l3_proto;
+
+       if (skb->ip_summed != CHECKSUM_PARTIAL)
+               return XMIT_PLAIN;
+
+       l3_proto = vlan_get_protocol(skb);
+       if (l3_proto == htons(ETH_P_IPV6) &&
+           (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+               *ipv6_ext = 1;
+
+       if (skb->encapsulation) {
+               rc |= XMIT_ENC;
+               if (skb_is_gso(skb)) {
+                       unsigned short gso_type = skb_shinfo(skb)->gso_type;
+
+                       if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
+                           (gso_type & SKB_GSO_GRE_CSUM))
+                               rc |= XMIT_ENC_GSO_L4_CSUM;
+
+                       rc |= XMIT_LSO;
+                       return rc;
+               }
+       }
+
+       if (skb_is_gso(skb))
+               rc |= XMIT_LSO;
+
+       return rc;
+}
+
+static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
+                                        struct eth_tx_2nd_bd *second_bd,
+                                        struct eth_tx_3rd_bd *third_bd)
+{
+       u8 l4_proto;
+       u16 bd2_bits1 = 0, bd2_bits2 = 0;
+
+       bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
+
+       bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+                    ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
+                   << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
+
+       bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+                     ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
+
+       if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
+               l4_proto = ipv6_hdr(skb)->nexthdr;
+       else
+               l4_proto = ip_hdr(skb)->protocol;
+
+       if (l4_proto == IPPROTO_UDP)
+               bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+
+       if (third_bd)
+               third_bd->data.bitfields |=
+                       cpu_to_le16(((tcp_hdrlen(skb) / 4) &
+                               ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
+                               ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
+
+       second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
+       second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
+}
+
+static int map_frag_to_bd(struct qede_tx_queue *txq,
+                         skb_frag_t *frag, struct eth_tx_bd *bd)
+{
+       dma_addr_t mapping;
+
+       /* Map skb non-linear frag data for DMA */
+       mapping = skb_frag_dma_map(txq->dev, frag, 0,
+                                  skb_frag_size(frag), DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(txq->dev, mapping)))
+               return -ENOMEM;
+
+       /* Setup the data pointer of the frag data */
+       BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
+
+       return 0;
+}
+
+static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
+{
+       if (is_encap_pkt)
+               return (skb_inner_transport_header(skb) +
+                       inner_tcp_hdrlen(skb) - skb->data);
+       else
+               return (skb_transport_header(skb) +
+                       tcp_hdrlen(skb) - skb->data);
+}
+
+/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
+#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
+static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
+{
+       int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
+
+       if (xmit_type & XMIT_LSO) {
+               int hlen;
+
+               hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
+
+               /* linear payload would require its own BD */
+               if (skb_headlen(skb) > hlen)
+                       allowed_frags--;
+       }
+
+       return (skb_shinfo(skb)->nr_frags > allowed_frags);
+}
+#endif
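+
+/* [Editorial aside - illustrative sketch only, not part of this commit.]
+ * For LSO the first BD carries only the protocol headers, so any
+ * linear bytes beyond the headers consume an extra BD; that is why
+ * qede_pkt_req_lin() debits allowed_frags. Standalone C sketch with
+ * assumed sizes:
+ */
+#include <stdio.h>
+
+int main(void)
+{
+       unsigned int max_bds = 18;              /* assumed per-pkt BD limit */
+       unsigned int allowed_frags = max_bds - 1;
+       unsigned int headlen = 200, hlen = 66;  /* linear bytes vs. headers */
+
+       if (headlen > hlen)             /* payload lives in the linear area */
+               allowed_frags--;        /* ...and needs its own BD */
+
+       printf("packet may carry up to %u frags\n", allowed_frags);
+       return 0;
+}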
+
+static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
+{
+       /* wmb makes sure that the BDs data is updated before updating the
+        * producer, otherwise FW may read old data from the BDs.
+        */
+       wmb();
+       barrier();
+       writel(txq->tx_db.raw, txq->doorbell_addr);
+
+       /* mmiowb is needed to synchronize doorbell writes from more than one
+        * processor. It guarantees that the write arrives to the device before
+        * the queue lock is released and another start_xmit is called (possibly
+        * on another CPU). Without this barrier, the next doorbell can bypass
+        * this doorbell. This is applicable to IA64/Altix systems.
+        */
+       mmiowb();
+}
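+
+/* [Editorial aside - illustrative sketch only, not part of this commit.]
+ * A C11-atomics analogy for the wmb()-before-doorbell rule above: a
+ * release fence keeps the descriptor stores from being reordered past
+ * the producer publish. Names below are hypothetical:
+ */
+#include <stdatomic.h>
+#include <stdint.h>
+
+static uint64_t bd_ring[256];
+static _Atomic uint16_t prod_idx;
+
+static void publish_bd(uint16_t idx, uint64_t bd)
+{
+       bd_ring[idx & 255] = bd;        /* fill the descriptor first */
+       atomic_thread_fence(memory_order_release);      /* like wmb() */
+       atomic_store_explicit(&prod_idx, (uint16_t)(idx + 1),
+                             memory_order_relaxed);    /* the "doorbell" */
+}
+
+int main(void)
+{
+       publish_bd(0, 0xabcdULL);
+       return 0;
+}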
+
+static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
+                        struct sw_rx_data *metadata, u16 padding, u16 length)
+{
+       struct qede_tx_queue *txq = fp->xdp_tx;
+       u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+       struct eth_tx_1st_bd *first_bd;
+
+       if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
+               txq->stopped_cnt++;
+               return -ENOMEM;
+       }
+
+       first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+       memset(first_bd, 0, sizeof(*first_bd));
+       first_bd->data.bd_flags.bitfields =
+           BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
+       first_bd->data.bitfields |=
+           (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+           ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+       first_bd->data.nbds = 1;
+
+       /* We can safely ignore the offset, as it's 0 for XDP */
+       BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
+
+       /* Synchronize the buffer back to the device, as the program has
+        * [probably] changed it.
+        */
+       dma_sync_single_for_device(&edev->pdev->dev,
+                                  metadata->mapping + padding,
+                                  length, PCI_DMA_TODEVICE);
+
+       txq->sw_tx_ring.pages[idx] = metadata->data;
+       txq->sw_tx_prod++;
+
+       /* Mark the fastpath for future XDP doorbell */
+       fp->xdp_xmit = 1;
+
+       return 0;
+}
+
+int qede_txq_has_work(struct qede_tx_queue *txq)
+{
+       u16 hw_bd_cons;
+
+       /* Tell compiler that consumer and producer can change */
+       barrier();
+       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+       if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
+               return 0;
+
+       return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
+}
+
+static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+       struct eth_tx_1st_bd *bd;
+       u16 hw_bd_cons;
+
+       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+       barrier();
+
+       while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+               bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+               dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
+                                PAGE_SIZE, DMA_BIDIRECTIONAL);
+               __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons &
+                                                 NUM_TX_BDS_MAX]);
+
+               txq->sw_tx_cons++;
+               txq->xmit_pkts++;
+       }
+}
+
+static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+       struct netdev_queue *netdev_txq;
+       u16 hw_bd_cons;
+       unsigned int pkts_compl = 0, bytes_compl = 0;
+       int rc;
+
+       netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+
+       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+       barrier();
+
+       while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+               int len = 0;
+
+               rc = qede_free_tx_pkt(edev, txq, &len);
+               if (rc) {
+                       DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
+                                 hw_bd_cons,
+                                 qed_chain_get_cons_idx(&txq->tx_pbl));
+                       break;
+               }
+
+               bytes_compl += len;
+               pkts_compl++;
+               txq->sw_tx_cons++;
+               txq->xmit_pkts++;
+       }
+
+       netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
+
+       /* Need to make the tx_bd_cons update visible to start_xmit()
+        * before checking for netif_tx_queue_stopped().  Without the
+        * memory barrier, there is a small possibility that
+        * start_xmit() will miss it and cause the queue to be stopped
+        * forever.
+        * On the other hand we need an rmb() here to ensure the proper
+        * ordering of bit testing in the following
+        * netif_tx_queue_stopped(txq) call.
+        */
+       smp_mb();
+
+       if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
+               /* Taking tx_lock is needed to prevent reenabling the queue
+                * while it's empty. This could have happened if rx_action()
+                * gets suspended in qede_tx_int() after the condition check
+                * but before netif_tx_wake_queue(), while tx_action
+                * (qede_start_xmit()):
+                *
+                * stops the queue->sees fresh tx_bd_cons->releases the queue->
+                * sends some packets consuming the whole queue again->
+                * stops the queue
+                */
+
+               __netif_tx_lock(netdev_txq, smp_processor_id());
+
+               if ((netif_tx_queue_stopped(netdev_txq)) &&
+                   (edev->state == QEDE_STATE_OPEN) &&
+                   (qed_chain_get_elem_left(&txq->tx_pbl)
+                     >= (MAX_SKB_FRAGS + 1))) {
+                       netif_tx_wake_queue(netdev_txq);
+                       DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
+                                  "Wake queue was called\n");
+               }
+
+               __netif_tx_unlock(netdev_txq);
+       }
+
+       return 0;
+}
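+
+/* [Editorial aside - illustrative sketch only, not part of this commit.]
+ * The smp_mb() above implements a classic stop/wake handshake: each
+ * side writes its own state, issues a full fence, then reads the
+ * other side's state, so at least one party sees the other's update.
+ * A C11-atomics sketch of the protocol (hypothetical names):
+ */
+#include <stdatomic.h>
+#include <stdbool.h>
+
+static _Atomic bool queue_stopped;
+static _Atomic int ring_free;
+
+static void xmit_out_of_room(void)     /* transmit path */
+{
+       atomic_store(&queue_stopped, true);
+       atomic_thread_fence(memory_order_seq_cst);      /* like smp_mb() */
+       if (atomic_load(&ring_free) > 0)        /* completions raced in */
+               atomic_store(&queue_stopped, false);
+}
+
+static void tx_completion(int freed)   /* completion path */
+{
+       atomic_fetch_add(&ring_free, freed);
+       atomic_thread_fence(memory_order_seq_cst);      /* like smp_mb() */
+       if (atomic_load(&queue_stopped))
+               atomic_store(&queue_stopped, false);    /* wake the queue */
+}
+
+int main(void)
+{
+       xmit_out_of_room();
+       tx_completion(4);
+       return 0;
+}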
+
+bool qede_has_rx_work(struct qede_rx_queue *rxq)
+{
+       u16 hw_comp_cons, sw_comp_cons;
+
+       /* Tell compiler that status block fields can change */
+       barrier();
+
+       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+       return hw_comp_cons != sw_comp_cons;
+}
+
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+       qed_chain_consume(&rxq->rx_bd_ring);
+       rxq->sw_rx_cons++;
+}
+
+/* This function reuses the buffer (from an offset) at the consumer
+ * index, reposting it at the producer index of the BD ring.
+ */
+static inline void qede_reuse_page(struct qede_rx_queue *rxq,
+                                  struct sw_rx_data *curr_cons)
+{
+       struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+       struct sw_rx_data *curr_prod;
+       dma_addr_t new_mapping;
+
+       curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+       *curr_prod = *curr_cons;
+
+       new_mapping = curr_prod->mapping + curr_prod->page_offset;
+
+       rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
+       rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
+
+       rxq->sw_rx_prod++;
+       curr_cons->data = NULL;
+}
+
+/* In case of allocation failures, reuse buffers from the consumer
+ * index to produce buffers for the firmware.
+ */
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
+{
+       struct sw_rx_data *curr_cons;
+
+       for (; count > 0; count--) {
+               curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+               qede_reuse_page(rxq, curr_cons);
+               qede_rx_bd_ring_consume(rxq);
+       }
+}
+
+static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
+                                        struct sw_rx_data *curr_cons)
+{
+       /* Move to the next segment in the page */
+       curr_cons->page_offset += rxq->rx_buf_seg_size;
+
+       if (curr_cons->page_offset == PAGE_SIZE) {
+               if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
+                       /* Since we failed to allocate a new buffer, the
+                        * current buffer can be used again.
+                        */
+                       curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
+                       return -ENOMEM;
+               }
+
+               dma_unmap_page(rxq->dev, curr_cons->mapping,
+                              PAGE_SIZE, rxq->data_direction);
+       } else {
+               /* Increment the refcount of the page, as we don't want the
+                * network stack to take ownership of a page that the driver
+                * can recycle multiple times.
+                */
+               page_ref_inc(curr_cons->data);
+               qede_reuse_page(rxq, curr_cons);
+       }
+
+       return 0;
+}
+
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
+{
+       u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
+       u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
+       struct eth_rx_prod_data rx_prods = {0};
+
+       /* Update producers */
+       rx_prods.bd_prod = cpu_to_le16(bd_prod);
+       rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
+
+       /* Make sure that the BD and SGE data is updated before updating the
+        * producers since FW might read the BD/SGE right after the producer
+        * is updated.
+        */
+       wmb();
+
+       internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
+                       (u32 *)&rx_prods);
+
+       /* mmiowb is needed to synchronize doorbell writes from more than one
+        * processor. It guarantees that the write arrives to the device before
+        * the napi lock is released and another qede_poll is called (possibly
+        * on another CPU). Without this barrier, the next doorbell can bypass
+        * this doorbell. This is applicable to IA64/Altix systems.
+        */
+       mmiowb();
+}
+
+static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
+{
+       enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
+       enum rss_hash_type htype;
+       u32 hash = 0;
+
+       htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+       if (htype) {
+               hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
+                            (htype == RSS_HASH_TYPE_IPV6)) ?
+                           PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
+               hash = le32_to_cpu(rss_hash);
+       }
+       skb_set_hash(skb, hash, hash_type);
+}
+
+static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
+{
+       skb_checksum_none_assert(skb);
+
+       if (csum_flag & QEDE_CSUM_UNNECESSARY)
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) {
+               skb->csum_level = 1;
+               skb->encapsulation = 1;
+       }
+}
+
+static inline void qede_skb_receive(struct qede_dev *edev,
+                                   struct qede_fastpath *fp,
+                                   struct qede_rx_queue *rxq,
+                                   struct sk_buff *skb, u16 vlan_tag)
+{
+       if (vlan_tag)
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+       napi_gro_receive(&fp->napi, skb);
+       rxq->rcv_pkts++;
+}
+
+static void qede_set_gro_params(struct qede_dev *edev,
+                               struct sk_buff *skb,
+                               struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+       u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
+
+       if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
+           PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
+               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+       else
+               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+       skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
+                                   cqe->header_len;
+}
+
+static int qede_fill_frag_skb(struct qede_dev *edev,
+                             struct qede_rx_queue *rxq,
+                             u8 tpa_agg_index, u16 len_on_bd)
+{
+       struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
+                                                        NUM_RX_BDS_MAX];
+       struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
+       struct sk_buff *skb = tpa_info->skb;
+
+       if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
+               goto out;
+
+       /* Add one frag and update the appropriate fields in the skb */
+       skb_fill_page_desc(skb, tpa_info->frag_id++,
+                          current_bd->data, current_bd->page_offset,
+                          len_on_bd);
+
+               /* Increment the page ref count to reuse on allocation
+                * failure, so that it doesn't get freed while the SKB is
+                * freed.
+                */
+                */
+               page_ref_inc(current_bd->data);
+               goto out;
+       }
+
+       qed_chain_consume(&rxq->rx_bd_ring);
+       rxq->sw_rx_cons++;
+
+       skb->data_len += len_on_bd;
+       skb->truesize += rxq->rx_buf_seg_size;
+       skb->len += len_on_bd;
+
+       return 0;
+
+out:
+       tpa_info->state = QEDE_AGG_STATE_ERROR;
+       qede_recycle_rx_bd_ring(rxq, 1);
+
+       return -ENOMEM;
+}
+
+static bool qede_tunn_exist(u16 flag)
+{
+       return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+                         PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
+static u8 qede_check_tunn_csum(u16 flag)
+{
+       u16 csum_flag = 0;
+       u8 tcsum = 0;
+
+       if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+                   PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+               csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+                            PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+       if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+                   PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+               csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+                            PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+               tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+       }
+
+       csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+                    PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+                    PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+                    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+       if (csum_flag & flag)
+               return QEDE_CSUM_ERROR;
+
+       return QEDE_CSUM_UNNECESSARY | tcsum;
+}
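+
+/* [Editorial aside - illustrative sketch only, not part of this commit.]
+ * The parsing-flags word packs fields as (value & MASK) << SHIFT, so
+ * "was the L4 checksum calculated?" is a masked AND, exactly the
+ * pattern qede_check_tunn_csum() uses. Standalone C sketch with
+ * made-up field positions:
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+#define L4_CALCULATED_MASK  0x1
+#define L4_CALCULATED_SHIFT 3
+#define L4_ERROR_MASK       0x1
+#define L4_ERROR_SHIFT      4
+
+int main(void)
+{
+       uint16_t flags = 1 << L4_CALCULATED_SHIFT; /* calculated, no error */
+
+       if (flags & (L4_CALCULATED_MASK << L4_CALCULATED_SHIFT))
+               printf("csum %s\n",
+                      (flags & (L4_ERROR_MASK << L4_ERROR_SHIFT)) ?
+                      "bad" : "unnecessary (verified by HW)");
+       return 0;
+}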
+
+static void qede_tpa_start(struct qede_dev *edev,
+                          struct qede_rx_queue *rxq,
+                          struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+       struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+       struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
+       struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+       struct sw_rx_data *replace_buf = &tpa_info->buffer;
+       dma_addr_t mapping = tpa_info->buffer_mapping;
+       struct sw_rx_data *sw_rx_data_cons;
+       struct sw_rx_data *sw_rx_data_prod;
+
+       sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+       sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+
+       /* Use the pre-allocated replacement buffer - we can't release the
+        * aggregation start until it's over, and we don't want to risk
+        * allocation failing here, so re-allocate once the aggregation is
+        * over.
+        */
+       sw_rx_data_prod->mapping = replace_buf->mapping;
+
+       sw_rx_data_prod->data = replace_buf->data;
+       rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+       rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
+       sw_rx_data_prod->page_offset = replace_buf->page_offset;
+
+       rxq->sw_rx_prod++;
+
+       /* Move the partial skb from cons to the pool (don't unmap yet);
+        * save the mapping in case we drop the packet later on.
+        */
+       tpa_info->buffer = *sw_rx_data_cons;
+       mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
+                          le32_to_cpu(rx_bd_cons->addr.lo));
+
+       tpa_info->buffer_mapping = mapping;
+       rxq->sw_rx_cons++;
+
+       /* Set the TPA state to 'start' only if we are able to allocate an
+        * skb for this aggregation; otherwise mark it as an error and the
+        * aggregation will be dropped.
+        */
+       tpa_info->skb = netdev_alloc_skb(edev->ndev,
+                                        le16_to_cpu(cqe->len_on_first_bd));
+       if (unlikely(!tpa_info->skb)) {
+               DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
+               tpa_info->state = QEDE_AGG_STATE_ERROR;
+               goto cons_buf;
+       }
+
+       /* Start filling in the aggregation info */
+       skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
+       tpa_info->frag_id = 0;
+       tpa_info->state = QEDE_AGG_STATE_START;
+
+       /* Store some information from first CQE */
+       tpa_info->start_cqe_placement_offset = cqe->placement_offset;
+       tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
+       if ((le16_to_cpu(cqe->pars_flags.flags) >>
+            PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
+           PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
+               tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
+       else
+               tpa_info->vlan_tag = 0;
+
+       qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
+
+       /* This is needed in order to enable forwarding support */
+       qede_set_gro_params(edev, tpa_info->skb, cqe);
+
+cons_buf: /* We still need to handle bd_len_list to consume buffers */
+       if (likely(cqe->ext_bd_len_list[0]))
+               qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+                                  le16_to_cpu(cqe->ext_bd_len_list[0]));
+
+       if (unlikely(cqe->ext_bd_len_list[1])) {
+               DP_ERR(edev,
+                      "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
+               tpa_info->state = QEDE_AGG_STATE_ERROR;
+       }
+}
+
+#ifdef CONFIG_INET
+static void qede_gro_ip_csum(struct sk_buff *skb)
+{
+       const struct iphdr *iph = ip_hdr(skb);
+       struct tcphdr *th;
+
+       skb_set_transport_header(skb, sizeof(struct iphdr));
+       th = tcp_hdr(skb);
+
+       th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+                                 iph->saddr, iph->daddr, 0);
+
+       tcp_gro_complete(skb);
+}
+
+static void qede_gro_ipv6_csum(struct sk_buff *skb)
+{
+       struct ipv6hdr *iph = ipv6_hdr(skb);
+       struct tcphdr *th;
+
+       skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+       th = tcp_hdr(skb);
+
+       th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+                                 &iph->saddr, &iph->daddr, 0);
+       tcp_gro_complete(skb);
+}
+#endif
+
+static void qede_gro_receive(struct qede_dev *edev,
+                            struct qede_fastpath *fp,
+                            struct sk_buff *skb,
+                            u16 vlan_tag)
+{
+       /* FW can send a single MTU-sized packet from the GRO flow due to
+        * aggregation timeout, last segment, etc., which is not expected
+        * to be a GRO packet. If the skb has zero frags, simply push it
+        * up the stack as a non-GSO skb.
+        */
+       if (unlikely(!skb->data_len)) {
+               skb_shinfo(skb)->gso_type = 0;
+               skb_shinfo(skb)->gso_size = 0;
+               goto send_skb;
+       }
+
+#ifdef CONFIG_INET
+       if (skb_shinfo(skb)->gso_size) {
+               skb_reset_network_header(skb);
+
+               switch (skb->protocol) {
+               case htons(ETH_P_IP):
+                       qede_gro_ip_csum(skb);
+                       break;
+               case htons(ETH_P_IPV6):
+                       qede_gro_ipv6_csum(skb);
+                       break;
+               default:
+                       DP_ERR(edev,
+                              "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+                              ntohs(skb->protocol));
+               }
+       }
+#endif
+
+send_skb:
+       skb_record_rx_queue(skb, fp->rxq->rxq_id);
+       qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
+}
+
+static inline void qede_tpa_cont(struct qede_dev *edev,
+                                struct qede_rx_queue *rxq,
+                                struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+{
+       int i;
+
+       for (i = 0; cqe->len_list[i]; i++)
+               qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+                                  le16_to_cpu(cqe->len_list[i]));
+
+       if (unlikely(i > 1))
+               DP_ERR(edev,
+                      "Strange - TPA cont with more than a single len_list entry\n");
+}
+
+static void qede_tpa_end(struct qede_dev *edev,
+                        struct qede_fastpath *fp,
+                        struct eth_fast_path_rx_tpa_end_cqe *cqe)
+{
+       struct qede_rx_queue *rxq = fp->rxq;
+       struct qede_agg_info *tpa_info;
+       struct sk_buff *skb;
+       int i;
+
+       tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+       skb = tpa_info->skb;
+
+       for (i = 0; cqe->len_list[i]; i++)
+               qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+                                  le16_to_cpu(cqe->len_list[i]));
+       if (unlikely(i > 1))
+               DP_ERR(edev,
+                      "Strange - TPA end with more than a single len_list entry\n");
+
+       if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
+               goto err;
+
+       /* Sanity */
+       if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
+               DP_ERR(edev,
+                      "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
+                      cqe->num_of_bds, tpa_info->frag_id);
+       if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
+               DP_ERR(edev,
+                      "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
+                      le16_to_cpu(cqe->total_packet_len), skb->len);
+
+       memcpy(skb->data,
+              page_address(tpa_info->buffer.data) +
+              tpa_info->start_cqe_placement_offset +
+              tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);
+
+       /* Finalize the SKB */
+       skb->protocol = eth_type_trans(skb, edev->ndev);
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+        * to skb_shinfo(skb)->gso_segs
+        */
+       NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
+
+       qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
+
+       tpa_info->state = QEDE_AGG_STATE_NONE;
+
+       return;
+err:
+       tpa_info->state = QEDE_AGG_STATE_NONE;
+       dev_kfree_skb_any(tpa_info->skb);
+       tpa_info->skb = NULL;
+}
+
+static u8 qede_check_notunn_csum(u16 flag)
+{
+       u16 csum_flag = 0;
+       u8 csum = 0;
+
+       if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+                   PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+               csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+                            PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+               csum = QEDE_CSUM_UNNECESSARY;
+       }
+
+       csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+                    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+       if (csum_flag & flag)
+               return QEDE_CSUM_ERROR;
+
+       return csum;
+}
+
+static u8 qede_check_csum(u16 flag)
+{
+       if (!qede_tunn_exist(flag))
+               return qede_check_notunn_csum(flag);
+       else
+               return qede_check_tunn_csum(flag);
+}
+
+static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
+                                     u16 flag)
+{
+       u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
+
+       if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
+                            ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
+           (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+                    PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
+               return true;
+
+       return false;
+}
+
+/* Return true iff packet is to be passed to stack */
+static bool qede_rx_xdp(struct qede_dev *edev,
+                       struct qede_fastpath *fp,
+                       struct qede_rx_queue *rxq,
+                       struct bpf_prog *prog,
+                       struct sw_rx_data *bd,
+                       struct eth_fast_path_rx_reg_cqe *cqe)
+{
+       u16 len = le16_to_cpu(cqe->len_on_first_bd);
+       struct xdp_buff xdp;
+       enum xdp_action act;
+
+       xdp.data = page_address(bd->data) + cqe->placement_offset;
+       xdp.data_end = xdp.data + len;
+
+       /* Queues always undergo a full reset currently, so for the time
+        * being, until there's an atomic program replace, just mark the
+        * read side for map helpers.
+        */
+       rcu_read_lock();
+       act = bpf_prog_run_xdp(prog, &xdp);
+       rcu_read_unlock();
+
+       if (act == XDP_PASS)
+               return true;
+
+       /* Count number of packets not to be passed to stack */
+       rxq->xdp_no_pass++;
+
+       switch (act) {
+       case XDP_TX:
+               /* We need the replacement buffer before transmit. */
+               if (qede_alloc_rx_buffer(rxq, true)) {
+                       qede_recycle_rx_bd_ring(rxq, 1);
+                       trace_xdp_exception(edev->ndev, prog, act);
+                       return false;
+               }
+
+               /* Now if there's a transmission problem, we still have to
+                * throw away the current buffer, as the replacement was
+                * already allocated.
+                */
+               if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
+                       dma_unmap_page(rxq->dev, bd->mapping,
+                                      PAGE_SIZE, DMA_BIDIRECTIONAL);
+                       __free_page(bd->data);
+                       trace_xdp_exception(edev->ndev, prog, act);
+               }
+
+               /* Regardless, we've consumed an Rx BD */
+               qede_rx_bd_ring_consume(rxq);
+               return false;
+
+       default:
+               bpf_warn_invalid_xdp_action(act);
+       case XDP_ABORTED:
+               trace_xdp_exception(edev->ndev, prog, act);
+       case XDP_DROP:
+               qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
+       }
+
+       return false;
+}
+
+static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
+                                           struct qede_rx_queue *rxq,
+                                           struct sw_rx_data *bd, u16 len,
+                                           u16 pad)
+{
+       unsigned int offset = bd->page_offset;
+       struct skb_frag_struct *frag;
+       struct page *page = bd->data;
+       unsigned int pull_len;
+       struct sk_buff *skb;
+       unsigned char *va;
+
+       /* Allocate a new SKB with a sufficient large header len */
+       skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+       if (unlikely(!skb))
+               return NULL;
+
+       /* Copy data into the SKB - if it's small, we can simply copy it
+        * and re-use the already allocated & mapped memory.
+        */
+       if (len + pad <= edev->rx_copybreak) {
+               memcpy(skb_put(skb, len),
+                      page_address(page) + pad + offset, len);
+               qede_reuse_page(rxq, bd);
+               goto out;
+       }
+
+       frag = &skb_shinfo(skb)->frags[0];
+
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                       page, pad + offset, len, rxq->rx_buf_seg_size);
+
+       va = skb_frag_address(frag);
+       pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
+
+       /* Align the pull_len to optimize memcpy */
+       memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
+
+       /* Correct the skb & frag sizes offset after the pull */
+       skb_frag_size_sub(frag, pull_len);
+       frag->page_offset += pull_len;
+       skb->data_len -= pull_len;
+       skb->tail += pull_len;
+
+       if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
+               /* Incr page ref count to reuse on allocation failure so
+                * that it doesn't get freed while freeing SKB [as it's
+                * already mapped there].
+                */
+               page_ref_inc(page);
+               dev_kfree_skb_any(skb);
+               return NULL;
+       }
+
+out:
+       /* We've consumed the first BD and prepared an SKB */
+       qede_rx_bd_ring_consume(rxq);
+       return skb;
+}
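+
+/* [Editorial aside - illustrative sketch only, not part of this commit.]
+ * The copybreak decision above: small packets are memcpy'd into the
+ * skb head so the page can be reused at once; larger ones keep the
+ * page as a frag and only pull the headers, rounding the pull length
+ * up so the memcpy stays word-aligned. Standalone C sketch:
+ */
+#include <stdio.h>
+
+#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))
+
+int main(void)
+{
+       unsigned long copybreak = 256, len = 1400, pull_len = 54;
+
+       if (len <= copybreak)
+               printf("copy whole packet, recycle the page\n");
+       else
+               printf("pull %lu header bytes, frag the rest\n",
+                      (unsigned long)ALIGN_UP(pull_len, sizeof(long)));
+       return 0;
+}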
+
+static int qede_rx_build_jumbo(struct qede_dev *edev,
+                              struct qede_rx_queue *rxq,
+                              struct sk_buff *skb,
+                              struct eth_fast_path_rx_reg_cqe *cqe,
+                              u16 first_bd_len)
+{
+       u16 pkt_len = le16_to_cpu(cqe->pkt_len);
+       struct sw_rx_data *bd;
+       u16 bd_cons_idx;
+       u8 num_frags;
+
+       pkt_len -= first_bd_len;
+
+       /* We've already used one BD for the SKB. Now take care of the rest */
+       for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
+               u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
+                   pkt_len;
+
+               if (unlikely(!cur_size)) {
+                       DP_ERR(edev,
+                              "Still got %d BDs for mapping jumbo, but length became 0\n",
+                              num_frags);
+                       goto out;
+               }
+
+               /* We need a replacement buffer for each BD */
+               if (unlikely(qede_alloc_rx_buffer(rxq, true)))
+                       goto out;
+
+               /* Now that we've allocated the replacement buffer,
+                * we can safely consume the next BD and map it to the SKB.
+                */
+               bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+               bd = &rxq->sw_rx_ring[bd_cons_idx];
+               qede_rx_bd_ring_consume(rxq);
+
+               dma_unmap_page(rxq->dev, bd->mapping,
+                              PAGE_SIZE, DMA_FROM_DEVICE);
+
+               skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
+                                  bd->data, 0, cur_size);
+
+               skb->truesize += PAGE_SIZE;
+               skb->data_len += cur_size;
+               skb->len += cur_size;
+               pkt_len -= cur_size;
+       }
+
+       if (unlikely(pkt_len))
+               DP_ERR(edev,
+                      "Mapped all BDs of jumbo, but still have %d bytes\n",
+                      pkt_len);
+
+out:
+       return num_frags;
+}
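+
+/* [Editorial aside - illustrative sketch only, not part of this commit.]
+ * A jumbo completion spreads pkt_len across bd_num fixed-size
+ * buffers; each iteration of the loop above takes
+ * min(remaining, rx_buf_size). Standalone C sketch:
+ */
+#include <stdio.h>
+
+int main(void)
+{
+       unsigned int pkt_len = 9000, buf_size = 4096, first_bd = 4096;
+       unsigned int remaining = pkt_len - first_bd;
+
+       while (remaining) {
+               unsigned int cur = remaining > buf_size ? buf_size : remaining;
+
+               printf("frag of %u bytes\n", cur);
+               remaining -= cur;
+       }
+       return 0;
+}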
+
+static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
+                                  struct qede_fastpath *fp,
+                                  struct qede_rx_queue *rxq,
+                                  union eth_rx_cqe *cqe,
+                                  enum eth_rx_cqe_type type)
+{
+       switch (type) {
+       case ETH_RX_CQE_TYPE_TPA_START:
+               qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
+               return 0;
+       case ETH_RX_CQE_TYPE_TPA_CONT:
+               qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
+               return 0;
+       case ETH_RX_CQE_TYPE_TPA_END:
+               qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+static int qede_rx_process_cqe(struct qede_dev *edev,
+                              struct qede_fastpath *fp,
+                              struct qede_rx_queue *rxq)
+{
+       struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
+       struct eth_fast_path_rx_reg_cqe *fp_cqe;
+       u16 len, pad, bd_cons_idx, parse_flag;
+       enum eth_rx_cqe_type cqe_type;
+       union eth_rx_cqe *cqe;
+       struct sw_rx_data *bd;
+       struct sk_buff *skb;
+       __le16 flags;
+       u8 csum_flag;
+
+       /* Get the CQE from the completion ring */
+       cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+       cqe_type = cqe->fast_path_regular.type;
+
+       /* Process an unlikely slowpath event */
+       if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+               struct eth_slow_path_rx_cqe *sp_cqe;
+
+               sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
+               edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
+               return 0;
+       }
+
+       /* Handle TPA cqes */
+       if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
+               return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
+
+       /* Get the data from the SW ring; consume it only after it's evident
+        * we won't recycle it.
+        */
+       bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+       bd = &rxq->sw_rx_ring[bd_cons_idx];
+
+       fp_cqe = &cqe->fast_path_regular;
+       len = le16_to_cpu(fp_cqe->len_on_first_bd);
+       pad = fp_cqe->placement_offset;
+
+       /* Run eBPF program if one is attached */
+       if (xdp_prog)
+               if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
+                       return 1;
+
+       /* If this is an error packet then drop it */
+       flags = cqe->fast_path_regular.pars_flags.flags;
+       parse_flag = le16_to_cpu(flags);
+
+       csum_flag = qede_check_csum(parse_flag);
+       if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+               if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) {
+                       rxq->rx_ip_frags++;
+               } else {
+                       DP_NOTICE(edev,
+                                 "CQE has error, flags = %x, dropping incoming packet\n",
+                                 parse_flag);
+                       rxq->rx_hw_errors++;
+                       qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
+                       return 0;
+               }
+       }
+
+       /* Basic validation passed; need to prepare an SKB. On success this
+        * also guarantees that the first BD is finally consumed.
+        */
+       skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
+       if (!skb) {
+               rxq->rx_alloc_errors++;
+               qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
+               return 0;
+       }
+
+       /* In case of a jumbo packet, several PAGE_SIZE'd buffers will be
+        * pointed to by a single CQE.
+        */
+       if (fp_cqe->bd_num > 1) {
+               u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
+                                                        fp_cqe, len);
+
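+               /* A non-zero return is the number of BDs that could not
+                * be mapped; recycle them and drop the SKB.
+                */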
+               if (unlikely(unmapped_frags > 0)) {
+                       qede_recycle_rx_bd_ring(rxq, unmapped_frags);
+                       dev_kfree_skb_any(skb);
+                       return 0;
+               }
+       }
+
+       /* The SKB contains all the data; now prepare the remaining metadata */
+       skb->protocol = eth_type_trans(skb, edev->ndev);
+       qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
+       qede_set_skb_csum(skb, csum_flag);
+       skb_record_rx_queue(skb, rxq->rxq_id);
+       qede_ptp_record_rx_ts(edev, cqe, skb);
+
+       /* SKB is prepared - pass it to stack */
+       qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
+
+       return 1;
+}
+
+static int qede_rx_int(struct qede_fastpath *fp, int budget)
+{
+       struct qede_rx_queue *rxq = fp->rxq;
+       struct qede_dev *edev = fp->edev;
+       u16 hw_comp_cons, sw_comp_cons;
+       int work_done = 0;
+
+       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+       /* Memory barrier to prevent the CPU from speculatively reading the
+        * CQE/BD in the loop below before reading hw_comp_cons. If the CQE
+        * were read before FW writes it, and FW then wrote both the CQE and
+        * the SB before the CPU read hw_comp_cons, the CPU would process a
+        * stale CQE.
+        */
+       rmb();
+
+       /* Loop to complete all indicated BDs */
+       while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
+               qede_rx_process_cqe(edev, fp, rxq);
+               qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+               sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+               work_done++;
+       }
+
+       /* Allocate replacement buffers */
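+       /* Refill until the ring is full again or an allocation fails */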
+       while (rxq->num_rx_buffers - rxq->filled_buffers)
+               if (qede_alloc_rx_buffer(rxq, false))
+                       break;
+
+       /* Update producers */
+       qede_update_rx_prod(edev, rxq);
+
+       return work_done;
+}
+
+static bool qede_poll_is_more_work(struct qede_fastpath *fp)
+{
+       qed_sb_update_sb_idx(fp->sb_info);
+
+       /* The *_has_*_work() helpers read the status block, so we must make
+        * sure the status block indices have actually been read
+        * (qed_sb_update_sb_idx) before that check. Without the rmb, the
+        * read in qed_sb_update_sb_idx could be postponed until just before
+        * *_ack_sb; if a DMA arrived right after qede_has_rx_work, we would
+        * write a "newer" status block value to HW, and no further interrupt
+        * would arrive until the status block is updated again, even though
+        * work is still pending.
+        */
+       rmb();
+
+       if (likely(fp->type & QEDE_FASTPATH_RX))
+               if (qede_has_rx_work(fp->rxq))
+                       return true;
+
+       if (fp->type & QEDE_FASTPATH_XDP)
+               if (qede_txq_has_work(fp->xdp_tx))
+                       return true;
+
+       if (likely(fp->type & QEDE_FASTPATH_TX))
+               if (qede_txq_has_work(fp->txq))
+                       return true;
+
+       return false;
+}
+
+/*********************
+ * NDO & API related *
+ *********************/
+int qede_poll(struct napi_struct *napi, int budget)
+{
+       struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
+                                               napi);
+       struct qede_dev *edev = fp->edev;
+       int rx_work_done = 0;
+
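+       /* Complete pending Tx and XDP-Tx work first; only Rx processing
+        * is bounded by the NAPI budget.
+        */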
+       if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
+               qede_tx_int(edev, fp->txq);
+
+       if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
+               qede_xdp_tx_int(edev, fp->xdp_tx);
+
+       rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
+                       qede_has_rx_work(fp->rxq)) ?
+                       qede_rx_int(fp, budget) : 0;
+       if (rx_work_done < budget) {
+               if (!qede_poll_is_more_work(fp)) {
+                       napi_complete_done(napi, rx_work_done);
+
+                       /* Update and reenable interrupts */
+                       qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
+               } else {
+                       rx_work_done = budget;
+               }
+       }
+
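+       /* If the XDP program queued any Tx buffers during this poll, ring
+        * the XDP doorbell once, now that the last BD has been produced.
+        */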
+       if (fp->xdp_xmit) {
+               u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
+
+               fp->xdp_xmit = 0;
+               fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
+               qede_update_tx_producer(fp->xdp_tx);
+       }
+
+       return rx_work_done;
+}
+
+irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
+{
+       struct qede_fastpath *fp = fp_cookie;
+
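+       /* Disable further fastpath interrupts for this status block and
+        * defer all work to NAPI.
+        */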
+       qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
+
+       napi_schedule_irqoff(&fp->napi);
+       return IRQ_HANDLED;
+}
+
+/* Main transmit function */
+netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+       struct qede_dev *edev = netdev_priv(ndev);
+       struct netdev_queue *netdev_txq;
+       struct qede_tx_queue *txq;
+       struct eth_tx_1st_bd *first_bd;
+       struct eth_tx_2nd_bd *second_bd = NULL;
+       struct eth_tx_3rd_bd *third_bd = NULL;
+       struct eth_tx_bd *tx_data_bd = NULL;
+       u16 txq_index;
+       u8 nbd = 0;
+       dma_addr_t mapping;
+       int rc, frag_idx = 0, ipv6_ext = 0;
+       u8 xmit_type;
+       u16 idx;
+       u16 hlen;
+       bool data_split = false;
+
+       /* Get tx-queue context and netdev index */
+       txq_index = skb_get_queue_mapping(skb);
+       WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
+       txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
+       netdev_txq = netdev_get_tx_queue(ndev, txq_index);
+
+       WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
+
+       xmit_type = qede_xmit_type(skb, &ipv6_ext);
+
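+/* A non-LSO packet with more fragments than HW supports per packet must
+ * be linearized first.
+ */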
+#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
+       if (qede_pkt_req_lin(skb, xmit_type)) {
+               if (skb_linearize(skb)) {
+                       DP_NOTICE(edev,
+                                 "SKB linearization failed - silently dropping this SKB\n");
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
+       }
+#endif
+
+       /* Fill the entry in the SW ring and the BDs in the FW ring */
+       idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+       txq->sw_tx_ring.skbs[idx].skb = skb;
+       first_bd = (struct eth_tx_1st_bd *)
+                  qed_chain_produce(&txq->tx_pbl);
+       memset(first_bd, 0, sizeof(*first_bd));
+       first_bd->data.bd_flags.bitfields =
+               1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+
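+       /* Arm HW Tx timestamping for this skb if it was requested */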
+       if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+               qede_ptp_tx_ts(edev, skb);
+
+       /* Map skb linear data for DMA and set in the first BD */
+       mapping = dma_map_single(txq->dev, skb->data,
+                                skb_headlen(skb), DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(txq->dev, mapping))) {
+               DP_NOTICE(edev, "SKB mapping failed\n");
+               qede_free_failed_tx_pkt(txq, first_bd, 0, false);
+               qede_update_tx_producer(txq);
+               return NETDEV_TX_OK;
+       }
+       nbd++;
+       BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+       /* In case there is IPv6 with extension headers or LSO we need 2nd and
+        * 3rd BDs.
+        */
+       if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
+               second_bd = (struct eth_tx_2nd_bd *)
+                       qed_chain_produce(&txq->tx_pbl);
+               memset(second_bd, 0, sizeof(*second_bd));
+
+               nbd++;
+               third_bd = (struct eth_tx_3rd_bd *)
+                       qed_chain_produce(&txq->tx_pbl);
+               memset(third_bd, 0, sizeof(*third_bd));
+
+               nbd++;
+               /* We need to fill in additional data in second_bd... */
+               tx_data_bd = (struct eth_tx_bd *)second_bd;
+       }
+
+       if (skb_vlan_tag_present(skb)) {
+               first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
+               first_bd->data.bd_flags.bitfields |=
+                       1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
+       }
+
+       /* Fill the parsing flags & params according to the requested offload */
+       if (xmit_type & XMIT_L4_CSUM) {
+               /* We don't re-calculate IP checksum as it is already done by
+                * the upper stack
+                */
+               first_bd->data.bd_flags.bitfields |=
+                       1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+
+               if (xmit_type & XMIT_ENC) {
+                       first_bd->data.bd_flags.bitfields |=
+                               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+                       first_bd->data.bitfields |=
+                           1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+               }
+
+               /* Legacy FW had inverted behavior for this bit, i.e., it
+                * had to be set to prevent FW from touching encapsulated
+                * packets when it didn't need to.
+                */
+               if (unlikely(txq->is_legacy))
+                       first_bd->data.bitfields ^=
+                           1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
+               /* If the packet is IPv6 with an extension header, indicate
+                * that to FW and pass a few parameters, since the device
+                * cracker doesn't support parsing IPv6 with extension
+                * headers.
+                */
+               if (unlikely(ipv6_ext))
+                       qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
+       }
+
+       if (xmit_type & XMIT_LSO) {
+               first_bd->data.bd_flags.bitfields |=
+                       (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
+               third_bd->data.lso_mss =
+                       cpu_to_le16(skb_shinfo(skb)->gso_size);
+
+               if (unlikely(xmit_type & XMIT_ENC)) {
+                       first_bd->data.bd_flags.bitfields |=
+                               1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+
+                       if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
+                               u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+
+                               first_bd->data.bd_flags.bitfields |= 1 << tmp;
+                       }
+                       hlen = qede_get_skb_hlen(skb, true);
+               } else {
+                       first_bd->data.bd_flags.bitfields |=
+                               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+                       hlen = qede_get_skb_hlen(skb, false);
+               }
+
+               /* @@@TBD - if this is not removed, it needs checking */
+               third_bd->data.bitfields |=
+                       cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+
+               /* Make life easier for FW guys who can't deal with header and
+                * data on same BD. If we need to split, use the second bd...
+                */
+               if (unlikely(skb_headlen(skb) > hlen)) {
+                       DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+                                  "TSO split header size is %d (%x:%x)\n",
+                                  first_bd->nbytes, first_bd->addr.hi,
+                                  first_bd->addr.lo);
+
+                       mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
+                                          le32_to_cpu(first_bd->addr.lo)) +
+                                          hlen;
+
+                       BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
+                                             le16_to_cpu(first_bd->nbytes) -
+                                             hlen);
+
+                       /* this marks the BD as one that has no
+                        * individual mapping
+                        */
+                       txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
+
+                       first_bd->nbytes = cpu_to_le16(hlen);
+
+                       tx_data_bd = (struct eth_tx_bd *)third_bd;
+                       data_split = true;
+               }
+       } else {
+               first_bd->data.bitfields |=
+                   (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+                   ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+       }
+
+       /* Handle a fragmented skb: frags placed inside the 2nd and 3rd
+        * BDs need special handling.
+        */
+       while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
+               rc = map_frag_to_bd(txq,
+                                   &skb_shinfo(skb)->frags[frag_idx],
+                                   tx_data_bd);
+               if (rc) {
+                       qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
+                       qede_update_tx_producer(txq);
+                       return NETDEV_TX_OK;
+               }
+
+               if (tx_data_bd == (struct eth_tx_bd *)second_bd)
+                       tx_data_bd = (struct eth_tx_bd *)third_bd;
+               else
+                       tx_data_bd = NULL;
+
+               frag_idx++;
+       }
+
+       /* Map the remaining frags into the 4th, 5th, ... BDs */
+       for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
+               tx_data_bd = (struct eth_tx_bd *)
+                            qed_chain_produce(&txq->tx_pbl);
+
+               memset(tx_data_bd, 0, sizeof(*tx_data_bd));
+
+               rc = map_frag_to_bd(txq,
+                                   &skb_shinfo(skb)->frags[frag_idx],
+                                   tx_data_bd);
+               if (rc) {
+                       qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
+                       qede_update_tx_producer(txq);
+                       return NETDEV_TX_OK;
+               }
+       }
+
+       /* update the first BD with the actual num BDs */
+       first_bd->data.nbds = nbd;
+
+       netdev_tx_sent_queue(netdev_txq, skb->len);
+
+       skb_tx_timestamp(skb);
+
+       /* Advance the packet producer only now, just before sending, since
+        * the mapping of pages may fail.
+        */
+       txq->sw_tx_prod++;
+
+       /* 'next page' entries are counted in the producer value */
+       txq->tx_db.data.bd_prod =
+               cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+
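+       /* With xmit_more the doorbell is coalesced; ring it only for the
+        * last skb of a burst, or if the queue has been stopped.
+        */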
+       if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
+               qede_update_tx_producer(txq);
+
+       if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
+                     < (MAX_SKB_FRAGS + 1))) {
+               if (skb->xmit_more)
+                       qede_update_tx_producer(txq);
+
+               netif_tx_stop_queue(netdev_txq);
+               txq->stopped_cnt++;
+               DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+                          "Stop queue was called\n");
+               /* The paired memory barrier is in qede_tx_int(); we must
+                * keep the ordering between the set_bit() in
+                * netif_tx_stop_queue() and the read of fp->bd_tx_cons.
+                */
+               smp_mb();
+
+               if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
+                    (MAX_SKB_FRAGS + 1)) &&
+                   (edev->state == QEDE_STATE_OPEN)) {
+                       netif_tx_wake_queue(netdev_txq);
+                       DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+                                  "Wake queue was called\n");
+               }
+       }
+
+       return NETDEV_TX_OK;
+}
+
+/* 8B udp header + 8B base tunnel header + 32B option length */
+#define QEDE_MAX_TUN_HDR_LEN 48
+
+netdev_features_t qede_features_check(struct sk_buff *skb,
+                                     struct net_device *dev,
+                                     netdev_features_t features)
+{
+       if (skb->encapsulation) {
+               u8 l4_proto = 0;
+
+               switch (vlan_get_protocol(skb)) {
+               case htons(ETH_P_IP):
+                       l4_proto = ip_hdr(skb)->protocol;
+                       break;
+               case htons(ETH_P_IPV6):
+                       l4_proto = ipv6_hdr(skb)->nexthdr;
+                       break;
+               default:
+                       return features;
+               }
+
+               /* Disable offloads for GENEVE tunnels, as HW can't parse
+                * a GENEVE header whose option length exceeds 32B.
+                */
+               if ((l4_proto == IPPROTO_UDP) &&
+                   ((skb_inner_mac_header(skb) -
+                     skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
+                       return features & ~(NETIF_F_CSUM_MASK |
+                                           NETIF_F_GSO_MASK);
+       }
+
+       return features;
+}
index aecdd1c5c0ea24a368085c0b2a41f5891ce55ac1..d163e72aa2a6601ce634a50ce63313703364c8bc 100644 (file)
@@ -1,11 +1,34 @@
 /* QLogic qede NIC Driver
-* Copyright (c) 2015 QLogic Corporation
-*
-* This software is available under the terms of the GNU General Public License
-* (GPL) Version 2, available from the file COPYING in the main directory of
-* this source tree.
-*/
-
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/version.h>
 #include <linux/random.h>
 #include <net/ip6_checksum.h>
 #include <linux/bitops.h>
+#include <linux/vmalloc.h>
 #include <linux/qed/qede_roce.h>
 #include "qede.h"
+#include "qede_ptp.h"
 
 static char version[] =
        "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
@@ -154,8 +179,12 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
 {
        struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
        struct qed_dev_info *qed_info = &edev->dev_info.common;
+       struct qed_update_vport_params *vport_params;
        int rc;
 
+       vport_params = vzalloc(sizeof(*vport_params));
+       if (!vport_params)
+               return -ENOMEM;
        DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
 
        rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
@@ -163,15 +192,13 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
        /* Enable/Disable Tx switching for PF */
        if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
            qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
-               struct qed_update_vport_params params;
-
-               memset(&params, 0, sizeof(params));
-               params.vport_id = 0;
-               params.update_tx_switching_flg = 1;
-               params.tx_switching_flg = num_vfs_param ? 1 : 0;
-               edev->ops->vport_update(edev->cdev, &params);
+               vport_params->vport_id = 0;
+               vport_params->update_tx_switching_flg = 1;
+               vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
+               edev->ops->vport_update(edev->cdev, vport_params);
        }
 
+       vfree(vport_params);
        return rc;
 }
 #endif
@@ -187,18 +214,6 @@ static struct pci_driver qede_pci_driver = {
 #endif
 };
 
-static void qede_force_mac(void *dev, u8 *mac, bool forced)
-{
-       struct qede_dev *edev = dev;
-
-       /* MAC hints take effect only if we haven't set one already */
-       if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
-               return;
-
-       ether_addr_copy(edev->ndev->dev_addr, mac);
-       ether_addr_copy(edev->primary_mac, mac);
-}
-
 static struct qed_eth_cb_ops qede_ll_ops = {
        {
                .link_update = qede_link_update,
@@ -294,1673 +309,38 @@ static void __exit qede_cleanup(void)
 module_init(qede_init);
 module_exit(qede_cleanup);
 
-/* -------------------------------------------------------------------------
- * START OF FAST-PATH
- * -------------------------------------------------------------------------
- */
-
-/* Unmap the data and free skb */
-static int qede_free_tx_pkt(struct qede_dev *edev,
-                           struct qede_tx_queue *txq, int *len)
-{
-       u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
-       struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
-       struct eth_tx_1st_bd *first_bd;
-       struct eth_tx_bd *tx_data_bd;
-       int bds_consumed = 0;
-       int nbds;
-       bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
-       int i, split_bd_len = 0;
-
-       if (unlikely(!skb)) {
-               DP_ERR(edev,
-                      "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
-                      idx, txq->sw_tx_cons, txq->sw_tx_prod);
-               return -1;
-       }
-
-       *len = skb->len;
-
-       first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
-
-       bds_consumed++;
-
-       nbds = first_bd->data.nbds;
-
-       if (data_split) {
-               struct eth_tx_bd *split = (struct eth_tx_bd *)
-                       qed_chain_consume(&txq->tx_pbl);
-               split_bd_len = BD_UNMAP_LEN(split);
-               bds_consumed++;
-       }
-       dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
-                        BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
-
-       /* Unmap the data of the skb frags */
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
-               tx_data_bd = (struct eth_tx_bd *)
-                       qed_chain_consume(&txq->tx_pbl);
-               dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
-                              BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
-       }
-
-       while (bds_consumed++ < nbds)
-               qed_chain_consume(&txq->tx_pbl);
-
-       /* Free skb */
-       dev_kfree_skb_any(skb);
-       txq->sw_tx_ring.skbs[idx].skb = NULL;
-       txq->sw_tx_ring.skbs[idx].flags = 0;
-
-       return 0;
-}
-
-/* Unmap the data and free skb when mapping failed during start_xmit */
-static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
-                                   struct eth_tx_1st_bd *first_bd,
-                                   int nbd, bool data_split)
-{
-       u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
-       struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
-       struct eth_tx_bd *tx_data_bd;
-       int i, split_bd_len = 0;
-
-       /* Return prod to its position before this skb was handled */
-       qed_chain_set_prod(&txq->tx_pbl,
-                          le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
-
-       first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
-
-       if (data_split) {
-               struct eth_tx_bd *split = (struct eth_tx_bd *)
-                                         qed_chain_produce(&txq->tx_pbl);
-               split_bd_len = BD_UNMAP_LEN(split);
-               nbd--;
-       }
-
-       dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
-                        BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
-
-       /* Unmap the data of the skb frags */
-       for (i = 0; i < nbd; i++) {
-               tx_data_bd = (struct eth_tx_bd *)
-                       qed_chain_produce(&txq->tx_pbl);
-               if (tx_data_bd->nbytes)
-                       dma_unmap_page(txq->dev,
-                                      BD_UNMAP_ADDR(tx_data_bd),
-                                      BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
-       }
-
-       /* Return again prod to its position before this skb was handled */
-       qed_chain_set_prod(&txq->tx_pbl,
-                          le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
-
-       /* Free skb */
-       dev_kfree_skb_any(skb);
-       txq->sw_tx_ring.skbs[idx].skb = NULL;
-       txq->sw_tx_ring.skbs[idx].flags = 0;
-}
-
-static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
-{
-       u32 rc = XMIT_L4_CSUM;
-       __be16 l3_proto;
-
-       if (skb->ip_summed != CHECKSUM_PARTIAL)
-               return XMIT_PLAIN;
-
-       l3_proto = vlan_get_protocol(skb);
-       if (l3_proto == htons(ETH_P_IPV6) &&
-           (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
-               *ipv6_ext = 1;
-
-       if (skb->encapsulation) {
-               rc |= XMIT_ENC;
-               if (skb_is_gso(skb)) {
-                       unsigned short gso_type = skb_shinfo(skb)->gso_type;
-
-                       if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
-                           (gso_type & SKB_GSO_GRE_CSUM))
-                               rc |= XMIT_ENC_GSO_L4_CSUM;
-
-                       rc |= XMIT_LSO;
-                       return rc;
-               }
-       }
-
-       if (skb_is_gso(skb))
-               rc |= XMIT_LSO;
-
-       return rc;
-}
-
-static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
-                                        struct eth_tx_2nd_bd *second_bd,
-                                        struct eth_tx_3rd_bd *third_bd)
-{
-       u8 l4_proto;
-       u16 bd2_bits1 = 0, bd2_bits2 = 0;
-
-       bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
-
-       bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
-                    ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
-                   << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
-
-       bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
-                     ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
-
-       if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
-               l4_proto = ipv6_hdr(skb)->nexthdr;
-       else
-               l4_proto = ip_hdr(skb)->protocol;
-
-       if (l4_proto == IPPROTO_UDP)
-               bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
-
-       if (third_bd)
-               third_bd->data.bitfields |=
-                       cpu_to_le16(((tcp_hdrlen(skb) / 4) &
-                               ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
-                               ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
-
-       second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
-       second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
-}
-
-static int map_frag_to_bd(struct qede_tx_queue *txq,
-                         skb_frag_t *frag, struct eth_tx_bd *bd)
-{
-       dma_addr_t mapping;
-
-       /* Map skb non-linear frag data for DMA */
-       mapping = skb_frag_dma_map(txq->dev, frag, 0,
-                                  skb_frag_size(frag), DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(txq->dev, mapping)))
-               return -ENOMEM;
-
-       /* Setup the data pointer of the frag data */
-       BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
-
-       return 0;
-}
-
-static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
-{
-       if (is_encap_pkt)
-               return (skb_inner_transport_header(skb) +
-                       inner_tcp_hdrlen(skb) - skb->data);
-       else
-               return (skb_transport_header(skb) +
-                       tcp_hdrlen(skb) - skb->data);
-}
-
-/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
-#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
-static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
-{
-       int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
-
-       if (xmit_type & XMIT_LSO) {
-               int hlen;
-
-               hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
-
-               /* linear payload would require its own BD */
-               if (skb_headlen(skb) > hlen)
-                       allowed_frags--;
-       }
-
-       return (skb_shinfo(skb)->nr_frags > allowed_frags);
-}
-#endif
-
-static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
-{
-       /* wmb makes sure that the BDs data is updated before updating the
-        * producer, otherwise FW may read old data from the BDs.
-        */
-       wmb();
-       barrier();
-       writel(txq->tx_db.raw, txq->doorbell_addr);
-
-       /* mmiowb is needed to synchronize doorbell writes from more than one
-        * processor. It guarantees that the write arrives to the device before
-        * the queue lock is released and another start_xmit is called (possibly
-        * on another CPU). Without this barrier, the next doorbell can bypass
-        * this doorbell. This is applicable to IA64/Altix systems.
-        */
-       mmiowb();
-}
-
-static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
-                        struct sw_rx_data *metadata, u16 padding, u16 length)
-{
-       struct qede_tx_queue *txq = fp->xdp_tx;
-       u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
-       struct eth_tx_1st_bd *first_bd;
-
-       if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
-               txq->stopped_cnt++;
-               return -ENOMEM;
-       }
-
-       first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
-
-       memset(first_bd, 0, sizeof(*first_bd));
-       first_bd->data.bd_flags.bitfields =
-           BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
-       first_bd->data.bitfields |=
-           (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
-           ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
-       first_bd->data.nbds = 1;
-
-       /* We can safely ignore the offset, as it's 0 for XDP */
-       BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
-
-       /* Synchronize the buffer back to device, as program [probably]
-        * has changed it.
-        */
-       dma_sync_single_for_device(&edev->pdev->dev,
-                                  metadata->mapping + padding,
-                                  length, PCI_DMA_TODEVICE);
-
-       txq->sw_tx_ring.pages[idx] = metadata->data;
-       txq->sw_tx_prod++;
-
-       /* Mark the fastpath for future XDP doorbell */
-       fp->xdp_xmit = 1;
-
-       return 0;
-}
-
-/* Main transmit function */
-static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
-                                  struct net_device *ndev)
-{
-       struct qede_dev *edev = netdev_priv(ndev);
-       struct netdev_queue *netdev_txq;
-       struct qede_tx_queue *txq;
-       struct eth_tx_1st_bd *first_bd;
-       struct eth_tx_2nd_bd *second_bd = NULL;
-       struct eth_tx_3rd_bd *third_bd = NULL;
-       struct eth_tx_bd *tx_data_bd = NULL;
-       u16 txq_index;
-       u8 nbd = 0;
-       dma_addr_t mapping;
-       int rc, frag_idx = 0, ipv6_ext = 0;
-       u8 xmit_type;
-       u16 idx;
-       u16 hlen;
-       bool data_split = false;
-
-       /* Get tx-queue context and netdev index */
-       txq_index = skb_get_queue_mapping(skb);
-       WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
-       txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
-       netdev_txq = netdev_get_tx_queue(ndev, txq_index);
-
-       WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
-
-       xmit_type = qede_xmit_type(skb, &ipv6_ext);
-
-#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
-       if (qede_pkt_req_lin(skb, xmit_type)) {
-               if (skb_linearize(skb)) {
-                       DP_NOTICE(edev,
-                                 "SKB linearization failed - silently dropping this SKB\n");
-                       dev_kfree_skb_any(skb);
-                       return NETDEV_TX_OK;
-               }
-       }
-#endif
-
-       /* Fill the entry in the SW ring and the BDs in the FW ring */
-       idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
-       txq->sw_tx_ring.skbs[idx].skb = skb;
-       first_bd = (struct eth_tx_1st_bd *)
-                  qed_chain_produce(&txq->tx_pbl);
-       memset(first_bd, 0, sizeof(*first_bd));
-       first_bd->data.bd_flags.bitfields =
-               1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
-
-       /* Map skb linear data for DMA and set in the first BD */
-       mapping = dma_map_single(txq->dev, skb->data,
-                                skb_headlen(skb), DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(txq->dev, mapping))) {
-               DP_NOTICE(edev, "SKB mapping failed\n");
-               qede_free_failed_tx_pkt(txq, first_bd, 0, false);
-               qede_update_tx_producer(txq);
-               return NETDEV_TX_OK;
-       }
-       nbd++;
-       BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
-
-       /* In case there is IPv6 with extension headers or LSO we need 2nd and
-        * 3rd BDs.
-        */
-       if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
-               second_bd = (struct eth_tx_2nd_bd *)
-                       qed_chain_produce(&txq->tx_pbl);
-               memset(second_bd, 0, sizeof(*second_bd));
-
-               nbd++;
-               third_bd = (struct eth_tx_3rd_bd *)
-                       qed_chain_produce(&txq->tx_pbl);
-               memset(third_bd, 0, sizeof(*third_bd));
-
-               nbd++;
-               /* We need to fill in additional data in second_bd... */
-               tx_data_bd = (struct eth_tx_bd *)second_bd;
-       }
-
-       if (skb_vlan_tag_present(skb)) {
-               first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
-               first_bd->data.bd_flags.bitfields |=
-                       1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
-       }
-
-       /* Fill the parsing flags & params according to the requested offload */
-       if (xmit_type & XMIT_L4_CSUM) {
-               /* We don't re-calculate IP checksum as it is already done by
-                * the upper stack
-                */
-               first_bd->data.bd_flags.bitfields |=
-                       1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
-
-               if (xmit_type & XMIT_ENC) {
-                       first_bd->data.bd_flags.bitfields |=
-                               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-                       first_bd->data.bitfields |=
-                           1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
-               }
-
-               /* Legacy FW had flipped behavior in regard to this bit -
-                * I.e., needed to set to prevent FW from touching encapsulated
-                * packets when it didn't need to.
-                */
-               if (unlikely(txq->is_legacy))
-                       first_bd->data.bitfields ^=
-                           1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
-
-               /* If the packet is IPv6 with extension header, indicate that
-                * to FW and pass few params, since the device cracker doesn't
-                * support parsing IPv6 with extension header/s.
-                */
-               if (unlikely(ipv6_ext))
-                       qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
-       }
-
-       if (xmit_type & XMIT_LSO) {
-               first_bd->data.bd_flags.bitfields |=
-                       (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
-               third_bd->data.lso_mss =
-                       cpu_to_le16(skb_shinfo(skb)->gso_size);
-
-               if (unlikely(xmit_type & XMIT_ENC)) {
-                       first_bd->data.bd_flags.bitfields |=
-                               1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
-
-                       if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
-                               u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
-
-                               first_bd->data.bd_flags.bitfields |= 1 << tmp;
-                       }
-                       hlen = qede_get_skb_hlen(skb, true);
-               } else {
-                       first_bd->data.bd_flags.bitfields |=
-                               1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-                       hlen = qede_get_skb_hlen(skb, false);
-               }
-
-               /* @@@TBD - if will not be removed need to check */
-               third_bd->data.bitfields |=
-                       cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));
-
-               /* Make life easier for FW guys who can't deal with header and
-                * data on same BD. If we need to split, use the second bd...
-                */
-               if (unlikely(skb_headlen(skb) > hlen)) {
-                       DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
-                                  "TSO split header size is %d (%x:%x)\n",
-                                  first_bd->nbytes, first_bd->addr.hi,
-                                  first_bd->addr.lo);
-
-                       mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
-                                          le32_to_cpu(first_bd->addr.lo)) +
-                                          hlen;
-
-                       BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
-                                             le16_to_cpu(first_bd->nbytes) -
-                                             hlen);
-
-                       /* this marks the BD as one that has no
-                        * individual mapping
-                        */
-                       txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
-
-                       first_bd->nbytes = cpu_to_le16(hlen);
-
-                       tx_data_bd = (struct eth_tx_bd *)third_bd;
-                       data_split = true;
-               }
-       } else {
-               first_bd->data.bitfields |=
-                   (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
-                   ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
-       }
-
-       /* Handle fragmented skb */
-       /* special handle for frags inside 2nd and 3rd bds.. */
-       while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
-               rc = map_frag_to_bd(txq,
-                                   &skb_shinfo(skb)->frags[frag_idx],
-                                   tx_data_bd);
-               if (rc) {
-                       qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
-                       qede_update_tx_producer(txq);
-                       return NETDEV_TX_OK;
-               }
-
-               if (tx_data_bd == (struct eth_tx_bd *)second_bd)
-                       tx_data_bd = (struct eth_tx_bd *)third_bd;
-               else
-                       tx_data_bd = NULL;
-
-               frag_idx++;
-       }
-
-       /* map last frags into 4th, 5th .... */
-       for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
-               tx_data_bd = (struct eth_tx_bd *)
-                            qed_chain_produce(&txq->tx_pbl);
-
-               memset(tx_data_bd, 0, sizeof(*tx_data_bd));
-
-               rc = map_frag_to_bd(txq,
-                                   &skb_shinfo(skb)->frags[frag_idx],
-                                   tx_data_bd);
-               if (rc) {
-                       qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
-                       qede_update_tx_producer(txq);
-                       return NETDEV_TX_OK;
-               }
-       }
-
-       /* update the first BD with the actual num BDs */
-       first_bd->data.nbds = nbd;
-
-       netdev_tx_sent_queue(netdev_txq, skb->len);
-
-       skb_tx_timestamp(skb);
-
-       /* Advance packet producer only before sending the packet since mapping
-        * of pages may fail.
-        */
-       txq->sw_tx_prod++;
-
-       /* 'next page' entries are counted in the producer value */
-       txq->tx_db.data.bd_prod =
-               cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
-
-       if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
-               qede_update_tx_producer(txq);
-
-       if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
-                     < (MAX_SKB_FRAGS + 1))) {
-               if (skb->xmit_more)
-                       qede_update_tx_producer(txq);
-
-               netif_tx_stop_queue(netdev_txq);
-               txq->stopped_cnt++;
-               DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
-                          "Stop queue was called\n");
-               /* paired memory barrier is in qede_tx_int(), we have to keep
-                * ordering of set_bit() in netif_tx_stop_queue() and read of
-                * fp->bd_tx_cons
-                */
-               smp_mb();
-
-               if (qed_chain_get_elem_left(&txq->tx_pbl)
-                    >= (MAX_SKB_FRAGS + 1) &&
-                   (edev->state == QEDE_STATE_OPEN)) {
-                       netif_tx_wake_queue(netdev_txq);
-                       DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
-                                  "Wake queue was called\n");
-               }
-       }
-
-       return NETDEV_TX_OK;
-}
-
-int qede_txq_has_work(struct qede_tx_queue *txq)
-{
-       u16 hw_bd_cons;
-
-       /* Tell compiler that consumer and producer can change */
-       barrier();
-       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
-       if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
-               return 0;
-
-       return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
-}
-
-static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
-{
-       struct eth_tx_1st_bd *bd;
-       u16 hw_bd_cons;
-
-       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
-       barrier();
-
-       while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
-               bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
-
-               dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(bd),
-                                PAGE_SIZE, DMA_BIDIRECTIONAL);
-               __free_page(txq->sw_tx_ring.pages[txq->sw_tx_cons &
-                                                 NUM_TX_BDS_MAX]);
-
-               txq->sw_tx_cons++;
-               txq->xmit_pkts++;
-       }
-}
-
-static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
-{
-       struct netdev_queue *netdev_txq;
-       u16 hw_bd_cons;
-       unsigned int pkts_compl = 0, bytes_compl = 0;
-       int rc;
-
-       netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
-
-       hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
-       barrier();
-
-       while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
-               int len = 0;
-
-               rc = qede_free_tx_pkt(edev, txq, &len);
-               if (rc) {
-                       DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
-                                 hw_bd_cons,
-                                 qed_chain_get_cons_idx(&txq->tx_pbl));
-                       break;
-               }
-
-               bytes_compl += len;
-               pkts_compl++;
-               txq->sw_tx_cons++;
-               txq->xmit_pkts++;
-       }
-
-       netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
-
-       /* Need to make the tx_bd_cons update visible to start_xmit()
-        * before checking for netif_tx_queue_stopped().  Without the
-        * memory barrier, there is a small possibility that
-        * start_xmit() will miss it and cause the queue to be stopped
-        * forever.
-        * On the other hand we need an rmb() here to ensure the proper
-        * ordering of bit testing in the following
-        * netif_tx_queue_stopped(txq) call.
-        */
-       smp_mb();
-
-       if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
-               /* Taking tx_lock is needed to prevent reenabling the queue
-                * while it's empty. This could have happen if rx_action() gets
-                * suspended in qede_tx_int() after the condition before
-                * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
-                *
-                * stops the queue->sees fresh tx_bd_cons->releases the queue->
-                * sends some packets consuming the whole queue again->
-                * stops the queue
-                */
-
-               __netif_tx_lock(netdev_txq, smp_processor_id());
-
-               if ((netif_tx_queue_stopped(netdev_txq)) &&
-                   (edev->state == QEDE_STATE_OPEN) &&
-                   (qed_chain_get_elem_left(&txq->tx_pbl)
-                     >= (MAX_SKB_FRAGS + 1))) {
-                       netif_tx_wake_queue(netdev_txq);
-                       DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
-                                  "Wake queue was called\n");
-               }
-
-               __netif_tx_unlock(netdev_txq);
-       }
-
-       return 0;
-}
-
-bool qede_has_rx_work(struct qede_rx_queue *rxq)
-{
-       u16 hw_comp_cons, sw_comp_cons;
-
-       /* Tell compiler that status block fields can change */
-       barrier();
-
-       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
-       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
-
-       return hw_comp_cons != sw_comp_cons;
-}
-
-static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
-{
-       qed_chain_consume(&rxq->rx_bd_ring);
-       rxq->sw_rx_cons++;
-}
-
-/* This function reuses the buffer(from an offset) from
- * consumer index to producer index in the bd ring
- */
-static inline void qede_reuse_page(struct qede_rx_queue *rxq,
-                                  struct sw_rx_data *curr_cons)
-{
-       struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
-       struct sw_rx_data *curr_prod;
-       dma_addr_t new_mapping;
-
-       curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
-       *curr_prod = *curr_cons;
-
-       new_mapping = curr_prod->mapping + curr_prod->page_offset;
-
-       rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
-       rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
-
-       rxq->sw_rx_prod++;
-       curr_cons->data = NULL;
-}
+static int qede_open(struct net_device *ndev);
+static int qede_close(struct net_device *ndev);
 
-/* In case of allocation failures reuse buffers
- * from consumer index to produce buffers for firmware
- */
-void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
+void qede_fill_by_demand_stats(struct qede_dev *edev)
 {
-       struct sw_rx_data *curr_cons;
+       struct qed_eth_stats stats;
 
-       for (; count > 0; count--) {
-               curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
-               qede_reuse_page(rxq, curr_cons);
-               qede_rx_bd_ring_consume(rxq);
-       }
-}
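+       /* Pull the latest vport statistics from the qed core and mirror
+        * them into the driver's local stats block.
+        */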
+       edev->ops->get_vport_stats(edev->cdev, &stats);
+       edev->stats.no_buff_discards = stats.no_buff_discards;
+       edev->stats.packet_too_big_discard = stats.packet_too_big_discard;
+       edev->stats.ttl0_discard = stats.ttl0_discard;
+       edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
+       edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
+       edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
+       edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
+       edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
+       edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
+       edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
+       edev->stats.mac_filter_discards = stats.mac_filter_discards;
 
-static int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
-{
-       struct sw_rx_data *sw_rx_data;
-       struct eth_rx_bd *rx_bd;
-       dma_addr_t mapping;
-       struct page *data;
-
-       data = alloc_pages(GFP_ATOMIC, 0);
-       if (unlikely(!data))
-               return -ENOMEM;
-
-       /* Map the entire page as it would be used
-        * for multiple RX buffer segment size mapping.
-        */
-       mapping = dma_map_page(rxq->dev, data, 0,
-                              PAGE_SIZE, rxq->data_direction);
-       if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
-               __free_page(data);
-               return -ENOMEM;
-       }
-
-       sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
-       sw_rx_data->page_offset = 0;
-       sw_rx_data->data = data;
-       sw_rx_data->mapping = mapping;
-
-       /* Advance PROD and get BD pointer */
-       rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
-       WARN_ON(!rx_bd);
-       rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
-       rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
-
-       rxq->sw_rx_prod++;
-
-       return 0;
-}
-
-static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
-                                        struct sw_rx_data *curr_cons)
-{
-       /* Move to the next segment in the page */
-       curr_cons->page_offset += rxq->rx_buf_seg_size;
-
-       if (curr_cons->page_offset == PAGE_SIZE) {
-               if (unlikely(qede_alloc_rx_buffer(rxq))) {
-                       /* Since we failed to allocate new buffer
-                        * current buffer can be used again.
-                        */
-                       curr_cons->page_offset -= rxq->rx_buf_seg_size;
-
-                       return -ENOMEM;
-               }
-
-               dma_unmap_page(rxq->dev, curr_cons->mapping,
-                              PAGE_SIZE, rxq->data_direction);
-       } else {
-               /* Increment refcount of the page as we don't want
-                * network stack to take the ownership of the page
-                * which can be recycled multiple times by the driver.
-                */
-               page_ref_inc(curr_cons->data);
-               qede_reuse_page(rxq, curr_cons);
-       }
-
-       return 0;
-}
-
-void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
-{
-       u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
-       u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
-       struct eth_rx_prod_data rx_prods = {0};
-
-       /* Update producers */
-       rx_prods.bd_prod = cpu_to_le16(bd_prod);
-       rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
-
-       /* Make sure that the BD and SGE data is updated before updating the
-        * producers since FW might read the BD/SGE right after the producer
-        * is updated.
-        */
-       wmb();
-
-       internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
-                       (u32 *)&rx_prods);
-
-       /* mmiowb is needed to synchronize doorbell writes from more than one
-        * processor. It guarantees that the write arrives to the device before
-        * the napi lock is released and another qede_poll is called (possibly
-        * on another CPU). Without this barrier, the next doorbell can bypass
-        * this doorbell. This is applicable to IA64/Altix systems.
-        */
-       mmiowb();
-}
-
-static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
-{
-       enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
-       enum rss_hash_type htype;
-       u32 hash = 0;
-
-       htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
-       if (htype) {
-               hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
-                            (htype == RSS_HASH_TYPE_IPV6)) ?
-                           PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
-               hash = le32_to_cpu(rss_hash);
-       }
-       skb_set_hash(skb, hash, hash_type);
-}
-
-static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
-{
-       skb_checksum_none_assert(skb);
-
-       if (csum_flag & QEDE_CSUM_UNNECESSARY)
-               skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-       if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
-               skb->csum_level = 1;
-}
-
-static inline void qede_skb_receive(struct qede_dev *edev,
-                                   struct qede_fastpath *fp,
-                                   struct qede_rx_queue *rxq,
-                                   struct sk_buff *skb, u16 vlan_tag)
-{
-       if (vlan_tag)
-               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
-       napi_gro_receive(&fp->napi, skb);
-       fp->rxq->rcv_pkts++;
-}
-
-static void qede_set_gro_params(struct qede_dev *edev,
-                               struct sk_buff *skb,
-                               struct eth_fast_path_rx_tpa_start_cqe *cqe)
-{
-       u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
-
-       if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
-           PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
-               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
-       else
-               skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
-
-       skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
-                                       cqe->header_len;
-}
-
-static int qede_fill_frag_skb(struct qede_dev *edev,
-                             struct qede_rx_queue *rxq,
-                             u8 tpa_agg_index, u16 len_on_bd)
-{
-       struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
-                                                        NUM_RX_BDS_MAX];
-       struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
-       struct sk_buff *skb = tpa_info->skb;
-
-       if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
-               goto out;
-
-       /* Add one frag and update the appropriate fields in the skb */
-       skb_fill_page_desc(skb, tpa_info->frag_id++,
-                          current_bd->data, current_bd->page_offset,
-                          len_on_bd);
-
-       if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
-               /* Incr page ref count to reuse on allocation failure
-                * so that it doesn't get freed while freeing SKB.
-                */
-               page_ref_inc(current_bd->data);
-               goto out;
-       }
-
-       qed_chain_consume(&rxq->rx_bd_ring);
-       rxq->sw_rx_cons++;
-
-       skb->data_len += len_on_bd;
-       skb->truesize += rxq->rx_buf_seg_size;
-       skb->len += len_on_bd;
-
-       return 0;
-
-out:
-       tpa_info->state = QEDE_AGG_STATE_ERROR;
-       qede_recycle_rx_bd_ring(rxq, 1);
-
-       return -ENOMEM;
-}
-
-static void qede_tpa_start(struct qede_dev *edev,
-                          struct qede_rx_queue *rxq,
-                          struct eth_fast_path_rx_tpa_start_cqe *cqe)
-{
-       struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
-       struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
-       struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
-       struct sw_rx_data *replace_buf = &tpa_info->buffer;
-       dma_addr_t mapping = tpa_info->buffer_mapping;
-       struct sw_rx_data *sw_rx_data_cons;
-       struct sw_rx_data *sw_rx_data_prod;
-
-       sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
-       sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
-
-       /* Use the pre-allocated replacement buffer - we can't release the
-        * aggregation's start buffer until it's over, and we don't want to
-        * risk an allocation failure here, so re-allocate once the
-        * aggregation is over.
-        */
-       sw_rx_data_prod->mapping = replace_buf->mapping;
-
-       sw_rx_data_prod->data = replace_buf->data;
-       rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
-       rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
-       sw_rx_data_prod->page_offset = replace_buf->page_offset;
-
-       rxq->sw_rx_prod++;
-
-       /* Move the partial skb's buffer from cons to the pool (don't unmap
-        * yet); save the mapping in case we drop the packet later on.
-        */
-       tpa_info->buffer = *sw_rx_data_cons;
-       mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
-                          le32_to_cpu(rx_bd_cons->addr.lo));
-
-       tpa_info->buffer_mapping = mapping;
-       rxq->sw_rx_cons++;
-
-       /* Set the TPA state to 'start' only if we are able to allocate an
-        * skb for this aggregation; otherwise mark it as an error and the
-        * aggregation will be dropped.
-        */
-       tpa_info->skb = netdev_alloc_skb(edev->ndev,
-                                        le16_to_cpu(cqe->len_on_first_bd));
-       if (unlikely(!tpa_info->skb)) {
-               DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
-               tpa_info->state = QEDE_AGG_STATE_ERROR;
-               goto cons_buf;
-       }
-
-       /* Start filling in the aggregation info */
-       skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
-       tpa_info->frag_id = 0;
-       tpa_info->state = QEDE_AGG_STATE_START;
-
-       /* Store some information from first CQE */
-       tpa_info->start_cqe_placement_offset = cqe->placement_offset;
-       tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
-       if ((le16_to_cpu(cqe->pars_flags.flags) >>
-            PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
-           PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
-               tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
-       else
-               tpa_info->vlan_tag = 0;
-
-       qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
-
-       /* This is needed in order to enable forwarding support */
-       qede_set_gro_params(edev, tpa_info->skb, cqe);
-
-cons_buf: /* We still need to handle bd_len_list to consume buffers */
-       if (likely(cqe->ext_bd_len_list[0]))
-               qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
-                                  le16_to_cpu(cqe->ext_bd_len_list[0]));
-
-       if (unlikely(cqe->ext_bd_len_list[1])) {
-               DP_ERR(edev,
-                      "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
-               tpa_info->state = QEDE_AGG_STATE_ERROR;
-       }
-}
-
-#ifdef CONFIG_INET
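-/* FW-aggregated packets don't carry a valid TCP pseudo-header checksum, so
- * recompute it from the IP addresses and the aggregated length before
- * handing the skb to tcp_gro_complete().
- */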
-static void qede_gro_ip_csum(struct sk_buff *skb)
-{
-       const struct iphdr *iph = ip_hdr(skb);
-       struct tcphdr *th;
-
-       skb_set_transport_header(skb, sizeof(struct iphdr));
-       th = tcp_hdr(skb);
-
-       th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
-                                 iph->saddr, iph->daddr, 0);
-
-       tcp_gro_complete(skb);
-}
-
-static void qede_gro_ipv6_csum(struct sk_buff *skb)
-{
-       struct ipv6hdr *iph = ipv6_hdr(skb);
-       struct tcphdr *th;
-
-       skb_set_transport_header(skb, sizeof(struct ipv6hdr));
-       th = tcp_hdr(skb);
-
-       th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
-                                 &iph->saddr, &iph->daddr, 0);
-       tcp_gro_complete(skb);
-}
-#endif
-
-static void qede_gro_receive(struct qede_dev *edev,
-                            struct qede_fastpath *fp,
-                            struct sk_buff *skb,
-                            u16 vlan_tag)
-{
-       /* FW can send a single MTU-sized packet from the GRO flow (due to
-        * aggregation timeout, last segment, etc.) which is not expected to
-        * be a GRO packet. If the skb has zero frags then simply push it up
-        * the stack as a non-GSO skb.
-        */
-       if (unlikely(!skb->data_len)) {
-               skb_shinfo(skb)->gso_type = 0;
-               skb_shinfo(skb)->gso_size = 0;
-               goto send_skb;
-       }
-
-#ifdef CONFIG_INET
-       if (skb_shinfo(skb)->gso_size) {
-               skb_reset_network_header(skb);
-
-               switch (skb->protocol) {
-               case htons(ETH_P_IP):
-                       qede_gro_ip_csum(skb);
-                       break;
-               case htons(ETH_P_IPV6):
-                       qede_gro_ipv6_csum(skb);
-                       break;
-               default:
-                       DP_ERR(edev,
-                              "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
-                              ntohs(skb->protocol));
-               }
-       }
-#endif
-
-send_skb:
-       skb_record_rx_queue(skb, fp->rxq->rxq_id);
-       qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
-}
-
-static inline void qede_tpa_cont(struct qede_dev *edev,
-                                struct qede_rx_queue *rxq,
-                                struct eth_fast_path_rx_tpa_cont_cqe *cqe)
-{
-       int i;
-
-       for (i = 0; cqe->len_list[i]; i++)
-               qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
-                                  le16_to_cpu(cqe->len_list[i]));
-
-       if (unlikely(i > 1))
-               DP_ERR(edev,
-                      "Strange - TPA cont with more than a single len_list entry\n");
-}
-
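-/* Close an aggregation: attach any remaining BDs, sanity-check the BD count
- * and total length against the CQE, copy the headers from the stashed first
- * buffer into the skb's linear area, and pass the result to the GRO path.
- */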
-static void qede_tpa_end(struct qede_dev *edev,
-                        struct qede_fastpath *fp,
-                        struct eth_fast_path_rx_tpa_end_cqe *cqe)
-{
-       struct qede_rx_queue *rxq = fp->rxq;
-       struct qede_agg_info *tpa_info;
-       struct sk_buff *skb;
-       int i;
-
-       tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
-       skb = tpa_info->skb;
-
-       for (i = 0; cqe->len_list[i]; i++)
-               qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
-                                  le16_to_cpu(cqe->len_list[i]));
-       if (unlikely(i > 1))
-               DP_ERR(edev,
-                      "Strange - TPA end with more than a single len_list entry\n");
-
-       if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
-               goto err;
-
-       /* Sanity */
-       if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
-               DP_ERR(edev,
-                      "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
-                      cqe->num_of_bds, tpa_info->frag_id);
-       if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
-               DP_ERR(edev,
-                      "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
-                      le16_to_cpu(cqe->total_packet_len), skb->len);
-
-       memcpy(skb->data,
-              page_address(tpa_info->buffer.data) +
-              tpa_info->start_cqe_placement_offset +
-              tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);
-
-       /* Finalize the SKB */
-       skb->protocol = eth_type_trans(skb, edev->ndev);
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
-
-       /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
-        * to skb_shinfo(skb)->gso_segs
-        */
-       NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
-
-       qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
-
-       tpa_info->state = QEDE_AGG_STATE_NONE;
-
-       return;
-err:
-       tpa_info->state = QEDE_AGG_STATE_NONE;
-       dev_kfree_skb_any(tpa_info->skb);
-       tpa_info->skb = NULL;
-}
-
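-/* The checksum validation below works by "arming" error bits: for each L4
- * checksum that FW reports as calculated, the matching error bit is added to
- * csum_flag, and the IP-header error bits are always armed. If any armed bit
- * is actually set in the parsing flags, the packet is reported as
- * QEDE_CSUM_ERROR; otherwise the calculated checksums are surfaced to the
- * stack as CHECKSUM_UNNECESSARY.
- */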
-static bool qede_tunn_exist(u16 flag)
-{
-       return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
-                         PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
-}
-
-static u8 qede_check_tunn_csum(u16 flag)
-{
-       u16 csum_flag = 0;
-       u8 tcsum = 0;
-
-       if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
-                   PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
-               csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
-                            PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
-
-       if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
-                   PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
-               csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
-                            PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
-               tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
-       }
-
-       csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
-                    PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
-                    PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
-                    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
-
-       if (csum_flag & flag)
-               return QEDE_CSUM_ERROR;
-
-       return QEDE_CSUM_UNNECESSARY | tcsum;
-}
-
-static u8 qede_check_notunn_csum(u16 flag)
-{
-       u16 csum_flag = 0;
-       u8 csum = 0;
-
-       if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
-                   PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
-               csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
-                            PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
-               csum = QEDE_CSUM_UNNECESSARY;
-       }
-
-       csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
-                    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
-
-       if (csum_flag & flag)
-               return QEDE_CSUM_ERROR;
-
-       return csum;
-}
-
-static u8 qede_check_csum(u16 flag)
-{
-       if (!qede_tunn_exist(flag))
-               return qede_check_notunn_csum(flag);
-       else
-               return qede_check_tunn_csum(flag);
-}
-
-static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
-                                     u16 flag)
-{
-       u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
-
-       if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
-                            ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
-           (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
-                    PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
-               return true;
-
-       return false;
-}
-
-/* Return true iff packet is to be passed to stack */
-static bool qede_rx_xdp(struct qede_dev *edev,
-                       struct qede_fastpath *fp,
-                       struct qede_rx_queue *rxq,
-                       struct bpf_prog *prog,
-                       struct sw_rx_data *bd,
-                       struct eth_fast_path_rx_reg_cqe *cqe)
-{
-       u16 len = le16_to_cpu(cqe->len_on_first_bd);
-       struct xdp_buff xdp;
-       enum xdp_action act;
-
-       xdp.data = page_address(bd->data) + cqe->placement_offset;
-       xdp.data_end = xdp.data + len;
-
-       /* Queues currently always undergo a full reset, so until there is
-        * an atomic program replace, just mark the read side for the map
-        * helpers.
-        */
-       rcu_read_lock();
-       act = bpf_prog_run_xdp(prog, &xdp);
-       rcu_read_unlock();
-
-       if (act == XDP_PASS)
-               return true;
-
-       /* Count number of packets not to be passed to stack */
-       rxq->xdp_no_pass++;
-
-       switch (act) {
-       case XDP_TX:
-               /* We need the replacement buffer before transmit. */
-               if (qede_alloc_rx_buffer(rxq)) {
-                       qede_recycle_rx_bd_ring(rxq, 1);
-                       return false;
-               }
-
-               /* Now if there's a transmission problem, we'd still have to
-                * throw away the current buffer, as the replacement was
-                * already allocated.
-                */
-               if (qede_xdp_xmit(edev, fp, bd, cqe->placement_offset, len)) {
-                       dma_unmap_page(rxq->dev, bd->mapping,
-                                      PAGE_SIZE, DMA_BIDIRECTIONAL);
-                       __free_page(bd->data);
-               }
-
-               /* Regardless, we've consumed an Rx BD */
-               qede_rx_bd_ring_consume(rxq);
-               return false;
-
-       default:
-               bpf_warn_invalid_xdp_action(act);
-       case XDP_ABORTED:
-       case XDP_DROP:
-               qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
-       }
-
-       return false;
-}
-
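-/* Build an skb for the first BD of a regular packet. Small packets (up to
- * edev->rx_copybreak bytes) are copied into the skb's linear area so the
- * mapped page can be reused immediately; larger packets attach the page as
- * a fragment and pull only the packet headers into the linear area.
- */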
-static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
-                                           struct qede_rx_queue *rxq,
-                                           struct sw_rx_data *bd, u16 len,
-                                           u16 pad)
-{
-       unsigned int offset = bd->page_offset;
-       struct skb_frag_struct *frag;
-       struct page *page = bd->data;
-       unsigned int pull_len;
-       struct sk_buff *skb;
-       unsigned char *va;
-
-       /* Allocate a new SKB with a sufficiently large header length */
-       skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
-       if (unlikely(!skb))
-               return NULL;
-
-       /* Copy data into the SKB - if it's small, we can simply copy it and
-        * re-use the already allocated & mapped memory.
-        */
-       if (len + pad <= edev->rx_copybreak) {
-               memcpy(skb_put(skb, len),
-                      page_address(page) + pad + offset, len);
-               qede_reuse_page(rxq, bd);
-               goto out;
-       }
-
-       frag = &skb_shinfo(skb)->frags[0];
-
-       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-                       page, pad + offset, len, rxq->rx_buf_seg_size);
-
-       va = skb_frag_address(frag);
-       pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
-
-       /* Align the pull_len to optimize memcpy */
-       memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
-
-       /* Correct the skb & frag sizes offset after the pull */
-       skb_frag_size_sub(frag, pull_len);
-       frag->page_offset += pull_len;
-       skb->data_len -= pull_len;
-       skb->tail += pull_len;
-
-       if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
-               /* Increment the page ref count to allow reuse on allocation
-                * failure, so that the page doesn't get freed while freeing
-                * the SKB [as it's already mapped there].
-                */
-               page_ref_inc(page);
-               dev_kfree_skb_any(skb);
-               return NULL;
-       }
-
-out:
-       /* We've consumed the first BD and prepared an SKB */
-       qede_rx_bd_ring_consume(rxq);
-       return skb;
-}
-
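-/* Attach the remaining BDs of a jumbo packet to the skb. Each BD is
- * consumed only after a replacement Rx buffer has been allocated for it.
- * The return value is the number of BDs that could not be mapped; non-zero
- * means the caller must recycle those BDs and drop the skb.
- */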
-static int qede_rx_build_jumbo(struct qede_dev *edev,
-                              struct qede_rx_queue *rxq,
-                              struct sk_buff *skb,
-                              struct eth_fast_path_rx_reg_cqe *cqe,
-                              u16 first_bd_len)
-{
-       u16 pkt_len = le16_to_cpu(cqe->pkt_len);
-       struct sw_rx_data *bd;
-       u16 bd_cons_idx;
-       u8 num_frags;
-
-       pkt_len -= first_bd_len;
-
-       /* We've already used one BD for the SKB. Now take care of the rest */
-       for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
-               u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
-                   pkt_len;
-
-               if (unlikely(!cur_size)) {
-                       DP_ERR(edev,
-                              "Still got %d BDs for mapping jumbo, but length became 0\n",
-                              num_frags);
-                       goto out;
-               }
-
-               /* We need a replacement buffer for each BD */
-               if (unlikely(qede_alloc_rx_buffer(rxq)))
-                       goto out;
-
-               /* Now that we've allocated the replacement buffer,
-                * we can safely consume the next BD and map it to the SKB.
-                */
-               bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
-               bd = &rxq->sw_rx_ring[bd_cons_idx];
-               qede_rx_bd_ring_consume(rxq);
-
-               dma_unmap_page(rxq->dev, bd->mapping,
-                              PAGE_SIZE, DMA_FROM_DEVICE);
-
-               skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
-                                  bd->data, 0, cur_size);
-
-               skb->truesize += PAGE_SIZE;
-               skb->data_len += cur_size;
-               skb->len += cur_size;
-               pkt_len -= cur_size;
-       }
-
-       if (unlikely(pkt_len))
-               DP_ERR(edev,
-                      "Mapped all BDs of jumbo, but still have %d bytes\n",
-                      pkt_len);
-
-out:
-       return num_frags;
-}
-
-static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
-                                  struct qede_fastpath *fp,
-                                  struct qede_rx_queue *rxq,
-                                  union eth_rx_cqe *cqe,
-                                  enum eth_rx_cqe_type type)
-{
-       switch (type) {
-       case ETH_RX_CQE_TYPE_TPA_START:
-               qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
-               return 0;
-       case ETH_RX_CQE_TYPE_TPA_CONT:
-               qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
-               return 0;
-       case ETH_RX_CQE_TYPE_TPA_END:
-               qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
-               return 1;
-       default:
-               return 0;
-       }
-}
-
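-/* Process a single Rx CQE: slowpath events are forwarded to qed, TPA CQEs
- * are dispatched to the TPA handlers above, and regular CQEs are run
- * through XDP (if a program is attached), validated, built into an skb and
- * handed to the stack.
- */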
-static int qede_rx_process_cqe(struct qede_dev *edev,
-                              struct qede_fastpath *fp,
-                              struct qede_rx_queue *rxq)
-{
-       struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
-       struct eth_fast_path_rx_reg_cqe *fp_cqe;
-       u16 len, pad, bd_cons_idx, parse_flag;
-       enum eth_rx_cqe_type cqe_type;
-       union eth_rx_cqe *cqe;
-       struct sw_rx_data *bd;
-       struct sk_buff *skb;
-       __le16 flags;
-       u8 csum_flag;
-
-       /* Get the CQE from the completion ring */
-       cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
-       cqe_type = cqe->fast_path_regular.type;
-
-       /* Process an unlikely slowpath event */
-       if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
-               struct eth_slow_path_rx_cqe *sp_cqe;
-
-               sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
-               edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
-               return 0;
-       }
-
-       /* Handle TPA cqes */
-       if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
-               return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
-
-       /* Get the data from the SW ring; consume it only after it's evident
-        * we won't recycle it.
-        */
-       bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
-       bd = &rxq->sw_rx_ring[bd_cons_idx];
-
-       fp_cqe = &cqe->fast_path_regular;
-       len = le16_to_cpu(fp_cqe->len_on_first_bd);
-       pad = fp_cqe->placement_offset;
-
-       /* Run eBPF program if one is attached */
-       if (xdp_prog)
-               if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
-                       return 1;
-
-       /* If this is an error packet then drop it */
-       flags = cqe->fast_path_regular.pars_flags.flags;
-       parse_flag = le16_to_cpu(flags);
-
-       csum_flag = qede_check_csum(parse_flag);
-       if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
-               if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) {
-                       rxq->rx_ip_frags++;
-               } else {
-                       DP_NOTICE(edev,
-                                 "CQE has error, flags = %x, dropping incoming packet\n",
-                                 parse_flag);
-                       rxq->rx_hw_errors++;
-                       qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
-                       return 0;
-               }
-       }
-
-       /* Basic validation passed; we need to prepare an SKB. On success
-        * this also guarantees that the first BD is finally consumed.
-        */
-       skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
-       if (!skb) {
-               rxq->rx_alloc_errors++;
-               qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
-               return 0;
-       }
-
-       /* In case of a jumbo packet, several PAGE_SIZE'd buffers will be
-        * pointed to by a single CQE.
-        */
-       if (fp_cqe->bd_num > 1) {
-               u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
-                                                        fp_cqe, len);
-
-               if (unlikely(unmapped_frags > 0)) {
-                       qede_recycle_rx_bd_ring(rxq, unmapped_frags);
-                       dev_kfree_skb_any(skb);
-                       return 0;
-               }
-       }
-
-       /* The SKB contains all the data. Now prepare meta-magic */
-       skb->protocol = eth_type_trans(skb, edev->ndev);
-       qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
-       qede_set_skb_csum(skb, csum_flag);
-       skb_record_rx_queue(skb, rxq->rxq_id);
-
-       /* SKB is prepared - pass it to stack */
-       qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
-
-       return 1;
-}
-
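-/* NAPI Rx loop: read the completion-ring index published by FW, then
- * process CQEs until the SW consumer catches up with it or the budget is
- * exhausted, and finally publish the updated Rx producers to HW.
- */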
-static int qede_rx_int(struct qede_fastpath *fp, int budget)
-{
-       struct qede_rx_queue *rxq = fp->rxq;
-       struct qede_dev *edev = fp->edev;
-       u16 hw_comp_cons, sw_comp_cons;
-       int work_done = 0;
-
-       hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
-       sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
-
-       /* Memory barrier to prevent the CPU from speculatively reading the
-        * CQE/BD in the while-loop before reading hw_comp_cons. If the CQE
-        * were read before FW writes it, and FW then writes the CQE and SB
-        * before the CPU reads hw_comp_cons, the CPU would use a stale CQE.
-        */
-       rmb();
-
-       /* Loop to complete all indicated BDs */
-       while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
-               qede_rx_process_cqe(edev, fp, rxq);
-               qed_chain_recycle_consumed(&rxq->rx_comp_ring);
-               sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
-               work_done++;
-       }
-
-       /* Update producers */
-       qede_update_rx_prod(edev, rxq);
-
-       return work_done;
-}
-
-static bool qede_poll_is_more_work(struct qede_fastpath *fp)
-{
-       qed_sb_update_sb_idx(fp->sb_info);
-
-       /* *_has_*_work() reads the status block, so we need to ensure the
-        * status block indices have actually been read (qed_sb_update_sb_idx)
-        * before this check (*_has_*_work). Otherwise, if a DMA happened
-        * right after qede_has_rx_work and there were no rmb, the read in
-        * qed_sb_update_sb_idx could be postponed to just before *_ack_sb,
-        * and a "newer" status-block value would be written back to HW. In
-        * that case no further interrupt would arrive until the next status
-        * block update, even though unhandled work remains.
-        */
-       rmb();
-
-       if (likely(fp->type & QEDE_FASTPATH_RX))
-               if (qede_has_rx_work(fp->rxq))
-                       return true;
-
-       if (fp->type & QEDE_FASTPATH_XDP)
-               if (qede_txq_has_work(fp->xdp_tx))
-                       return true;
-
-       if (likely(fp->type & QEDE_FASTPATH_TX))
-               if (qede_txq_has_work(fp->txq))
-                       return true;
-
-       return false;
-}
-
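-/* NAPI poll handler: complete pending Tx and XDP-Tx work, perform Rx work
- * within the budget, and re-arm the status-block interrupt only once
- * qede_poll_is_more_work() confirms nothing new was posted in the meantime.
- * If XDP frames were queued for transmission, ring their doorbell here.
- */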
-static int qede_poll(struct napi_struct *napi, int budget)
-{
-       struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
-                                               napi);
-       struct qede_dev *edev = fp->edev;
-       int rx_work_done = 0;
-
-       if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
-               qede_tx_int(edev, fp->txq);
-
-       if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
-               qede_xdp_tx_int(edev, fp->xdp_tx);
-
-       rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
-                       qede_has_rx_work(fp->rxq)) ?
-                       qede_rx_int(fp, budget) : 0;
-       if (rx_work_done < budget) {
-               if (!qede_poll_is_more_work(fp)) {
-                       napi_complete(napi);
-
-                       /* Update and reenable interrupts */
-                       qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
-               } else {
-                       rx_work_done = budget;
-               }
-       }
-
-       if (fp->xdp_xmit) {
-               u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
-
-               fp->xdp_xmit = 0;
-               fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
-               qede_update_tx_producer(fp->xdp_tx);
-       }
-
-       return rx_work_done;
-}
-
-static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
-{
-       struct qede_fastpath *fp = fp_cookie;
-
-       qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
-
-       napi_schedule_irqoff(&fp->napi);
-       return IRQ_HANDLED;
-}
-
-/* -------------------------------------------------------------------------
- * END OF FAST-PATH
- * -------------------------------------------------------------------------
- */
-
-static int qede_open(struct net_device *ndev);
-static int qede_close(struct net_device *ndev);
-static int qede_set_mac_addr(struct net_device *ndev, void *p);
-static void qede_set_rx_mode(struct net_device *ndev);
-static void qede_config_rx_mode(struct net_device *ndev);
-
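-/* Unicast MAC/VLAN filters are configured by filling a qed_filter_params
- * command of type QED_FILTER_TYPE_UCAST and passing it to the qed core via
- * edev->ops->filter_config(); the opcode (e.g. QED_FILTER_XCAST_TYPE_ADD or
- * QED_FILTER_XCAST_TYPE_DEL) selects the operation.
- */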
-static int qede_set_ucast_rx_mac(struct qede_dev *edev,
-                                enum qed_filter_xcast_params_type opcode,
-                                unsigned char mac[ETH_ALEN])
-{
-       struct qed_filter_params filter_cmd;
-
-       memset(&filter_cmd, 0, sizeof(filter_cmd));
-       filter_cmd.type = QED_FILTER_TYPE_UCAST;
-       filter_cmd.filter.ucast.type = opcode;
-       filter_cmd.filter.ucast.mac_valid = 1;
-       ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
-
-       return edev->ops->filter_config(edev->cdev, &filter_cmd);
-}
-
-static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
-                                 enum qed_filter_xcast_params_type opcode,
-                                 u16 vid)
-{
-       struct qed_filter_params filter_cmd;
-
-       memset(&filter_cmd, 0, sizeof(filter_cmd));
-       filter_cmd.type = QED_FILTER_TYPE_UCAST;
-       filter_cmd.filter.ucast.type = opcode;
-       filter_cmd.filter.ucast.vlan_valid = 1;
-       filter_cmd.filter.ucast.vlan = vid;
-
-       return edev->ops->filter_config(edev->cdev, &filter_cmd);
-}
-
-void qede_fill_by_demand_stats(struct qede_dev *edev)
-{
-       struct qed_eth_stats stats;
-
-       edev->ops->get_vport_stats(edev->cdev, &stats);
-       edev->stats.no_buff_discards = stats.no_buff_discards;
-       edev->stats.packet_too_big_discard = stats.packet_too_big_discard;
-       edev->stats.ttl0_discard = stats.ttl0_discard;
-       edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
-       edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
-       edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
-       edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
-       edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
-       edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
-       edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
-       edev->stats.mac_filter_discards = stats.mac_filter_discards;
-
-       edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
-       edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
-       edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
-       edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
-       edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
-       edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
-       edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
-       edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
-       edev->stats.coalesced_events = stats.tpa_coalesced_events;
-       edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
-       edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
-       edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
+       edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
+       edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
+       edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
+       edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
+       edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
+       edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
+       edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
+       edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
+       edev->stats.coalesced_events = stats.tpa_coalesced_events;
+       edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
+       edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
+       edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
 
        edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
        edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
@@ -2010,531 +390,120 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
                                stats.tx_4096_to_9216_byte_packets;
        edev->stats.tx_9217_to_16383_byte_packets =
                                stats.tx_9217_to_16383_byte_packets;
-       edev->stats.tx_pause_frames = stats.tx_pause_frames;
-       edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
-       edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
-       edev->stats.tx_total_collisions = stats.tx_total_collisions;
-       edev->stats.brb_truncates = stats.brb_truncates;
-       edev->stats.brb_discards = stats.brb_discards;
-       edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
-}
-
-static
-struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
-                                          struct rtnl_link_stats64 *stats)
-{
-       struct qede_dev *edev = netdev_priv(dev);
-
-       qede_fill_by_demand_stats(edev);
-
-       stats->rx_packets = edev->stats.rx_ucast_pkts +
-                           edev->stats.rx_mcast_pkts +
-                           edev->stats.rx_bcast_pkts;
-       stats->tx_packets = edev->stats.tx_ucast_pkts +
-                           edev->stats.tx_mcast_pkts +
-                           edev->stats.tx_bcast_pkts;
-
-       stats->rx_bytes = edev->stats.rx_ucast_bytes +
-                         edev->stats.rx_mcast_bytes +
-                         edev->stats.rx_bcast_bytes;
-
-       stats->tx_bytes = edev->stats.tx_ucast_bytes +
-                         edev->stats.tx_mcast_bytes +
-                         edev->stats.tx_bcast_bytes;
-
-       stats->tx_errors = edev->stats.tx_err_drop_pkts;
-       stats->multicast = edev->stats.rx_mcast_pkts +
-                          edev->stats.rx_bcast_pkts;
-
-       stats->rx_fifo_errors = edev->stats.no_buff_discards;
-
-       stats->collisions = edev->stats.tx_total_collisions;
-       stats->rx_crc_errors = edev->stats.rx_crc_errors;
-       stats->rx_frame_errors = edev->stats.rx_align_errors;
-
-       return stats;
-}
-
-#ifdef CONFIG_QED_SRIOV
-static int qede_get_vf_config(struct net_device *dev, int vfidx,
-                             struct ifla_vf_info *ivi)
-{
-       struct qede_dev *edev = netdev_priv(dev);
-
-       if (!edev->ops)
-               return -EINVAL;
-
-       return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
-}
-
-static int qede_set_vf_rate(struct net_device *dev, int vfidx,
-                           int min_tx_rate, int max_tx_rate)
-{
-       struct qede_dev *edev = netdev_priv(dev);
-
-       return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
-                                       max_tx_rate);
-}
-
-static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
-{
-       struct qede_dev *edev = netdev_priv(dev);
-
-       if (!edev->ops)
-               return -EINVAL;
-
-       return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
-}
-
-static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
-                                 int link_state)
-{
-       struct qede_dev *edev = netdev_priv(dev);
-
-       if (!edev->ops)
-               return -EINVAL;
-
-       return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
-}
-#endif
-
-static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
-{
-       struct qed_update_vport_params params;
-       int rc;
-
-       /* Proceed only if action actually needs to be performed */
-       if (edev->accept_any_vlan == action)
-               return;
-
-       memset(&params, 0, sizeof(params));
-
-       params.vport_id = 0;
-       params.accept_any_vlan = action;
-       params.update_accept_any_vlan_flg = 1;
-
-       rc = edev->ops->vport_update(edev->cdev, &params);
-       if (rc) {
-               DP_ERR(edev, "Failed to %s accept-any-vlan\n",
-                      action ? "enable" : "disable");
-       } else {
-               DP_INFO(edev, "%s accept-any-vlan\n",
-                       action ? "enabled" : "disabled");
-               edev->accept_any_vlan = action;
-       }
-}
-
-static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
-{
-       struct qede_dev *edev = netdev_priv(dev);
-       struct qede_vlan *vlan, *tmp;
-       int rc = 0;
-
-       DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
-
-       vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
-       if (!vlan) {
-               DP_INFO(edev, "Failed to allocate struct for vlan\n");
-               return -ENOMEM;
-       }
-       INIT_LIST_HEAD(&vlan->list);
-       vlan->vid = vid;
-       vlan->configured = false;
-
-       /* Verify vlan isn't already configured */
-       list_for_each_entry(tmp, &edev->vlan_list, list) {
-               if (tmp->vid == vlan->vid) {
-                       DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
-                                  "vlan already configured\n");
-                       kfree(vlan);
-                       return -EEXIST;
-               }
-       }
-
-       /* If interface is down, cache this VLAN ID and return */
-       __qede_lock(edev);
-       if (edev->state != QEDE_STATE_OPEN) {
-               DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
-                          "Interface is down, VLAN %d will be configured when interface is up\n",
-                          vid);
-               if (vid != 0)
-                       edev->non_configured_vlans++;
-               list_add(&vlan->list, &edev->vlan_list);
-               goto out;
-       }
-
-       /* Check for the filter limit.
-        * Note - vlan0 has a reserved filter and can be added without
-        * worrying about quota
-        */
-       if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
-           (vlan->vid == 0)) {
-               rc = qede_set_ucast_rx_vlan(edev,
-                                           QED_FILTER_XCAST_TYPE_ADD,
-                                           vlan->vid);
-               if (rc) {
-                       DP_ERR(edev, "Failed to configure VLAN %d\n",
-                              vlan->vid);
-                       kfree(vlan);
-                       goto out;
-               }
-               vlan->configured = true;
-
-               /* The vlan0 filter doesn't consume from our quota */
-               if (vlan->vid != 0)
-                       edev->configured_vlans++;
-       } else {
-               /* Out of quota; Activate accept-any-VLAN mode */
-               if (!edev->non_configured_vlans)
-                       qede_config_accept_any_vlan(edev, true);
-
-               edev->non_configured_vlans++;
-       }
-
-       list_add(&vlan->list, &edev->vlan_list);
-
-out:
-       __qede_unlock(edev);
-       return rc;
-}
-
-static void qede_del_vlan_from_list(struct qede_dev *edev,
-                                   struct qede_vlan *vlan)
-{
-       /* The vlan0 filter doesn't consume from our quota */
-       if (vlan->vid != 0) {
-               if (vlan->configured)
-                       edev->configured_vlans--;
-               else
-                       edev->non_configured_vlans--;
-       }
-
-       list_del(&vlan->list);
-       kfree(vlan);
-}
-
-static int qede_configure_vlan_filters(struct qede_dev *edev)
-{
-       int rc = 0, real_rc = 0, accept_any_vlan = 0;
-       struct qed_dev_eth_info *dev_info;
-       struct qede_vlan *vlan = NULL;
-
-       if (list_empty(&edev->vlan_list))
-               return 0;
-
-       dev_info = &edev->dev_info;
-
-       /* Configure non-configured vlans */
-       list_for_each_entry(vlan, &edev->vlan_list, list) {
-               if (vlan->configured)
-                       continue;
-
-               /* We have used all our credits, now enable accept_any_vlan */
-               if ((vlan->vid != 0) &&
-                   (edev->configured_vlans == dev_info->num_vlan_filters)) {
-                       accept_any_vlan = 1;
-                       continue;
-               }
-
-               DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
-
-               rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
-                                           vlan->vid);
-               if (rc) {
-                       DP_ERR(edev, "Failed to configure VLAN %u\n",
-                              vlan->vid);
-                       real_rc = rc;
-                       continue;
-               }
-
-               vlan->configured = true;
-               /* vlan0 filter doesn't consume our VLAN filter's quota */
-               if (vlan->vid != 0) {
-                       edev->non_configured_vlans--;
-                       edev->configured_vlans++;
-               }
-       }
-
-       /* Enable accept_any_vlan mode if we have more VLANs than credits,
-        * or disable it if we've actually removed a non-configured vlan and
-        * all the remaining vlans are truly configured.
-        */
-
-       if (accept_any_vlan)
-               qede_config_accept_any_vlan(edev, true);
-       else if (!edev->non_configured_vlans)
-               qede_config_accept_any_vlan(edev, false);
-
-       return real_rc;
+       edev->stats.tx_pause_frames = stats.tx_pause_frames;
+       edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
+       edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
+       edev->stats.tx_total_collisions = stats.tx_total_collisions;
+       edev->stats.brb_truncates = stats.brb_truncates;
+       edev->stats.brb_discards = stats.brb_discards;
+       edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
 }
 
-static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+static void qede_get_stats64(struct net_device *dev,
+                            struct rtnl_link_stats64 *stats)
 {
        struct qede_dev *edev = netdev_priv(dev);
-       struct qede_vlan *vlan = NULL;
-       int rc = 0;
 
-       DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
-
-       /* Find whether entry exists */
-       __qede_lock(edev);
-       list_for_each_entry(vlan, &edev->vlan_list, list)
-               if (vlan->vid == vid)
-                       break;
+       qede_fill_by_demand_stats(edev);
 
-       if (!vlan || (vlan->vid != vid)) {
-               DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
-                          "Vlan isn't configured\n");
-               goto out;
-       }
+       stats->rx_packets = edev->stats.rx_ucast_pkts +
+                           edev->stats.rx_mcast_pkts +
+                           edev->stats.rx_bcast_pkts;
+       stats->tx_packets = edev->stats.tx_ucast_pkts +
+                           edev->stats.tx_mcast_pkts +
+                           edev->stats.tx_bcast_pkts;
 
-       if (edev->state != QEDE_STATE_OPEN) {
-               /* As the interface is already down, we don't have a VPORT
-                * instance from which to remove the vlan filter, so just
-                * update the vlan list.
-                */
-               DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
-                          "Interface is down, removing VLAN from list only\n");
-               qede_del_vlan_from_list(edev, vlan);
-               goto out;
-       }
+       stats->rx_bytes = edev->stats.rx_ucast_bytes +
+                         edev->stats.rx_mcast_bytes +
+                         edev->stats.rx_bcast_bytes;
 
-       /* Remove vlan */
-       if (vlan->configured) {
-               rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
-                                           vid);
-               if (rc) {
-                       DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
-                       goto out;
-               }
-       }
+       stats->tx_bytes = edev->stats.tx_ucast_bytes +
+                         edev->stats.tx_mcast_bytes +
+                         edev->stats.tx_bcast_bytes;
 
-       qede_del_vlan_from_list(edev, vlan);
+       stats->tx_errors = edev->stats.tx_err_drop_pkts;
+       stats->multicast = edev->stats.rx_mcast_pkts +
+                          edev->stats.rx_bcast_pkts;
 
-       /* We have removed a VLAN - see if we can now configure a
-        * non-configured VLAN from the list.
-        */
-       rc = qede_configure_vlan_filters(edev);
+       stats->rx_fifo_errors = edev->stats.no_buff_discards;
 
-out:
-       __qede_unlock(edev);
-       return rc;
+       stats->collisions = edev->stats.tx_total_collisions;
+       stats->rx_crc_errors = edev->stats.rx_crc_errors;
+       stats->rx_frame_errors = edev->stats.rx_align_errors;
 }
 
-static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
+#ifdef CONFIG_QED_SRIOV
+static int qede_get_vf_config(struct net_device *dev, int vfidx,
+                             struct ifla_vf_info *ivi)
 {
-       struct qede_vlan *vlan = NULL;
-
-       if (list_empty(&edev->vlan_list))
-               return;
-
-       list_for_each_entry(vlan, &edev->vlan_list, list) {
-               if (!vlan->configured)
-                       continue;
-
-               vlan->configured = false;
-
-               /* The vlan0 filter doesn't consume from our quota */
-               if (vlan->vid != 0) {
-                       edev->non_configured_vlans++;
-                       edev->configured_vlans--;
-               }
-
-               DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
-                          "marked vlan %d as non-configured\n", vlan->vid);
-       }
+       struct qede_dev *edev = netdev_priv(dev);
 
-       edev->accept_any_vlan = false;
-}
+       if (!edev->ops)
+               return -EINVAL;
 
-static void qede_set_features_reload(struct qede_dev *edev,
-                                    struct qede_reload_args *args)
-{
-       edev->ndev->features = args->u.features;
+       return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
 }
 
-int qede_set_features(struct net_device *dev, netdev_features_t features)
+static int qede_set_vf_rate(struct net_device *dev, int vfidx,
+                           int min_tx_rate, int max_tx_rate)
 {
        struct qede_dev *edev = netdev_priv(dev);
-       netdev_features_t changes = features ^ dev->features;
-       bool need_reload = false;
-
-       /* No action needed if hardware GRO is disabled during driver load */
-       if (changes & NETIF_F_GRO) {
-               if (dev->features & NETIF_F_GRO)
-                       need_reload = !edev->gro_disable;
-               else
-                       need_reload = edev->gro_disable;
-       }
-
-       if (need_reload) {
-               struct qede_reload_args args;
-
-               args.u.features = features;
-               args.func = &qede_set_features_reload;
 
-               /* Make sure that we definitely need to reload. If an eBPF
-                * program is attached, there will be no FW aggregations, so
-                * there is no need to actually reload.
-                */
-               __qede_lock(edev);
-               if (edev->xdp_prog)
-                       args.func(edev, &args);
-               else
-                       qede_reload(edev, &args, true);
-               __qede_unlock(edev);
-
-               return 1;
-       }
-
-       return 0;
+       return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
+                                       max_tx_rate);
 }
 
-static void qede_udp_tunnel_add(struct net_device *dev,
-                               struct udp_tunnel_info *ti)
+static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
 {
        struct qede_dev *edev = netdev_priv(dev);
-       u16 t_port = ntohs(ti->port);
-
-       switch (ti->type) {
-       case UDP_TUNNEL_TYPE_VXLAN:
-               if (edev->vxlan_dst_port)
-                       return;
-
-               edev->vxlan_dst_port = t_port;
-
-               DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
-                          t_port);
 
-               set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
-               break;
-       case UDP_TUNNEL_TYPE_GENEVE:
-               if (edev->geneve_dst_port)
-                       return;
-
-               edev->geneve_dst_port = t_port;
-
-               DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
-                          t_port);
-               set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
-               break;
-       default:
-               return;
-       }
+       if (!edev->ops)
+               return -EINVAL;
 
-       schedule_delayed_work(&edev->sp_task, 0);
+       return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
 }
 
-static void qede_udp_tunnel_del(struct net_device *dev,
-                               struct udp_tunnel_info *ti)
+static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
+                                 int link_state)
 {
        struct qede_dev *edev = netdev_priv(dev);
-       u16 t_port = ntohs(ti->port);
 
-       switch (ti->type) {
-       case UDP_TUNNEL_TYPE_VXLAN:
-               if (t_port != edev->vxlan_dst_port)
-                       return;
-
-               edev->vxlan_dst_port = 0;
-
-               DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
-                          t_port);
-
-               set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
-               break;
-       case UDP_TUNNEL_TYPE_GENEVE:
-               if (t_port != edev->geneve_dst_port)
-                       return;
-
-               edev->geneve_dst_port = 0;
-
-               DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
-                          t_port);
-               set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
-               break;
-       default:
-               return;
-       }
+       if (!edev->ops)
+               return -EINVAL;
 
-       schedule_delayed_work(&edev->sp_task, 0);
+       return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
 }
 
-/* 8B udp header + 8B base tunnel header + 32B option length */
-#define QEDE_MAX_TUN_HDR_LEN 48
-
-static netdev_features_t qede_features_check(struct sk_buff *skb,
-                                            struct net_device *dev,
-                                            netdev_features_t features)
+static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
 {
-       if (skb->encapsulation) {
-               u8 l4_proto = 0;
-
-               switch (vlan_get_protocol(skb)) {
-               case htons(ETH_P_IP):
-                       l4_proto = ip_hdr(skb)->protocol;
-                       break;
-               case htons(ETH_P_IPV6):
-                       l4_proto = ipv6_hdr(skb)->nexthdr;
-                       break;
-               default:
-                       return features;
-               }
+       struct qede_dev *edev = netdev_priv(dev);
 
-               /* Disable offloads for geneve tunnels, as HW can't parse a
-                * geneve header whose option length is greater than 32B.
-                */
-               if ((l4_proto == IPPROTO_UDP) &&
-                   ((skb_inner_mac_header(skb) -
-                     skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
-                       return features & ~(NETIF_F_CSUM_MASK |
-                                           NETIF_F_GSO_MASK);
-       }
+       if (!edev->ops)
+               return -EINVAL;
 
-       return features;
+       return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
 }
+#endif
 
-static void qede_xdp_reload_func(struct qede_dev *edev,
-                                struct qede_reload_args *args)
+static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-       struct bpf_prog *old;
-
-       old = xchg(&edev->xdp_prog, args->u.new_prog);
-       if (old)
-               bpf_prog_put(old);
-}
+       struct qede_dev *edev = netdev_priv(dev);
 
-static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
-{
-       struct qede_reload_args args;
+       if (!netif_running(dev))
+               return -EAGAIN;
 
-       if (prog && prog->xdp_adjust_head) {
-               DP_ERR(edev, "Does not support bpf_xdp_adjust_head()\n");
+       switch (cmd) {
+       case SIOCSHWTSTAMP:
+               return qede_ptp_hw_ts(edev, ifr);
+       default:
+               DP_VERBOSE(edev, QED_MSG_DEBUG,
+                          "default IOCTL cmd 0x%x\n", cmd);
                return -EOPNOTSUPP;
        }
 
-       /* If we're called, there was already a bpf reference increment */
-       args.func = &qede_xdp_reload_func;
-       args.u.new_prog = prog;
-       qede_reload(edev, &args, false);
-
        return 0;
 }
 
-static int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
-{
-       struct qede_dev *edev = netdev_priv(dev);
-
-       switch (xdp->command) {
-       case XDP_SETUP_PROG:
-               return qede_xdp_set(edev, xdp->prog);
-       case XDP_QUERY_PROG:
-               xdp->prog_attached = !!edev->xdp_prog;
-               return 0;
-       default:
-               return -EINVAL;
-       }
-}
-
 static const struct net_device_ops qede_netdev_ops = {
        .ndo_open = qede_open,
        .ndo_stop = qede_close,
@@ -2543,9 +512,11 @@ static const struct net_device_ops qede_netdev_ops = {
        .ndo_set_mac_address = qede_set_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = qede_change_mtu,
+       .ndo_do_ioctl = qede_ioctl,
 #ifdef CONFIG_QED_SRIOV
        .ndo_set_vf_mac = qede_set_vf_mac,
        .ndo_set_vf_vlan = qede_set_vf_vlan,
+       .ndo_set_vf_trust = qede_set_vf_trust,
 #endif
        .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
@@ -2814,7 +785,7 @@ static void qede_update_pf_params(struct qed_dev *cdev)
 
        /* 64 rx + 64 tx + 64 XDP */
        memset(&pf_params, 0, sizeof(struct qed_pf_params));
-       pf_params.eth_pf_params.num_cons = 192;
+       pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
        qed_ops->common->update_pf_params(cdev, &pf_params);
 }
 
@@ -2891,6 +862,15 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 
        edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
 
+       /* PTP not supported on VFs */
+       if (!is_vf) {
+               rc = qede_ptp_register_phc(edev);
+               if (rc) {
+                       DP_NOTICE(edev, "Cannot register PHC\n");
+                       goto err5;
+               }
+       }
+
        edev->ops->register_ops(cdev, &qede_ll_ops, edev);
 
 #ifdef CONFIG_DCB
@@ -2906,6 +886,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 
        return 0;
 
+err5:
+       unregister_netdev(edev->ndev);
 err4:
        qede_roce_dev_remove(edev);
 err3:
@@ -2957,6 +939,8 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
 
        unregister_netdev(ndev);
 
+       qede_ptp_remove(edev);
+
        qede_roce_dev_remove(edev);
 
        edev->ops->common->set_power_state(cdev, PCI_D0);
@@ -3215,8 +1199,9 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
                goto err;
 
        /* Allocate buffers for the Rx ring */
+       rxq->filled_buffers = 0;
        for (i = 0; i < rxq->num_rx_buffers; i++) {
-               rc = qede_alloc_rx_buffer(rxq);
+               rc = qede_alloc_rx_buffer(rxq, false);
                if (rc) {
                        DP_ERR(edev,
                               "Rx buffers allocation failed at index %d\n", i);
@@ -3564,19 +1549,24 @@ static int qede_stop_txq(struct qede_dev *edev,
 
 static int qede_stop_queues(struct qede_dev *edev)
 {
-       struct qed_update_vport_params vport_update_params;
+       struct qed_update_vport_params *vport_update_params;
        struct qed_dev *cdev = edev->cdev;
        struct qede_fastpath *fp;
        int rc, i;
 
        /* Disable the vport */
-       memset(&vport_update_params, 0, sizeof(vport_update_params));
-       vport_update_params.vport_id = 0;
-       vport_update_params.update_vport_active_flg = 1;
-       vport_update_params.vport_active_flg = 0;
-       vport_update_params.update_rss_flg = 0;
+       vport_update_params = vzalloc(sizeof(*vport_update_params));
+       if (!vport_update_params)
+               return -ENOMEM;
+
+       vport_update_params->vport_id = 0;
+       vport_update_params->update_vport_active_flg = 1;
+       vport_update_params->vport_active_flg = 0;
+       vport_update_params->update_rss_flg = 0;
+
+       rc = edev->ops->vport_update(cdev, vport_update_params);
+       vfree(vport_update_params);
 
-       rc = edev->ops->vport_update(cdev, &vport_update_params);
        if (rc) {
                DP_ERR(edev, "Failed to update vport\n");
                return rc;
@@ -3688,11 +1678,10 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 {
        int vlan_removal_en = 1;
        struct qed_dev *cdev = edev->cdev;
-       struct qed_update_vport_params vport_update_params;
-       struct qed_queue_start_common_params q_params;
        struct qed_dev_info *qed_info = &edev->dev_info.common;
+       struct qed_update_vport_params *vport_update_params;
+       struct qed_queue_start_common_params q_params;
        struct qed_start_vport_params start = {0};
-       bool reset_rss_indir = false;
        int rc, i;
 
        if (!edev->num_queues) {
@@ -3701,6 +1690,11 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
                return -EINVAL;
        }
 
+       vport_update_params = vzalloc(sizeof(*vport_update_params));
+       if (!vport_update_params)
+               return -ENOMEM;
+
+       start.handle_ptp_pkts = !!(edev->ptp);
        start.gro_enable = !edev->gro_disable;
        start.mtu = edev->ndev->mtu;
        start.vport_id = 0;
@@ -3712,7 +1706,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 
        if (rc) {
                DP_ERR(edev, "Start V-PORT failed %d\n", rc);
-               return rc;
+               goto out;
        }
 
        DP_VERBOSE(edev, NETIF_MSG_IFUP,
@@ -3748,7 +1742,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
                        if (rc) {
                                DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
                                       rc);
-                               return rc;
+                               goto out;
                        }
 
                        /* Use the return parameters */
@@ -3764,108 +1758,44 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
                if (fp->type & QEDE_FASTPATH_XDP) {
                        rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
                        if (rc)
-                               return rc;
+                               goto out;
 
                        fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
                        if (IS_ERR(fp->rxq->xdp_prog)) {
                                rc = PTR_ERR(fp->rxq->xdp_prog);
                                fp->rxq->xdp_prog = NULL;
-                               return rc;
+                               goto out;
                        }
                }
 
                if (fp->type & QEDE_FASTPATH_TX) {
                        rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
                        if (rc)
-                               return rc;
+                               goto out;
                }
        }
 
        /* Prepare and send the vport enable */
-       memset(&vport_update_params, 0, sizeof(vport_update_params));
-       vport_update_params.vport_id = start.vport_id;
-       vport_update_params.update_vport_active_flg = 1;
-       vport_update_params.vport_active_flg = 1;
+       vport_update_params->vport_id = start.vport_id;
+       vport_update_params->update_vport_active_flg = 1;
+       vport_update_params->vport_active_flg = 1;
 
        if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
            qed_info->tx_switching) {
-               vport_update_params.update_tx_switching_flg = 1;
-               vport_update_params.tx_switching_flg = 1;
+               vport_update_params->update_tx_switching_flg = 1;
+               vport_update_params->tx_switching_flg = 1;
        }
 
-       /* Fill struct with RSS params */
-       if (QEDE_RSS_COUNT(edev) > 1) {
-               vport_update_params.update_rss_flg = 1;
-
-               /* Need to validate current RSS config uses valid entries */
-               for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
-                       if (edev->rss_params.rss_ind_table[i] >=
-                           QEDE_RSS_COUNT(edev)) {
-                               reset_rss_indir = true;
-                               break;
-                       }
-               }
-
-               if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
-                   reset_rss_indir) {
-                       u16 val;
-
-                       for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
-                               u16 indir_val;
-
-                               val = QEDE_RSS_COUNT(edev);
-                               indir_val = ethtool_rxfh_indir_default(i, val);
-                               edev->rss_params.rss_ind_table[i] = indir_val;
-                       }
-                       edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
-               }
-
-               if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
-                       netdev_rss_key_fill(edev->rss_params.rss_key,
-                                           sizeof(edev->rss_params.rss_key));
-                       edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
-               }
-
-               if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
-                       edev->rss_params.rss_caps = QED_RSS_IPV4 |
-                                                   QED_RSS_IPV6 |
-                                                   QED_RSS_IPV4_TCP |
-                                                   QED_RSS_IPV6_TCP;
-                       edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
-               }
-
-               memcpy(&vport_update_params.rss_params, &edev->rss_params,
-                      sizeof(vport_update_params.rss_params));
-       } else {
-               memset(&vport_update_params.rss_params, 0,
-                      sizeof(vport_update_params.rss_params));
-       }
+       qede_fill_rss_params(edev, &vport_update_params->rss_params,
+                            &vport_update_params->update_rss_flg);
 
-       rc = edev->ops->vport_update(cdev, &vport_update_params);
-       if (rc) {
+       rc = edev->ops->vport_update(cdev, vport_update_params);
+       if (rc)
                DP_ERR(edev, "Update V-PORT failed %d\n", rc);
-               return rc;
-       }
-
-       return 0;
-}
-
-static int qede_set_mcast_rx_mac(struct qede_dev *edev,
-                                enum qed_filter_xcast_params_type opcode,
-                                unsigned char *mac, int num_macs)
-{
-       struct qed_filter_params filter_cmd;
-       int i;
-
-       memset(&filter_cmd, 0, sizeof(filter_cmd));
-       filter_cmd.type = QED_FILTER_TYPE_MCAST;
-       filter_cmd.filter.mcast.type = opcode;
-       filter_cmd.filter.mcast.num = num_macs;
-
-       for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
-               ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
 
-       return edev->ops->filter_config(edev->cdev, &filter_cmd);
+out:
+       vfree(vport_update_params);
+       return rc;
 }
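
The two hunks above move struct qed_update_vport_params off the kernel stack:
the structure is large, so it is now vzalloc()ed and every exit path funnels
through a single vfree(). A condensed sketch of the pattern, with a
hypothetical helper name (the field names come from the hunks themselves):

    /* Sketch only: allocate zeroed, use, free on every path. */
    static int example_vport_disable(struct qede_dev *edev)
    {
            struct qed_update_vport_params *p;
            int rc;

            p = vzalloc(sizeof(*p));        /* zeroed, off-stack */
            if (!p)
                    return -ENOMEM;

            p->update_vport_active_flg = 1; /* request an activity change */
            p->vport_active_flg = 0;        /* ... to inactive */

            rc = edev->ops->vport_update(edev->cdev, p);
            vfree(p);                       /* freed on success and error alike */
            return rc;
    }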
 
 enum qede_unload_mode {
@@ -3886,6 +1816,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
        qede_roce_dev_event_close(edev);
        edev->state = QEDE_STATE_CLOSED;
 
+       qede_ptp_stop(edev);
+
        /* Close OS Tx */
        netif_tx_disable(edev->ndev);
        netif_carrier_off(edev->ndev);
@@ -3987,6 +1919,8 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
        qede_roce_dev_event_open(edev);
        qede_link_update(edev, &link_output);
 
+       qede_ptp_start(edev, (mode == QEDE_LOAD_NORMAL));
+
        edev->state = QEDE_STATE_OPEN;
 
        DP_INFO(edev, "Ending successfully qede load\n");
@@ -4097,192 +2031,3 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
                }
        }
 }
-
-static int qede_set_mac_addr(struct net_device *ndev, void *p)
-{
-       struct qede_dev *edev = netdev_priv(ndev);
-       struct sockaddr *addr = p;
-       int rc;
-
-       ASSERT_RTNL(); /* @@@TBD To be removed */
-
-       DP_INFO(edev, "Set_mac_addr called\n");
-
-       if (!is_valid_ether_addr(addr->sa_data)) {
-               DP_NOTICE(edev, "The MAC address is not valid\n");
-               return -EFAULT;
-       }
-
-       if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
-               DP_NOTICE(edev, "qed prevents setting MAC\n");
-               return -EINVAL;
-       }
-
-       ether_addr_copy(ndev->dev_addr, addr->sa_data);
-
-       if (!netif_running(ndev))  {
-               DP_NOTICE(edev, "The device is currently down\n");
-               return 0;
-       }
-
-       /* Remove the previous primary mac */
-       rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
-                                  edev->primary_mac);
-       if (rc)
-               return rc;
-
-       edev->ops->common->update_mac(edev->cdev, addr->sa_data);
-
-       /* Add MAC filter according to the new unicast HW MAC address */
-       ether_addr_copy(edev->primary_mac, ndev->dev_addr);
-       return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
-                                     edev->primary_mac);
-}
-
-static int
-qede_configure_mcast_filtering(struct net_device *ndev,
-                              enum qed_filter_rx_mode_type *accept_flags)
-{
-       struct qede_dev *edev = netdev_priv(ndev);
-       unsigned char *mc_macs, *temp;
-       struct netdev_hw_addr *ha;
-       int rc = 0, mc_count;
-       size_t size;
-
-       size = 64 * ETH_ALEN;
-
-       mc_macs = kzalloc(size, GFP_KERNEL);
-       if (!mc_macs) {
-               DP_NOTICE(edev,
-                         "Failed to allocate memory for multicast MACs\n");
-               rc = -ENOMEM;
-               goto exit;
-       }
-
-       temp = mc_macs;
-
-       /* Remove all previously configured MAC filters */
-       rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
-                                  mc_macs, 1);
-       if (rc)
-               goto exit;
-
-       netif_addr_lock_bh(ndev);
-
-       mc_count = netdev_mc_count(ndev);
-       if (mc_count < 64) {
-               netdev_for_each_mc_addr(ha, ndev) {
-                       ether_addr_copy(temp, ha->addr);
-                       temp += ETH_ALEN;
-               }
-       }
-
-       netif_addr_unlock_bh(ndev);
-
-       /* Check for all multicast @@@TBD resource allocation */
-       if ((ndev->flags & IFF_ALLMULTI) ||
-           (mc_count > 64)) {
-               if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
-                       *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
-       } else {
-               /* Add all multicast MAC filters */
-               rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
-                                          mc_macs, mc_count);
-       }
-
-exit:
-       kfree(mc_macs);
-       return rc;
-}
-
-static void qede_set_rx_mode(struct net_device *ndev)
-{
-       struct qede_dev *edev = netdev_priv(ndev);
-
-       set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
-       schedule_delayed_work(&edev->sp_task, 0);
-}
-
-/* Must be called with qede_lock held */
-static void qede_config_rx_mode(struct net_device *ndev)
-{
-       enum qed_filter_rx_mode_type accept_flags = QED_FILTER_TYPE_UCAST;
-       struct qede_dev *edev = netdev_priv(ndev);
-       struct qed_filter_params rx_mode;
-       unsigned char *uc_macs, *temp;
-       struct netdev_hw_addr *ha;
-       int rc, uc_count;
-       size_t size;
-
-       netif_addr_lock_bh(ndev);
-
-       uc_count = netdev_uc_count(ndev);
-       size = uc_count * ETH_ALEN;
-
-       uc_macs = kzalloc(size, GFP_ATOMIC);
-       if (!uc_macs) {
-               DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
-               netif_addr_unlock_bh(ndev);
-               return;
-       }
-
-       temp = uc_macs;
-       netdev_for_each_uc_addr(ha, ndev) {
-               ether_addr_copy(temp, ha->addr);
-               temp += ETH_ALEN;
-       }
-
-       netif_addr_unlock_bh(ndev);
-
-       /* Configure the struct for the Rx mode */
-       memset(&rx_mode, 0, sizeof(struct qed_filter_params));
-       rx_mode.type = QED_FILTER_TYPE_RX_MODE;
-
-       /* Remove all previous unicast secondary macs and multicast macs
-        * (configure / leave the primary mac)
-        */
-       rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
-                                  edev->primary_mac);
-       if (rc)
-               goto out;
-
-       /* Check for promiscuous */
-       if ((ndev->flags & IFF_PROMISC) ||
-           (uc_count > edev->dev_info.num_mac_filters - 1)) {
-               accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
-       } else {
-               /* Add MAC filters according to the unicast secondary macs */
-               int i;
-
-               temp = uc_macs;
-               for (i = 0; i < uc_count; i++) {
-                       rc = qede_set_ucast_rx_mac(edev,
-                                                  QED_FILTER_XCAST_TYPE_ADD,
-                                                  temp);
-                       if (rc)
-                               goto out;
-
-                       temp += ETH_ALEN;
-               }
-
-               rc = qede_configure_mcast_filtering(ndev, &accept_flags);
-               if (rc)
-                       goto out;
-       }
-
-       /* take care of VLAN mode */
-       if (ndev->flags & IFF_PROMISC) {
-               qede_config_accept_any_vlan(edev, true);
-       } else if (!edev->non_configured_vlans) {
-               /* It's possible that accept_any_vlan mode is set due to a
-                * previous setting of IFF_PROMISC. If vlan credits are
-                * sufficient, disable accept_any_vlan.
-                */
-               qede_config_accept_any_vlan(edev, false);
-       }
-
-       rx_mode.filter.accept_flags = accept_flags;
-       edev->ops->filter_config(edev->cdev, &rx_mode);
-out:
-       kfree(uc_macs);
-}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
new file mode 100644 (file)
index 0000000..2e62dec
--- /dev/null
@@ -0,0 +1,536 @@
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "qede_ptp.h"
+
+struct qede_ptp {
+       const struct qed_eth_ptp_ops    *ops;
+       struct ptp_clock_info           clock_info;
+       struct cyclecounter             cc;
+       struct timecounter              tc;
+       struct ptp_clock                *clock;
+       struct work_struct              work;
+       struct qede_dev                 *edev;
+       struct sk_buff                  *tx_skb;
+
+       /* ptp spinlock is used for protecting the cycle/time counter fields
+        * and also for serializing the qed PTP API invocations.
+        */
+       spinlock_t                      lock;
+       bool                            hw_ts_ioctl_called;
+       u16                             tx_type;
+       u16                             rx_filter;
+};
+
+/**
+ * qede_ptp_adjfreq
+ * @info: the ptp clock info structure
+ * @ppb: parts per billion adjustment from base
+ *
+ * Adjust the frequency of the ptp cycle counter by the
+ * indicated ppb from the base frequency.
+ */
+static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb)
+{
+       struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info);
+       struct qede_dev *edev = ptp->edev;
+       int rc;
+
+       __qede_lock(edev);
+       if (edev->state == QEDE_STATE_OPEN) {
+               spin_lock_bh(&ptp->lock);
+               rc = ptp->ops->adjfreq(edev->cdev, ppb);
+               spin_unlock_bh(&ptp->lock);
+       } else {
+               DP_ERR(edev, "PTP adjfreq called while interface is down\n");
+               rc = -EFAULT;
+       }
+       __qede_unlock(edev);
+
+       return rc;
+}
+
+static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+       struct qede_dev *edev;
+       struct qede_ptp *ptp;
+
+       ptp = container_of(info, struct qede_ptp, clock_info);
+       edev = ptp->edev;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n",
+                  delta);
+
+       spin_lock_bh(&ptp->lock);
+       timecounter_adjtime(&ptp->tc, delta);
+       spin_unlock_bh(&ptp->lock);
+
+       return 0;
+}
+
+static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+{
+       struct qede_dev *edev;
+       struct qede_ptp *ptp;
+       u64 ns;
+
+       ptp = container_of(info, struct qede_ptp, clock_info);
+       edev = ptp->edev;
+
+       spin_lock_bh(&ptp->lock);
+       ns = timecounter_read(&ptp->tc);
+       spin_unlock_bh(&ptp->lock);
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns);
+
+       *ts = ns_to_timespec64(ns);
+
+       return 0;
+}
+
+static int qede_ptp_settime(struct ptp_clock_info *info,
+                           const struct timespec64 *ts)
+{
+       struct qede_dev *edev;
+       struct qede_ptp *ptp;
+       u64 ns;
+
+       ptp = container_of(info, struct qede_ptp, clock_info);
+       edev = ptp->edev;
+
+       ns = timespec64_to_ns(ts);
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns);
+
+       /* Re-init the timecounter */
+       spin_lock_bh(&ptp->lock);
+       timecounter_init(&ptp->tc, &ptp->cc, ns);
+       spin_unlock_bh(&ptp->lock);
+
+       return 0;
+}
+
+/* Enable (or disable) ancillary features of the phc subsystem */
+static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info,
+                                            struct ptp_clock_request *rq,
+                                            int on)
+{
+       struct qede_dev *edev;
+       struct qede_ptp *ptp;
+
+       ptp = container_of(info, struct qede_ptp, clock_info);
+       edev = ptp->edev;
+
+       DP_ERR(edev, "PHC ancillary features are not supported\n");
+
+       return -ENOTSUPP;
+}
+
+static void qede_ptp_task(struct work_struct *work)
+{
+       struct skb_shared_hwtstamps shhwtstamps;
+       struct qede_dev *edev;
+       struct qede_ptp *ptp;
+       u64 timestamp, ns;
+       int rc;
+
+       ptp = container_of(work, struct qede_ptp, work);
+       edev = ptp->edev;
+
+       /* Read Tx timestamp registers */
+       spin_lock_bh(&ptp->lock);
+       rc = ptp->ops->read_tx_ts(edev->cdev, &timestamp);
+       spin_unlock_bh(&ptp->lock);
+       if (rc) {
+               /* Reschedule to keep checking for a valid timestamp value */
+               schedule_work(&ptp->work);
+               return;
+       }
+
+       ns = timecounter_cyc2time(&ptp->tc, timestamp);
+       memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+       shhwtstamps.hwtstamp = ns_to_ktime(ns);
+       skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
+       dev_kfree_skb_any(ptp->tx_skb);
+       ptp->tx_skb = NULL;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                  "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
+                  timestamp, ns);
+}
+
+/* Read the PHC. This API is invoked with ptp_lock held. */
+static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
+{
+       struct qede_dev *edev;
+       struct qede_ptp *ptp;
+       u64 phc_cycles;
+       int rc;
+
+       ptp = container_of(cc, struct qede_ptp, cc);
+       edev = ptp->edev;
+       rc = ptp->ops->read_cc(edev->cdev, &phc_cycles);
+       if (rc)
+               WARN_ONCE(1, "PHC read err %d\n", rc);
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles);
+
+       return phc_cycles;
+}
+
+static void qede_ptp_init_cc(struct qede_dev *edev)
+{
+       struct qede_ptp *ptp;
+
+       ptp = edev->ptp;
+       if (!ptp)
+               return;
+
+       memset(&ptp->cc, 0, sizeof(ptp->cc));
+       ptp->cc.read = qede_ptp_read_cc;
+       ptp->cc.mask = CYCLECOUNTER_MASK(64);
+       ptp->cc.shift = 0;
+       ptp->cc.mult = 1;
+}
+
+static int qede_ptp_cfg_filters(struct qede_dev *edev)
+{
+       struct qede_ptp *ptp = edev->ptp;
+
+       if (!ptp)
+               return -EIO;
+
+       if (!ptp->hw_ts_ioctl_called) {
+               DP_INFO(edev, "TS IOCTL not called\n");
+               return 0;
+       }
+
+       switch (ptp->tx_type) {
+       case HWTSTAMP_TX_ON:
+               edev->flags |= QEDE_TX_TIMESTAMPING_EN;
+               ptp->ops->hwtstamp_tx_on(edev->cdev);
+               break;
+
+       case HWTSTAMP_TX_ONESTEP_SYNC:
+               DP_ERR(edev, "One-step timestamping is not supported\n");
+               return -ERANGE;
+       }
+
+       spin_lock_bh(&ptp->lock);
+       switch (ptp->rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               break;
+       case HWTSTAMP_FILTER_ALL:
+       case HWTSTAMP_FILTER_SOME:
+               ptp->rx_filter = HWTSTAMP_FILTER_NONE;
+               break;
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+               ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+               /* Initialize PTP detection for UDP/IPv4 events */
+               ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4);
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+               ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+               /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
+               ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_IPV4_IPV6);
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+               ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+               /* Initialize PTP detection for L2 events */
+               ptp->ops->cfg_rx_filters(edev->cdev, QED_PTP_FILTER_L2);
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+               ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+               /* Initialize PTP detection for L2, UDP/IPv4 or UDP/IPv6 events */
+               ptp->ops->cfg_rx_filters(edev->cdev,
+                                        QED_PTP_FILTER_L2_IPV4_IPV6);
+               break;
+       }
+
+       spin_unlock_bh(&ptp->lock);
+
+       return 0;
+}
+
+int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
+{
+       struct hwtstamp_config config;
+       struct qede_ptp *ptp;
+       int rc;
+
+       ptp = edev->ptp;
+       if (!ptp)
+               return -EIO;
+
+       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                  "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
+                  config.tx_type, config.rx_filter);
+
+       if (config.flags) {
+               DP_ERR(edev, "config.flags is reserved for future use\n");
+               return -EINVAL;
+       }
+
+       ptp->hw_ts_ioctl_called = 1;
+       ptp->tx_type = config.tx_type;
+       ptp->rx_filter = config.rx_filter;
+
+       rc = qede_ptp_cfg_filters(edev);
+       if (rc)
+               return rc;
+
+       config.rx_filter = ptp->rx_filter;
+
+       return copy_to_user(ifr->ifr_data, &config,
+                           sizeof(config)) ? -EFAULT : 0;
+}
+
+/* Called during load, to initialize PTP-related stuff */
+static void qede_ptp_init(struct qede_dev *edev, bool init_tc)
+{
+       struct qede_ptp *ptp;
+       int rc;
+
+       ptp = edev->ptp;
+       if (!ptp)
+               return;
+
+       spin_lock_init(&ptp->lock);
+
+       /* Configure PTP in HW */
+       rc = ptp->ops->enable(edev->cdev);
+       if (rc) {
+               DP_ERR(edev, "Stopping PTP initialization\n");
+               return;
+       }
+
+       /* Init work queue for Tx timestamping */
+       INIT_WORK(&ptp->work, qede_ptp_task);
+
+       /* Init cyclecounter and timecounter. This is done only on the first
+        * load; if it were redone on every load, a running PTP application
+        * would fail across an unload/load cycle (e.g. an MTU change).
+        */
+       if (init_tc) {
+               qede_ptp_init_cc(edev);
+               timecounter_init(&ptp->tc, &ptp->cc,
+                                ktime_to_ns(ktime_get_real()));
+       }
+
+       DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP initialization is successful\n");
+}
+
+void qede_ptp_start(struct qede_dev *edev, bool init_tc)
+{
+       qede_ptp_init(edev, init_tc);
+       qede_ptp_cfg_filters(edev);
+}
+
+void qede_ptp_remove(struct qede_dev *edev)
+{
+       struct qede_ptp *ptp;
+
+       ptp = edev->ptp;
+       if (ptp && ptp->clock) {
+               ptp_clock_unregister(ptp->clock);
+               ptp->clock = NULL;
+       }
+
+       kfree(ptp);
+       edev->ptp = NULL;
+}
+
+int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
+{
+       struct qede_ptp *ptp = edev->ptp;
+
+       if (!ptp)
+               return -EIO;
+
+       info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+                               SOF_TIMESTAMPING_RX_SOFTWARE |
+                               SOF_TIMESTAMPING_SOFTWARE |
+                               SOF_TIMESTAMPING_TX_HARDWARE |
+                               SOF_TIMESTAMPING_RX_HARDWARE |
+                               SOF_TIMESTAMPING_RAW_HARDWARE;
+
+       if (ptp->clock)
+               info->phc_index = ptp_clock_index(ptp->clock);
+       else
+               info->phc_index = -1;
+
+       info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+                          BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+                          BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+                          BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+
+       info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+       return 0;
+}
+
+/* Called during unload, to stop PTP-related stuff */
+void qede_ptp_stop(struct qede_dev *edev)
+{
+       struct qede_ptp *ptp;
+
+       ptp = edev->ptp;
+       if (!ptp)
+               return;
+
+       /* Cancel PTP work queue. Should be done after the Tx queues are
+        * drained to prevent additional scheduling.
+        */
+       cancel_work_sync(&ptp->work);
+       if (ptp->tx_skb) {
+               dev_kfree_skb_any(ptp->tx_skb);
+               ptp->tx_skb = NULL;
+       }
+
+       /* Disable PTP in HW */
+       spin_lock_bh(&ptp->lock);
+       ptp->ops->disable(edev->cdev);
+       spin_unlock_bh(&ptp->lock);
+}
+
+int qede_ptp_register_phc(struct qede_dev *edev)
+{
+       struct qede_ptp *ptp;
+
+       ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
+       if (!ptp) {
+               DP_INFO(edev, "Failed to allocate struct for PTP\n");
+               return -ENOMEM;
+       }
+
+       ptp->edev = edev;
+       ptp->ops = edev->ops->ptp;
+       if (!ptp->ops) {
+               kfree(ptp);
+               edev->ptp = NULL;
+               DP_ERR(edev, "PTP clock registeration failed\n");
+               return -EIO;
+       }
+
+       edev->ptp = ptp;
+
+       /* Fill the ptp_clock_info struct and register PTP clock */
+       ptp->clock_info.owner = THIS_MODULE;
+       snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
+       ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB;
+       ptp->clock_info.n_alarm = 0;
+       ptp->clock_info.n_ext_ts = 0;
+       ptp->clock_info.n_per_out = 0;
+       ptp->clock_info.pps = 0;
+       ptp->clock_info.adjfreq = qede_ptp_adjfreq;
+       ptp->clock_info.adjtime = qede_ptp_adjtime;
+       ptp->clock_info.gettime64 = qede_ptp_gettime;
+       ptp->clock_info.settime64 = qede_ptp_settime;
+       ptp->clock_info.enable = qede_ptp_ancillary_feature_enable;
+
+       ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
+       if (IS_ERR(ptp->clock)) {
+               ptp->clock = NULL;
+               kfree(ptp);
+               edev->ptp = NULL;
+               DP_ERR(edev, "PTP clock registeration failed\n");
+       }
+
+       return 0;
+}
+
+void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
+{
+       struct qede_ptp *ptp;
+
+       ptp = edev->ptp;
+       if (!ptp)
+               return;
+
+       if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) {
+               DP_NOTICE(edev,
+                         "Tx timestamping was not enabled, this packet will not be timestamped\n");
+       } else if (unlikely(ptp->tx_skb)) {
+               DP_NOTICE(edev,
+                         "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
+       } else {
+               skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+               /* schedule check for Tx timestamp */
+               ptp->tx_skb = skb_get(skb);
+               schedule_work(&ptp->work);
+       }
+}
+
+void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb)
+{
+       struct qede_ptp *ptp;
+       u64 timestamp, ns;
+       int rc;
+
+       ptp = edev->ptp;
+       if (!ptp)
+               return;
+
+       spin_lock_bh(&ptp->lock);
+       rc = ptp->ops->read_rx_ts(edev->cdev, &timestamp);
+       if (rc) {
+               spin_unlock_bh(&ptp->lock);
+               DP_INFO(edev, "Invalid Rx timestamp\n");
+               return;
+       }
+
+       ns = timecounter_cyc2time(&ptp->tc, timestamp);
+       spin_unlock_bh(&ptp->lock);
+       skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+       DP_VERBOSE(edev, QED_MSG_DEBUG,
+                  "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
+                  timestamp, ns);
+}
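
The new qede_ptp.c layers the kernel's timecounter over the device's
free-running cycle counter: read_cc() supplies raw 64-bit cycles, and
mult = 1 / shift = 0 mean the hardware already counts in nanoseconds.
A self-contained sketch of that layering, assuming a stand-in reader
callback in place of ptp->ops->read_cc():

    #include <linux/timecounter.h>
    #include <linux/ktime.h>

    static u64 example_read_cycles(const struct cyclecounter *cc)
    {
            return 0;       /* stand-in for the qed read_cc() op */
    }

    static void example_tc_setup(struct cyclecounter *cc, struct timecounter *tc)
    {
            cc->read  = example_read_cycles;
            cc->mask  = CYCLECOUNTER_MASK(64);
            cc->mult  = 1;  /* device cycles are already nanoseconds */
            cc->shift = 0;

            /* Seed with wall-clock ns; qede_ptp_init() does this on the
             * first load only, so a running PTP app survives reloads.
             */
            timecounter_init(tc, cc, ktime_to_ns(ktime_get_real()));
    }

Once initialized, timecounter_read() and timecounter_cyc2time() return
monotonically extended nanoseconds, which the Rx/Tx timestamp paths above
hand to ns_to_ktime().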
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
new file mode 100644 (file)
index 0000000..f328f9b
--- /dev/null
@@ -0,0 +1,65 @@
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QEDE_PTP_H_
+#define _QEDE_PTP_H_
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/timecounter.h>
+#include "qede.h"
+
+void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
+void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
+int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
+void qede_ptp_start(struct qede_dev *edev, bool init_tc);
+void qede_ptp_stop(struct qede_dev *edev);
+void qede_ptp_remove(struct qede_dev *edev);
+int qede_ptp_register_phc(struct qede_dev *edev);
+int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
+
+static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
+                                        union eth_rx_cqe *cqe,
+                                        struct sk_buff *skb)
+{
+       /* Check if this packet was timestamped */
+       if (unlikely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags) &
+                    (1 << PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT))) {
+               if (likely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags)
+                   & (1 << PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT))) {
+                       qede_ptp_rx_ts(edev, skb);
+               } else {
+                       DP_INFO(edev,
+                               "Timestamp recorded for non PTP packets\n");
+               }
+       }
+}
+#endif /* _QEDE_PTP_H_ */
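
qede_ptp_hw_ts() above is the driver half of the standard SIOCSHWTSTAMP
handshake: user space passes a struct hwtstamp_config through ifr_data,
and the driver may adjust rx_filter before copying the config back. A
hedged user-space sketch (device name and filter choice are illustrative):

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    static int request_hw_timestamps(int fd)    /* fd: a socket, e.g. AF_INET */
    {
            struct hwtstamp_config cfg = {
                    .tx_type   = HWTSTAMP_TX_ON,
                    .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
            };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&cfg;

            /* On success, re-read cfg.rx_filter: the driver reports the
             * filter it actually programmed.
             */
            return ioctl(fd, SIOCSHWTSTAMP, &ifr);
    }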
index 49272716a7c495e9767daa9d5dfb217206eb8744..f00657ce7c8faac1681a75a4c89ede3abc468719 100644 (file)
@@ -1,5 +1,5 @@
 /* QLogic qedr NIC Driver
- * Copyright (c) 2015-2016  QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 5c100ab86c0046ec72a011e5f806d6557ac7482c..ea38236f1ced5c2d40c5bb344defc32142e7842e 100644 (file)
@@ -2025,7 +2025,7 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
        skb_checksum_none_assert(skb);
        skb->protocol = eth_type_trans(skb, qdev->ndev);
 
-       netif_receive_skb(skb);
+       napi_gro_receive(&qdev->napi, skb);
        lrg_buf_cb2->skb = NULL;
 
        if (qdev->device_id == QL3022_DEVICE_ID)
@@ -2095,7 +2095,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
        }
        skb2->protocol = eth_type_trans(skb2, qdev->ndev);
 
-       netif_receive_skb(skb2);
+       napi_gro_receive(&qdev->napi, skb2);
        ndev->stats.rx_packets++;
        ndev->stats.rx_bytes += length;
        lrg_buf_cb2->skb = NULL;
@@ -2105,8 +2105,7 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
        ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
 }
 
-static int ql_tx_rx_clean(struct ql3_adapter *qdev,
-                         int *tx_cleaned, int *rx_cleaned, int work_to_do)
+static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget)
 {
        struct net_rsp_iocb *net_rsp;
        struct net_device *ndev = qdev->ndev;
@@ -2114,7 +2113,7 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 
        /* While there are entries in the completion queue. */
        while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
-               qdev->rsp_consumer_index) && (work_done < work_to_do)) {
+               qdev->rsp_consumer_index) && (work_done < budget)) {
 
                net_rsp = qdev->rsp_current;
                rmb();
@@ -2130,21 +2129,20 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
                case OPCODE_OB_MAC_IOCB_FN2:
                        ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
                                               net_rsp);
-                       (*tx_cleaned)++;
                        break;
 
                case OPCODE_IB_MAC_IOCB:
                case OPCODE_IB_3032_MAC_IOCB:
                        ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
                                               net_rsp);
-                       (*rx_cleaned)++;
+                       work_done++;
                        break;
 
                case OPCODE_IB_IP_IOCB:
                case OPCODE_IB_3032_IP_IOCB:
                        ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
                                                 net_rsp);
-                       (*rx_cleaned)++;
+                       work_done++;
                        break;
                default: {
                        u32 *tmp = (u32 *)net_rsp;
@@ -2169,7 +2167,6 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
                        qdev->rsp_current++;
                }
 
-               work_done = *tx_cleaned + *rx_cleaned;
        }
 
        return work_done;
@@ -2178,25 +2175,25 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 static int ql_poll(struct napi_struct *napi, int budget)
 {
        struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
-       int rx_cleaned = 0, tx_cleaned = 0;
-       unsigned long hw_flags;
        struct ql3xxx_port_registers __iomem *port_regs =
                qdev->mem_map_registers;
+       int work_done;
 
-       ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
+       work_done = ql_tx_rx_clean(qdev, budget);
 
-       if (tx_cleaned + rx_cleaned != budget) {
-               spin_lock_irqsave(&qdev->hw_lock, hw_flags);
-               __napi_complete(napi);
+       if (work_done < budget && napi_complete_done(napi, work_done)) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&qdev->hw_lock, flags);
                ql_update_small_bufq_prod_index(qdev);
                ql_update_lrg_bufq_prod_index(qdev);
                writel(qdev->rsp_consumer_index,
                            &port_regs->CommonRegs.rspQConsumerIndex);
-               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+               spin_unlock_irqrestore(&qdev->hw_lock, flags);
 
                ql_enable_interrupts(qdev);
        }
-       return tx_cleaned + rx_cleaned;
+       return work_done;
 }
 
 static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
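
The ql3xxx polling rework above folds Rx cleaning into a single work_done
count (Tx completions no longer consume budget) and adopts the
napi_complete_done() contract: interrupts are re-enabled only when the
budget was not exhausted and NAPI confirms polling has really stopped. A
minimal sketch of the contract, with illustrative adapter and helper names:

    static int example_poll(struct napi_struct *napi, int budget)
    {
            struct example_adapter *ad =
                    container_of(napi, struct example_adapter, napi);
            int work_done = example_clean_rings(ad, budget); /* <= budget */

            if (work_done < budget && napi_complete_done(napi, work_done))
                    example_enable_interrupts(ad); /* safe: polling is off */

            return work_done;
    }

The qlcnic and qlge hunks below make the same napi_complete() to
napi_complete_done() substitution.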
index fedd7366713cf04da3ceb7fff1704d74fb3e26fe..84dd83031a1bfcc31c0f8a908fef0c1bb3e7d155 100644 (file)
@@ -975,7 +975,7 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
                work_done = budget;
 
        if (work_done < budget) {
-               napi_complete(&sds_ring->napi);
+               napi_complete_done(&sds_ring->napi, work_done);
                if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
                        qlcnic_enable_sds_intr(adapter, sds_ring);
                        qlcnic_enable_tx_intr(adapter, tx_ring);
@@ -1019,7 +1019,7 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
        work_done = qlcnic_process_rcv_ring(sds_ring, budget);
 
        if (work_done < budget) {
-               napi_complete(&sds_ring->napi);
+               napi_complete_done(&sds_ring->napi, work_done);
                if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
                        qlcnic_enable_sds_intr(adapter, sds_ring);
        }
@@ -1966,7 +1966,7 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
                work_done = budget;
 
        if (work_done < budget) {
-               napi_complete(&sds_ring->napi);
+               napi_complete_done(&sds_ring->napi, work_done);
                qlcnic_enable_sds_intr(adapter, sds_ring);
        }
 
@@ -1994,7 +1994,7 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
                work_done = budget;
 
        if (work_done < budget) {
-               napi_complete(&sds_ring->napi);
+               napi_complete_done(&sds_ring->napi, work_done);
                qlcnic_enable_sds_intr(adapter, sds_ring);
        }
 
@@ -2032,7 +2032,7 @@ static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
        adapter = sds_ring->adapter;
        work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
        if (work_done < budget) {
-               napi_complete(&sds_ring->napi);
+               napi_complete_done(&sds_ring->napi, work_done);
                if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
                        qlcnic_enable_sds_intr(adapter, sds_ring);
        }
index 4c0cce962585f61b61a43d780b0cbe64217de02e..b6628aaa6e4a45a8eaecd9d5fc4ea8136d7a07af 100644 (file)
@@ -4220,7 +4220,7 @@ recheck:
        if (dev == NULL)
                goto done;
 
-       if (dev->priv_flags & IFF_802_1Q_VLAN) {
+       if (is_vlan_dev(dev)) {
                dev = vlan_dev_real_dev(dev);
                goto recheck;
        }
@@ -4256,7 +4256,7 @@ recheck:
        if (dev == NULL)
                goto done;
 
-       if (dev->priv_flags & IFF_802_1Q_VLAN) {
+       if (is_vlan_dev(dev)) {
                dev = vlan_dev_real_dev(dev);
                goto recheck;
        }
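
The two hunks above swap the open-coded IFF_802_1Q_VLAN flag test for the
is_vlan_dev() helper from <linux/if_vlan.h>; the surrounding recheck label
keeps unwrapping until the real lower device is reached. The same walk as a
compact sketch (helper name illustrative):

    static struct net_device *example_real_dev(struct net_device *dev)
    {
            while (dev && is_vlan_dev(dev))
                    dev = vlan_dev_real_dev(dev);   /* strip one VLAN layer */
            return dev;
    }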
index 1409412ab39da7a7a58e3100fdd8461b8b87992b..e9e647072596d5c6e3c6206fb003b66bfe3cdb81 100644 (file)
@@ -2334,7 +2334,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
        }
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                ql_enable_completion_interrupt(qdev, rx_ring->irq);
        }
        return work_done;
index 7a6687982daecd0c7f570a4ffe549647c139936c..fc57cedf4c0ccf9bb429e717762b1e952e9421e4 100644 (file)
@@ -4,6 +4,6 @@
 
 obj-$(CONFIG_QCOM_EMAC) += qcom-emac.o
 
-qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o \
+qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o emac-ethtool.o \
                  emac-sgmii-fsm9900.o emac-sgmii-qdf2432.o \
                  emac-sgmii-qdf2400.o
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
new file mode 100644 (file)
index 0000000..bbe2463
--- /dev/null
@@ -0,0 +1,261 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include "emac.h"
+
+static const char * const emac_ethtool_stat_strings[] = {
+       "rx_ok",
+       "rx_bcast",
+       "rx_mcast",
+       "rx_pause",
+       "rx_ctrl",
+       "rx_fcs_err",
+       "rx_len_err",
+       "rx_byte_cnt",
+       "rx_runt",
+       "rx_frag",
+       "rx_sz_64",
+       "rx_sz_65_127",
+       "rx_sz_128_255",
+       "rx_sz_256_511",
+       "rx_sz_512_1023",
+       "rx_sz_1024_1518",
+       "rx_sz_1519_max",
+       "rx_sz_ov",
+       "rx_rxf_ov",
+       "rx_align_err",
+       "rx_bcast_byte_cnt",
+       "rx_mcast_byte_cnt",
+       "rx_err_addr",
+       "rx_crc_align",
+       "rx_jabbers",
+       "tx_ok",
+       "tx_bcast",
+       "tx_mcast",
+       "tx_pause",
+       "tx_exc_defer",
+       "tx_ctrl",
+       "tx_defer",
+       "tx_byte_cnt",
+       "tx_sz_64",
+       "tx_sz_65_127",
+       "tx_sz_128_255",
+       "tx_sz_256_511",
+       "tx_sz_512_1023",
+       "tx_sz_1024_1518",
+       "tx_sz_1519_max",
+       "tx_1_col",
+       "tx_2_col",
+       "tx_late_col",
+       "tx_abort_col",
+       "tx_underrun",
+       "tx_rd_eop",
+       "tx_len_err",
+       "tx_trunc",
+       "tx_bcast_byte",
+       "tx_mcast_byte",
+       "tx_col",
+};
+
+#define EMAC_STATS_LEN ARRAY_SIZE(emac_ethtool_stat_strings)
+
+static u32 emac_get_msglevel(struct net_device *netdev)
+{
+       struct emac_adapter *adpt = netdev_priv(netdev);
+
+       return adpt->msg_enable;
+}
+
+static void emac_set_msglevel(struct net_device *netdev, u32 data)
+{
+       struct emac_adapter *adpt = netdev_priv(netdev);
+
+       adpt->msg_enable = data;
+}
+
+static int emac_get_sset_count(struct net_device *netdev, int sset)
+{
+       switch (sset) {
+       case ETH_SS_STATS:
+               return EMAC_STATS_LEN;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+       unsigned int i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < EMAC_STATS_LEN; i++) {
+                       strlcpy(data, emac_ethtool_stat_strings[i],
+                               ETH_GSTRING_LEN);
+                       data += ETH_GSTRING_LEN;
+               }
+               break;
+       }
+}
+
+static void emac_get_ethtool_stats(struct net_device *netdev,
+                                  struct ethtool_stats *stats,
+                                  u64 *data)
+{
+       struct emac_adapter *adpt = netdev_priv(netdev);
+
+       spin_lock(&adpt->stats.lock);
+
+       emac_update_hw_stats(adpt);
+       memcpy(data, &adpt->stats, EMAC_STATS_LEN * sizeof(u64));
+
+       spin_unlock(&adpt->stats.lock);
+}
+
+static int emac_nway_reset(struct net_device *netdev)
+{
+       struct phy_device *phydev = netdev->phydev;
+
+       if (!phydev)
+               return -ENODEV;
+
+       return genphy_restart_aneg(phydev);
+}
+
+static void emac_get_ringparam(struct net_device *netdev,
+                              struct ethtool_ringparam *ring)
+{
+       struct emac_adapter *adpt = netdev_priv(netdev);
+
+       ring->rx_max_pending = EMAC_MAX_RX_DESCS;
+       ring->tx_max_pending = EMAC_MAX_TX_DESCS;
+       ring->rx_pending = adpt->rx_desc_cnt;
+       ring->tx_pending = adpt->tx_desc_cnt;
+}
+
+static int emac_set_ringparam(struct net_device *netdev,
+                             struct ethtool_ringparam *ring)
+{
+       struct emac_adapter *adpt = netdev_priv(netdev);
+
+       /* We don't have separate queues/rings for small/large frames, so
+        * reject any attempt to specify those values separately.
+        */
+       if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+               return -EINVAL;
+
+       adpt->tx_desc_cnt =
+               clamp_val(ring->tx_pending, EMAC_MIN_TX_DESCS, EMAC_MAX_TX_DESCS);
+
+       adpt->rx_desc_cnt =
+               clamp_val(ring->rx_pending, EMAC_MIN_RX_DESCS, EMAC_MAX_RX_DESCS);
+
+       if (netif_running(netdev))
+               return emac_reinit_locked(adpt);
+
+       return 0;
+}
+
+static void emac_get_pauseparam(struct net_device *netdev,
+                               struct ethtool_pauseparam *pause)
+{
+       struct emac_adapter *adpt = netdev_priv(netdev);
+
+       pause->autoneg = adpt->automatic ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+       pause->rx_pause = adpt->rx_flow_control ? 1 : 0;
+       pause->tx_pause = adpt->tx_flow_control ? 1 : 0;
+}
+
+static int emac_set_pauseparam(struct net_device *netdev,
+                              struct ethtool_pauseparam *pause)
+{
+       struct emac_adapter *adpt = netdev_priv(netdev);
+
+       adpt->automatic = pause->autoneg == AUTONEG_ENABLE;
+       adpt->rx_flow_control = pause->rx_pause != 0;
+       adpt->tx_flow_control = pause->tx_pause != 0;
+
+       if (netif_running(netdev))
+               return emac_reinit_locked(adpt);
+
+       return 0;
+}
+
+/* Selected registers that might be useful to track during runtime. */
+static const u16 emac_regs[] = {
+       EMAC_DMA_MAS_CTRL,
+       EMAC_MAC_CTRL,
+       EMAC_TXQ_CTRL_0,
+       EMAC_RXQ_CTRL_0,
+       EMAC_DMA_CTRL,
+       EMAC_INT_MASK,
+       EMAC_AXI_MAST_CTRL,
+       EMAC_CORE_HW_VERSION,
+       EMAC_MISC_CTRL,
+};
+
+/* Every time emac_regs[] above is changed, increase this version number. */
+#define EMAC_REGS_VERSION      0
+
+#define EMAC_MAX_REG_SIZE      ARRAY_SIZE(emac_regs)
+
+static void emac_get_regs(struct net_device *netdev,
+                         struct ethtool_regs *regs, void *buff)
+{
+       struct emac_adapter *adpt = netdev_priv(netdev);
+       u32 *val = buff;
+       unsigned int i;
+
+       regs->version = EMAC_REGS_VERSION;
+       regs->len = EMAC_MAX_REG_SIZE * sizeof(u32);
+
+       for (i = 0; i < EMAC_MAX_REG_SIZE; i++)
+               val[i] = readl(adpt->base + emac_regs[i]);
+}
+
+static int emac_get_regs_len(struct net_device *netdev)
+{
+       return EMAC_MAX_REG_SIZE * sizeof(u32);
+}
+
+static const struct ethtool_ops emac_ethtool_ops = {
+       .get_link_ksettings = phy_ethtool_get_link_ksettings,
+       .set_link_ksettings = phy_ethtool_set_link_ksettings,
+
+       .get_msglevel    = emac_get_msglevel,
+       .set_msglevel    = emac_set_msglevel,
+
+       .get_sset_count  = emac_get_sset_count,
+       .get_strings = emac_get_strings,
+       .get_ethtool_stats = emac_get_ethtool_stats,
+
+       .get_ringparam = emac_get_ringparam,
+       .set_ringparam = emac_set_ringparam,
+
+       .get_pauseparam = emac_get_pauseparam,
+       .set_pauseparam = emac_set_pauseparam,
+
+       .nway_reset = emac_nway_reset,
+
+       .get_link = ethtool_op_get_link,
+
+       .get_regs_len    = emac_get_regs_len,
+       .get_regs        = emac_get_regs,
+};
+
+void emac_set_ethtool_ops(struct net_device *netdev)
+{
+       netdev->ethtool_ops = &emac_ethtool_ops;
+}
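
One subtlety in the new emac-ethtool.c: emac_get_ethtool_stats() memcpy()s
EMAC_STATS_LEN u64 words straight out of adpt->stats, so the counter struct
must begin with one u64 per entry of emac_ethtool_stat_strings[], in the
same order. A hypothetical reconstruction of that implied layout (field
names assumed, middle entries elided):

    struct emac_stats_sketch {
            u64 rx_ok;              /* first string: "rx_ok" */
            /* ... one u64 for each remaining string, in order ... */
            u64 tx_col;             /* last string: "tx_col" */

            spinlock_t lock;        /* taken around emac_update_hw_stats() */
    };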
index 0b4deb31e742fc4d48a2f91e865d7d750e0023c2..cc065ffbe4b5584a6498237d1e4a929ff1d6ebd0 100644 (file)
 #include "emac.h"
 #include "emac-sgmii.h"
 
-/* EMAC base register offsets */
-#define EMAC_MAC_CTRL                  0x001480
-#define EMAC_WOL_CTRL0                 0x0014a0
-#define EMAC_RSS_KEY0                  0x0014b0
-#define EMAC_H1TPD_BASE_ADDR_LO                0x0014e0
-#define EMAC_H2TPD_BASE_ADDR_LO                0x0014e4
-#define EMAC_H3TPD_BASE_ADDR_LO                0x0014e8
-#define EMAC_INTER_SRAM_PART9          0x001534
-#define EMAC_DESC_CTRL_0               0x001540
-#define EMAC_DESC_CTRL_1               0x001544
-#define EMAC_DESC_CTRL_2               0x001550
-#define EMAC_DESC_CTRL_10              0x001554
-#define EMAC_DESC_CTRL_12              0x001558
-#define EMAC_DESC_CTRL_13              0x00155c
-#define EMAC_DESC_CTRL_3               0x001560
-#define EMAC_DESC_CTRL_4               0x001564
-#define EMAC_DESC_CTRL_5               0x001568
-#define EMAC_DESC_CTRL_14              0x00156c
-#define EMAC_DESC_CTRL_15              0x001570
-#define EMAC_DESC_CTRL_16              0x001574
-#define EMAC_DESC_CTRL_6               0x001578
-#define EMAC_DESC_CTRL_8               0x001580
-#define EMAC_DESC_CTRL_9               0x001584
-#define EMAC_DESC_CTRL_11              0x001588
-#define EMAC_TXQ_CTRL_0                        0x001590
-#define EMAC_TXQ_CTRL_1                        0x001594
-#define EMAC_TXQ_CTRL_2                        0x001598
-#define EMAC_RXQ_CTRL_0                        0x0015a0
-#define EMAC_RXQ_CTRL_1                        0x0015a4
-#define EMAC_RXQ_CTRL_2                        0x0015a8
-#define EMAC_RXQ_CTRL_3                        0x0015ac
-#define EMAC_BASE_CPU_NUMBER           0x0015b8
-#define EMAC_DMA_CTRL                  0x0015c0
-#define EMAC_MAILBOX_0                 0x0015e0
-#define EMAC_MAILBOX_5                 0x0015e4
-#define EMAC_MAILBOX_6                 0x0015e8
-#define EMAC_MAILBOX_13                        0x0015ec
-#define EMAC_MAILBOX_2                 0x0015f4
-#define EMAC_MAILBOX_3                 0x0015f8
-#define EMAC_MAILBOX_11                        0x00160c
-#define EMAC_AXI_MAST_CTRL             0x001610
-#define EMAC_MAILBOX_12                        0x001614
-#define EMAC_MAILBOX_9                 0x001618
-#define EMAC_MAILBOX_10                        0x00161c
-#define EMAC_ATHR_HEADER_CTRL          0x001620
-#define EMAC_CLK_GATE_CTRL             0x001814
-#define EMAC_MISC_CTRL                 0x001990
-#define EMAC_MAILBOX_7                 0x0019e0
-#define EMAC_MAILBOX_8                 0x0019e4
-#define EMAC_MAILBOX_15                        0x001bd4
-#define EMAC_MAILBOX_16                        0x001bd8
-
 /* EMAC_MAC_CTRL */
 #define SINGLE_PAUSE_MODE              0x10000000
 #define DEBUG_MODE                      0x08000000
 #define RXEN                            0x00000002
 #define TXEN                            0x00000001
 
-
-/* EMAC_WOL_CTRL0 */
-#define LK_CHG_PME                     0x20
-#define LK_CHG_EN                      0x10
-#define MG_FRAME_PME                   0x8
-#define MG_FRAME_EN                    0x4
-#define WK_FRAME_EN                    0x1
-
 /* EMAC_DESC_CTRL_3 */
 #define RFD_RING_SIZE_BMSK                                       0xfff
 
@@ -314,8 +254,6 @@ struct emac_skb_cb {
        RX_PKT_INT2     |\
        RX_PKT_INT3)
 
-#define EMAC_MAC_IRQ_RES                                       "core0"
-
 void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr)
 {
        u32 crc32, bit, reg, mta;
@@ -558,7 +496,7 @@ void emac_mac_reset(struct emac_adapter *adpt)
        emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN);
 }
 
-void emac_mac_start(struct emac_adapter *adpt)
+static void emac_mac_start(struct emac_adapter *adpt)
 {
        struct phy_device *phydev = adpt->phydev;
        u32 mac, csr1;
@@ -575,11 +513,19 @@ void emac_mac_start(struct emac_adapter *adpt)
 
        mac |= TXEN | RXEN;     /* enable RX/TX */
 
-       /* Configure MAC flow control to match the PHY's settings. */
-       if (phydev->pause)
-               mac |= RXFC;
-       if (phydev->pause != phydev->asym_pause)
-               mac |= TXFC;
+       /* Configure MAC flow control. If set to automatic, then match
+        * whatever the PHY does. Otherwise, enable or disable it, depending
+        * on what the user configured via ethtool.
+        */
+       mac &= ~(RXFC | TXFC);
+
+       if (adpt->automatic) {
+               /* If it's set to automatic, then update our local values */
+               adpt->rx_flow_control = phydev->pause;
+               adpt->tx_flow_control = phydev->pause != phydev->asym_pause;
+       }
+       mac |= adpt->rx_flow_control ? RXFC : 0;
+       mac |= adpt->tx_flow_control ? TXFC : 0;
 
        /* setup link speed */
        mac &= ~SPEED_MASK;
@@ -621,8 +567,6 @@ void emac_mac_start(struct emac_adapter *adpt)
 
        emac_reg_update32(adpt->base + EMAC_ATHR_HEADER_CTRL,
                          (HEADER_ENABLE | HEADER_CNT_EN), 0);
-
-       emac_reg_update32(adpt->csr + EMAC_EMAC_WRAPPER_CSR2, 0, WOL_EN);
 }
 
 void emac_mac_stop(struct emac_adapter *adpt)
@@ -963,12 +907,16 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
 static void emac_adjust_link(struct net_device *netdev)
 {
        struct emac_adapter *adpt = netdev_priv(netdev);
+       struct emac_sgmii *sgmii = &adpt->phy;
        struct phy_device *phydev = netdev->phydev;
 
-       if (phydev->link)
+       if (phydev->link) {
                emac_mac_start(adpt);
-       else
+               sgmii->link_up(adpt);
+       } else {
+               sgmii->link_down(adpt);
                emac_mac_stop(adpt);
+       }
 
        phy_print_status(phydev);
 }
@@ -977,40 +925,26 @@ static void emac_adjust_link(struct net_device *netdev)
 int emac_mac_up(struct emac_adapter *adpt)
 {
        struct net_device *netdev = adpt->netdev;
-       struct emac_irq *irq = &adpt->irq;
        int ret;
 
        emac_mac_rx_tx_ring_reset_all(adpt);
        emac_mac_config(adpt);
-
-       ret = request_irq(irq->irq, emac_isr, 0, EMAC_MAC_IRQ_RES, irq);
-       if (ret) {
-               netdev_err(adpt->netdev, "could not request %s irq\n",
-                          EMAC_MAC_IRQ_RES);
-               return ret;
-       }
-
        emac_mac_rx_descs_refill(adpt, &adpt->rx_q);
 
+       adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
        ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
                                 PHY_INTERFACE_MODE_SGMII);
        if (ret) {
                netdev_err(adpt->netdev, "could not connect phy\n");
-               free_irq(irq->irq, irq);
                return ret;
        }
 
+       phy_attached_print(adpt->phydev, NULL);
+
        /* enable mac irq */
        writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS);
        writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK);
 
-       /* Enable pause frames.  Without this feature, the EMAC has been shown
-        * to receive (and drop) frames with FCS errors at gigabit connections.
-        */
-       adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-       adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-
-       adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
        phy_start(adpt->phydev);
 
        napi_enable(&adpt->rx_q.napi);
@@ -1036,7 +970,6 @@ void emac_mac_down(struct emac_adapter *adpt)
        writel(DIS_INT, adpt->base + EMAC_INT_STATUS);
        writel(0, adpt->base + EMAC_INT_MASK);
        synchronize_irq(adpt->irq.irq);
-       free_irq(adpt->irq.irq, &adpt->irq);
 
        phy_disconnect(adpt->phydev);
 
@@ -1213,7 +1146,6 @@ void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
                emac_receive_skb(rx_q, skb, (u16)RRD_CVALN_TAG(&rrd),
                                 (bool)RRD_CVTAG(&rrd));
 
-               netdev->last_rx = jiffies;
                (*num_pkts)++;
        } while (*num_pkts < max_pkts);
 
index f3aa24dc4a292003bbdf99b632f340d8f40e75ea..5028fb4bec2ba6cfea9fc49696dcbef1707783df 100644 (file)
@@ -230,7 +230,6 @@ struct emac_adapter;
 int  emac_mac_up(struct emac_adapter *adpt);
 void emac_mac_down(struct emac_adapter *adpt);
 void emac_mac_reset(struct emac_adapter *adpt);
-void emac_mac_start(struct emac_adapter *adpt);
 void emac_mac_stop(struct emac_adapter *adpt);
 void emac_mac_mode_config(struct emac_adapter *adpt);
 void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
index 2851b4c5657049600c6df2c9c9fa90649cbb832a..441c1936648993fa394e5c676b7bd9957e52efa3 100644 (file)
@@ -22,8 +22,6 @@
 #include <linux/acpi.h>
 #include "emac.h"
 #include "emac-mac.h"
-#include "emac-phy.h"
-#include "emac-sgmii.h"
 
 /* EMAC base register offsets */
 #define EMAC_MDIO_CTRL                                        0x001414
@@ -228,8 +226,5 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
                return -ENODEV;
        }
 
-       if (adpt->phydev->drv)
-               phy_attached_print(adpt->phydev, NULL);
-
        return 0;
 }
index 49f3701a6dd71314f332c64314fd6610fdbbc747..c0c301c721295dc364320ed7fa28fb142e336bdd 100644 (file)
 #ifndef _EMAC_PHY_H_
 #define _EMAC_PHY_H_
 
-typedef int (*emac_sgmii_initialize)(struct emac_adapter *adpt);
-
-/** emac_phy - internal emac phy
- * @base base address
- * @digital per-lane digital block
- * @initialize initialization function
- */
-struct emac_phy {
-       void __iomem            *base;
-       void __iomem            *digital;
-       emac_sgmii_initialize   initialize;
-};
-
 struct emac_adapter;
 
 int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt);
index af690e1a6e7b828ac11b42f328ccfd5b9815f65b..10de8d0d9a56e42f6dd9d6e746ab315d4674e787 100644 (file)
@@ -214,7 +214,7 @@ static const struct emac_reg_write tx_rx_setting[] = {
 
 int emac_sgmii_init_fsm9900(struct emac_adapter *adpt)
 {
-       struct emac_phy *phy = &adpt->phy;
+       struct emac_sgmii *phy = &adpt->phy;
        unsigned int i;
 
        emac_reg_write_all(phy->base, physical_coding_sublayer_programming,
index 5b8419498ef16b984a45e060b6e7b54d999621ec..f62c215be779853ad76cf71f02b09dc4f7d62a1a 100644 (file)
@@ -174,7 +174,7 @@ static const struct emac_reg_write physical_coding_sublayer_programming[] = {
 
 int emac_sgmii_init_qdf2400(struct emac_adapter *adpt)
 {
-       struct emac_phy *phy = &adpt->phy;
+       struct emac_sgmii *phy = &adpt->phy;
        void __iomem *phy_regs = phy->base;
        void __iomem *laned = phy->digital;
        unsigned int i;
index 6170200d74799bb3aae227c73833602f24e7199e..b9c0df7bdd151b7d8fc822f17fbde53c4024c2b3 100644 (file)
@@ -167,7 +167,7 @@ static const struct emac_reg_write physical_coding_sublayer_programming[] = {
 
 int emac_sgmii_init_qdf2432(struct emac_adapter *adpt)
 {
-       struct emac_phy *phy = &adpt->phy;
+       struct emac_sgmii *phy = &adpt->phy;
        void __iomem *phy_regs = phy->base;
        void __iomem *laned = phy->digital;
        unsigned int i;
index bf722a9bb09d96ffdf3f4a2d46d934d76a82d066..040b28977ee74c8cbd9a3f83c73a14597677ddbd 100644 (file)
@@ -25,7 +25,9 @@
 #define EMAC_SGMII_PHY_SPEED_CFG1              0x0074
 #define EMAC_SGMII_PHY_IRQ_CMD                 0x00ac
 #define EMAC_SGMII_PHY_INTERRUPT_CLEAR         0x00b0
+#define EMAC_SGMII_PHY_INTERRUPT_MASK          0x00b4
 #define EMAC_SGMII_PHY_INTERRUPT_STATUS                0x00b8
+#define EMAC_SGMII_PHY_RX_CHK_STATUS           0x00d4
 
 #define FORCE_AN_TX_CFG                                BIT(5)
 #define FORCE_AN_RX_CFG                                BIT(4)
@@ -36,6 +38,8 @@
 #define SPDMODE_100                            BIT(0)
 #define SPDMODE_10                             0
 
+#define CDR_ALIGN_DET                          BIT(6)
+
 #define IRQ_GLOBAL_CLEAR                       BIT(0)
 
 #define DECODE_CODE_ERR                                BIT(7)
 #define SGMII_PHY_IRQ_CLR_WAIT_TIME            10
 
 #define SGMII_PHY_INTERRUPT_ERR                (DECODE_CODE_ERR | DECODE_DISP_ERR)
+#define SGMII_ISR_MASK                 (SGMII_PHY_INTERRUPT_ERR)
 
 #define SERDES_START_WAIT_TIMES                        100
 
-static int emac_sgmii_link_init(struct emac_adapter *adpt)
+/* Initialize the SGMII link between the internal and external PHYs. */
+static void emac_sgmii_link_init(struct emac_adapter *adpt)
 {
-       struct phy_device *phydev = adpt->phydev;
-       struct emac_phy *phy = &adpt->phy;
+       struct emac_sgmii *phy = &adpt->phy;
        u32 val;
 
+       /* Always use autonegotiation. It works no matter how the external
+        * PHY is configured.
+        */
        val = readl(phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
-
-       if (phydev->autoneg == AUTONEG_ENABLE) {
-               val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG);
-               val |= AN_ENABLE;
-               writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
-       } else {
-               u32 speed_cfg;
-
-               switch (phydev->speed) {
-               case SPEED_10:
-                       speed_cfg = SPDMODE_10;
-                       break;
-               case SPEED_100:
-                       speed_cfg = SPDMODE_100;
-                       break;
-               case SPEED_1000:
-                       speed_cfg = SPDMODE_1000;
-                       break;
-               default:
-                       return -EINVAL;
-               }
-
-               if (phydev->duplex == DUPLEX_FULL)
-                       speed_cfg |= DUPLEX_MODE;
-
-               val &= ~AN_ENABLE;
-               writel(speed_cfg, phy->base + EMAC_SGMII_PHY_SPEED_CFG1);
-               writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
-       }
-
-       return 0;
+       val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG);
+       val |= AN_ENABLE;
+       writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
 }
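
With forced-speed support dropped, emac_sgmii_link_init() reduces to a single clear-then-set read-modify-write on the autoneg register. A minimal userspace sketch of that idiom (the register is modeled as a plain variable; FORCE_AN_* match the BIT(5)/BIT(4) definitions above, while the AN_ENABLE position is an illustrative assumption):

#include <stdint.h>
#include <stdio.h>

#define FORCE_AN_TX_CFG (1u << 5)   /* BIT(5), as in the header above */
#define FORCE_AN_RX_CFG (1u << 4)   /* BIT(4) */
#define AN_ENABLE       (1u << 0)   /* illustrative bit position */

/* Clear the bits in 'clear', then set the bits in 'set' --
 * the shape of an emac_reg_update32()-style update. */
static uint32_t reg_update32(uint32_t reg, uint32_t clear, uint32_t set)
{
        return (reg & ~clear) | set;
}

int main(void)
{
        /* Pretend the register was left in forced-configuration mode. */
        uint32_t autoneg_cfg2 = FORCE_AN_RX_CFG | FORCE_AN_TX_CFG;

        autoneg_cfg2 = reg_update32(autoneg_cfg2,
                                    FORCE_AN_RX_CFG | FORCE_AN_TX_CFG,
                                    AN_ENABLE);

        printf("AUTONEG_CFG2 = 0x%08x\n", autoneg_cfg2); /* 0x00000001 */
        return 0;
}
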
 
 static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits)
 {
-       struct emac_phy *phy = &adpt->phy;
+       struct emac_sgmii *phy = &adpt->phy;
        u32 status;
 
        writel_relaxed(irq_bits, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR);
@@ -121,9 +101,54 @@ static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits)
        return 0;
 }
 
+/* The number of decode errors that triggers a reset */
+#define DECODE_ERROR_LIMIT     2
+
+static irqreturn_t emac_sgmii_interrupt(int irq, void *data)
+{
+       struct emac_adapter *adpt = data;
+       struct emac_sgmii *phy = &adpt->phy;
+       u32 status;
+
+       status = readl(phy->base + EMAC_SGMII_PHY_INTERRUPT_STATUS);
+       status &= SGMII_ISR_MASK;
+       if (!status)
+               return IRQ_HANDLED;
+
+       /* If we get a decoding error and CDR is not locked, then try
+        * resetting the internal PHY.  The internal PHY uses an embedded
+        * clock with Clock and Data Recovery (CDR) to recover the
+        * clock and data.
+        */
+       if (status & SGMII_PHY_INTERRUPT_ERR) {
+               int count;
+
+               /* The SGMII is capable of recovering from some decode
+                * errors automatically.  However, if we get multiple
+                * decode errors in a row, then assume that something
+                * is wrong and reset the interface.
+                */
+               count = atomic_inc_return(&phy->decode_error_count);
+               if (count == DECODE_ERROR_LIMIT) {
+                       schedule_work(&adpt->work_thread);
+                       atomic_set(&phy->decode_error_count, 0);
+               }
+       } else {
+               /* We only care about consecutive decode errors. */
+               atomic_set(&phy->decode_error_count, 0);
+       }
+
+       if (emac_sgmii_irq_clear(adpt, status)) {
+               netdev_warn(adpt->netdev, "failed to clear SGMII interrupt\n");
+               schedule_work(&adpt->work_thread);
+       }
+
+       return IRQ_HANDLED;
+}
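
The handler above acts only on consecutive decode errors: every error interrupt bumps an atomic counter, any error-free interrupt clears it, and reaching DECODE_ERROR_LIMIT schedules the reset work exactly once before the counter restarts. A standalone C11 sketch of the same bookkeeping, with the interrupt stream simulated:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DECODE_ERROR_LIMIT 2

static atomic_int decode_error_count;

/* Stand-in for schedule_work(&adpt->work_thread). */
static void schedule_reset(void)
{
        puts("limit reached: scheduling interface reset");
}

static void handle_irq(bool decode_error)
{
        if (decode_error) {
                /* atomic_inc_return() equivalent */
                int count = atomic_fetch_add(&decode_error_count, 1) + 1;

                if (count == DECODE_ERROR_LIMIT) {
                        schedule_reset();
                        atomic_store(&decode_error_count, 0);
                }
        } else {
                /* Only consecutive errors matter. */
                atomic_store(&decode_error_count, 0);
        }
}

int main(void)
{
        /* error, clean, error, error: only the back-to-back pair
         * at the end reaches the limit and schedules one reset. */
        bool stream[] = { true, false, true, true };

        for (unsigned int i = 0; i < sizeof(stream) / sizeof(stream[0]); i++)
                handle_irq(stream[i]);
        return 0;
}
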
+
 static void emac_sgmii_reset_prepare(struct emac_adapter *adpt)
 {
-       struct emac_phy *phy = &adpt->phy;
+       struct emac_sgmii *phy = &adpt->phy;
        u32 val;
 
        /* Reset PHY */
@@ -145,12 +170,7 @@ void emac_sgmii_reset(struct emac_adapter *adpt)
        int ret;
 
        emac_sgmii_reset_prepare(adpt);
-
-       ret = emac_sgmii_link_init(adpt);
-       if (ret) {
-               netdev_err(adpt->netdev, "unsupported link speed\n");
-               return;
-       }
+       emac_sgmii_link_init(adpt);
 
        ret = adpt->phy.initialize(adpt);
        if (ret)
@@ -159,6 +179,68 @@ void emac_sgmii_reset(struct emac_adapter *adpt)
                           ret);
 }
 
+static int emac_sgmii_open(struct emac_adapter *adpt)
+{
+       struct emac_sgmii *sgmii = &adpt->phy;
+       int ret;
+
+       if (sgmii->irq) {
+               /* Make sure interrupts are cleared and disabled first */
+               ret = emac_sgmii_irq_clear(adpt, 0xff);
+               if (ret)
+                       return ret;
+               writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+               ret = request_irq(sgmii->irq, emac_sgmii_interrupt, 0,
+                                 "emac-sgmii", adpt);
+               if (ret) {
+                       netdev_err(adpt->netdev,
+                                  "could not register handler for internal PHY\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int emac_sgmii_close(struct emac_adapter *adpt)
+{
+       struct emac_sgmii *sgmii = &adpt->phy;
+
+       /* Make sure interrupts are disabled */
+       writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+       free_irq(sgmii->irq, adpt);
+
+       return 0;
+}
+
+/* The error interrupts are only valid after the link is up */
+static int emac_sgmii_link_up(struct emac_adapter *adpt)
+{
+       struct emac_sgmii *sgmii = &adpt->phy;
+       int ret;
+
+       /* Clear and enable interrupts */
+       ret = emac_sgmii_irq_clear(adpt, 0xff);
+       if (ret)
+               return ret;
+
+       writel(SGMII_ISR_MASK, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+       return 0;
+}
+
+static int emac_sgmii_link_down(struct emac_adapter *adpt)
+{
+       struct emac_sgmii *sgmii = &adpt->phy;
+
+       /* Disable interrupts */
+       writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+       synchronize_irq(sgmii->irq);
+
+       return 0;
+}
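
Together, these four callbacks gate the SGMII error interrupt around link state: open() clears and masks everything before the handler is installed, link_up() unmasks only once the error bits mean something, link_down() masks and drains any in-flight handler, and close() masks again before releasing the line. A compressed sketch of that ordering (kernel calls reduced to prints; the unmask value is illustrative):

#include <stdio.h>

static void write_mask(unsigned int m)
{
        /* models a writel() to EMAC_SGMII_PHY_INTERRUPT_MASK */
        printf("INTERRUPT_MASK <- 0x%02x\n", m);
}

static void sgmii_open(void)      { write_mask(0); puts("request_irq()"); }
static void sgmii_link_up(void)   { puts("clear pending"); write_mask(0x84); }
static void sgmii_link_down(void) { write_mask(0); puts("synchronize_irq()"); }
static void sgmii_close(void)     { write_mask(0); puts("free_irq()"); }

int main(void)
{
        sgmii_open();      /* handler installed, everything masked */
        sgmii_link_up();   /* error bits are valid only from here  */
        sgmii_link_down(); /* mask + drain before the MAC stops    */
        sgmii_close();     /* mask once more, then release the IRQ */
        return 0;
}
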
+
 static int emac_sgmii_acpi_match(struct device *dev, void *data)
 {
 #ifdef CONFIG_ACPI
@@ -169,7 +251,7 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data)
                {}
        };
        const struct acpi_device_id *id = acpi_match_device(match_table, dev);
-       emac_sgmii_initialize *initialize = data;
+       emac_sgmii_function *initialize = data;
 
        if (id) {
                acpi_handle handle = ACPI_HANDLE(dev);
@@ -217,7 +299,7 @@ static const struct of_device_id emac_sgmii_dt_match[] = {
 int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
 {
        struct platform_device *sgmii_pdev = NULL;
-       struct emac_phy *phy = &adpt->phy;
+       struct emac_sgmii *phy = &adpt->phy;
        struct resource *res;
        int ret;
 
@@ -256,9 +338,14 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
                        goto error_put_device;
                }
 
-               phy->initialize = (emac_sgmii_initialize)match->data;
+               phy->initialize = (emac_sgmii_function)match->data;
        }
 
+       phy->open = emac_sgmii_open;
+       phy->close = emac_sgmii_close;
+       phy->link_up = emac_sgmii_link_up;
+       phy->link_down = emac_sgmii_link_down;
+
        /* Base address is the first address */
        res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0);
        if (!res) {
@@ -286,7 +373,11 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
        if (ret)
                goto error;
 
-       emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR);
+       emac_sgmii_link_init(adpt);
+
+       ret = platform_get_irq(sgmii_pdev, 0);
+       if (ret > 0)
+               phy->irq = ret;
 
        /* We've remapped the addresses, so we don't need the device any
         * more.  of_find_device_by_node() says we should release it.
index 80ed3dc3157aa8685d25f15e2a89f66578a6fdb5..e7c0c3b2baa444a64823f2fec608c6566498c090 100644 (file)
 struct emac_adapter;
 struct platform_device;
 
+typedef int (*emac_sgmii_function)(struct emac_adapter *adpt);
+
+/** struct emac_sgmii - internal emac phy
+ * @base: base address
+ * @digital: per-lane digital block
+ * @irq: the interrupt number
+ * @decode_error_count: count of consecutive decode errors
+ * @initialize: initialization function
+ * @open: called when the driver is opened
+ * @close: called when the driver is closed
+ * @link_up: called when the link comes up
+ * @link_down: called when the link comes down
+ */
+struct emac_sgmii {
+       void __iomem            *base;
+       void __iomem            *digital;
+       unsigned int            irq;
+       atomic_t                decode_error_count;
+       emac_sgmii_function     initialize;
+       emac_sgmii_function     open;
+       emac_sgmii_function     close;
+       emac_sgmii_function     link_up;
+       emac_sgmii_function     link_down;
+};
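
Grouping the hooks as emac_sgmii_function pointers makes the SGMII block a small ops table: the core driver calls adpt->phy.open()/link_up()/link_down()/close() without knowing which SoC variant filled them in. A minimal sketch of the pattern with stub implementations (names invented for illustration):

#include <stdio.h>

struct adapter;
typedef int (*sgmii_fn)(struct adapter *adpt);

struct sgmii_ops {
        sgmii_fn open;
        sgmii_fn close;
        sgmii_fn link_up;
        sgmii_fn link_down;
};

struct adapter {
        struct sgmii_ops phy;
};

static int stub_open(struct adapter *a)      { (void)a; puts("open");      return 0; }
static int stub_close(struct adapter *a)     { (void)a; puts("close");     return 0; }
static int stub_link_up(struct adapter *a)   { (void)a; puts("link up");   return 0; }
static int stub_link_down(struct adapter *a) { (void)a; puts("link down"); return 0; }

int main(void)
{
        /* Assigned once at config time, as emac_sgmii_config() does. */
        struct adapter a = {
                .phy = {
                        .open      = stub_open,
                        .close     = stub_close,
                        .link_up   = stub_link_up,
                        .link_down = stub_link_down,
                },
        };

        a.phy.open(&a);
        a.phy.link_up(&a);
        a.phy.link_down(&a);
        a.phy.close(&a);
        return 0;
}
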
+
 int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt);
 void emac_sgmii_reset(struct emac_adapter *adpt);
 
index f46d300bd58597ce64cc4cefe3be2b1aa4b03afa..28a8cdc364851e56a5757a8f2970853c0a462cc4 100644 (file)
@@ -129,7 +129,7 @@ static int emac_napi_rtx(struct napi_struct *napi, int budget)
        emac_mac_rx_process(adpt, rx_q, &work_done, budget);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                irq->mask |= rx_q->intr;
                writel(irq->mask, adpt->base + EMAC_INT_MASK);
@@ -256,22 +256,37 @@ static int emac_change_mtu(struct net_device *netdev, int new_mtu)
 static int emac_open(struct net_device *netdev)
 {
        struct emac_adapter *adpt = netdev_priv(netdev);
+       struct emac_irq *irq = &adpt->irq;
        int ret;
 
+       ret = request_irq(irq->irq, emac_isr, 0, "emac-core0", irq);
+       if (ret) {
+               netdev_err(adpt->netdev, "could not request emac-core0 irq\n");
+               return ret;
+       }
+
        /* allocate rx/tx dma buffer & descriptors */
        ret = emac_mac_rx_tx_rings_alloc_all(adpt);
        if (ret) {
                netdev_err(adpt->netdev, "error allocating rx/tx rings\n");
+               free_irq(irq->irq, irq);
                return ret;
        }
 
        ret = emac_mac_up(adpt);
        if (ret) {
                emac_mac_rx_tx_rings_free_all(adpt);
+               free_irq(irq->irq, irq);
                return ret;
        }
 
-       emac_mac_start(adpt);
+       ret = adpt->phy.open(adpt);
+       if (ret) {
+               emac_mac_down(adpt);
+               emac_mac_rx_tx_rings_free_all(adpt);
+               free_irq(irq->irq, irq);
+               return ret;
+       }
 
        return 0;
 }
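
Each failure branch in the new emac_open() undoes everything acquired so far, in reverse order. The same structure is often written with the kernel's goto-unwind idiom; a standalone sketch under stub helpers (fail_at lets main() exercise each error path):

#include <stdio.h>

static int fail_at; /* which step should fail, 0 = none */

static int step(int n, const char *name)
{
        printf("%s\n", name);
        return n == fail_at ? -1 : 0;
}

/* Stand-ins for request_irq(), ring allocation, emac_mac_up()
 * and adpt->phy.open(), plus their inverses. */
static int grab_irq(void)     { return step(1, "request_irq"); }
static int alloc_rings(void)  { return step(2, "alloc rings"); }
static int mac_up(void)       { return step(3, "mac up"); }
static int phy_open(void)     { return step(4, "phy open"); }
static void mac_down(void)    { puts("mac down"); }
static void free_rings(void)  { puts("free rings"); }
static void release_irq(void) { puts("free_irq"); }

static int open_sketch(void)
{
        int ret;

        ret = grab_irq();
        if (ret)
                return ret;
        ret = alloc_rings();
        if (ret)
                goto err_free_irq;
        ret = mac_up();
        if (ret)
                goto err_free_rings;
        ret = phy_open();
        if (ret)
                goto err_mac_down;
        return 0;

err_mac_down:
        mac_down();
err_free_rings:
        free_rings();
err_free_irq:
        release_irq();
        return ret;
}

int main(void)
{
        fail_at = 4;   /* make the phy-open step fail        */
        open_sketch(); /* unwinds mac, rings, then the irq   */
        return 0;
}
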
@@ -283,9 +298,12 @@ static int emac_close(struct net_device *netdev)
 
        mutex_lock(&adpt->reset_lock);
 
+       adpt->phy.close(adpt);
        emac_mac_down(adpt);
        emac_mac_rx_tx_rings_free_all(adpt);
 
+       free_irq(adpt->irq.irq, &adpt->irq);
+
        mutex_unlock(&adpt->reset_lock);
 
        return 0;
@@ -311,45 +329,56 @@ static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
        return phy_mii_ioctl(netdev->phydev, ifr, cmd);
 }
 
-/* Provide network statistics info for the interface */
-static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev,
-                                                 struct rtnl_link_stats64 *net_stats)
+/**
+ * emac_update_hw_stats - read the EMAC stat registers
+ *
+ * Reads the stats registers and write the values to adpt->stats.
+ *
+ * adpt->stats.lock must be held while calling this function,
+ * and while reading from adpt->stats.
+ */
+void emac_update_hw_stats(struct emac_adapter *adpt)
 {
-       struct emac_adapter *adpt = netdev_priv(netdev);
-       unsigned int addr = REG_MAC_RX_STATUS_BIN;
        struct emac_stats *stats = &adpt->stats;
        u64 *stats_itr = &adpt->stats.rx_ok;
-       u32 val;
-
-       spin_lock(&stats->lock);
+       void __iomem *base = adpt->base;
+       unsigned int addr;
 
+       addr = REG_MAC_RX_STATUS_BIN;
        while (addr <= REG_MAC_RX_STATUS_END) {
-               val = readl_relaxed(adpt->base + addr);
-               *stats_itr += val;
+               *stats_itr += readl_relaxed(base + addr);
                stats_itr++;
                addr += sizeof(u32);
        }
 
        /* additional rx status */
-       val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG23);
-       adpt->stats.rx_crc_align += val;
-       val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG24);
-       adpt->stats.rx_jabbers += val;
+       stats->rx_crc_align += readl_relaxed(base + EMAC_RXMAC_STATC_REG23);
+       stats->rx_jabbers += readl_relaxed(base + EMAC_RXMAC_STATC_REG24);
 
        /* update tx status */
        addr = REG_MAC_TX_STATUS_BIN;
-       stats_itr = &adpt->stats.tx_ok;
+       stats_itr = &stats->tx_ok;
 
        while (addr <= REG_MAC_TX_STATUS_END) {
-               val = readl_relaxed(adpt->base + addr);
-               *stats_itr += val;
-               ++stats_itr;
+               *stats_itr += readl_relaxed(base + addr);
+               stats_itr++;
                addr += sizeof(u32);
        }
 
        /* additional tx status */
-       val = readl_relaxed(adpt->base + EMAC_TXMAC_STATC_REG25);
-       adpt->stats.tx_col += val;
+       stats->tx_col += readl_relaxed(base + EMAC_TXMAC_STATC_REG25);
+}
+
+/* Provide network statistics info for the interface */
+static void emac_get_stats64(struct net_device *netdev,
+                            struct rtnl_link_stats64 *net_stats)
+{
+       struct emac_adapter *adpt = netdev_priv(netdev);
+       struct emac_stats *stats = &adpt->stats;
+
+       spin_lock(&stats->lock);
+
+       emac_update_hw_stats(adpt);
 
        /* return parsed statistics */
        net_stats->rx_packets = stats->rx_ok;
@@ -377,8 +406,6 @@ static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev,
        net_stats->tx_window_errors = stats->tx_late_col;
 
        spin_unlock(&stats->lock);
-
-       return net_stats;
 }
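
emac_update_hw_stats() depends on a layout contract: the u64 fields of struct emac_stats lie in the same order as the 32-bit hardware counters, so a single pointer walk can accumulate the whole bin. A userspace sketch of that walk (a plain array stands in for the memory-mapped counter registers; field names are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Field order must mirror the hardware counter order, as
 * struct emac_stats does starting at rx_ok. */
struct stats {
        uint64_t rx_ok;
        uint64_t rx_bcast;
        uint64_t rx_mcast;
};

int main(void)
{
        /* Stand-in for readl_relaxed() over the RX status bin. */
        const uint32_t regs[] = { 100, 7, 3 };
        struct stats s = { 0 };
        uint64_t *itr = &s.rx_ok;

        for (size_t i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
                *itr++ += regs[i]; /* counters accumulate across reads */

        printf("rx_ok=%llu rx_bcast=%llu rx_mcast=%llu\n",
               (unsigned long long)s.rx_ok,
               (unsigned long long)s.rx_bcast,
               (unsigned long long)s.rx_mcast);
        return 0;
}
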
 
 static const struct net_device_ops emac_netdev_ops = {
@@ -409,6 +436,10 @@ static void emac_init_adapter(struct emac_adapter *adpt)
 {
        u32 reg;
 
+       adpt->rrd_size = EMAC_RRD_SIZE;
+       adpt->tpd_size = EMAC_TPD_SIZE;
+       adpt->rfd_size = EMAC_RFD_SIZE;
+
        /* descriptors */
        adpt->tx_desc_cnt = EMAC_DEF_TX_DESCS;
        adpt->rx_desc_cnt = EMAC_DEF_RX_DESCS;
@@ -429,6 +460,9 @@ static void emac_init_adapter(struct emac_adapter *adpt)
 
        /* others */
        adpt->preamble = EMAC_PREAMBLE_DEF;
+
+       /* default to automatic flow control */
+       adpt->automatic = true;
 }
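
The new 'automatic' default ties into the flow-control fields added to struct emac_adapter further down: when automatic is true the pause settings follow the PHY, otherwise tx_flow_control/rx_flow_control apply. A small sketch of how such a tri-state configuration resolves (a model of the documented intent, not the driver's code):

#include <stdbool.h>
#include <stdio.h>

struct fc_cfg {
        bool automatic;        /* follow the PHY autoneg result       */
        bool tx_flow_control;  /* manual overrides when !automatic    */
        bool rx_flow_control;
};

/* PHY results win in automatic mode; manual flags otherwise. */
static void resolve(const struct fc_cfg *cfg, bool phy_tx, bool phy_rx)
{
        bool tx = cfg->automatic ? phy_tx : cfg->tx_flow_control;
        bool rx = cfg->automatic ? phy_rx : cfg->rx_flow_control;

        printf("tx pause %s, rx pause %s\n", tx ? "on" : "off",
               rx ? "on" : "off");
}

int main(void)
{
        struct fc_cfg cfg = { .automatic = true }; /* the new default */

        resolve(&cfg, true, false);  /* follows the PHY  */
        cfg.automatic = false;
        cfg.rx_flow_control = true;
        resolve(&cfg, true, false);  /* manual: rx only  */
        return 0;
}
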
 
 /* Get the clock */
@@ -593,7 +627,7 @@ static int emac_probe(struct platform_device *pdev)
 {
        struct net_device *netdev;
        struct emac_adapter *adpt;
-       struct emac_phy *phy;
+       struct emac_sgmii *phy;
        u16 devid, revid;
        u32 reg;
        int ret;
@@ -620,12 +654,14 @@ static int emac_probe(struct platform_device *pdev)
 
        dev_set_drvdata(&pdev->dev, netdev);
        SET_NETDEV_DEV(netdev, &pdev->dev);
+       emac_set_ethtool_ops(netdev);
 
        adpt = netdev_priv(netdev);
        adpt->netdev = netdev;
        adpt->msg_enable = EMAC_MSG_DEFAULT;
 
        phy = &adpt->phy;
+       atomic_set(&phy->decode_error_count, 0);
 
        mutex_init(&adpt->reset_lock);
        spin_lock_init(&adpt->stats.lock);
@@ -646,10 +682,6 @@ static int emac_probe(struct platform_device *pdev)
        netdev->watchdog_timeo = EMAC_WATCHDOG_TIME;
        netdev->irq = adpt->irq.irq;
 
-       adpt->rrd_size = EMAC_RRD_SIZE;
-       adpt->tpd_size = EMAC_TPD_SIZE;
-       adpt->rfd_size = EMAC_RFD_SIZE;
-
        netdev->netdev_ops = &emac_netdev_ops;
 
        emac_init_adapter(adpt);
index 0c76e6cb8c9edeac2dd31462c3f90e808257fc45..8ee4ec6aef2e4379060f0726a75ab8fd8ad1690c 100644 (file)
 #include <linux/platform_device.h>
 #include "emac-mac.h"
 #include "emac-phy.h"
+#include "emac-sgmii.h"
 
 /* EMAC base register offsets */
-#define EMAC_DMA_MAS_CTRL                                     0x001400
-#define EMAC_IRQ_MOD_TIM_INIT                                 0x001408
-#define EMAC_BLK_IDLE_STS                                     0x00140c
-#define EMAC_PHY_LINK_DELAY                                   0x00141c
-#define EMAC_SYS_ALIV_CTRL                                    0x001434
-#define EMAC_MAC_IPGIFG_CTRL                                  0x001484
-#define EMAC_MAC_STA_ADDR0                                    0x001488
-#define EMAC_MAC_STA_ADDR1                                    0x00148c
-#define EMAC_HASH_TAB_REG0                                    0x001490
-#define EMAC_HASH_TAB_REG1                                    0x001494
-#define EMAC_MAC_HALF_DPLX_CTRL                               0x001498
-#define EMAC_MAX_FRAM_LEN_CTRL                                0x00149c
-#define EMAC_INT_STATUS                                       0x001600
-#define EMAC_INT_MASK                                         0x001604
-#define EMAC_RXMAC_STATC_REG0                                 0x001700
-#define EMAC_RXMAC_STATC_REG22                                0x001758
-#define EMAC_TXMAC_STATC_REG0                                 0x001760
-#define EMAC_TXMAC_STATC_REG24                                0x0017c0
-#define EMAC_CORE_HW_VERSION                                  0x001974
-#define EMAC_IDT_TABLE0                                       0x001b00
-#define EMAC_RXMAC_STATC_REG23                                0x001bc8
-#define EMAC_RXMAC_STATC_REG24                                0x001bcc
-#define EMAC_TXMAC_STATC_REG25                                0x001bd0
-#define EMAC_INT1_MASK                                        0x001bf0
-#define EMAC_INT1_STATUS                                      0x001bf4
-#define EMAC_INT2_MASK                                        0x001bf8
-#define EMAC_INT2_STATUS                                      0x001bfc
-#define EMAC_INT3_MASK                                        0x001c00
-#define EMAC_INT3_STATUS                                      0x001c04
+#define EMAC_DMA_MAS_CTRL              0x1400
+#define EMAC_IRQ_MOD_TIM_INIT          0x1408
+#define EMAC_BLK_IDLE_STS              0x140c
+#define EMAC_PHY_LINK_DELAY            0x141c
+#define EMAC_SYS_ALIV_CTRL             0x1434
+#define EMAC_MAC_CTRL                  0x1480
+#define EMAC_MAC_IPGIFG_CTRL           0x1484
+#define EMAC_MAC_STA_ADDR0             0x1488
+#define EMAC_MAC_STA_ADDR1             0x148c
+#define EMAC_HASH_TAB_REG0             0x1490
+#define EMAC_HASH_TAB_REG1             0x1494
+#define EMAC_MAC_HALF_DPLX_CTRL                0x1498
+#define EMAC_MAX_FRAM_LEN_CTRL         0x149c
+#define EMAC_WOL_CTRL0                 0x14a0
+#define EMAC_RSS_KEY0                  0x14b0
+#define EMAC_H1TPD_BASE_ADDR_LO                0x14e0
+#define EMAC_H2TPD_BASE_ADDR_LO                0x14e4
+#define EMAC_H3TPD_BASE_ADDR_LO                0x14e8
+#define EMAC_INTER_SRAM_PART9          0x1534
+#define EMAC_DESC_CTRL_0               0x1540
+#define EMAC_DESC_CTRL_1               0x1544
+#define EMAC_DESC_CTRL_2               0x1550
+#define EMAC_DESC_CTRL_10              0x1554
+#define EMAC_DESC_CTRL_12              0x1558
+#define EMAC_DESC_CTRL_13              0x155c
+#define EMAC_DESC_CTRL_3               0x1560
+#define EMAC_DESC_CTRL_4               0x1564
+#define EMAC_DESC_CTRL_5               0x1568
+#define EMAC_DESC_CTRL_14              0x156c
+#define EMAC_DESC_CTRL_15              0x1570
+#define EMAC_DESC_CTRL_16              0x1574
+#define EMAC_DESC_CTRL_6               0x1578
+#define EMAC_DESC_CTRL_8               0x1580
+#define EMAC_DESC_CTRL_9               0x1584
+#define EMAC_DESC_CTRL_11              0x1588
+#define EMAC_TXQ_CTRL_0                        0x1590
+#define EMAC_TXQ_CTRL_1                        0x1594
+#define EMAC_TXQ_CTRL_2                        0x1598
+#define EMAC_RXQ_CTRL_0                        0x15a0
+#define EMAC_RXQ_CTRL_1                        0x15a4
+#define EMAC_RXQ_CTRL_2                        0x15a8
+#define EMAC_RXQ_CTRL_3                        0x15ac
+#define EMAC_BASE_CPU_NUMBER           0x15b8
+#define EMAC_DMA_CTRL                  0x15c0
+#define EMAC_MAILBOX_0                 0x15e0
+#define EMAC_MAILBOX_5                 0x15e4
+#define EMAC_MAILBOX_6                 0x15e8
+#define EMAC_MAILBOX_13                        0x15ec
+#define EMAC_MAILBOX_2                 0x15f4
+#define EMAC_MAILBOX_3                 0x15f8
+#define EMAC_INT_STATUS                        0x1600
+#define EMAC_INT_MASK                  0x1604
+#define EMAC_MAILBOX_11                        0x160c
+#define EMAC_AXI_MAST_CTRL             0x1610
+#define EMAC_MAILBOX_12                        0x1614
+#define EMAC_MAILBOX_9                 0x1618
+#define EMAC_MAILBOX_10                        0x161c
+#define EMAC_ATHR_HEADER_CTRL          0x1620
+#define EMAC_RXMAC_STATC_REG0          0x1700
+#define EMAC_RXMAC_STATC_REG22         0x1758
+#define EMAC_TXMAC_STATC_REG0          0x1760
+#define EMAC_TXMAC_STATC_REG24         0x17c0
+#define EMAC_CLK_GATE_CTRL             0x1814
+#define EMAC_CORE_HW_VERSION           0x1974
+#define EMAC_MISC_CTRL                 0x1990
+#define EMAC_MAILBOX_7                 0x19e0
+#define EMAC_MAILBOX_8                 0x19e4
+#define EMAC_IDT_TABLE0                        0x1b00
+#define EMAC_RXMAC_STATC_REG23         0x1bc8
+#define EMAC_RXMAC_STATC_REG24         0x1bcc
+#define EMAC_TXMAC_STATC_REG25         0x1bd0
+#define EMAC_MAILBOX_15                        0x1bd4
+#define EMAC_MAILBOX_16                        0x1bd8
+#define EMAC_INT1_MASK                 0x1bf0
+#define EMAC_INT1_STATUS               0x1bf4
+#define EMAC_INT2_MASK                 0x1bf8
+#define EMAC_INT2_STATUS               0x1bfc
+#define EMAC_INT3_MASK                 0x1c00
+#define EMAC_INT3_STATUS               0x1c04
 
 /* EMAC_DMA_MAS_CTRL */
 #define DEV_ID_NUM_BMSK                                     0x7f000000
@@ -166,10 +217,6 @@ enum emac_clk_id {
 
 #define EMAC_MAX_SETUP_LNK_CYCLE                                   100
 
-/* Wake On Lan */
-#define EMAC_WOL_PHY                     0x00000001 /* PHY Status Change */
-#define EMAC_WOL_MAGIC                   0x00000002 /* Magic Packet */
-
 struct emac_stats {
        /* rx */
        u64 rx_ok;              /* good packets */
@@ -291,7 +338,7 @@ struct emac_adapter {
        void __iomem                    *base;
        void __iomem                    *csr;
 
-       struct emac_phy                 phy;
+       struct emac_sgmii               phy;
        struct emac_stats               stats;
 
        struct emac_irq                 irq;
@@ -309,6 +356,13 @@ struct emac_adapter {
 
        unsigned int                    rxbuf_size;
 
+       /* Flow control / pause frame support. If automatic is true, do
+        * whatever the PHY does; otherwise, use tx_flow_control and
+        * rx_flow_control.
+        */
+       bool                            automatic;
+       bool                            tx_flow_control;
+       bool                            rx_flow_control;
+
        /* Ring parameter */
        u8                              tpd_burst;
        u8                              rfd_burst;
@@ -330,6 +384,8 @@ struct emac_adapter {
 
 int emac_reinit_locked(struct emac_adapter *adpt);
 void emac_reg_update32(void __iomem *addr, u32 mask, u32 val);
-irqreturn_t emac_isr(int irq, void *data);
+
+void emac_set_ethtool_ops(struct net_device *netdev);
+void emac_update_hw_stats(struct emac_adapter *adpt);
 
 #endif /* _EMAC_H_ */
index 0b3cd58093d5ecf00abef6eda1386f13ac5e287b..672f6b696069ad8b47989ecb1dd03d3a81fde565 100644 (file)
@@ -465,10 +465,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
        struct cp_private *cp = container_of(napi, struct cp_private, napi);
        struct net_device *dev = cp->dev;
        unsigned int rx_tail = cp->rx_tail;
-       int rx;
+       int rx = 0;
 
-       rx = 0;
-rx_status_loop:
        cpw16(IntrStatus, cp_rx_intr_mask);
 
        while (rx < budget) {
@@ -556,15 +554,10 @@ rx_next:
        /* if we did not reach work limit, then we're done with
         * this round of polling
         */
-       if (rx < budget) {
+       if (rx < budget && napi_complete_done(napi, rx)) {
                unsigned long flags;
 
-               if (cpr16(IntrStatus) & cp_rx_intr_mask)
-                       goto rx_status_loop;
-
-               napi_gro_flush(napi, false);
                spin_lock_irqsave(&cp->lock, flags);
-               __napi_complete(napi);
                cpw16_f(IntrMask, cp_intr_mask);
                spin_unlock_irqrestore(&cp->lock, flags);
        }
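
The 8139cp conversion drops the manual re-check loop and __napi_complete() in favor of napi_complete_done(), which takes the amount of work done and returns false when the core wants to keep polling (for instance under busy polling), in which case device interrupts must stay masked. A userspace model of that contract:

#include <stdbool.h>
#include <stdio.h>

/* Model of the napi_complete_done() contract: it may refuse
 * completion (returns false), in which case the driver leaves its
 * interrupts masked and lets the core poll again. */
static bool napi_complete_done_model(int work_done, bool core_wants_more)
{
        printf("completed with work_done=%d\n", work_done);
        return !core_wants_more;
}

int main(void)
{
        int budget = 16, rx = 5;  /* under budget this round */
        bool core_wants_more = false;

        if (rx < budget && napi_complete_done_model(rx, core_wants_more))
                puts("re-enable device interrupts");
        else
                puts("stay in polling mode");
        return 0;
}
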
index 9bc047ac883bf91397a4eb4b6bb9cdb7812acaf0..89631753e79962d91456d93b71929af768917da1 100644 (file)
@@ -653,9 +653,8 @@ static int rtl8139_poll(struct napi_struct *napi, int budget);
 static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance);
 static int rtl8139_close (struct net_device *dev);
 static int netdev_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
-static struct rtnl_link_stats64 *rtl8139_get_stats64(struct net_device *dev,
-                                                   struct rtnl_link_stats64
-                                                   *stats);
+static void rtl8139_get_stats64(struct net_device *dev,
+                               struct rtnl_link_stats64 *stats);
 static void rtl8139_set_rx_mode (struct net_device *dev);
 static void __set_rx_mode (struct net_device *dev);
 static void rtl8139_hw_start (struct net_device *dev);
@@ -2136,14 +2135,10 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
        if (likely(RTL_R16(IntrStatus) & RxAckBits))
                work_done += rtl8139_rx(dev, tp, budget);
 
-       if (work_done < budget) {
+       if (work_done < budget && napi_complete_done(napi, work_done)) {
                unsigned long flags;
-               /*
-                * Order is important since data can get interrupted
-                * again when we think we are done.
-                */
+
                spin_lock_irqsave(&tp->lock, flags);
-               __napi_complete(napi);
                RTL_W16_F(IntrMask, rtl8139_intr_mask);
                spin_unlock_irqrestore(&tp->lock, flags);
        }
@@ -2516,7 +2511,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 }
 
 
-static struct rtnl_link_stats64 *
+static void
 rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct rtl8139_private *tp = netdev_priv(dev);
@@ -2544,8 +2539,6 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
                stats->tx_packets = tp->tx_stats.packets;
                stats->tx_bytes = tp->tx_stats.bytes;
        } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
-
-       return stats;
 }
 
 /* Set or clear the multicast filter for this adaptor.
index 570ed3bd3cbfcd0cbe1dc0ce5764823d1fc70ff6..9bcd4aefc9c507af16325c6099fdfa198998b93e 100644 (file)
@@ -170,7 +170,7 @@ struct net_local {
     spinlock_t lock;
     struct net_device *next_module;
     struct timer_list timer;   /* Media selection timer. */
-    long last_rx_time;         /* Last Rx, in jiffies, to handle Rx hang. */
+    unsigned long last_rx_time;        /* Last Rx, in jiffies, to handle Rx hang. */
     int saved_tx_size;
     unsigned int tx_unit_busy:1;
     unsigned char re_tx,       /* Number of packet retransmissions. */
@@ -668,11 +668,11 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
                        }
                        num_tx_since_rx++;
                } else if (num_tx_since_rx > 8 &&
-                          time_after(jiffies, dev->last_rx + HZ)) {
+                          time_after(jiffies, lp->last_rx_time + HZ)) {
                        if (net_debug > 2)
                                printk(KERN_DEBUG "%s: Missed packet? No Rx after %d Tx and "
                                           "%ld jiffies status %02x  CMR1 %02x.\n", dev->name,
-                                          num_tx_since_rx, jiffies - dev->last_rx, status,
+                                          num_tx_since_rx, jiffies - lp->last_rx_time, status,
                                           (read_nibble(ioaddr, CMR1) >> 3) & 15);
                        dev->stats.rx_missed_errors++;
                        hardware_init(dev);
@@ -789,7 +789,6 @@ static void net_rx(struct net_device *dev)
                read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port);
                skb->protocol = eth_type_trans(skb, dev);
                netif_rx(skb);
-               dev->last_rx = jiffies;
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += pkt_len;
        }
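
The atp change works because the jiffies helpers assume unsigned arithmetic: time_after(a, b) computes b - a with wraparound and inspects the sign of the result cast to a signed type, which is only well-defined when the timestamps are unsigned. A self-contained demonstration using 32-bit timestamps:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the kernel's time_after(): correct across wraparound
 * because unsigned subtraction wraps, then the sign is inspected. */
#define time_after32(a, b) ((int32_t)((uint32_t)(b) - (uint32_t)(a)) < 0)

int main(void)
{
        uint32_t last_rx = 0xfffffff0u; /* just before wraparound    */
        uint32_t now     = 0x00000010u; /* 0x20 ticks later, wrapped */

        printf("now after last_rx? %d\n", time_after32(now, last_rx)); /* 1 */
        printf("last_rx after now? %d\n", time_after32(last_rx, now)); /* 0 */
        return 0;
}
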
index 8f1623bf2134700498198a98cb6aca9dddd2a6cd..81f18a8335276495a59fa93219c4607c2b8a47aa 100644 (file)
@@ -7583,7 +7583,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
        }
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                rtl_irq_enable(tp, enable_mask);
                mmiowb();
@@ -7755,7 +7755,7 @@ err_pm_runtime_put:
        goto out;
 }
 
-static struct rtnl_link_stats64 *
+static void
 rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
@@ -7809,8 +7809,6 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
                le16_to_cpu(tp->tc_offset.tx_aborted);
 
        pm_runtime_put_noidle(&pdev->dev);
-
-       return stats;
 }
 
 static void rtl8169_net_suspend(struct net_device *dev)
index f1109661a533ee0db3a35573cb4acde310f77577..0525bd696d5d02e5fe8d45ac7791a2617f3a8af5 100644 (file)
@@ -76,6 +76,7 @@ enum ravb_reg {
        CDAR20  = 0x0060,
        CDAR21  = 0x0064,
        ESR     = 0x0088,
+       APSR    = 0x008C,       /* R-Car Gen3 only */
        RCR     = 0x0090,
        RQC0    = 0x0094,
        RQC1    = 0x0098,
@@ -248,6 +249,15 @@ enum ESR_BIT {
        ESR_EIL         = 0x00001000,
 };
 
+/* APSR */
+enum APSR_BIT {
+       APSR_MEMS               = 0x00000002,
+       APSR_CMSW               = 0x00000010,
+       APSR_DM                 = 0x00006000,   /* Undocumented? */
+       APSR_DM_RDM             = 0x00002000,
+       APSR_DM_TDM             = 0x00004000,
+};
+
 /* RCR */
 enum RCR_BIT {
        RCR_EFFS        = 0x00000001,
index 301f48755093bb084ce7741b24091b4c14eea67b..8cfc4a54f2dc69240ae1fc42195ef854e0b1c2c9 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/sys_soc.h>
 
 #include <asm/div64.h>
 
@@ -988,6 +989,11 @@ static void ravb_adjust_link(struct net_device *ndev)
                phy_print_status(phydev);
 }
 
+static const struct soc_device_attribute r8a7795es10[] = {
+       { .soc_id = "r8a7795", .revision = "ES1.0", },
+       { /* sentinel */ }
+};
+
 /* PHY init function */
 static int ravb_phy_init(struct net_device *ndev)
 {
@@ -1023,10 +1029,10 @@ static int ravb_phy_init(struct net_device *ndev)
                goto err_deregister_fixed_link;
        }
 
-       /* This driver only support 10/100Mbit speeds on Gen3
+       /* This driver only supports 10/100Mbit speeds on R-Car H3 ES1.0
         * at this time.
         */
-       if (priv->chip_id == RCAR_GEN3) {
+       if (soc_device_match(r8a7795es10)) {
                err = phy_set_max_speed(phydev, SPEED_100);
                if (err) {
                        netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
@@ -1920,6 +1926,23 @@ static void ravb_set_config_mode(struct net_device *ndev)
        }
 }
 
+/* Set tx and rx clock internal delay modes */
+static void ravb_set_delay_mode(struct net_device *ndev)
+{
+       struct ravb_private *priv = netdev_priv(ndev);
+       int set = 0;
+
+       if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+           priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
+               set |= APSR_DM_RDM;
+
+       if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+           priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
+               set |= APSR_DM_TDM;
+
+       ravb_modify(ndev, APSR, APSR_DM, set);
+}
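
ravb_set_delay_mode() maps the RGMII internal-delay variants onto the APSR bits: RGMII_ID asserts both delays, RGMII_RXID only the receive delay, RGMII_TXID only the transmit delay, and plain RGMII neither. A sketch of the same mapping (the enum is an illustrative stand-in for the kernel's phy_interface_t; the APSR_DM_* values match the header above):

#include <stdio.h>

enum phy_if { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

#define APSR_DM_RDM 0x00002000u  /* RX clock internal delay */
#define APSR_DM_TDM 0x00004000u  /* TX clock internal delay */

static unsigned int delay_bits(enum phy_if mode)
{
        unsigned int set = 0;

        if (mode == RGMII_ID || mode == RGMII_RXID)
                set |= APSR_DM_RDM;
        if (mode == RGMII_ID || mode == RGMII_TXID)
                set |= APSR_DM_TDM;
        return set;
}

int main(void)
{
        printf("rgmii-id:   0x%04x\n", delay_bits(RGMII_ID));   /* 0x6000 */
        printf("rgmii-rxid: 0x%04x\n", delay_bits(RGMII_RXID)); /* 0x2000 */
        printf("rgmii-txid: 0x%04x\n", delay_bits(RGMII_TXID)); /* 0x4000 */
        printf("rgmii:      0x%04x\n", delay_bits(RGMII));      /* 0x0000 */
        return 0;
}
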
+
 static int ravb_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
@@ -2032,6 +2055,9 @@ static int ravb_probe(struct platform_device *pdev)
        /* Request GTI loading */
        ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
 
+       if (priv->chip_id != RCAR_GEN2)
+               ravb_set_delay_mode(ndev);
+
        /* Allocate descriptor base address table */
        priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
        priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
@@ -2168,6 +2194,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
        /* Request GTI loading */
        ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
 
+       if (priv->chip_id != RCAR_GEN2)
+               ravb_set_delay_mode(ndev);
+
        /* Restore descriptor base address table */
        ravb_write(ndev, priv->desc_bat_dma, DBAT);
 
index f729a6b43958cc82a1b2d38293cb50baf767f39a..54248775f227b062addf85044f486ad4512039f5 100644 (file)
@@ -1,9 +1,9 @@
 /*  SuperH Ethernet device driver
  *
- *  Copyright (C) 2014  Renesas Electronics Corporation
+ *  Copyright (C) 2014 Renesas Electronics Corporation
  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
  *  Copyright (C) 2008-2014 Renesas Solutions Corp.
- *  Copyright (C) 2013-2016 Cogent Embedded, Inc.
+ *  Copyright (C) 2013-2017 Cogent Embedded, Inc.
  *  Copyright (C) 2014 Codethink Limited
  *
  *  This program is free software; you can redistribute it and/or modify it
@@ -518,12 +518,19 @@ static struct sh_eth_cpu_data r7s72100_data = {
 
        .ecsr_value     = ECSR_ICD,
        .ecsipr_value   = ECSIPR_ICDIP,
-       .eesipr_value   = 0xe77f009f,
+       .eesipr_value   = EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP |
+                         EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP |
+                         EESIPR_ECIIP |
+                         EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+                         EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+                         EESIPR_RMAFIP | EESIPR_RRFIP |
+                         EESIPR_RTLFIP | EESIPR_RTSFIP |
+                         EESIPR_PREIP | EESIPR_CERFIP,
 
        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
-                         EESR_TDE | EESR_ECI,
+                         EESR_TDE,
        .fdr_value      = 0x0000070f,
 
        .no_psr         = 1,
@@ -535,9 +542,8 @@ static struct sh_eth_cpu_data r7s72100_data = {
        .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
-       .hw_crc         = 1,
+       .hw_checksum    = 1,
        .tsu            = 1,
-       .shift_rd0      = 1,
 };
 
 static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
@@ -557,12 +563,19 @@ static struct sh_eth_cpu_data r8a7740_data = {
 
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
-       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+       .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ECIIP |
+                         EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+                         EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+                         0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
+                         EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
+                         EESIPR_CEEFIP | EESIPR_CELFIP |
+                         EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+                         EESIPR_PREIP | EESIPR_CERFIP,
 
        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
-                         EESR_TDE | EESR_ECI,
+                         EESR_TDE,
        .fdr_value      = 0x0000070f,
 
        .apr            = 1,
@@ -574,10 +587,10 @@ static struct sh_eth_cpu_data r8a7740_data = {
        .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
-       .hw_crc         = 1,
+       .hw_checksum    = 1,
        .tsu            = 1,
        .select_mii     = 1,
-       .shift_rd0      = 1,
+       .magic          = 1,
 };
 
 /* There is CPU dependent code */
@@ -604,12 +617,16 @@ static struct sh_eth_cpu_data r8a777x_data = {
 
        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
-       .eesipr_value   = 0x01ff009f,
+       .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
+                         EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+                         EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+                         EESIPR_RMAFIP | EESIPR_RRFIP |
+                         EESIPR_RTLFIP | EESIPR_RTSFIP |
+                         EESIPR_PREIP | EESIPR_CERFIP,
 
        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
-                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
-                         EESR_ECI,
+                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
        .fdr_value      = 0x00000f0f,
 
        .apr            = 1,
@@ -625,14 +642,19 @@ static struct sh_eth_cpu_data r8a779x_data = {
 
        .register_type  = SH_ETH_REG_FAST_RCAR,
 
-       .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
-       .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
-       .eesipr_value   = 0x01ff009f,
+       .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
+       .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
+                         ECSIPR_MPDIP,
+       .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
+                         EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+                         EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+                         EESIPR_RMAFIP | EESIPR_RRFIP |
+                         EESIPR_RTLFIP | EESIPR_RTSFIP |
+                         EESIPR_PREIP | EESIPR_CERFIP,
 
        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
-                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
-                         EESR_ECI,
+                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
        .fdr_value      = 0x00000f0f,
 
        .trscer_err_mask = DESC_I_RINT8,
@@ -642,6 +664,7 @@ static struct sh_eth_cpu_data r8a779x_data = {
        .tpauser        = 1,
        .hw_swap        = 1,
        .rmiimode       = 1,
+       .magic          = 1,
 };
 #endif /* CONFIG_OF */
 
@@ -668,12 +691,16 @@ static struct sh_eth_cpu_data sh7724_data = {
 
        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
-       .eesipr_value   = 0x01ff009f,
+       .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
+                         EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+                         EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+                         EESIPR_RMAFIP | EESIPR_RRFIP |
+                         EESIPR_RTLFIP | EESIPR_RTSFIP |
+                         EESIPR_PREIP | EESIPR_CERFIP,
 
        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
-                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
-                         EESR_ECI,
+                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
 
        .apr            = 1,
        .mpr            = 1,
@@ -704,12 +731,18 @@ static struct sh_eth_cpu_data sh7757_data = {
 
        .register_type  = SH_ETH_REG_FAST_SH4,
 
-       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+       .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ECIIP |
+                         EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+                         EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+                         0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
+                         EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
+                         EESIPR_CEEFIP | EESIPR_CELFIP |
+                         EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+                         EESIPR_PREIP | EESIPR_CERFIP,
 
        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
-                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
-                         EESR_ECI,
+                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
 
        .irq_flags      = IRQF_SHARED,
        .apr            = 1,
@@ -772,12 +805,19 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
 
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
-       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+       .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ECIIP |
+                         EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+                         EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+                         0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
+                         EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
+                         EESIPR_CEEFIP | EESIPR_CELFIP |
+                         EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+                         EESIPR_PREIP | EESIPR_CERFIP,
 
        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
-                         EESR_TDE | EESR_ECI,
+                         EESR_TDE,
        .fdr_value      = 0x0000072f,
 
        .irq_flags      = IRQF_SHARED,
@@ -803,12 +843,18 @@ static struct sh_eth_cpu_data sh7734_data = {
 
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
-       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
+       .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ECIIP |
+                         EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+                         EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+                         EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
+                         EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
+                         EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+                         EESIPR_PREIP | EESIPR_CERFIP,
 
        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
-                         EESR_TDE | EESR_ECI,
+                         EESR_TDE,
 
        .apr            = 1,
        .mpr            = 1,
@@ -818,9 +864,9 @@ static struct sh_eth_cpu_data sh7734_data = {
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
-       .hw_crc         = 1,
+       .hw_checksum    = 1,
        .select_mii     = 1,
-       .shift_rd0      = 1,
+       .magic          = 1,
 };
 
 /* SH7763 */
@@ -833,12 +879,17 @@ static struct sh_eth_cpu_data sh7763_data = {
 
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
-       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
+       .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ECIIP |
+                         EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+                         EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+                         EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
+                         EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
+                         EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+                         EESIPR_PREIP | EESIPR_CERFIP,
 
        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
-                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
-                         EESR_ECI,
+                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
 
        .apr            = 1,
        .mpr            = 1,
@@ -849,12 +900,20 @@ static struct sh_eth_cpu_data sh7763_data = {
        .no_ade         = 1,
        .tsu            = 1,
        .irq_flags      = IRQF_SHARED,
+       .magic          = 1,
 };
 
 static struct sh_eth_cpu_data sh7619_data = {
        .register_type  = SH_ETH_REG_FAST_SH3_SH2,
 
-       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+       .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ECIIP |
+                         EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+                         EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+                         0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
+                         EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
+                         EESIPR_CEEFIP | EESIPR_CELFIP |
+                         EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+                         EESIPR_PREIP | EESIPR_CERFIP,
 
        .apr            = 1,
        .mpr            = 1,
@@ -865,7 +924,14 @@ static struct sh_eth_cpu_data sh7619_data = {
 static struct sh_eth_cpu_data sh771x_data = {
        .register_type  = SH_ETH_REG_FAST_SH3_SH2,
 
-       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+       .eesipr_value   = EESIPR_RFCOFIP | EESIPR_ECIIP |
+                         EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+                         EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+                         0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
+                         EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
+                         EESIPR_CEEFIP | EESIPR_CELFIP |
+                         EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+                         EESIPR_PREIP | EESIPR_CERFIP,
        .tsu            = 1,
 };
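
The sh_eth changes above replace opaque masks such as 0xe77f009f with OR-chains of named EESIPR_* bits, which makes each enabled interrupt source reviewable on its own. A tiny illustration of the payoff (bit positions invented for the example, not the real EESIPR layout):

#include <stdio.h>

/* Invented bit positions purely for illustration. */
#define EESIPR_FRIP  (1u << 18)  /* frame received      */
#define EESIPR_RDEIP (1u << 17)  /* RX descriptor empty */
#define EESIPR_TDEIP (1u << 21)  /* TX descriptor empty */

int main(void)
{
        /* Named composition documents intent; a bare 0x00260000 would not. */
        unsigned int eesipr = EESIPR_FRIP | EESIPR_RDEIP | EESIPR_TDEIP;

        printf("EESIPR = 0x%08x\n", eesipr);
        return 0;
}
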
 
@@ -936,7 +1002,7 @@ static int sh_eth_reset(struct net_device *ndev)
                sh_eth_write(ndev, 0x0, RDFFR);
 
                /* Reset HW CRC register */
-               if (mdp->cd->hw_crc)
+               if (mdp->cd->hw_checksum)
                        sh_eth_write(ndev, 0x0, CSMR);
 
                /* Select MII mode */
@@ -1421,7 +1487,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                 * the RFS bits are from bit 25 to bit 16. So, the
                 * driver needs right shifting by 16.
                 */
-               if (mdp->cd->shift_rd0)
+               if (mdp->cd->hw_checksum)
                        desc_status >>= 16;
 
                skb = mdp->rx_skbuff[entry];
@@ -1528,44 +1594,46 @@ static void sh_eth_rcv_snd_enable(struct net_device *ndev)
        sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
 }
 
-/* error control function */
-static void sh_eth_error(struct net_device *ndev, u32 intr_status)
+/* E-MAC interrupt handler */
+static void sh_eth_emac_interrupt(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 felic_stat;
        u32 link_stat;
-       u32 mask;
 
-       if (intr_status & EESR_ECI) {
-               felic_stat = sh_eth_read(ndev, ECSR);
-               sh_eth_write(ndev, felic_stat, ECSR);   /* clear int */
-               if (felic_stat & ECSR_ICD)
-                       ndev->stats.tx_carrier_errors++;
-               if (felic_stat & ECSR_LCHNG) {
-                       /* Link Changed */
-                       if (mdp->cd->no_psr || mdp->no_ether_link) {
-                               goto ignore_link;
-                       } else {
-                               link_stat = (sh_eth_read(ndev, PSR));
-                               if (mdp->ether_link_active_low)
-                                       link_stat = ~link_stat;
-                       }
-                       if (!(link_stat & PHY_ST_LINK)) {
-                               sh_eth_rcv_snd_disable(ndev);
-                       } else {
-                               /* Link Up */
-                               sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0);
-                               /* clear int */
-                               sh_eth_modify(ndev, ECSR, 0, 0);
-                               sh_eth_modify(ndev, EESIPR, DMAC_M_ECI,
-                                             DMAC_M_ECI);
-                               /* enable tx and rx */
-                               sh_eth_rcv_snd_enable(ndev);
-                       }
+       felic_stat = sh_eth_read(ndev, ECSR) & sh_eth_read(ndev, ECSIPR);
+       sh_eth_write(ndev, felic_stat, ECSR);   /* clear int */
+       if (felic_stat & ECSR_ICD)
+               ndev->stats.tx_carrier_errors++;
+       if (felic_stat & ECSR_MPD)
+               pm_wakeup_event(&mdp->pdev->dev, 0);
+       if (felic_stat & ECSR_LCHNG) {
+               /* Link Changed */
+               if (mdp->cd->no_psr || mdp->no_ether_link)
+                       return;
+               link_stat = sh_eth_read(ndev, PSR);
+               if (mdp->ether_link_active_low)
+                       link_stat = ~link_stat;
+               if (!(link_stat & PHY_ST_LINK)) {
+                       sh_eth_rcv_snd_disable(ndev);
+               } else {
+                       /* Link Up */
+                       sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0);
+                       /* clear int */
+                       sh_eth_modify(ndev, ECSR, 0, 0);
+                       sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP);
+                       /* enable tx and rx */
+                       sh_eth_rcv_snd_enable(ndev);
                }
        }
+}
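
One subtle improvement in the new handler: the E-MAC status is masked with ECSIPR at read time, so sources that were never enabled are simply ignored rather than acted on spuriously. A sketch of the status-and-mask pattern (bit values are illustrative, not the real ECSR layout):

#include <stdio.h>

#define ECSR_ICD   0x01u  /* illustrative bit values */
#define ECSR_MPD   0x02u
#define ECSR_LCHNG 0x04u

int main(void)
{
        unsigned int ecsr   = ECSR_ICD | ECSR_MPD;   /* raw latched status */
        unsigned int ecsipr = ECSR_ICD | ECSR_LCHNG; /* enabled sources    */

        /* Service only what was both latched and enabled, then ack it. */
        unsigned int felic_stat = ecsr & ecsipr;

        if (felic_stat & ECSR_ICD)
                puts("carrier-detect error counted");
        if (felic_stat & ECSR_MPD)
                puts("magic packet wake (won't print: MPD not enabled)");
        printf("write 0x%02x back to ECSR to clear\n", felic_stat);
        return 0;
}
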
+
+/* error control function */
+static void sh_eth_error(struct net_device *ndev, u32 intr_status)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       u32 mask;
 
-ignore_link:
        if (intr_status & EESR_TWB) {
                /* Unused write back interrupt */
                if (intr_status & EESR_TABT) {  /* Transmit Abort int */
@@ -1646,14 +1714,16 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 
        /* Get interrupt status */
        intr_status = sh_eth_read(ndev, EESR);
-       /* Mask it with the interrupt mask, forcing ECI interrupt to be always
-        * enabled since it's the one that  comes thru regardless of the mask,
-        * and we need to fully handle it in sh_eth_error() in order to quench
-        * it as it doesn't get cleared by just writing 1 to the ECI bit...
+       /* Mask it with the interrupt mask, forcing the ECI interrupt to be
+        * always enabled since it's the one that comes through regardless of
+        * the mask, and we need to fully handle it in sh_eth_emac_interrupt()
+        * in order to quench it, as it doesn't get cleared by just writing 1
+        * to the ECI bit...
         */
        intr_enable = sh_eth_read(ndev, EESIPR);
-       intr_status &= intr_enable | DMAC_M_ECI;
-       if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
+       intr_status &= intr_enable | EESIPR_ECIIP;
+       if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI |
+                          cd->eesr_err_check))
                ret = IRQ_HANDLED;
        else
                goto out;
@@ -1685,6 +1755,10 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
                netif_wake_queue(ndev);
        }
 
+       /* E-MAC interrupt */
+       if (intr_status & EESR_ECI)
+               sh_eth_emac_interrupt(ndev);
+
        if (intr_status & cd->eesr_err_check) {
                /* Clear error interrupts */
                sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
@@ -1989,7 +2063,7 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
        add_reg(MAFCR);
        if (cd->rtrate)
                add_reg(RTRATE);
-       if (cd->hw_crc)
+       if (cd->hw_checksum)
                add_reg(CSMR);
        if (cd->select_mii)
                add_reg(RMII_MII);
@@ -2201,6 +2275,33 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
        return 0;
 }
 
+static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+
+       wol->supported = 0;
+       wol->wolopts = 0;
+
+       if (mdp->cd->magic && mdp->clk) {
+               wol->supported = WAKE_MAGIC;
+               wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
+       }
+}
+
+static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+
+       if (!mdp->cd->magic || !mdp->clk || wol->wolopts & ~WAKE_MAGIC)
+               return -EOPNOTSUPP;
+
+       mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
+
+       device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled);
+
+       return 0;
+}
+
 static const struct ethtool_ops sh_eth_ethtool_ops = {
        .get_regs_len   = sh_eth_get_regs_len,
        .get_regs       = sh_eth_get_regs,
@@ -2215,6 +2316,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
        .set_ringparam  = sh_eth_set_ringparam,
        .get_link_ksettings = sh_eth_get_link_ksettings,
        .set_link_ksettings = sh_eth_set_link_ksettings,
+       .get_wol        = sh_eth_get_wol,
+       .set_wol        = sh_eth_set_wol,
 };
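
For reference, the new ops are reached through the standard ETHTOOL_GWOL and
ETHTOOL_SWOL ioctls (roughly what "ethtool -s ethX wol g" issues under the
hood). A minimal userspace sketch of that path, assuming a hypothetical
interface name "eth0" and with error handling trimmed:

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&wol;

		/* Query first: sh_eth_get_wol() reports WAKE_MAGIC only if
		 * the chip has the feature and a clock was found at probe.
		 */
		if (!ioctl(fd, SIOCETHTOOL, &ifr) && (wol.supported & WAKE_MAGIC)) {
			wol.cmd = ETHTOOL_SWOL;
			wol.wolopts = WAKE_MAGIC;	/* lands in sh_eth_set_wol() */
			ioctl(fd, SIOCETHTOOL, &ifr);
		}
		close(fd);
		return 0;
	}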
 
 /* network device open function */
@@ -3017,6 +3120,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
                goto out_release;
        }
 
+       /* Get clock; if not found, that's OK, but Wake-on-LAN is unavailable */
+       mdp->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(mdp->clk))
+               mdp->clk = NULL;
+
        ndev->base_addr = res->start;
 
        spin_lock_init(&mdp->lock);
@@ -3111,6 +3219,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
        if (ret)
                goto out_napi_del;
 
+       if (mdp->cd->magic && mdp->clk)
+               device_set_wakeup_capable(&pdev->dev, 1);
+
        /* print device information */
        netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
                    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
@@ -3150,15 +3261,67 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
 
 #ifdef CONFIG_PM
 #ifdef CONFIG_PM_SLEEP
+static int sh_eth_wol_setup(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+
+       /* Only allow ECI interrupts */
+       synchronize_irq(ndev->irq);
+       napi_disable(&mdp->napi);
+       sh_eth_write(ndev, EESIPR_ECIIP, EESIPR);
+
+       /* Enable MagicPacket */
+       sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
+
+       /* Increase clock usage count so the device won't be suspended */
+       clk_enable(mdp->clk);
+
+       return enable_irq_wake(ndev->irq);
+}
+
+static int sh_eth_wol_restore(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       int ret;
+
+       napi_enable(&mdp->napi);
+
+       /* Disable MagicPacket */
+       sh_eth_modify(ndev, ECMR, ECMR_MPDE, 0);
+
+       /* The device needs to be reset to restore MagicPacket logic
+        * for the next wakeup. Closing and reopening the device resets
+        * it and restores all registers. This is what happens during
+        * suspend and resume without WoL enabled.
+        */
+       ret = sh_eth_close(ndev);
+       if (ret < 0)
+               return ret;
+       ret = sh_eth_open(ndev);
+       if (ret < 0)
+               return ret;
+
+       /* Restore clock usage count */
+       clk_disable(mdp->clk);
+
+       return disable_irq_wake(ndev->irq);
+}
+
 static int sh_eth_suspend(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
+       struct sh_eth_private *mdp = netdev_priv(ndev);
        int ret = 0;
 
-       if (netif_running(ndev)) {
-               netif_device_detach(ndev);
+       if (!netif_running(ndev))
+               return 0;
+
+       netif_device_detach(ndev);
+
+       if (mdp->wol_enabled)
+               ret = sh_eth_wol_setup(ndev);
+       else
                ret = sh_eth_close(ndev);
-       }
 
        return ret;
 }
@@ -3166,14 +3329,21 @@ static int sh_eth_suspend(struct device *dev)
 static int sh_eth_resume(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
+       struct sh_eth_private *mdp = netdev_priv(ndev);
        int ret = 0;
 
-       if (netif_running(ndev)) {
+       if (!netif_running(ndev))
+               return 0;
+
+       if (mdp->wol_enabled)
+               ret = sh_eth_wol_restore(ndev);
+       else
                ret = sh_eth_open(ndev);
-               if (ret < 0)
-                       return ret;
-               netif_device_attach(ndev);
-       }
+
+       if (ret < 0)
+               return ret;
+
+       netif_device_attach(ndev);
 
        return ret;
 }
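
The suspend/resume pair plugs into the platform driver through dev_pm_ops in
the usual way; a minimal sketch of the wiring, assuming only system-sleep ops
(the driver's actual declaration may also carry runtime-PM callbacks, and the
driver name here is assumed):

	static const struct dev_pm_ops sh_eth_dev_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(sh_eth_suspend, sh_eth_resume)
	};

	static struct platform_driver sh_eth_driver = {
		/* .probe = sh_eth_drv_probe, .remove = sh_eth_drv_remove, ... */
		.driver = {
			.name	= "sh-eth",
			.pm	= &sh_eth_dev_pm_ops,
		},
	};
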
index d050f37f3e0fb802b5063b2a1421857f77e34c87..a6753ccba711cd0dc331e132eaa7d0f20795f10c 100644 (file)
@@ -265,22 +265,38 @@ enum EESR_BIT {
                                 EESR_RTO)
 #define DEFAULT_EESR_ERR_CHECK (EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
                                 EESR_RDE | EESR_RFRMER | EESR_ADE | \
-                                EESR_TFE | EESR_TDE | EESR_ECI)
+                                EESR_TFE | EESR_TDE)
 
 /* EESIPR */
-enum DMAC_IM_BIT {
-       DMAC_M_TWB = 0x40000000, DMAC_M_TABT = 0x04000000,
-       DMAC_M_RABT = 0x02000000,
-       DMAC_M_RFRMER = 0x01000000, DMAC_M_ADF = 0x00800000,
-       DMAC_M_ECI = 0x00400000, DMAC_M_FTC = 0x00200000,
-       DMAC_M_TDE = 0x00100000, DMAC_M_TFE = 0x00080000,
-       DMAC_M_FRC = 0x00040000, DMAC_M_RDE = 0x00020000,
-       DMAC_M_RFE = 0x00010000, DMAC_M_TINT4 = 0x00000800,
-       DMAC_M_TINT3 = 0x00000400, DMAC_M_TINT2 = 0x00000200,
-       DMAC_M_TINT1 = 0x00000100, DMAC_M_RINT8 = 0x00000080,
-       DMAC_M_RINT5 = 0x00000010, DMAC_M_RINT4 = 0x00000008,
-       DMAC_M_RINT3 = 0x00000004, DMAC_M_RINT2 = 0x00000002,
-       DMAC_M_RINT1 = 0x00000001,
+enum EESIPR_BIT {
+       EESIPR_TWB1IP   = 0x80000000,
+       EESIPR_TWBIP    = 0x40000000,   /* same as TWB0IP */
+       EESIPR_TC1IP    = 0x20000000,
+       EESIPR_TUCIP    = 0x10000000,
+       EESIPR_ROCIP    = 0x08000000,
+       EESIPR_TABTIP   = 0x04000000,
+       EESIPR_RABTIP   = 0x02000000,
+       EESIPR_RFCOFIP  = 0x01000000,
+       EESIPR_ADEIP    = 0x00800000,
+       EESIPR_ECIIP    = 0x00400000,
+       EESIPR_FTCIP    = 0x00200000,   /* same as TC0IP */
+       EESIPR_TDEIP    = 0x00100000,
+       EESIPR_TFUFIP   = 0x00080000,
+       EESIPR_FRIP     = 0x00040000,
+       EESIPR_RDEIP    = 0x00020000,
+       EESIPR_RFOFIP   = 0x00010000,
+       EESIPR_CNDIP    = 0x00000800,
+       EESIPR_DLCIP    = 0x00000400,
+       EESIPR_CDIP     = 0x00000200,
+       EESIPR_TROIP    = 0x00000100,
+       EESIPR_RMAFIP   = 0x00000080,
+       EESIPR_CEEFIP   = 0x00000040,
+       EESIPR_CELFIP   = 0x00000020,
+       EESIPR_RRFIP    = 0x00000010,
+       EESIPR_RTLFIP   = 0x00000008,
+       EESIPR_RTSFIP   = 0x00000004,
+       EESIPR_PREIP    = 0x00000002,
+       EESIPR_CERFIP   = 0x00000001,
 };
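
The bits above are applied with the driver's read-modify-write helper;
sh_eth_modify() is used throughout this patch, and from its call sites it
behaves like the following sketch:

	/* Clear the bits in 'clear', then set the bits in 'set'. */
	static void sh_eth_modify(struct net_device *ndev, int enum_index,
				  u32 clear, u32 set)
	{
		sh_eth_write(ndev,
			     (sh_eth_read(ndev, enum_index) & ~clear) | set,
			     enum_index);
	}

For example, sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0) masks the E-MAC
interrupt, and passing EESIPR_ECIIP as both arguments unmasks it again, as in
the link-up path earlier in this patch.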
 
 /* Receive descriptor 0 bits */
@@ -339,7 +355,7 @@ enum FELIC_MODE_BIT {
        ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
        ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
        ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
-       ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
+       ECMR_MPDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
        ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004,
        ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001,
 };
@@ -488,11 +504,11 @@ struct sh_eth_cpu_data {
        unsigned rpadir:1;      /* E-DMAC have RPADIR */
        unsigned no_trimd:1;    /* E-DMAC DO NOT have TRIMD */
        unsigned no_ade:1;      /* E-DMAC DO NOT have ADE bit in EESR */
-       unsigned hw_crc:1;      /* E-DMAC have CSMR */
+       unsigned hw_checksum:1; /* E-DMAC has CSMR */
        unsigned select_mii:1;  /* EtherC have RMII_MII (MII select register) */
-       unsigned shift_rd0:1;   /* shift Rx descriptor word 0 right by 16 */
        unsigned rmiimode:1;    /* EtherC has RMIIMODE register */
        unsigned rtrate:1;      /* EtherC has RTRATE register */
+       unsigned magic:1;       /* EtherC has ECMR.MPDE and ECSR.MPD */
 };
 
 struct sh_eth_private {
@@ -501,6 +517,7 @@ struct sh_eth_private {
        const u16 *reg_offset;
        void __iomem *addr;
        void __iomem *tsu_addr;
+       struct clk *clk;
        u32 num_rx_ring;
        u32 num_tx_ring;
        dma_addr_t rx_desc_dma;
@@ -529,6 +546,7 @@ struct sh_eth_private {
        unsigned no_ether_link:1;
        unsigned ether_link_active_low:1;
        unsigned is_opened:1;
+       unsigned wol_enabled:1;
 };
 
 static inline void sh_eth_soft_swap(char *src, int len)
index 7c450b5a1138e28b41b23bf48d5c5ce65f18ea82..0f63a44a955deb4de7b8da6c69b5dd6125d19dad 100644 (file)
@@ -2517,7 +2517,7 @@ static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
        }
 
        if (credits < budget)
-               napi_complete(napi);
+               napi_complete_done(napi, credits);
 
        rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
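
napi_complete_done() differs from napi_complete() in that it reports how much
work the poll actually did, letting the core decide whether to keep polling
(e.g. honouring gro_flush_timeout) before re-arming interrupts. The canonical
poll shape after this change, with hypothetical foo_* helpers and a minimal
private struct standing in for the driver specifics:

	#include <linux/netdevice.h>

	struct foo_priv {
		struct napi_struct napi;
		/* ... */
	};

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
		int work_done = foo_process_rx(priv, budget);	/* hypothetical */

		/* Only stop polling when under budget, and tell the core
		 * exactly how much work was done.
		 */
		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			foo_enable_rx_irq(priv);		/* hypothetical */
		}
		return work_done;
	}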
 
index cddcff5a00a7c63a7e426b601de990c8f5b269cf..d54490d3f7ad3228dc561fb58a51b142042259f3 100644 (file)
@@ -1563,7 +1563,7 @@ static int sxgbe_poll(struct napi_struct *napi, int budget)
 
        work_done = sxgbe_rx(priv, budget);
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
        }
 
@@ -1706,11 +1706,9 @@ static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
  *  This function is a driver entry point, invoked whenever the ifconfig
  *  command is executed to read device statistics. Statistics are the
  *  number of bytes sent or received, errors that occurred, etc.
- *  Return value:
- *  This function returns various statistical information of device.
  */
-static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
-                                                  struct rtnl_link_stats64 *stats)
+static void sxgbe_get_stats64(struct net_device *dev,
+                             struct rtnl_link_stats64 *stats)
 {
        struct sxgbe_priv_data *priv = netdev_priv(dev);
        void __iomem *ioaddr = priv->ioaddr;
@@ -1761,8 +1759,6 @@ static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
                                                 SXGBE_MMC_TXUFLWHI_GBCNT_REG);
        writel(0, ioaddr + SXGBE_MMC_CTL_REG);
        spin_unlock(&priv->stats_lock);
-
-       return stats;
 }
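
.ndo_get_stats64 now returns void: the core zeroes the structure before
calling the op and treats it as a pure fill of *stats, so there is nothing
useful to hand back. A driver op under the new prototype reduces to this
shape (the foo_* names are hypothetical):

	#include <linux/netdevice.h>

	struct foo_priv {
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	};

	static void foo_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
	{
		struct foo_priv *priv = netdev_priv(dev);

		/* Fill in place; dev_get_stats() pre-zeroed *stats. */
		stats->rx_packets = priv->rx_packets;
		stats->rx_bytes   = priv->rx_bytes;
		stats->tx_packets = priv->tx_packets;
		stats->tx_bytes   = priv->tx_bytes;
	}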
 
 /*  sxgbe_set_features - entry point to set offload features of the device.
index 17d83f37fbf26cbeee5e7aa4fe1c66d923d41a58..41ad07d45144ec79f373052e3ee5742545ba5eed 100644 (file)
@@ -433,6 +433,9 @@ typedef union efx_oword {
                (oword).u64[1] = (from).u64[1] & (mask).u64[1]; \
        } while (0)
 
+#define EFX_AND_QWORD(qword, from, mask)                       \
+               (qword).u64[0] = (from).u64[0] & (mask).u64[0]
+
 #define EFX_OR_OWORD(oword, from, mask)                                \
        do {                                                    \
                (oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
index 5eb0e684fd76a3de1f46210f9a9b6118d98041e3..92e1c6d8b2937e0fc15a30162903cca675b4faa7 100644 (file)
@@ -60,15 +60,33 @@ struct efx_ef10_vlan {
        u16 vid;
 };
 
+enum efx_ef10_default_filters {
+       EFX_EF10_BCAST,
+       EFX_EF10_UCDEF,
+       EFX_EF10_MCDEF,
+       EFX_EF10_VXLAN4_UCDEF,
+       EFX_EF10_VXLAN4_MCDEF,
+       EFX_EF10_VXLAN6_UCDEF,
+       EFX_EF10_VXLAN6_MCDEF,
+       EFX_EF10_NVGRE4_UCDEF,
+       EFX_EF10_NVGRE4_MCDEF,
+       EFX_EF10_NVGRE6_UCDEF,
+       EFX_EF10_NVGRE6_MCDEF,
+       EFX_EF10_GENEVE4_UCDEF,
+       EFX_EF10_GENEVE4_MCDEF,
+       EFX_EF10_GENEVE6_UCDEF,
+       EFX_EF10_GENEVE6_MCDEF,
+
+       EFX_EF10_NUM_DEFAULT_FILTERS
+};
+
 /* Per-VLAN filters information */
 struct efx_ef10_filter_vlan {
        struct list_head list;
        u16 vid;
        u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
        u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
-       u16 ucdef;
-       u16 bcast;
-       u16 mcdef;
+       u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
 };
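
Folding ucdef/bcast/mcdef into one indexed array lets the insert and remove
paths iterate instead of special-casing three fields. A new vlan would
plausibly start with every slot marked invalid, roughly as below (the helper
name here is hypothetical; EFX_EF10_FILTER_ID_INVALID is the sentinel used
elsewhere in this patch):

	static void efx_ef10_vlan_init_default_filters(
				struct efx_ef10_filter_vlan *vlan)
	{
		unsigned int i;

		/* No broadcast/mismatch/encap default filters inserted yet. */
		for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
			vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
	}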
 
 struct efx_ef10_dev_addr {
@@ -78,7 +96,7 @@ struct efx_ef10_dev_addr {
 struct efx_ef10_filter_table {
 /* The MCDI match masks supported by this fw & hw, in order of priority */
        u32 rx_match_mcdi_flags[
-               MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
+               MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
        unsigned int rx_match_count;
 
        struct {
@@ -114,6 +132,23 @@ static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
 static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
                                              struct efx_ef10_filter_vlan *vlan);
 static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
+static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);
+
+static u32 efx_ef10_filter_get_unsafe_id(u32 filter_id)
+{
+       WARN_ON_ONCE(filter_id == EFX_EF10_FILTER_ID_INVALID);
+       return filter_id & (HUNT_FILTER_TBL_ROWS - 1);
+}
+
+static unsigned int efx_ef10_filter_get_unsafe_pri(u32 filter_id)
+{
+       return filter_id / (HUNT_FILTER_TBL_ROWS * 2);
+}
+
+static u32 efx_ef10_make_filter_id(unsigned int pri, u16 idx)
+{
+       return pri * HUNT_FILTER_TBL_ROWS * 2 + idx;
+}
 
 static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
 {
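
The ID space doubles because encap match types get their own priority rows.
Assuming HUNT_FILTER_TBL_ROWS is 8192 (its value in the sfc driver at the
time, and a power of two, which the masking relies on), the encode/decode
helpers round-trip as in this self-contained check:

	#include <assert.h>

	#define HUNT_FILTER_TBL_ROWS 8192	/* assumed for the sketch */

	int main(void)
	{
		unsigned int pri = 3, idx = 42;
		unsigned int id = pri * HUNT_FILTER_TBL_ROWS * 2 + idx;

		assert(id == 49194);
		/* efx_ef10_filter_get_unsafe_id(): low bits are the row */
		assert((id & (HUNT_FILTER_TBL_ROWS - 1)) == idx);
		/* efx_ef10_filter_get_unsafe_pri(): high bits are the pri */
		assert(id / (HUNT_FILTER_TBL_ROWS * 2) == pri);
		return 0;
	}
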
@@ -197,11 +232,15 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
        nic_data->datapath_caps =
                MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
 
-       if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+       if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
                nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
                                GET_CAPABILITIES_V2_OUT_FLAGS2);
-       else
+               nic_data->piobuf_size = MCDI_WORD(outbuf,
+                               GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF);
+       } else {
                nic_data->datapath_caps2 = 0;
+               nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE;
+       }
 
        /* record the DPCPU firmware IDs to determine VEB vswitching support.
         */
@@ -547,7 +586,6 @@ static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
 static int efx_ef10_probe(struct efx_nic *efx)
 {
        struct efx_ef10_nic_data *nic_data;
-       struct net_device *net_dev = efx->net_dev;
        int i, rc;
 
        /* We can have one VI for each 8K region.  However, until we
@@ -603,6 +641,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
        if (rc)
                goto fail2;
 
+       mutex_init(&nic_data->udp_tunnels_lock);
+
        /* Reset (most) configuration for this function */
        rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
        if (rc)
@@ -637,7 +677,6 @@ static int efx_ef10_probe(struct efx_nic *efx)
        if (rc < 0)
                goto fail5;
        efx->port_num = rc;
-       net_dev->dev_port = rc;
 
        rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
        if (rc)
@@ -692,6 +731,14 @@ fail5:
 fail4:
        device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
 fail3:
+       efx_mcdi_detach(efx);
+
+       mutex_lock(&nic_data->udp_tunnels_lock);
+       memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
+       (void)efx_ef10_set_udp_tnl_ports(efx, true);
+       mutex_unlock(&nic_data->udp_tunnels_lock);
+       mutex_destroy(&nic_data->udp_tunnels_lock);
+
        efx_mcdi_fini(efx);
 fail2:
        efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
@@ -825,8 +872,8 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
                        offset = ((efx->tx_channel_offset + efx->n_tx_channels -
                                   tx_queue->channel->channel - 1) *
                                  efx_piobuf_size);
-                       index = offset / ER_DZ_TX_PIOBUF_SIZE;
-                       offset = offset % ER_DZ_TX_PIOBUF_SIZE;
+                       index = offset / nic_data->piobuf_size;
+                       offset = offset % nic_data->piobuf_size;
 
                        /* When the host page size is 4K, the first
                         * host page in the WC mapping may be within
@@ -961,6 +1008,15 @@ static void efx_ef10_remove(struct efx_nic *efx)
        device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
        device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
 
+       efx_mcdi_detach(efx);
+
+       memset(nic_data->udp_tunnels, 0, sizeof(nic_data->udp_tunnels));
+       mutex_lock(&nic_data->udp_tunnels_lock);
+       (void)efx_ef10_set_udp_tnl_ports(efx, true);
+       mutex_unlock(&nic_data->udp_tunnels_lock);
+
+       mutex_destroy(&nic_data->udp_tunnels_lock);
+
        efx_mcdi_fini(efx);
        efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
        kfree(nic_data);
@@ -1161,14 +1217,20 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
         * functions of the controller.
         */
        if (efx_piobuf_size != 0 &&
-           ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
+           nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
            efx->n_tx_channels) {
                unsigned int n_piobufs =
                        DIV_ROUND_UP(efx->n_tx_channels,
-                                    ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
+                                    nic_data->piobuf_size / efx_piobuf_size);
 
                rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
-               if (rc)
+               if (rc == -ENOSPC)
+                       netif_dbg(efx, probe, efx->net_dev,
+                                 "out of PIO buffers; cannot allocate more\n");
+               else if (rc == -EPERM)
+                       netif_dbg(efx, probe, efx->net_dev,
+                                 "not permitted to allocate PIO buffers\n");
+               else if (rc)
                        netif_err(efx, probe, efx->net_dev,
                                  "failed to allocate PIO buffers (%d)\n", rc);
                else
@@ -1315,15 +1377,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
                                efx_ef10_free_piobufs(efx);
                }
 
-               /* Log an error on failure, but this is non-fatal */
-               if (rc)
+               /* Log an error on failure, but this is non-fatal.
+        * Permission errors are less important; we've presumably
+                * had the PIO buffer licence removed.
+                */
+               if (rc == -EPERM)
+                       netif_dbg(efx, drv, efx->net_dev,
+                                 "not permitted to restore PIO buffers\n");
+               else if (rc)
                        netif_err(efx, drv, efx->net_dev,
                                  "failed to restore PIO buffers (%d)\n", rc);
                nic_data->must_restore_piobufs = false;
        }
 
        /* don't fail init if RSS setup doesn't work */
-       rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+       rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table, NULL);
        efx->rss_active = (rc == 0);
 
        return 0;
@@ -2360,7 +2428,11 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
                /* Create TX descriptor ring entry */
                if (buffer->flags & EFX_TX_BUF_OPTION) {
                        *txd = buffer->option;
+                       if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1)
+                               /* PIO descriptor */
+                               tx_queue->packet_write_count = tx_queue->write_count;
                } else {
+                       tx_queue->packet_write_count = tx_queue->write_count;
                        BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
                        EFX_POPULATE_QWORD_3(
                                *txd,
@@ -2529,7 +2601,7 @@ static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
 }
 
 static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
-                                      const u32 *rx_indir_table)
+                                      const u32 *rx_indir_table, const u8 *key)
 {
        MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
        MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
@@ -2540,6 +2612,11 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
        BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
                     MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
 
+       /* This iterates over the length of efx->rx_indir_table, but copies
+        * bytes from rx_indir_table.  That's because the latter is a pointer
+        * rather than an array, though it should have the same length.
+        * The efx->rx_hash_key loop below is similar.
+        */
        for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
                MCDI_PTR(tablebuf,
                         RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
@@ -2555,8 +2632,7 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
        BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
                     MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
        for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
-               MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
-                       efx->rx_hash_key[i];
+               MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];
 
        return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
                            sizeof(keybuf), NULL, 0, NULL);
@@ -2589,7 +2665,8 @@ static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
 }
 
 static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
-                                                const u32 *rx_indir_table)
+                                                const u32 *rx_indir_table,
+                                                const u8 *key)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        int rc;
@@ -2608,7 +2685,7 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
        }
 
        rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
-                                        rx_indir_table);
+                                        rx_indir_table, key);
        if (rc != 0)
                goto fail2;
 
@@ -2619,6 +2696,9 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
        if (rx_indir_table != efx->rx_indir_table)
                memcpy(efx->rx_indir_table, rx_indir_table,
                       sizeof(efx->rx_indir_table));
+       if (key != efx->rx_hash_key)
+               memcpy(efx->rx_hash_key, key, efx->type->rx_hash_key_size);
+
        return 0;
 
 fail2:
@@ -2629,15 +2709,69 @@ fail1:
        return rc;
 }
 
+static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN);
+       MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN);
+       MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN);
+       size_t outlen;
+       int rc, i;
+
+       BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
+                    MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
+
+       if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
+               return -ENOENT;
+
+       MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
+                      nic_data->rx_rss_context);
+       BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+                    MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
+       rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
+                         tablebuf, sizeof(tablebuf), &outlen);
+       if (rc != 0)
+               return rc;
+
+       if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN))
+               return -EIO;
+
+       for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+               efx->rx_indir_table[i] = MCDI_PTR(tablebuf,
+                               RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
+
+       MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
+                      nic_data->rx_rss_context);
+       BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
+                    MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+       rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
+                         keybuf, sizeof(keybuf), &outlen);
+       if (rc != 0)
+               return rc;
+
+       if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN))
+               return -EIO;
+
+       for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
+               efx->rx_hash_key[i] = MCDI_PTR(
+                               keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i];
+
+       return 0;
+}
+
 static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
-                                         const u32 *rx_indir_table)
+                                         const u32 *rx_indir_table,
+                                         const u8 *key)
 {
        int rc;
 
        if (efx->rss_spread == 1)
                return 0;
 
-       rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table);
+       if (!key)
+               key = efx->rx_hash_key;
+
+       rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key);
 
        if (rc == -ENOBUFS && !user) {
                unsigned context_size;
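
Threading a key pointer down to the MC is what lets the RSS hash key be set
independently of the indirection table (the ethtool RXFH path). On the caller
side this plausibly reduces to the sketch below; only the op signature and
the NULL-key fallback are taken from this hunk, the rest is assumption:

	/* Sketch of a .set_rxfh-style caller. */
	static int foo_set_rxfh(struct efx_nic *efx,
				const u32 *indir, const u8 *key)
	{
		if (!indir && !key)
			return 0;			/* nothing to change */
		if (!indir)
			indir = efx->rx_indir_table;	/* keep current table */
		/* key == NULL falls back to efx->rx_hash_key inside
		 * efx_ef10_pf_rx_push_rss_config() above.
		 */
		return efx->type->rx_push_rss_config(efx, true, indir, key);
	}
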
@@ -2675,6 +2809,8 @@ static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
 
 static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
                                          const u32 *rx_indir_table
+                                         __attribute__ ((unused)),
+                                         const u8 *key
                                          __attribute__ ((unused)))
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -3054,13 +3190,103 @@ static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
        ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
 }
 
+static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel,
+                                          unsigned int n_packets,
+                                          unsigned int rx_encap_hdr,
+                                          unsigned int rx_l3_class,
+                                          unsigned int rx_l4_class,
+                                          const efx_qword_t *event)
+{
+       struct efx_nic *efx = channel->efx;
+
+       if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) {
+               if (!efx->loopback_selftest)
+                       channel->n_rx_eth_crc_err += n_packets;
+               return EFX_RX_PKT_DISCARD;
+       }
+       if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) {
+               if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
+                            rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
+                            rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
+                            rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
+                            rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
+                       netdev_WARN(efx->net_dev,
+                                   "invalid class for RX_IPCKSUM_ERR: event="
+                                   EFX_QWORD_FMT "\n",
+                                   EFX_QWORD_VAL(*event));
+               if (!efx->loopback_selftest)
+                       *(rx_encap_hdr ?
+                         &channel->n_rx_outer_ip_hdr_chksum_err :
+                         &channel->n_rx_ip_hdr_chksum_err) += n_packets;
+               return 0;
+       }
+       if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
+               if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN &&
+                            ((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
+                              rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
+                             (rx_l4_class != ESE_DZ_L4_CLASS_TCP &&
+                              rx_l4_class != ESE_DZ_L4_CLASS_UDP))))
+                       netdev_WARN(efx->net_dev,
+                                   "invalid class for RX_TCPUDP_CKSUM_ERR: event="
+                                   EFX_QWORD_FMT "\n",
+                                   EFX_QWORD_VAL(*event));
+               if (!efx->loopback_selftest)
+                       *(rx_encap_hdr ?
+                         &channel->n_rx_outer_tcp_udp_chksum_err :
+                         &channel->n_rx_tcp_udp_chksum_err) += n_packets;
+               return 0;
+       }
+       if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_IP_INNER_CHKSUM_ERR)) {
+               if (unlikely(!rx_encap_hdr))
+                       netdev_WARN(efx->net_dev,
+                                   "invalid encapsulation type for RX_IP_INNER_CHKSUM_ERR: event="
+                                   EFX_QWORD_FMT "\n",
+                                   EFX_QWORD_VAL(*event));
+               else if (unlikely(rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
+                                 rx_l3_class != ESE_DZ_L3_CLASS_IP4_FRAG &&
+                                 rx_l3_class != ESE_DZ_L3_CLASS_IP6 &&
+                                 rx_l3_class != ESE_DZ_L3_CLASS_IP6_FRAG))
+                       netdev_WARN(efx->net_dev,
+                                   "invalid class for RX_IP_INNER_CHKSUM_ERR: event="
+                                   EFX_QWORD_FMT "\n",
+                                   EFX_QWORD_VAL(*event));
+               if (!efx->loopback_selftest)
+                       channel->n_rx_inner_ip_hdr_chksum_err += n_packets;
+               return 0;
+       }
+       if (EFX_QWORD_FIELD(*event, ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR)) {
+               if (unlikely(!rx_encap_hdr))
+                       netdev_WARN(efx->net_dev,
+                                   "invalid encapsulation type for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
+                                   EFX_QWORD_FMT "\n",
+                                   EFX_QWORD_VAL(*event));
+               else if (unlikely((rx_l3_class != ESE_DZ_L3_CLASS_IP4 &&
+                                  rx_l3_class != ESE_DZ_L3_CLASS_IP6) ||
+                                 (rx_l4_class != ESE_DZ_L4_CLASS_TCP &&
+                                  rx_l4_class != ESE_DZ_L4_CLASS_UDP)))
+                       netdev_WARN(efx->net_dev,
+                                   "invalid class for RX_TCP_UDP_INNER_CHKSUM_ERR: event="
+                                   EFX_QWORD_FMT "\n",
+                                   EFX_QWORD_VAL(*event));
+               if (!efx->loopback_selftest)
+                       channel->n_rx_inner_tcp_udp_chksum_err += n_packets;
+               return 0;
+       }
+
+       WARN_ON(1); /* No error bits were recognised */
+       return 0;
+}
+
 static int efx_ef10_handle_rx_event(struct efx_channel *channel,
                                    const efx_qword_t *event)
 {
-       unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
+       unsigned int rx_bytes, next_ptr_lbits, rx_queue_label;
+       unsigned int rx_l3_class, rx_l4_class, rx_encap_hdr;
        unsigned int n_descs, n_packets, i;
        struct efx_nic *efx = channel->efx;
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
        struct efx_rx_queue *rx_queue;
+       efx_qword_t errors;
        bool rx_cont;
        u16 flags = 0;
 
@@ -3071,8 +3297,14 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
        rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
        next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
        rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
+       rx_l3_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L3_CLASS);
        rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
        rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
+       rx_encap_hdr =
+               nic_data->datapath_caps &
+                       (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) ?
+               EFX_QWORD_FIELD(*event, ESF_EZ_RX_ENCAP_HDR) :
+               ESE_EZ_ENCAP_HDR_NONE;
 
        if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
                netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
@@ -3132,17 +3364,38 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel,
                n_packets = 1;
        }
 
-       if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
-               flags |= EFX_RX_PKT_DISCARD;
-
-       if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
-               channel->n_rx_ip_hdr_chksum_err += n_packets;
-       } else if (unlikely(EFX_QWORD_FIELD(*event,
-                                           ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
-               channel->n_rx_tcp_udp_chksum_err += n_packets;
-       } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
-                  rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
-               flags |= EFX_RX_PKT_CSUMMED;
+       EFX_POPULATE_QWORD_5(errors, ESF_DZ_RX_ECRC_ERR, 1,
+                                    ESF_DZ_RX_IPCKSUM_ERR, 1,
+                                    ESF_DZ_RX_TCPUDP_CKSUM_ERR, 1,
+                                    ESF_EZ_RX_IP_INNER_CHKSUM_ERR, 1,
+                                    ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR, 1);
+       EFX_AND_QWORD(errors, *event, errors);
+       if (unlikely(!EFX_QWORD_IS_ZERO(errors))) {
+               flags |= efx_ef10_handle_rx_event_errors(channel, n_packets,
+                                                        rx_encap_hdr,
+                                                        rx_l3_class, rx_l4_class,
+                                                        event);
+       } else {
+               bool tcpudp = rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
+                             rx_l4_class == ESE_DZ_L4_CLASS_UDP;
+
+               switch (rx_encap_hdr) {
+               case ESE_EZ_ENCAP_HDR_VXLAN: /* VxLAN or GENEVE */
+                       flags |= EFX_RX_PKT_CSUMMED; /* outer UDP csum */
+                       if (tcpudp)
+                               flags |= EFX_RX_PKT_CSUM_LEVEL; /* inner L4 */
+                       break;
+               case ESE_EZ_ENCAP_HDR_GRE:
+               case ESE_EZ_ENCAP_HDR_NONE:
+                       if (tcpudp)
+                               flags |= EFX_RX_PKT_CSUMMED;
+                       break;
+               default:
+                       netdev_WARN(efx->net_dev,
+                                   "unknown encapsulation type: event="
+                                   EFX_QWORD_FMT "\n",
+                                   EFX_QWORD_VAL(*event));
+               }
        }
 
        if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
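
EFX_RX_PKT_CSUM_LEVEL exists because CHECKSUM_UNNECESSARY alone only vouches
for one checksum, while for VXLAN/GENEVE the hardware has validated both the
outer UDP checksum and the inner L4 one. In skb terms the flag corresponds to
the standard tunnel pattern below (how the sfc RX path consumes the flag is
outside this hunk, so treat this as illustrative):

	#include <linux/skbuff.h>

	/* Report hardware-validated checksums on a received tunnel packet. */
	static void foo_mark_rx_csum(struct sk_buff *skb, bool inner_l4_ok)
	{
		skb->ip_summed = CHECKSUM_UNNECESSARY;	/* outer verified */
		if (inner_l4_ok)
			skb->csum_level = 1;	/* inner L4 verified too */
	}
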
@@ -3510,6 +3763,104 @@ efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
        table->entry[filter_idx].spec = (unsigned long)spec | flags;
 }
 
+static void
+efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx,
+                                          const struct efx_filter_spec *spec,
+                                          efx_dword_t *inbuf)
+{
+       enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
+       u32 match_fields = 0, uc_match, mc_match;
+
+       MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+                      efx_ef10_filter_is_exclusive(spec) ?
+                      MC_CMD_FILTER_OP_IN_OP_INSERT :
+                      MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
+
+       /* Convert match flags and values.  Unlike almost
+        * everything else in MCDI, these fields are in
+        * network byte order.
+        */
+#define COPY_VALUE(value, mcdi_field)                                       \
+       do {                                                         \
+               match_fields |=                                      \
+                       1 << MC_CMD_FILTER_OP_IN_MATCH_ ##           \
+                       mcdi_field ## _LBN;                          \
+               BUILD_BUG_ON(                                        \
+                       MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
+                       sizeof(value));                              \
+               memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
+                      &value, sizeof(value));                       \
+       } while (0)
+#define COPY_FIELD(gen_flag, gen_field, mcdi_field)                         \
+       if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) {     \
+               COPY_VALUE(spec->gen_field, mcdi_field);             \
+       }
+       /* Handle encap filters first.  They will always be mismatch
+        * (unknown UC or MC) filters.
+        */
+       if (encap_type) {
+               /* ether_type and outer_ip_proto need to be variables
+                * because COPY_VALUE wants to memcpy them
+                */
+               __be16 ether_type =
+                       htons(encap_type & EFX_ENCAP_FLAG_IPV6 ?
+                             ETH_P_IPV6 : ETH_P_IP);
+               u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE;
+               u8 outer_ip_proto;
+
+               switch (encap_type & EFX_ENCAP_TYPES_MASK) {
+               case EFX_ENCAP_TYPE_VXLAN:
+                       vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
+                       /* fallthrough */
+               case EFX_ENCAP_TYPE_GENEVE:
+                       COPY_VALUE(ether_type, ETHER_TYPE);
+                       outer_ip_proto = IPPROTO_UDP;
+                       COPY_VALUE(outer_ip_proto, IP_PROTO);
+                       /* We always need to set the type field, even
+                        * though we're not matching on the TNI.
+                        */
+                       MCDI_POPULATE_DWORD_1(inbuf,
+                               FILTER_OP_EXT_IN_VNI_OR_VSID,
+                               FILTER_OP_EXT_IN_VNI_TYPE,
+                               vni_type);
+                       break;
+               case EFX_ENCAP_TYPE_NVGRE:
+                       COPY_VALUE(ether_type, ETHER_TYPE);
+                       outer_ip_proto = IPPROTO_GRE;
+                       COPY_VALUE(outer_ip_proto, IP_PROTO);
+                       break;
+               default:
+                       WARN_ON(1);
+               }
+
+               uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
+               mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
+       } else {
+               uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+               mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
+       }
+
+       if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
+               match_fields |=
+                       is_multicast_ether_addr(spec->loc_mac) ?
+                       1 << mc_match :
+                       1 << uc_match;
+       COPY_FIELD(REM_HOST, rem_host, SRC_IP);
+       COPY_FIELD(LOC_HOST, loc_host, DST_IP);
+       COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
+       COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
+       COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
+       COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
+       COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
+       COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
+       COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
+       COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
+#undef COPY_FIELD
+#undef COPY_VALUE
+       MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
+                      match_fields);
+}
+
 static void efx_ef10_filter_push_prep(struct efx_nic *efx,
                                      const struct efx_filter_spec *spec,
                                      efx_dword_t *inbuf, u64 handle,
@@ -3518,7 +3869,7 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        u32 flags = spec->flags;
 
-       memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
+       memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
 
        /* Remove RSS flag if we don't have an RSS context. */
        if (flags & EFX_FILTER_FLAG_RX_RSS &&
@@ -3531,46 +3882,7 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
                               MC_CMD_FILTER_OP_IN_OP_REPLACE);
                MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
        } else {
-               u32 match_fields = 0;
-
-               MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
-                              efx_ef10_filter_is_exclusive(spec) ?
-                              MC_CMD_FILTER_OP_IN_OP_INSERT :
-                              MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
-
-               /* Convert match flags and values.  Unlike almost
-                * everything else in MCDI, these fields are in
-                * network byte order.
-                */
-               if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
-                       match_fields |=
-                               is_multicast_ether_addr(spec->loc_mac) ?
-                               1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
-                               1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
-#define COPY_FIELD(gen_flag, gen_field, mcdi_field)                         \
-               if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) {     \
-                       match_fields |=                                      \
-                               1 << MC_CMD_FILTER_OP_IN_MATCH_ ##           \
-                               mcdi_field ## _LBN;                          \
-                       BUILD_BUG_ON(                                        \
-                               MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
-                               sizeof(spec->gen_field));                    \
-                       memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
-                              &spec->gen_field, sizeof(spec->gen_field));   \
-               }
-               COPY_FIELD(REM_HOST, rem_host, SRC_IP);
-               COPY_FIELD(LOC_HOST, loc_host, DST_IP);
-               COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
-               COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
-               COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
-               COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
-               COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
-               COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
-               COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
-               COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
-#undef COPY_FIELD
-               MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
-                              match_fields);
+               efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf);
        }
 
        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
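
To make the macro pair above concrete, a single COPY_FIELD() invocation from
the helper, COPY_FIELD(REM_PORT, rem_port, SRC_PORT), expands to roughly the
following (whitespace rearranged; the BUILD_BUG_ON guards against the MCDI
field being narrower than the spec field):

	if (spec->match_flags & EFX_FILTER_MATCH_REM_PORT) {
		match_fields |= 1 << MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN;
		BUILD_BUG_ON(MC_CMD_FILTER_OP_IN_SRC_PORT_LEN <
			     sizeof(spec->rem_port));
		memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_SRC_PORT),
		       &spec->rem_port, sizeof(spec->rem_port));
	}
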
@@ -3599,8 +3911,8 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
                                const struct efx_filter_spec *spec,
                                u64 *handle, bool replacing)
 {
-       MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
-       MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
        int rc;
 
        efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
@@ -3615,37 +3927,58 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
 
 static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
 {
+       enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
        unsigned int match_flags = spec->match_flags;
+       unsigned int uc_match, mc_match;
        u32 mcdi_flags = 0;
 
+#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) {         \
+               unsigned int  old_match_flags = match_flags;            \
+               match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag;          \
+               if (match_flags != old_match_flags)                     \
+                       mcdi_flags |=                                   \
+                               (1 << ((encap) ?                        \
+                                      MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \
+                                      mcdi_field ## _LBN :             \
+                                      MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\
+                                      mcdi_field ## _LBN));            \
+       }
+       /* inner or outer based on encap type */
+       MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type);
+       MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type);
+       MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type);
+       MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type);
+       MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type);
+       MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type);
+       MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type);
+       MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type);
+       /* always outer */
+       MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false);
+       MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false);
+#undef MAP_FILTER_TO_MCDI_FLAG
+
+       /* special handling for encap type, and mismatch */
+       if (encap_type) {
+               match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE;
+               mcdi_flags |=
+                       (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
+               mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
+
+               uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
+               mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
+       } else {
+               uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+               mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
+       }
+
        if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
                match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
                mcdi_flags |=
                        is_multicast_ether_addr(spec->loc_mac) ?
-                       (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) :
-                       (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN);
+                       1 << mc_match :
+                       1 << uc_match;
        }
 
-#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field) {                        \
-               unsigned int old_match_flags = match_flags;             \
-               match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag;          \
-               if (match_flags != old_match_flags)                     \
-                       mcdi_flags |=                                   \
-                               (1 << MC_CMD_FILTER_OP_IN_MATCH_ ##     \
-                                mcdi_field ## _LBN);                   \
-       }
-       MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP);
-       MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP);
-       MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC);
-       MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT);
-       MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC);
-       MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT);
-       MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE);
-       MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN);
-       MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN);
-       MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO);
-#undef MAP_FILTER_TO_MCDI_FLAG
-
        /* Did we map them all? */
        WARN_ON_ONCE(match_flags);
 
@@ -3877,7 +4210,7 @@ found:
 
        /* If successful, return the inserted filter ID */
        if (rc == 0)
-               rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
+               rc = efx_ef10_make_filter_id(match_pri, ins_index);
 
        wake_up_all(&table->waitq);
 out_unlock:
@@ -3900,7 +4233,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
                                           unsigned int priority_mask,
                                           u32 filter_id, bool by_index)
 {
-       unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
+       unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
        struct efx_ef10_filter_table *table = efx->filter_state;
        MCDI_DECLARE_BUF(inbuf,
                         MC_CMD_FILTER_OP_IN_HANDLE_OFST +
@@ -3927,7 +4260,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
        if (!spec ||
            (!by_index &&
             efx_ef10_filter_pri(table, spec) !=
-            filter_id / HUNT_FILTER_TBL_ROWS)) {
+            efx_ef10_filter_get_unsafe_pri(filter_id))) {
                rc = -ENOENT;
                goto out_unlock;
        }
@@ -3976,13 +4309,18 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
                               MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
                MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
                               table->entry[filter_idx].handle);
-               rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
-                                 inbuf, sizeof(inbuf), NULL, 0, NULL);
+               rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FILTER_OP,
+                                       inbuf, sizeof(inbuf), NULL, 0, NULL);
 
                spin_lock_bh(&efx->filter_lock);
-               if (rc == 0) {
+               if ((rc == 0) || (rc == -ENOENT)) {
+                       /* Filter removed OK or didn't actually exist */
                        kfree(spec);
                        efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+               } else {
+                       efx_mcdi_display_error(efx, MC_CMD_FILTER_OP,
+                                              MC_CMD_FILTER_OP_IN_LEN,
+                                              NULL, 0, rc);
                }
        }
 
@@ -4002,11 +4340,6 @@ static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
                                               filter_id, false);
 }
 
-static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
-{
-       return filter_id % HUNT_FILTER_TBL_ROWS;
-}
-
 static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
                                          enum efx_filter_priority priority,
                                          u32 filter_id)
@@ -4020,7 +4353,7 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
                                    enum efx_filter_priority priority,
                                    u32 filter_id, struct efx_filter_spec *spec)
 {
-       unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
+       unsigned int filter_idx = efx_ef10_filter_get_unsafe_id(filter_id);
        struct efx_ef10_filter_table *table = efx->filter_state;
        const struct efx_filter_spec *saved_spec;
        int rc;
@@ -4029,7 +4362,7 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
        saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
        if (saved_spec && saved_spec->priority == priority &&
            efx_ef10_filter_pri(table, saved_spec) ==
-           filter_id / HUNT_FILTER_TBL_ROWS) {
+           efx_ef10_filter_get_unsafe_pri(filter_id)) {
                *spec = *saved_spec;
                rc = 0;
        } else {
@@ -4081,7 +4414,7 @@ static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
 {
        struct efx_ef10_filter_table *table = efx->filter_state;
 
-       return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
+       return table->rx_match_count * HUNT_FILTER_TBL_ROWS * 2;
 }
 
 static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
@@ -4101,8 +4434,9 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
                                count = -EMSGSIZE;
                                break;
                        }
-                       buf[count++] = (efx_ef10_filter_pri(table, spec) *
-                                       HUNT_FILTER_TBL_ROWS +
+                       buf[count++] =
+                               efx_ef10_make_filter_id(
+                                       efx_ef10_filter_pri(table, spec),
                                        filter_idx);
                }
        }
@@ -4305,29 +4639,54 @@ efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
 
 #endif /* CONFIG_RFS_ACCEL */
 
-static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
+static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
 {
        int match_flags = 0;
 
-#define MAP_FLAG(gen_flag, mcdi_field) {                               \
+#define MAP_FLAG(gen_flag, mcdi_field) do {                            \
                u32 old_mcdi_flags = mcdi_flags;                        \
-               mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ##      \
-                               mcdi_field ## _LBN);                    \
+               mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##  \
+                                    mcdi_field ## _LBN);               \
                if (mcdi_flags != old_mcdi_flags)                       \
                        match_flags |= EFX_FILTER_MATCH_ ## gen_flag;   \
+       } while (0)
+
+       if (encap) {
+               /* encap filters must specify encap type */
+               match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+               /* and imply ethertype and ip proto */
+               mcdi_flags &=
+                       ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
+               mcdi_flags &=
+                       ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
+               /* VLAN tags refer to the outer packet */
+               MAP_FLAG(INNER_VID, INNER_VLAN);
+               MAP_FLAG(OUTER_VID, OUTER_VLAN);
+               /* everything else refers to the inner packet */
+               MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
+               MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
+               MAP_FLAG(REM_HOST, IFRM_SRC_IP);
+               MAP_FLAG(LOC_HOST, IFRM_DST_IP);
+               MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
+               MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
+               MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
+               MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
+               MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
+               MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
+       } else {
+               MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
+               MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
+               MAP_FLAG(REM_HOST, SRC_IP);
+               MAP_FLAG(LOC_HOST, DST_IP);
+               MAP_FLAG(REM_MAC, SRC_MAC);
+               MAP_FLAG(REM_PORT, SRC_PORT);
+               MAP_FLAG(LOC_MAC, DST_MAC);
+               MAP_FLAG(LOC_PORT, DST_PORT);
+               MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
+               MAP_FLAG(INNER_VID, INNER_VLAN);
+               MAP_FLAG(OUTER_VID, OUTER_VLAN);
+               MAP_FLAG(IP_PROTO, IP_PROTO);
        }
-       MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
-       MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
-       MAP_FLAG(REM_HOST, SRC_IP);
-       MAP_FLAG(LOC_HOST, DST_IP);
-       MAP_FLAG(REM_MAC, SRC_MAC);
-       MAP_FLAG(REM_PORT, SRC_PORT);
-       MAP_FLAG(LOC_MAC, DST_MAC);
-       MAP_FLAG(LOC_PORT, DST_PORT);
-       MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
-       MAP_FLAG(INNER_VID, INNER_VLAN);
-       MAP_FLAG(OUTER_VID, OUTER_VLAN);
-       MAP_FLAG(IP_PROTO, IP_PROTO);
 #undef MAP_FLAG
 
        /* Did we map them all? */
@@ -4354,6 +4713,7 @@ static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
 }
 
 static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
+                                           bool encap,
                                            enum efx_filter_match_flags match_flags)
 {
        unsigned int match_pri;
@@ -4362,7 +4722,7 @@ static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
        for (match_pri = 0;
             match_pri < table->rx_match_count;
             match_pri++) {
-               mf = efx_ef10_filter_match_flags_from_mcdi(
+               mf = efx_ef10_filter_match_flags_from_mcdi(encap,
                                table->rx_match_mcdi_flags[match_pri]);
                if (mf == match_flags)
                        return true;
@@ -4371,39 +4731,30 @@ static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
        return false;
 }
 
-static int efx_ef10_filter_table_probe(struct efx_nic *efx)
+static int
+efx_ef10_filter_table_probe_matches(struct efx_nic *efx,
+                                   struct efx_ef10_filter_table *table,
+                                   bool encap)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
-       struct efx_ef10_nic_data *nic_data = efx->nic_data;
-       struct net_device *net_dev = efx->net_dev;
        unsigned int pd_match_pri, pd_match_count;
-       struct efx_ef10_filter_table *table;
-       struct efx_ef10_vlan *vlan;
        size_t outlen;
        int rc;
 
-       if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
-               return -EINVAL;
-
-       if (efx->filter_state) /* already probed */
-               return 0;
-
-       table = kzalloc(sizeof(*table), GFP_KERNEL);
-       if (!table)
-               return -ENOMEM;
-
        /* Find out which RX filter types are supported, and their priorities */
        MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
+                      encap ?
+                      MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
                       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
        rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
                          inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
                          &outlen);
        if (rc)
-               goto fail;
+               return rc;
+
        pd_match_count = MCDI_VAR_ARRAY_LEN(
                outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
-       table->rx_match_count = 0;
 
        for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
                u32 mcdi_flags =
@@ -4411,7 +4762,7 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
                                outbuf,
                                GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
                                pd_match_pri);
-               rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
+               rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags);
                if (rc < 0) {
                        netif_dbg(efx, probe, efx->net_dev,
                                  "%s: fw flags %#x pri %u not supported in driver\n",
@@ -4426,10 +4777,40 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
                }
        }
 
+       return 0;
+}
+
+static int efx_ef10_filter_table_probe(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       struct net_device *net_dev = efx->net_dev;
+       struct efx_ef10_filter_table *table;
+       struct efx_ef10_vlan *vlan;
+       int rc;
+
+       if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+               return -EINVAL;
+
+       if (efx->filter_state) /* already probed */
+               return 0;
+
+       table = kzalloc(sizeof(*table), GFP_KERNEL);
+       if (!table)
+               return -ENOMEM;
+
+       table->rx_match_count = 0;
+       rc = efx_ef10_filter_table_probe_matches(efx, table, false);
+       if (rc)
+               goto fail;
+       if (nic_data->datapath_caps &
+                  (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+               rc = efx_ef10_filter_table_probe_matches(efx, table, true);
+       if (rc)
+               goto fail;
        if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
-           !(efx_ef10_filter_match_supported(table,
+           !(efx_ef10_filter_match_supported(table, false,
                (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
-             efx_ef10_filter_match_supported(table,
+             efx_ef10_filter_match_supported(table, false,
                (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
                netif_info(efx, probe, net_dev,
                           "VLAN filters are not supported in this firmware variant\n");
@@ -4475,10 +4856,13 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
 {
        struct efx_ef10_filter_table *table = efx->filter_state;
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       unsigned int invalid_filters = 0, failed = 0;
+       struct efx_ef10_filter_vlan *vlan;
        struct efx_filter_spec *spec;
        unsigned int filter_idx;
-       bool failed = false;
-       int rc;
+       u32 mcdi_flags;
+       int match_pri;
+       int rc, i;
 
        WARN_ON(!rwsem_is_locked(&efx->filter_sem));
 
@@ -4495,6 +4879,20 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
                if (!spec)
                        continue;
 
+               mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
+               match_pri = 0;
+               while (match_pri < table->rx_match_count &&
+                      table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
+                       ++match_pri;
+               if (match_pri >= table->rx_match_count) {
+                       invalid_filters++;
+                       goto not_restored;
+               }
+               if (spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT &&
+                   spec->rss_context != nic_data->rx_rss_context)
+                       netif_warn(efx, drv, efx->net_dev,
+                                  "Warning: unable to restore a filter with specific RSS context.\n");
+
                table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
                spin_unlock_bh(&efx->filter_lock);
 
@@ -4502,10 +4900,17 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
                                          &table->entry[filter_idx].handle,
                                          false);
                if (rc)
-                       failed = true;
-
+                       failed++;
                spin_lock_bh(&efx->filter_lock);
+
                if (rc) {
+not_restored:
+                       list_for_each_entry(vlan, &table->vlan_list, list)
+                               for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
+                                       if (vlan->default_filters[i] == filter_idx)
+                                               vlan->default_filters[i] =
+                                                       EFX_EF10_FILTER_ID_INVALID;
+
                        kfree(spec);
                        efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
                } else {
@@ -4516,9 +4921,17 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
 
        spin_unlock_bh(&efx->filter_lock);
 
+       /* This can happen validly if the MC's capabilities have changed, so
+        * is not an error.
+        */
+       if (invalid_filters)
+               netif_dbg(efx, drv, efx->net_dev,
+                         "Did not restore %u filters that are now unsupported.\n",
+                         invalid_filters);
+
        if (failed)
                netif_err(efx, hw, efx->net_dev,
-                         "unable to restore all filters\n");
+                         "unable to restore %u filters\n", failed);
        else
                nic_data->must_restore_filters = false;
 }
@@ -4575,7 +4988,7 @@ static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
        unsigned int filter_idx;
 
        if (*id != EFX_EF10_FILTER_ID_INVALID) {
-               filter_idx = efx_ef10_filter_get_unsafe_id(efx, *id);
+               filter_idx = efx_ef10_filter_get_unsafe_id(*id);
                if (!table->entry[filter_idx].spec)
                        netif_dbg(efx, drv, efx->net_dev,
                                  "marked null spec old %04x:%04x\n", *id,
@@ -4596,9 +5009,8 @@ static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
                efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
        for (i = 0; i < table->dev_mc_count; i++)
                efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
-       efx_ef10_filter_mark_one_old(efx, &vlan->ucdef);
-       efx_ef10_filter_mark_one_old(efx, &vlan->bcast);
-       efx_ef10_filter_mark_one_old(efx, &vlan->mcdef);
+       for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+               efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]);
 }
 
 /* Mark old filters that may need to be removed.
@@ -4711,11 +5123,13 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
                                rc = EFX_EF10_FILTER_ID_INVALID;
                        }
                }
-               ids[i] = efx_ef10_filter_get_unsafe_id(efx, rc);
+               ids[i] = efx_ef10_filter_get_unsafe_id(rc);
        }
 
        if (multicast && rollback) {
                /* Also need an Ethernet broadcast filter */
+               EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] !=
+                                    EFX_EF10_FILTER_ID_INVALID);
                efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
                eth_broadcast_addr(baddr);
                efx_filter_set_eth_local(&spec, vlan->vid, baddr);
@@ -4732,9 +5146,8 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
                        }
                        return rc;
                } else {
-                       EFX_WARN_ON_PARANOID(vlan->bcast !=
-                                            EFX_EF10_FILTER_ID_INVALID);
-                       vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
+                       vlan->default_filters[EFX_EF10_BCAST] =
+                               efx_ef10_filter_get_unsafe_id(rc);
                }
        }
 
@@ -4743,6 +5156,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
 
 static int efx_ef10_filter_insert_def(struct efx_nic *efx,
                                      struct efx_ef10_filter_vlan *vlan,
+                                     enum efx_encap_type encap_type,
                                      bool multicast, bool rollback)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -4750,6 +5164,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
        struct efx_filter_spec spec;
        u8 baddr[ETH_ALEN];
        int rc;
+       u16 *id;
 
        filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
 
@@ -4760,19 +5175,75 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
        else
                efx_filter_set_uc_def(&spec);
 
+       if (encap_type) {
+               if (nic_data->datapath_caps &
+                   (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+                       efx_filter_set_encap_type(&spec, encap_type);
+               else
+                       /* don't insert encap filters on non-supporting
+                        * platforms. ID will be left as INVALID.
+                        */
+                       return 0;
+       }
+
        if (vlan->vid != EFX_FILTER_VID_UNSPEC)
                efx_filter_set_eth_local(&spec, vlan->vid, NULL);
 
        rc = efx_ef10_filter_insert(efx, &spec, true);
        if (rc < 0) {
-               netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING,
-                            efx->net_dev,
-                            "%scast mismatch filter insert failed rc=%d\n",
-                            multicast ? "Multi" : "Uni", rc);
+               const char *um = multicast ? "Multicast" : "Unicast";
+               const char *encap_name = "";
+               const char *encap_ipv = "";
+
+               if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+                   EFX_ENCAP_TYPE_VXLAN)
+                       encap_name = "VXLAN ";
+               else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+                        EFX_ENCAP_TYPE_NVGRE)
+                       encap_name = "NVGRE ";
+               else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+                        EFX_ENCAP_TYPE_GENEVE)
+                       encap_name = "GENEVE ";
+               if (encap_type & EFX_ENCAP_FLAG_IPV6)
+                       encap_ipv = "IPv6 ";
+               else if (encap_type)
+                       encap_ipv = "IPv4 ";
+
+               /* unprivileged functions can't insert mismatch filters
+                * for encapsulated or unicast traffic, so downgrade
+                * those warnings to debug.
+                */
+               netif_cond_dbg(efx, drv, efx->net_dev,
+                              rc == -EPERM && (encap_type || !multicast), warn,
+                              "%s%s%s mismatch filter insert failed rc=%d\n",
+                              encap_name, encap_ipv, um, rc);
        } else if (multicast) {
-               EFX_WARN_ON_PARANOID(vlan->mcdef != EFX_EF10_FILTER_ID_INVALID);
-               vlan->mcdef = efx_ef10_filter_get_unsafe_id(efx, rc);
-               if (!nic_data->workaround_26807) {
+               /* mapping from encap types to default filter IDs (multicast) */
+               static enum efx_ef10_default_filters map[] = {
+                       [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
+                       [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
+                       [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF,
+                       [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF,
+                       [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
+                               EFX_EF10_VXLAN6_MCDEF,
+                       [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
+                               EFX_EF10_NVGRE6_MCDEF,
+                       [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
+                               EFX_EF10_GENEVE6_MCDEF,
+               };
+
+               /* quick bounds check (BCAST result impossible) */
+               BUILD_BUG_ON(EFX_EF10_BCAST != 0);
+               if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
+                       WARN_ON(1);
+                       return -EINVAL;
+               }
+               /* then follow map */
+               id = &vlan->default_filters[map[encap_type]];
+
+               EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
+               *id = efx_ef10_filter_get_unsafe_id(rc);
+               if (!nic_data->workaround_26807 && !encap_type) {
                        /* Also need an Ethernet broadcast filter */
                        efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
                                           filter_flags, 0);
@@ -4787,20 +5258,44 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
                                        /* Roll back the mc_def filter */
                                        efx_ef10_filter_remove_unsafe(
                                                        efx, EFX_FILTER_PRI_AUTO,
-                                                       vlan->mcdef);
-                                       vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
+                                                       *id);
+                                       *id = EFX_EF10_FILTER_ID_INVALID;
                                        return rc;
                                }
                        } else {
-                               EFX_WARN_ON_PARANOID(vlan->bcast !=
-                                                    EFX_EF10_FILTER_ID_INVALID);
-                               vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
+                               EFX_WARN_ON_PARANOID(
+                                       vlan->default_filters[EFX_EF10_BCAST] !=
+                                       EFX_EF10_FILTER_ID_INVALID);
+                               vlan->default_filters[EFX_EF10_BCAST] =
+                                       efx_ef10_filter_get_unsafe_id(rc);
                        }
                }
                rc = 0;
        } else {
-               EFX_WARN_ON_PARANOID(vlan->ucdef != EFX_EF10_FILTER_ID_INVALID);
-               vlan->ucdef = rc;
+               /* mapping from encap types to default filter IDs (unicast) */
+               static enum efx_ef10_default_filters map[] = {
+                       [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
+                       [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
+                       [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
+                       [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
+                       [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
+                               EFX_EF10_VXLAN6_UCDEF,
+                       [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
+                               EFX_EF10_NVGRE6_UCDEF,
+                       [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
+                               EFX_EF10_GENEVE6_UCDEF,
+               };
+
+               /* quick bounds check (BCAST result impossible) */
+               BUILD_BUG_ON(EFX_EF10_BCAST != 0);
+               if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
+                       WARN_ON(1);
+                       return -EINVAL;
+               }
+               /* then follow map */
+               id = &vlan->default_filters[map[encap_type]];
+               EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
+               *id = rc;
                rc = 0;
        }
        return rc;
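Both map[] tables above lean on the same C idiom: designated initializers leave every unlisted encap_type slot zero-initialised, and BUILD_BUG_ON(EFX_EF10_BCAST != 0) pins that zero to the one default-filter ID which can never be the result of a ucdef/mcdef insert, so map[key] == 0 reliably means "unmapped". A self-contained sketch of the idiom (illustrative names and values, not the driver's):

        #include <stdio.h>

        enum default_filters { BCAST = 0, MCDEF, VXLAN4_MCDEF };

        static const enum default_filters map[] = {
                [0 /* no encap */]   = MCDEF,
                [1 /* VXLAN/IPv4 */] = VXLAN4_MCDEF,
                /* key 2 intentionally left unmapped */
        };

        static int lookup(unsigned int key)
        {
                if (key >= sizeof(map) / sizeof(map[0]) || map[key] == 0)
                        return -1;      /* zero slot == BCAST == invalid */
                return map[key];
        }

        int main(void)
        {
                printf("%d %d\n", lookup(1), lookup(2));  /* prints: 2 -1 */
                return 0;
        }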
@@ -4894,7 +5389,7 @@ restore_filters:
        if (rc2)
                goto reset_nic;
 
-       netif_device_attach(efx->net_dev);
+       efx_device_attach_if_not_resetting(efx);
 
        return rc;
 
@@ -4923,7 +5418,8 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
 
        /* Insert/renew unicast filters */
        if (table->uc_promisc) {
-               efx_ef10_filter_insert_def(efx, vlan, false, false);
+               efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
+                                          false, false);
                efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
        } else {
                /* If any of the filters failed to insert, fall back to
@@ -4931,8 +5427,25 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
                 * our individual unicast filters.
                 */
                if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
-                       efx_ef10_filter_insert_def(efx, vlan, false, false);
+                       efx_ef10_filter_insert_def(efx, vlan,
+                                                  EFX_ENCAP_TYPE_NONE,
+                                                  false, false);
        }
+       efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
+                                  false, false);
+       efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
+                                             EFX_ENCAP_FLAG_IPV6,
+                                  false, false);
+       efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
+                                  false, false);
+       efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
+                                             EFX_ENCAP_FLAG_IPV6,
+                                  false, false);
+       efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
+                                  false, false);
+       efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
+                                             EFX_ENCAP_FLAG_IPV6,
+                                  false, false);
 
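The six efx_ef10_filter_insert_def() calls just added (mirrored for multicast at the end of this function) enumerate {VXLAN, NVGRE, GENEVE} x {IPv4, IPv6}. Their return values are deliberately ignored: encap defaults are best-effort, and insert_def already returns 0 on hardware without the VXLAN/NVGRE capability. Written table-driven, the same sequence would look like this (equivalent-behaviour sketch only):

        static const unsigned int encaps[] = {
                EFX_ENCAP_TYPE_VXLAN,
                EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6,
                EFX_ENCAP_TYPE_NVGRE,
                EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6,
                EFX_ENCAP_TYPE_GENEVE,
                EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6,
        };
        size_t i;

        for (i = 0; i < ARRAY_SIZE(encaps); i++)
                efx_ef10_filter_insert_def(efx, vlan, encaps[i],
                                           false, false);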
        /* Insert/renew multicast filters */
        /* If changing promiscuous state with cascaded multicast filters, remove
@@ -4946,7 +5459,9 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
                        /* If we failed to insert promiscuous filters, rollback
                         * and fall back to individual multicast filters
                         */
-                       if (efx_ef10_filter_insert_def(efx, vlan, true, true)) {
+                       if (efx_ef10_filter_insert_def(efx, vlan,
+                                                      EFX_ENCAP_TYPE_NONE,
+                                                      true, true)) {
                                /* Changing promisc state, so remove old filters */
                                efx_ef10_filter_remove_old(efx);
                                efx_ef10_filter_insert_addr_list(efx, vlan,
@@ -4956,7 +5471,9 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
                        /* If we failed to insert promiscuous filters, don't
                         * rollback.  Regardless, also insert the mc_list
                         */
-                       efx_ef10_filter_insert_def(efx, vlan, true, false);
+                       efx_ef10_filter_insert_def(efx, vlan,
+                                                  EFX_ENCAP_TYPE_NONE,
+                                                  true, false);
                        efx_ef10_filter_insert_addr_list(efx, vlan, true, false);
                }
        } else {
@@ -4969,11 +5486,28 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
                        /* Changing promisc state, so remove old filters */
                        if (nic_data->workaround_26807)
                                efx_ef10_filter_remove_old(efx);
-                       if (efx_ef10_filter_insert_def(efx, vlan, true, true))
+                       if (efx_ef10_filter_insert_def(efx, vlan,
+                                                      EFX_ENCAP_TYPE_NONE,
+                                                      true, true))
                                efx_ef10_filter_insert_addr_list(efx, vlan,
                                                                 true, false);
                }
        }
+       efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
+                                  true, false);
+       efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
+                                             EFX_ENCAP_FLAG_IPV6,
+                                  true, false);
+       efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
+                                  true, false);
+       efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
+                                             EFX_ENCAP_FLAG_IPV6,
+                                  true, false);
+       efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
+                                  true, false);
+       efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
+                                             EFX_ENCAP_FLAG_IPV6,
+                                  true, false);
 }
 
 /* Caller must hold efx->filter_sem for read if race against
@@ -5060,9 +5594,8 @@ static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
                vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
        for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
                vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
-       vlan->ucdef = EFX_EF10_FILTER_ID_INVALID;
-       vlan->bcast = EFX_EF10_FILTER_ID_INVALID;
-       vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
+       for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+               vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
 
        list_add_tail(&vlan->list, &table->vlan_list);
 
@@ -5089,9 +5622,10 @@ static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
        for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
                efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
                                              vlan->mc[i]);
-       efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->ucdef);
-       efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->bcast);
-       efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mcdef);
+       for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+               if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
+                       efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+                                                     vlan->default_filters[i]);
 
        kfree(vlan);
 }
@@ -5141,7 +5675,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
 
        if (was_enabled)
                efx_net_open(efx->net_dev);
-       netif_device_attach(efx->net_dev);
+       efx_device_attach_if_not_resetting(efx);
 
 #ifdef CONFIG_SFC_SRIOV
        if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
@@ -5540,6 +6074,20 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
        }
 }
 
+static int efx_ef10_get_phys_port_id(struct efx_nic *efx,
+                                    struct netdev_phys_item_id *ppid)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+       if (!is_valid_ether_addr(nic_data->port_id))
+               return -EOPNOTSUPP;
+
+       ppid->id_len = ETH_ALEN;
+       memcpy(ppid->id, nic_data->port_id, ppid->id_len);
+
+       return 0;
+}
+
 static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
 {
        if (proto != htons(ETH_P_8021Q))
@@ -5556,6 +6104,271 @@ static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
        return efx_ef10_del_vlan(efx, vid);
 }
 
+/* We rely on the MCDI wiping out our TX rings if it made any changes to the
+ * ports table, ensuring that any TSO descriptors that were made on a now-
+ * removed tunnel port will be blown away and won't break things when we try
+ * to transmit them using the new ports table.
+ */
+static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX);
+       MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
+       bool will_reset = false;
+       size_t num_entries = 0;
+       size_t inlen, outlen;
+       size_t i;
+       int rc;
+       efx_dword_t flags_and_num_entries;
+
+       WARN_ON(!mutex_is_locked(&nic_data->udp_tunnels_lock));
+
+       nic_data->udp_tunnels_dirty = false;
+
+       if (!(nic_data->datapath_caps &
+           (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))) {
+               efx_device_attach_if_not_resetting(efx);
+               return 0;
+       }
+
+       BUILD_BUG_ON(ARRAY_SIZE(nic_data->udp_tunnels) >
+                    MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
+
+       for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
+               if (nic_data->udp_tunnels[i].count &&
+                   nic_data->udp_tunnels[i].port) {
+                       efx_dword_t entry;
+
+                       EFX_POPULATE_DWORD_2(entry,
+                               TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT,
+                                       ntohs(nic_data->udp_tunnels[i].port),
+                               TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL,
+                                       nic_data->udp_tunnels[i].type);
+                       *_MCDI_ARRAY_DWORD(inbuf,
+                               SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES,
+                               num_entries++) = entry;
+               }
+       }
+
+       BUILD_BUG_ON((MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST -
+                     MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST) * 8 !=
+                    EFX_WORD_1_LBN);
+       BUILD_BUG_ON(MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN * 8 !=
+                    EFX_WORD_1_WIDTH);
+       EFX_POPULATE_DWORD_2(flags_and_num_entries,
+                            MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING,
+                               !!unloading,
+                            EFX_WORD_1, num_entries);
+       *_MCDI_DWORD(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS) =
+               flags_and_num_entries;
+
+       inlen = MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num_entries);
+
+       rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS,
+                               inbuf, inlen, outbuf, sizeof(outbuf), &outlen);
+       if (rc == -EIO) {
+               /* Most likely the MC rebooted due to another function also
+                * setting its tunnel port list. Mark the tunnel port list as
+                * dirty, so it will be pushed upon coming up from the reboot.
+                */
+               nic_data->udp_tunnels_dirty = true;
+               return 0;
+       }
+
+       if (rc) {
+               /* expected not available on unprivileged functions */
+               if (rc != -EPERM)
+                       netif_warn(efx, drv, efx->net_dev,
+                                  "Unable to set UDP tunnel ports; rc=%d.\n", rc);
+       } else if (MCDI_DWORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) &
+                  (1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN)) {
+               netif_info(efx, drv, efx->net_dev,
+                          "Rebooting MC due to UDP tunnel port list change\n");
+               will_reset = true;
+               if (unloading)
+                       /* Delay for the MC reset to complete. This will make
+                        * unloading other functions a bit smoother. This is a
+                        * race, but the other unload will work whichever way
+                        * it goes, this just avoids an unnecessary error
+                        * message.
+                        */
+                       msleep(100);
+       }
+       if (!will_reset && !unloading) {
+               /* The caller will have detached, relying on the MC reset to
+                * trigger a re-attach.  Since there won't be an MC reset, we
+                * have to do the attach ourselves.
+                */
+               efx_device_attach_if_not_resetting(efx);
+       }
+
+       return rc;
+}
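The BUILD_BUG_ON pair plus EFX_POPULATE_DWORD_2 above deal with an MCDI layout quirk: NUM_ENTRIES is a separate 16-bit field sitting immediately after FLAGS, so the driver fills both with a single dword store, placing the entry count in bits 16..31 (EFX_WORD_1). Stripped of the MCDI macros the packing is roughly this (sketch; UNLOADING_LBN stands in for the real bit position, which is not shown in this hunk):

        /* bits 0..15 carry the flags, bits 16..31 the entry count */
        u32 flags_and_num_entries = ((u32)!!unloading << UNLOADING_LBN) |
                                    ((u32)num_entries << 16);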
+
+static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       int rc = 0;
+
+       mutex_lock(&nic_data->udp_tunnels_lock);
+       if (nic_data->udp_tunnels_dirty) {
+               /* Make sure all TX are stopped while we modify the table, else
+                * we might race against an efx_features_check().
+                */
+               efx_device_detach_sync(efx);
+               rc = efx_ef10_set_udp_tnl_ports(efx, false);
+       }
+       mutex_unlock(&nic_data->udp_tunnels_lock);
+       return rc;
+}
+
+static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx,
+                                                            __be16 port)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       size_t i;
+
+       for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
+               if (!nic_data->udp_tunnels[i].count)
+                       continue;
+               if (nic_data->udp_tunnels[i].port == port)
+                       return &nic_data->udp_tunnels[i];
+       }
+       return NULL;
+}
+
+static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx,
+                                    struct efx_udp_tunnel tnl)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       struct efx_udp_tunnel *match;
+       char typebuf[8];
+       size_t i;
+       int rc;
+
+       if (!(nic_data->datapath_caps &
+             (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
+               return 0;
+
+       efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
+       netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n",
+                 typebuf, ntohs(tnl.port));
+
+       mutex_lock(&nic_data->udp_tunnels_lock);
+       /* Make sure all TX are stopped while we add to the table, else we
+        * might race against an efx_features_check().
+        */
+       efx_device_detach_sync(efx);
+
+       match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
+       if (match != NULL) {
+               if (match->type == tnl.type) {
+                       netif_dbg(efx, drv, efx->net_dev,
+                                 "Referencing existing tunnel entry\n");
+                       match->count++;
+                       /* No need to cause an MCDI update */
+                       rc = 0;
+                       goto unlock_out;
+               }
+               efx_get_udp_tunnel_type_name(match->type,
+                                            typebuf, sizeof(typebuf));
+               netif_dbg(efx, drv, efx->net_dev,
+                         "UDP port %d is already in use by %s\n",
+                         ntohs(tnl.port), typebuf);
+               rc = -EEXIST;
+               goto unlock_out;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
+               if (!nic_data->udp_tunnels[i].count) {
+                       nic_data->udp_tunnels[i] = tnl;
+                       nic_data->udp_tunnels[i].count = 1;
+                       rc = efx_ef10_set_udp_tnl_ports(efx, false);
+                       goto unlock_out;
+               }
+
+       netif_dbg(efx, drv, efx->net_dev,
+                 "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n",
+                 typebuf, ntohs(tnl.port));
+
+       rc = -ENOMEM;
+
+unlock_out:
+       mutex_unlock(&nic_data->udp_tunnels_lock);
+       return rc;
+}
+
+/* Called under the TX lock with the TX queue running, hence no-one can be
+ * in the middle of updating the UDP tunnels table.  However, they could
+ * have tried and failed the MCDI, in which case they'll have set the dirty
+ * flag before dropping their locks.
+ */
+static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+       if (!(nic_data->datapath_caps &
+             (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
+               return false;
+
+       if (nic_data->udp_tunnels_dirty)
+               /* SW table may not match HW state, so just assume we can't
+                * use any UDP tunnel offloads.
+                */
+               return false;
+
+       return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL;
+}
+
+static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx,
+                                    struct efx_udp_tunnel tnl)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       struct efx_udp_tunnel *match;
+       char typebuf[8];
+       int rc;
+
+       if (!(nic_data->datapath_caps &
+             (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
+               return 0;
+
+       efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
+       netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n",
+                 typebuf, ntohs(tnl.port));
+
+       mutex_lock(&nic_data->udp_tunnels_lock);
+       /* Make sure all TX are stopped while we remove from the table, else we
+        * might race against an efx_features_check().
+        */
+       efx_device_detach_sync(efx);
+
+       match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
+       if (match != NULL) {
+               if (match->type == tnl.type) {
+                       if (--match->count) {
+                               /* Port is still in use, so nothing to do */
+                               netif_dbg(efx, drv, efx->net_dev,
+                                         "UDP tunnel port %d remains active\n",
+                                         ntohs(tnl.port));
+                               rc = 0;
+                               goto out_unlock;
+                       }
+                       rc = efx_ef10_set_udp_tnl_ports(efx, false);
+                       goto out_unlock;
+               }
+               efx_get_udp_tunnel_type_name(match->type,
+                                            typebuf, sizeof(typebuf));
+               netif_warn(efx, drv, efx->net_dev,
+                          "UDP port %d is actually in use by %s, not removing\n",
+                          ntohs(tnl.port), typebuf);
+       }
+       rc = -ENOENT;
+
+out_unlock:
+       mutex_unlock(&nic_data->udp_tunnels_lock);
+       return rc;
+}
+
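Taken together, the add/del paths above reference-count each UDP port so that the expensive MCDI call, which may reboot the MC, fires only on 0->1 and 1->0 transitions. An illustrative call sequence (hypothetical caller; 4789 is the IANA VXLAN port):

        struct efx_udp_tunnel vx = {
                .type = TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN,
                .port = htons(4789),
        };

        efx_ef10_udp_tnl_add_port(efx, vx); /* count 0 -> 1: pushes MCDI */
        efx_ef10_udp_tnl_add_port(efx, vx); /* count 1 -> 2: no MCDI     */
        efx_ef10_udp_tnl_del_port(efx, vx); /* count 2 -> 1: port stays  */
        efx_ef10_udp_tnl_del_port(efx, vx); /* count 1 -> 0: pushes MCDI */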
 #define EF10_OFFLOAD_FEATURES          \
        (NETIF_F_IP_CSUM |              \
         NETIF_F_HW_VLAN_CTAG_FILTER |  \
@@ -5609,6 +6422,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
        .tx_write = efx_ef10_tx_write,
        .tx_limit_len = efx_ef10_tx_limit_len,
        .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
+       .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
        .rx_probe = efx_ef10_rx_probe,
        .rx_init = efx_ef10_rx_init,
        .rx_remove = efx_ef10_rx_remove,
@@ -5647,11 +6461,11 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
        .vswitching_probe = efx_ef10_vswitching_probe_vf,
        .vswitching_restore = efx_ef10_vswitching_restore_vf,
        .vswitching_remove = efx_ef10_vswitching_remove_vf,
-       .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id,
 #endif
        .get_mac_address = efx_ef10_get_mac_address_vf,
        .set_mac_address = efx_ef10_set_mac_address,
 
+       .get_phys_port_id = efx_ef10_get_phys_port_id,
        .revision = EFX_REV_HUNT_A0,
        .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
        .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
@@ -5659,6 +6473,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
        .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
        .can_rx_scatter = true,
        .always_rx_scatter = true,
+       .min_interrupt_mode = EFX_INT_MODE_MSIX,
        .max_interrupt_mode = EFX_INT_MODE_MSIX,
        .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
        .offload_features = EF10_OFFLOAD_FEATURES,
@@ -5666,6 +6481,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
        .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
        .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
                            1 << HWTSTAMP_FILTER_ALL,
+       .rx_hash_key_size = 40,
 };
 
 const struct efx_nic_type efx_hunt_a0_nic_type = {
@@ -5716,6 +6532,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
        .tx_write = efx_ef10_tx_write,
        .tx_limit_len = efx_ef10_tx_limit_len,
        .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
+       .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
        .rx_probe = efx_ef10_rx_probe,
        .rx_init = efx_ef10_rx_init,
        .rx_remove = efx_ef10_rx_remove,
@@ -5756,6 +6573,10 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
        .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
        .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
        .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
+       .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
+       .udp_tnl_add_port = efx_ef10_udp_tnl_add_port,
+       .udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
+       .udp_tnl_del_port = efx_ef10_udp_tnl_del_port,
 #ifdef CONFIG_SFC_SRIOV
        .sriov_configure = efx_ef10_sriov_configure,
        .sriov_init = efx_ef10_sriov_init,
@@ -5776,6 +6597,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
        .set_mac_address = efx_ef10_set_mac_address,
        .tso_versions = efx_ef10_tso_versions,
 
+       .get_phys_port_id = efx_ef10_get_phys_port_id,
        .revision = EFX_REV_HUNT_A0,
        .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
        .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
@@ -5783,6 +6605,8 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
        .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
        .can_rx_scatter = true,
        .always_rx_scatter = true,
+       .option_descriptors = true,
+       .min_interrupt_mode = EFX_INT_MODE_LEGACY,
        .max_interrupt_mode = EFX_INT_MODE_MSIX,
        .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
        .offload_features = EF10_OFFLOAD_FEATURES,
@@ -5790,4 +6614,5 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
        .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
        .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
                            1 << HWTSTAMP_FILTER_ALL,
+       .rx_hash_key_size = 40,
 };
index a949b9d27329e356146a18781cf92a14812c1631..b7e4345c990d55454f52cbcb9c7c4d5df66ef176 100644
@@ -6,6 +6,7 @@
  * under the terms of the GNU General Public License version 2 as published
  * by the Free Software Foundation, incorporated herein by reference.
  */
+#include <linux/etherdevice.h>
 #include <linux/pci.h>
 #include <linux/module.h>
 #include "net_driver.h"
@@ -548,13 +549,13 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
                vf->efx->type->filter_table_probe(vf->efx);
                up_write(&vf->efx->filter_sem);
                efx_net_open(vf->efx->net_dev);
-               netif_device_attach(vf->efx->net_dev);
+               efx_device_attach_if_not_resetting(vf->efx);
        }
 
        return 0;
 
 fail:
-       memset(vf->mac, 0, ETH_ALEN);
+       eth_zero_addr(vf->mac);
        return rc;
 }
 
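The memset() to eth_zero_addr() change here is what the new <linux/etherdevice.h> include at the top of this file pays for. The helper is just a named wrapper, essentially:

        static inline void eth_zero_addr(u8 *addr)
        {
                memset(addr, 0x00, ETH_ALEN);
        }

Using it documents intent (a MAC address is being cleared) and drops the hand-typed length constant.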
@@ -666,7 +667,7 @@ restore_filters:
                if (rc2)
                        goto reset_nic;
 
-               netif_device_attach(vf->efx->net_dev);
+               efx_device_attach_if_not_resetting(vf->efx);
        }
        return rc;
 
@@ -760,17 +761,3 @@ int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
 
        return 0;
 }
-
-int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
-                                   struct netdev_phys_item_id *ppid)
-{
-       struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
-       if (!is_valid_ether_addr(nic_data->port_id))
-               return -EOPNOTSUPP;
-
-       ppid->id_len = ETH_ALEN;
-       memcpy(ppid->id, nic_data->port_id, ppid->id_len);
-
-       return 0;
-}
index 9ceb7ef0a210622e8d3e7c552ccae0cf0e01c245..2aa444ed42de5c7c77a77d79459c86a186e64d74 100644
@@ -56,9 +56,6 @@ int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
 int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
                                     int link_state);
 
-int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
-                                   struct netdev_phys_item_id *ppid);
-
 int efx_ef10_vswitching_probe_pf(struct efx_nic *efx);
 int efx_ef10_vswitching_probe_vf(struct efx_nic *efx);
 int efx_ef10_vswitching_restore_pf(struct efx_nic *efx);
index 5a5dcad8c49acd121b251558fc0a14d2023c0f6e..334bcc6df6b2ba90a43da4baf7b44cc5ebfa1bac 100644
 #include <linux/aer.h>
 #include <linux/interrupt.h>
 #include "net_driver.h"
+#include <net/gre.h>
+#include <net/udp_tunnel.h>
 #include "efx.h"
 #include "nic.h"
 #include "selftest.h"
 #include "sriov.h"
 
 #include "mcdi.h"
+#include "mcdi_pcol.h"
 #include "workarounds.h"
 
 /**************************************************************************
@@ -88,6 +91,21 @@ const char *const efx_reset_type_names[] = {
        [RESET_TYPE_MCDI_TIMEOUT]       = "MCDI_TIMEOUT (FLR)",
 };
 
+/* UDP tunnel type names */
+static const char *const efx_udp_tunnel_type_names[] = {
+       [TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
+       [TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve",
+};
+
+void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
+{
+       if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) &&
+           efx_udp_tunnel_type_names[type] != NULL)
+               snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]);
+       else
+               snprintf(buf, buflen, "type %d", type);
+}
+
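efx_get_udp_tunnel_type_name() is a best-effort pretty-printer: known types resolve to a short name, anything else degrades to a numeric form instead of failing. For example (7 is an arbitrary unknown type value):

        char buf[8];

        efx_get_udp_tunnel_type_name(TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN,
                                     buf, sizeof(buf));   /* buf = "vxlan"  */
        efx_get_udp_tunnel_type_name(7, buf, sizeof(buf)); /* buf = "type 7" */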
 /* Reset workqueue. If any NIC has a hardware failure then a reset will be
  * queued onto this work queue. This is not a per-nic work queue, because
  * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
@@ -308,9 +326,6 @@ static int efx_poll(struct napi_struct *napi, int budget)
        struct efx_nic *efx = channel->efx;
        int spent;
 
-       if (!efx_channel_lock_napi(channel))
-               return budget;
-
        netif_vdbg(efx, intr, efx->net_dev,
                   "channel %d NAPI poll executing on CPU %d\n",
                   channel->channel, raw_smp_processor_id());
@@ -331,11 +346,10 @@ static int efx_poll(struct napi_struct *napi, int budget)
                 * since efx_nic_eventq_read_ack() will have no effect if
                 * interrupts have already been disabled.
                 */
-               napi_complete(napi);
-               efx_nic_eventq_read_ack(channel);
+               if (napi_complete_done(napi, spent))
+                       efx_nic_eventq_read_ack(channel);
        }
 
-       efx_channel_unlock_napi(channel);
        return spent;
 }
 
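The busy-poll locking removed above (efx_channel_lock_napi() and friends, plus the ndo_busy_poll hook deleted further down) is obsolete because core NAPI now arbitrates busy polling itself: napi_complete_done() returns true only if the instance really completed, so the event-queue IRQ ack becomes conditional on it. The poll routine thus takes the standard shape (generic sketch, helper names hypothetical):

        static int my_poll(struct napi_struct *napi, int budget)
        {
                int spent = process_ring(napi, budget); /* RX/TX work */

                if (spent < budget && napi_complete_done(napi, spent))
                        reenable_irq(napi); /* only if truly finished */

                return spent;
        }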
@@ -391,7 +405,6 @@ void efx_start_eventq(struct efx_channel *channel)
        channel->enabled = true;
        smp_wmb();
 
-       efx_channel_enable(channel);
        napi_enable(&channel->napi_str);
        efx_nic_eventq_read_ack(channel);
 }
@@ -403,8 +416,6 @@ void efx_stop_eventq(struct efx_channel *channel)
                return;
 
        napi_disable(&channel->napi_str);
-       while (!efx_channel_disable(channel))
-               usleep_range(1000, 20000);
        channel->enabled = false;
 }
 
@@ -865,7 +876,7 @@ out:
                efx_schedule_reset(efx, RESET_TYPE_DISABLE);
        } else {
                efx_start_all(efx);
-               netif_device_attach(efx->net_dev);
+               efx_device_attach_if_not_resetting(efx);
        }
        return rc;
 
@@ -1409,9 +1420,12 @@ static int efx_probe_interrupts(struct efx_nic *efx)
                                           xentries, 1, n_channels);
                if (rc < 0) {
                        /* Fall back to single channel MSI */
-                       efx->interrupt_mode = EFX_INT_MODE_MSI;
                        netif_err(efx, drv, efx->net_dev,
                                  "could not enable MSI-X\n");
+                       if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
+                               efx->interrupt_mode = EFX_INT_MODE_MSI;
+                       else
+                               return rc;
                } else if (rc < n_channels) {
                        netif_err(efx, drv, efx->net_dev,
                                  "WARNING: Insufficient MSI-X vectors"
@@ -1454,7 +1468,10 @@ static int efx_probe_interrupts(struct efx_nic *efx)
                } else {
                        netif_err(efx, drv, efx->net_dev,
                                  "could not enable MSI\n");
-                       efx->interrupt_mode = EFX_INT_MODE_LEGACY;
+                       if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
+                               efx->interrupt_mode = EFX_INT_MODE_LEGACY;
+                       else
+                               return rc;
                }
        }
 
@@ -2088,7 +2105,6 @@ static void efx_init_napi_channel(struct efx_channel *channel)
        channel->napi_dev = efx->net_dev;
        netif_napi_add(channel->napi_dev, &channel->napi_str,
                       efx_poll, napi_weight);
-       efx_channel_busy_poll_init(channel);
 }
 
 static void efx_init_napi(struct efx_nic *efx)
@@ -2138,37 +2154,6 @@ static void efx_netpoll(struct net_device *net_dev)
 
 #endif
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int efx_busy_poll(struct napi_struct *napi)
-{
-       struct efx_channel *channel =
-               container_of(napi, struct efx_channel, napi_str);
-       struct efx_nic *efx = channel->efx;
-       int budget = 4;
-       int old_rx_packets, rx_packets;
-
-       if (!netif_running(efx->net_dev))
-               return LL_FLUSH_FAILED;
-
-       if (!efx_channel_try_lock_poll(channel))
-               return LL_FLUSH_BUSY;
-
-       old_rx_packets = channel->rx_queue.rx_packets;
-       efx_process_channel(channel, budget);
-
-       rx_packets = channel->rx_queue.rx_packets - old_rx_packets;
-
-       /* There is no race condition with NAPI here.
-        * NAPI will automatically be rescheduled if it yielded during busy
-        * polling, because it was not able to take the lock and thus returned
-        * the full budget.
-        */
-       efx_channel_unlock_poll(channel);
-
-       return rx_packets;
-}
-#endif
-
 /**************************************************************************
  *
  * Kernel net device interface
@@ -2197,6 +2182,8 @@ int efx_net_open(struct net_device *net_dev)
        efx_link_status_changed(efx);
 
        efx_start_all(efx);
+       if (efx->state == STATE_DISABLED || efx->reset_pending)
+               netif_device_detach(efx->net_dev);
        efx_selftest_async_start(efx);
        return 0;
 }
@@ -2219,16 +2206,14 @@ int efx_net_stop(struct net_device *net_dev)
 }
 
 /* Context: process, dev_base_lock or RTNL held, non-blocking. */
-static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
-                                              struct rtnl_link_stats64 *stats)
+static void efx_net_stats(struct net_device *net_dev,
+                         struct rtnl_link_stats64 *stats)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
 
        spin_lock_bh(&efx->stats_lock);
        efx->type->update_stats(efx, NULL, stats);
        spin_unlock_bh(&efx->stats_lock);
-
-       return stats;
 }
 
 /* Context: netif_tx_lock held, BHs disabled. */
@@ -2265,7 +2250,7 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
        mutex_unlock(&efx->mac_lock);
 
        efx_start_all(efx);
-       netif_device_attach(efx->net_dev);
+       efx_device_attach_if_not_resetting(efx);
        return 0;
 }
 
@@ -2336,6 +2321,27 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
        return 0;
 }
 
+static int efx_get_phys_port_id(struct net_device *net_dev,
+                               struct netdev_phys_item_id *ppid)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       if (efx->type->get_phys_port_id)
+               return efx->type->get_phys_port_id(efx, ppid);
+       else
+               return -EOPNOTSUPP;
+}
+
+static int efx_get_phys_port_name(struct net_device *net_dev,
+                                 char *name, size_t len)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       if (snprintf(name, len, "p%u", efx->port_num) >= len)
+               return -EINVAL;
+       return 0;
+}
+
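The >= len comparison in efx_get_phys_port_name() leans on C99 snprintf() semantics: the return value is the length the untruncated output would have needed (excluding the NUL), so a result at or beyond the buffer size signals truncation. Sketch:

        char name[4];

        /* "p1234" needs 6 bytes; snprintf() returns 5, which is
         * >= sizeof(name), so report -EINVAL rather than returning
         * a silently clipped "p12".
         */
        if (snprintf(name, sizeof(name), "p%u", 1234u) >= sizeof(name))
                return -EINVAL;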
 static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
@@ -2356,6 +2362,52 @@ static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vi
                return -EOPNOTSUPP;
 }
 
+static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in)
+{
+       switch (in) {
+       case UDP_TUNNEL_TYPE_VXLAN:
+               return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
+       case UDP_TUNNEL_TYPE_GENEVE:
+               return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
+       default:
+               return -1;
+       }
+}
+
+static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+       struct efx_nic *efx = netdev_priv(dev);
+       struct efx_udp_tunnel tnl;
+       int efx_tunnel_type;
+
+       efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
+       if (efx_tunnel_type < 0)
+               return;
+
+       tnl.type = (u16)efx_tunnel_type;
+       tnl.port = ti->port;
+
+       if (efx->type->udp_tnl_add_port)
+               (void)efx->type->udp_tnl_add_port(efx, tnl);
+}
+
+static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+       struct efx_nic *efx = netdev_priv(dev);
+       struct efx_udp_tunnel tnl;
+       int efx_tunnel_type;
+
+       efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
+       if (efx_tunnel_type < 0)
+               return;
+
+       tnl.type = (u16)efx_tunnel_type;
+       tnl.port = ti->port;
+
+       if (efx->type->udp_tnl_del_port)
+               (void)efx->type->udp_tnl_del_port(efx, tnl);
+}
+
 static const struct net_device_ops efx_netdev_ops = {
        .ndo_open               = efx_net_open,
        .ndo_stop               = efx_net_stop,
@@ -2376,18 +2428,18 @@ static const struct net_device_ops efx_netdev_ops = {
        .ndo_set_vf_spoofchk    = efx_sriov_set_vf_spoofchk,
        .ndo_get_vf_config      = efx_sriov_get_vf_config,
        .ndo_set_vf_link_state  = efx_sriov_set_vf_link_state,
-       .ndo_get_phys_port_id   = efx_sriov_get_phys_port_id,
 #endif
+       .ndo_get_phys_port_id   = efx_get_phys_port_id,
+       .ndo_get_phys_port_name = efx_get_phys_port_name,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = efx_netpoll,
 #endif
        .ndo_setup_tc           = efx_setup_tc,
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       .ndo_busy_poll          = efx_busy_poll,
-#endif
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = efx_filter_rfs,
 #endif
+       .ndo_udp_tunnel_add     = efx_udp_tunnel_add,
+       .ndo_udp_tunnel_del     = efx_udp_tunnel_del,
 };
 
 static void efx_update_name(struct efx_nic *efx)
@@ -2627,6 +2679,9 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
 
        efx_start_all(efx);
 
+       if (efx->type->udp_tnl_push_ports)
+               efx->type->udp_tnl_push_ports(efx);
+
        return 0;
 
 fail:
@@ -2691,7 +2746,7 @@ out:
                efx->state = STATE_DISABLED;
        } else {
                netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
-               netif_device_attach(efx->net_dev);
+               efx_device_attach_if_not_resetting(efx);
        }
        return rc;
 }
@@ -2888,7 +2943,7 @@ static const struct efx_phy_operations efx_dummy_phy_operations = {
 static int efx_init_struct(struct efx_nic *efx,
                           struct pci_dev *pci_dev, struct net_device *net_dev)
 {
-       int i;
+       int rc = -ENOMEM, i;
 
        /* Initialise common structures */
        INIT_LIST_HEAD(&efx->node);
@@ -2929,8 +2984,15 @@ static int efx_init_struct(struct efx_nic *efx,
        }
 
        /* Higher numbered interrupt modes are less capable! */
+       if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
+                        efx->type->min_interrupt_mode)) {
+               rc = -EIO;
+               goto fail;
+       }
        efx->interrupt_mode = max(efx->type->max_interrupt_mode,
                                  interrupt_mode);
+       efx->interrupt_mode = min(efx->type->min_interrupt_mode,
+                                 efx->interrupt_mode);
 
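With EFX_INT_MODE_MSIX = 0 < EFX_INT_MODE_MSI = 1 < EFX_INT_MODE_LEGACY = 2 ("higher numbered = less capable"), the max()/min() pair clamps the user's interrupt_mode module parameter into the window this NIC type supports. Two worked cases against the ef10.c nic_type tables earlier in this diff:

        /* EF10 VF: max = min = EFX_INT_MODE_MSIX (0).  A request for
         * legacy interrupts (2) becomes max(0, 2) = 2, then
         * min(0, 2) = 0: the VF is forced back to MSI-X, the only
         * mode it supports.
         *
         * EF10 PF: max = MSIX (0), min = LEGACY (2).  Any request r
         * in 0..2 survives both clamps unchanged.
         */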
        /* Would be good to use the net_dev name, but we're too early */
        snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
@@ -2943,7 +3005,7 @@ static int efx_init_struct(struct efx_nic *efx,
 
 fail:
        efx_fini_struct(efx);
-       return -ENOMEM;
+       return rc;
 }
 
 static void efx_fini_struct(struct efx_nic *efx)
@@ -3158,6 +3220,51 @@ static int efx_pci_probe_main(struct efx_nic *efx)
        return rc;
 }
 
+static int efx_pci_probe_post_io(struct efx_nic *efx)
+{
+       struct net_device *net_dev = efx->net_dev;
+       int rc = efx_pci_probe_main(efx);
+
+       if (rc)
+               return rc;
+
+       if (efx->type->sriov_init) {
+               rc = efx->type->sriov_init(efx);
+               if (rc)
+                       netif_err(efx, probe, efx->net_dev,
+                                 "SR-IOV can't be enabled rc %d\n", rc);
+       }
+
+       /* Determine netdevice features */
+       net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
+                             NETIF_F_TSO | NETIF_F_RXCSUM);
+       if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
+               net_dev->features |= NETIF_F_TSO6;
+       /* Check whether device supports TSO */
+       if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
+               net_dev->features &= ~NETIF_F_ALL_TSO;
+       /* Mask for features that also apply to VLAN devices */
+       net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
+                                  NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
+                                  NETIF_F_RXCSUM);
+
+       net_dev->hw_features = net_dev->features & ~efx->fixed_features;
+
+       /* Disable VLAN filtering by default.  It may be enforced if
+        * the feature is fixed (i.e. VLAN filters are required to
+        * receive VLAN tagged packets due to vPort restrictions).
+        */
+       net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+       net_dev->features |= efx->fixed_features;
+
+       rc = efx_register_netdev(efx);
+       if (!rc)
+               return 0;
+
+       efx_pci_remove_main(efx);
+       return rc;
+}
+
 /* NIC initialisation
  *
  * This is called at module load (or hotplug insertion,
@@ -3200,42 +3307,28 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
        if (rc)
                goto fail2;
 
-       rc = efx_pci_probe_main(efx);
+       rc = efx_pci_probe_post_io(efx);
+       if (rc) {
+               /* On failure, retry once immediately.
+                * If we aborted probe due to a scheduled reset, dismiss it.
+                */
+               efx->reset_pending = 0;
+               rc = efx_pci_probe_post_io(efx);
+               if (rc) {
+                       /* On another failure, retry once more
+                        * after a 50-305ms delay.
+                        */
+                       unsigned char r;
+
+                       get_random_bytes(&r, 1);
+                       msleep((unsigned int)r + 50);
+                       efx->reset_pending = 0;
+                       rc = efx_pci_probe_post_io(efx);
+               }
+       }
        if (rc)
                goto fail3;
 
-       net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
-                             NETIF_F_TSO | NETIF_F_RXCSUM);
-       if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
-               net_dev->features |= NETIF_F_TSO6;
-       /* Check whether device supports TSO */
-       if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
-               net_dev->features &= ~NETIF_F_ALL_TSO;
-       /* Mask for features that also apply to VLAN devices */
-       net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
-                                  NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
-                                  NETIF_F_RXCSUM);
-
-       net_dev->hw_features = net_dev->features & ~efx->fixed_features;
-
-       /* Disable VLAN filtering by default.  It may be enforced if
-        * the feature is fixed (i.e. VLAN filters are required to
-        * receive VLAN tagged packets due to vPort restrictions).
-        */
-       net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
-       net_dev->features |= efx->fixed_features;
-
-       rc = efx_register_netdev(efx);
-       if (rc)
-               goto fail4;
-
-       if (efx->type->sriov_init) {
-               rc = efx->type->sriov_init(efx);
-               if (rc)
-                       netif_err(efx, probe, efx->net_dev,
-                                 "SR-IOV can't be enabled rc %d\n", rc);
-       }
-
        netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
 
        /* Try to create MTDs, but allow this to fail */
@@ -3252,10 +3345,11 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
                             "PCIE error reporting unavailable (%d).\n",
                             rc);
 
+       if (efx->type->udp_tnl_push_ports)
+               efx->type->udp_tnl_push_ports(efx);
+
        return 0;
 
- fail4:
-       efx_pci_remove_main(efx);
  fail3:
        efx_fini_io(efx);
  fail2:
@@ -3325,7 +3419,7 @@ static int efx_pm_thaw(struct device *dev)
 
                efx_start_all(efx);
 
-               netif_device_attach(efx->net_dev);
+               efx_device_attach_if_not_resetting(efx);
 
                efx->state = STATE_READY;
 
@@ -3585,3 +3679,4 @@ MODULE_AUTHOR("Solarflare Communications and "
 MODULE_DESCRIPTION("Solarflare network driver");
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, efx_pci_table);
+MODULE_VERSION(EFX_DRIVER_VERSION);
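
The probe path above retries up to twice, the second time after a randomised delay derived from a single random byte. A standalone userspace sketch of the same arithmetic, with rand() standing in for get_random_bytes(), shows where the 50-305ms range in the comment comes from:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	unsigned char r;

	srand((unsigned int)time(NULL));
	r = (unsigned char)(rand() & 0xff);	/* stands in for get_random_bytes() */
	/* r is 0..255, so the delay spans 50..305 ms inclusive */
	printf("would sleep %u ms\n", (unsigned int)r + 50);
	return 0;
}
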
index 342ae16e1f2dde07fa03c216ad09c32ac03a82fd..ee14662415c5dfc827a02cb577794af495b0433c 100644 (file)
@@ -276,6 +276,12 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
        netif_tx_unlock_bh(dev);
 }
 
+static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
+{
+       if ((efx->state != STATE_DISABLED) && !efx->reset_pending)
+               netif_device_attach(efx->net_dev);
+}
+
 static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
 {
        if (WARN_ON(down_read_trylock(sem))) {
index 18ebaea44e8257255c6a910958e4b00466600903..3747b564411073350c930305c941df8e713edd8c 100644 (file)
@@ -77,6 +77,11 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
+       EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
+       EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
+       EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
+       EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
+       EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
@@ -1278,15 +1283,29 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
        return (efx->n_rx_channels == 1) ? 0 : ARRAY_SIZE(efx->rx_indir_table);
 }
 
+static u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       return efx->type->rx_hash_key_size;
+}
+
 static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
                                u8 *hfunc)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
+       int rc;
+
+       rc = efx->type->rx_pull_rss_config(efx);
+       if (rc)
+               return rc;
 
        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;
        if (indir)
                memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
+       if (key)
+               memcpy(key, efx->rx_hash_key, efx->type->rx_hash_key_size);
        return 0;
 }
 
@@ -1295,14 +1314,18 @@ static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
 {
        struct efx_nic *efx = netdev_priv(net_dev);
 
-       /* We do not allow change in unsupported parameters */
-       if (key ||
-           (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+       /* Hash function is Toeplitz, cannot be changed */
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
                return -EOPNOTSUPP;
-       if (!indir)
+       if (!indir && !key)
                return 0;
 
-       return efx->type->rx_push_rss_config(efx, true, indir);
+       if (!key)
+               key = efx->rx_hash_key;
+       if (!indir)
+               indir = efx->rx_indir_table;
+
+       return efx->type->rx_push_rss_config(efx, true, indir, key);
 }
 
 static int efx_ethtool_get_ts_info(struct net_device *net_dev,
@@ -1377,6 +1400,7 @@ const struct ethtool_ops efx_ethtool_ops = {
        .get_rxnfc              = efx_ethtool_get_rxnfc,
        .set_rxnfc              = efx_ethtool_set_rxnfc,
        .get_rxfh_indir_size    = efx_ethtool_get_rxfh_indir_size,
+       .get_rxfh_key_size      = efx_ethtool_get_rxfh_key_size,
        .get_rxfh               = efx_ethtool_get_rxfh,
        .set_rxfh               = efx_ethtool_set_rxfh,
        .get_ts_info            = efx_ethtool_get_ts_info,
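
With .get_rxfh_key_size wired up, userspace can discover and then read the RSS hash key through the standard ETHTOOL_GRSSH handshake. A minimal userspace sketch of the size-query step ("eth0" is a placeholder interface name):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxfh rxfh = { .cmd = ETHTOOL_GRSSH };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&rxfh;
	/* indir_size and key_size of 0 ask the kernel for the sizes only */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("indir entries: %u, key bytes: %u\n",
		       rxfh.indir_size, rxfh.key_size);
	close(fd);
	return 0;
}
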
index 5c5cb3c4c12e55ce6e19bf47e70392bf7b9c4348..f5e5cd1659a148fb63ce2078ef13a5ae12d048bc 100644 (file)
@@ -304,9 +304,6 @@ static int ef4_poll(struct napi_struct *napi, int budget)
        struct ef4_nic *efx = channel->efx;
        int spent;
 
-       if (!ef4_channel_lock_napi(channel))
-               return budget;
-
        netif_vdbg(efx, intr, efx->net_dev,
                   "channel %d NAPI poll executing on CPU %d\n",
                   channel->channel, raw_smp_processor_id());
@@ -327,11 +324,10 @@ static int ef4_poll(struct napi_struct *napi, int budget)
                 * since ef4_nic_eventq_read_ack() will have no effect if
                 * interrupts have already been disabled.
                 */
-               napi_complete(napi);
+               napi_complete_done(napi, spent);
                ef4_nic_eventq_read_ack(channel);
        }
 
-       ef4_channel_unlock_napi(channel);
        return spent;
 }
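
This hunk is part of the tree-wide switch to napi_complete_done(), which reports how much of the budget the poll actually consumed so the core can drive features such as gro_flush_timeout and in-core busy polling (replacing the driver-private ndo_busy_poll machinery removed below). A minimal sketch of the resulting poll shape, with illustrative names rather than this driver's code:

#include <linux/netdevice.h>

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;	/* a real poll would process up to budget packets */

	if (work_done < budget) {
		/* report the work done to the core, then re-enable interrupts */
		napi_complete_done(napi, work_done);
		/* device-specific interrupt re-enable would go here */
	}
	return work_done;
}
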
 
@@ -387,7 +383,6 @@ void ef4_start_eventq(struct ef4_channel *channel)
        channel->enabled = true;
        smp_wmb();
 
-       ef4_channel_enable(channel);
        napi_enable(&channel->napi_str);
        ef4_nic_eventq_read_ack(channel);
 }
@@ -399,8 +394,6 @@ void ef4_stop_eventq(struct ef4_channel *channel)
                return;
 
        napi_disable(&channel->napi_str);
-       while (!ef4_channel_disable(channel))
-               usleep_range(1000, 20000);
        channel->enabled = false;
 }
 
@@ -986,7 +979,7 @@ void ef4_mac_reconfigure(struct ef4_nic *efx)
 
 /* Push loopback/power/transmit disable settings to the PHY, and reconfigure
  * the MAC appropriately. All other PHY configuration changes are pushed
- * through phy_op->set_settings(), and pushed asynchronously to the MAC
+ * through phy_op->set_link_ksettings(), and pushed asynchronously to the MAC
  * through ef4_monitor().
  *
  * Callers must hold the mac_lock
@@ -2029,7 +2022,6 @@ static void ef4_init_napi_channel(struct ef4_channel *channel)
        channel->napi_dev = efx->net_dev;
        netif_napi_add(channel->napi_dev, &channel->napi_str,
                       ef4_poll, napi_weight);
-       ef4_channel_busy_poll_init(channel);
 }
 
 static void ef4_init_napi(struct ef4_nic *efx)
@@ -2079,37 +2071,6 @@ static void ef4_netpoll(struct net_device *net_dev)
 
 #endif
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static int ef4_busy_poll(struct napi_struct *napi)
-{
-       struct ef4_channel *channel =
-               container_of(napi, struct ef4_channel, napi_str);
-       struct ef4_nic *efx = channel->efx;
-       int budget = 4;
-       int old_rx_packets, rx_packets;
-
-       if (!netif_running(efx->net_dev))
-               return LL_FLUSH_FAILED;
-
-       if (!ef4_channel_try_lock_poll(channel))
-               return LL_FLUSH_BUSY;
-
-       old_rx_packets = channel->rx_queue.rx_packets;
-       ef4_process_channel(channel, budget);
-
-       rx_packets = channel->rx_queue.rx_packets - old_rx_packets;
-
-       /* There is no race condition with NAPI here.
-        * NAPI will automatically be rescheduled if it yielded during busy
-        * polling, because it was not able to take the lock and thus returned
-        * the full budget.
-        */
-       ef4_channel_unlock_poll(channel);
-
-       return rx_packets;
-}
-#endif
-
 /**************************************************************************
  *
  * Kernel net device interface
@@ -2158,16 +2119,14 @@ int ef4_net_stop(struct net_device *net_dev)
 }
 
 /* Context: process, dev_base_lock or RTNL held, non-blocking. */
-static struct rtnl_link_stats64 *ef4_net_stats(struct net_device *net_dev,
-                                              struct rtnl_link_stats64 *stats)
+static void ef4_net_stats(struct net_device *net_dev,
+                         struct rtnl_link_stats64 *stats)
 {
        struct ef4_nic *efx = netdev_priv(net_dev);
 
        spin_lock_bh(&efx->stats_lock);
        efx->type->update_stats(efx, NULL, stats);
        spin_unlock_bh(&efx->stats_lock);
-
-       return stats;
 }
 
 /* Context: netif_tx_lock held, BHs disabled. */
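
The signature change here follows the kernel-wide conversion of .ndo_get_stats64 to a void return: the core always provides the storage, so the old convention of returning the stats pointer carried no information. After the conversion the hook reads:

void (*ndo_get_stats64)(struct net_device *dev,
			struct rtnl_link_stats64 *storage);
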
@@ -2291,9 +2250,6 @@ static const struct net_device_ops ef4_netdev_ops = {
        .ndo_poll_controller = ef4_netpoll,
 #endif
        .ndo_setup_tc           = ef4_setup_tc,
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       .ndo_busy_poll          = ef4_busy_poll,
-#endif
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = ef4_filter_rfs,
 #endif
@@ -3348,3 +3304,4 @@ MODULE_AUTHOR("Solarflare Communications and "
 MODULE_DESCRIPTION("Solarflare Falcon network driver");
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, ef4_pci_table);
+MODULE_VERSION(EF4_DRIVER_VERSION);
index 8e1929b01a3216d3ca9546c2e77e11e14272f645..56049157a5af660d0a4aa752eb33badc6337bc5a 100644 (file)
@@ -115,44 +115,47 @@ static int ef4_ethtool_phys_id(struct net_device *net_dev,
 }
 
 /* This must be called with rtnl_lock held. */
-static int ef4_ethtool_get_settings(struct net_device *net_dev,
-                                   struct ethtool_cmd *ecmd)
+static int
+ef4_ethtool_get_link_ksettings(struct net_device *net_dev,
+                              struct ethtool_link_ksettings *cmd)
 {
        struct ef4_nic *efx = netdev_priv(net_dev);
        struct ef4_link_state *link_state = &efx->link_state;
 
        mutex_lock(&efx->mac_lock);
-       efx->phy_op->get_settings(efx, ecmd);
+       efx->phy_op->get_link_ksettings(efx, cmd);
        mutex_unlock(&efx->mac_lock);
 
        /* Both MACs support pause frames (bidirectional and respond-only) */
-       ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
 
        if (LOOPBACK_INTERNAL(efx)) {
-               ethtool_cmd_speed_set(ecmd, link_state->speed);
-               ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
+               cmd->base.speed = link_state->speed;
+               cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
        }
 
        return 0;
 }
 
 /* This must be called with rtnl_lock held. */
-static int ef4_ethtool_set_settings(struct net_device *net_dev,
-                                   struct ethtool_cmd *ecmd)
+static int
+ef4_ethtool_set_link_ksettings(struct net_device *net_dev,
+                              const struct ethtool_link_ksettings *cmd)
 {
        struct ef4_nic *efx = netdev_priv(net_dev);
        int rc;
 
        /* GMAC does not support 1000Mbps HD */
-       if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
-           (ecmd->duplex != DUPLEX_FULL)) {
+       if ((cmd->base.speed == SPEED_1000) &&
+           (cmd->base.duplex != DUPLEX_FULL)) {
                netif_dbg(efx, drv, efx->net_dev,
                          "rejecting unsupported 1000Mbps HD setting\n");
                return -EINVAL;
        }
 
        mutex_lock(&efx->mac_lock);
-       rc = efx->phy_op->set_settings(efx, ecmd);
+       rc = efx->phy_op->set_link_ksettings(efx, cmd);
        mutex_unlock(&efx->mac_lock);
        return rc;
 }
@@ -1310,8 +1313,6 @@ static int ef4_ethtool_get_module_info(struct net_device *net_dev,
 }
 
 const struct ethtool_ops ef4_ethtool_ops = {
-       .get_settings           = ef4_ethtool_get_settings,
-       .set_settings           = ef4_ethtool_set_settings,
        .get_drvinfo            = ef4_ethtool_get_drvinfo,
        .get_regs_len           = ef4_ethtool_get_regs_len,
        .get_regs               = ef4_ethtool_get_regs,
@@ -1340,4 +1341,6 @@ const struct ethtool_ops ef4_ethtool_ops = {
        .set_rxfh               = ef4_ethtool_set_rxfh,
        .get_module_info        = ef4_ethtool_get_module_info,
        .get_module_eeprom      = ef4_ethtool_get_module_eeprom,
+       .get_link_ksettings     = ef4_ethtool_get_link_ksettings,
+       .set_link_ksettings     = ef4_ethtool_set_link_ksettings,
 };
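
The get_settings/set_settings pair is replaced by the link_ksettings API, whose link modes live in bitmaps manipulated through helpers rather than SUPPORTED_*/ADVERTISED_* u32 flags, lifting the 32-mode limit. A sketch of a getter in the new style (illustrative names, not this driver's code):

#include <linux/netdevice.h>
#include <linux/ethtool.h>

static int example_get_link_ksettings(struct net_device *net_dev,
				      struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	cmd->base.speed = SPEED_10000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.autoneg = AUTONEG_DISABLE;
	return 0;
}
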
index e7d7c09296aa079361611d9bd6f83d78ee4b4d96..ee0713f03d01f77288b7b866acfc22937a175983 100644 (file)
@@ -226,33 +226,45 @@ void ef4_mdio_set_mmds_lpower(struct ef4_nic *efx,
 }
 
 /**
- * ef4_mdio_set_settings - Set (some of) the PHY settings over MDIO.
+ * ef4_mdio_set_link_ksettings - Set (some of) the PHY settings over MDIO.
  * @efx:               Efx NIC
- * @ecmd:              New settings
+ * @cmd:               New settings
  */
-int ef4_mdio_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
+int ef4_mdio_set_link_ksettings(struct ef4_nic *efx,
+                               const struct ethtool_link_ksettings *cmd)
 {
-       struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET };
-
-       efx->phy_op->get_settings(efx, &prev);
-
-       if (ecmd->advertising == prev.advertising &&
-           ethtool_cmd_speed(ecmd) == ethtool_cmd_speed(&prev) &&
-           ecmd->duplex == prev.duplex &&
-           ecmd->port == prev.port &&
-           ecmd->autoneg == prev.autoneg)
+       struct ethtool_link_ksettings prev = {
+               .base.cmd = ETHTOOL_GLINKSETTINGS
+       };
+       u32 prev_advertising, advertising;
+       u32 prev_supported;
+
+       efx->phy_op->get_link_ksettings(efx, &prev);
+
+       ethtool_convert_link_mode_to_legacy_u32(&advertising,
+                                               cmd->link_modes.advertising);
+       ethtool_convert_link_mode_to_legacy_u32(&prev_advertising,
+                                               prev.link_modes.advertising);
+       ethtool_convert_link_mode_to_legacy_u32(&prev_supported,
+                                               prev.link_modes.supported);
+
+       if (advertising == prev_advertising &&
+           cmd->base.speed == prev.base.speed &&
+           cmd->base.duplex == prev.base.duplex &&
+           cmd->base.port == prev.base.port &&
+           cmd->base.autoneg == prev.base.autoneg)
                return 0;
 
        /* We can only change these settings for -T PHYs */
-       if (prev.port != PORT_TP || ecmd->port != PORT_TP)
+       if (prev.base.port != PORT_TP || cmd->base.port != PORT_TP)
                return -EINVAL;
 
        /* Check that PHY supports these settings */
-       if (!ecmd->autoneg ||
-           (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported)
+       if (!cmd->base.autoneg ||
+           (advertising | SUPPORTED_Autoneg) & ~prev_supported)
                return -EINVAL;
 
-       ef4_link_set_advertising(efx, ecmd->advertising | ADVERTISED_Autoneg);
+       ef4_link_set_advertising(efx, advertising | ADVERTISED_Autoneg);
        ef4_mdio_an_reconfigure(efx);
        return 0;
 }
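
Because this helper still reasons about advertised modes as legacy u32 flag words, the rewrite bridges the bitmap world with the ethtool core's conversion helpers. In isolation the conversion looks like this (sketch; the boolean result reports whether every set bit had a legacy equivalent):

u32 advertising;
bool exact;

exact = ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);
/* exact == false means some modes had no legacy u32 representation */
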
index 885cf7a834a6052f530763f751b945918db1d7cb..53cb5cc4ad376c29813a126030c65e08e9310a08 100644 (file)
@@ -83,7 +83,8 @@ void ef4_mdio_set_mmds_lpower(struct ef4_nic *efx, int low_power,
                              unsigned int mmd_mask);
 
 /* Set (some of) the PHY settings over MDIO */
-int ef4_mdio_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd);
+int ef4_mdio_set_link_ksettings(struct ef4_nic *efx,
+                               const struct ethtool_link_ksettings *cmd);
 
 /* Push advertising flags and restart autonegotiation */
 void ef4_mdio_an_reconfigure(struct ef4_nic *efx);
index 210b28f7d2a1b2bbc295317041e1093cf7682ae5..37a8bdf32206a44b82ff25d4111412062dd04a22 100644 (file)
@@ -448,131 +448,6 @@ struct ef4_channel {
        struct ef4_tx_queue tx_queue[EF4_TXQ_TYPES];
 };
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-enum ef4_channel_busy_poll_state {
-       EF4_CHANNEL_STATE_IDLE = 0,
-       EF4_CHANNEL_STATE_NAPI = BIT(0),
-       EF4_CHANNEL_STATE_NAPI_REQ_BIT = 1,
-       EF4_CHANNEL_STATE_NAPI_REQ = BIT(1),
-       EF4_CHANNEL_STATE_POLL_BIT = 2,
-       EF4_CHANNEL_STATE_POLL = BIT(2),
-       EF4_CHANNEL_STATE_DISABLE_BIT = 3,
-};
-
-static inline void ef4_channel_busy_poll_init(struct ef4_channel *channel)
-{
-       WRITE_ONCE(channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE);
-}
-
-/* Called from the device poll routine to get ownership of a channel. */
-static inline bool ef4_channel_lock_napi(struct ef4_channel *channel)
-{
-       unsigned long prev, old = READ_ONCE(channel->busy_poll_state);
-
-       while (1) {
-               switch (old) {
-               case EF4_CHANNEL_STATE_POLL:
-                       /* Ensure ef4_channel_try_lock_poll() wont starve us */
-                       set_bit(EF4_CHANNEL_STATE_NAPI_REQ_BIT,
-                               &channel->busy_poll_state);
-                       /* fallthrough */
-               case EF4_CHANNEL_STATE_POLL | EF4_CHANNEL_STATE_NAPI_REQ:
-                       return false;
-               default:
-                       break;
-               }
-               prev = cmpxchg(&channel->busy_poll_state, old,
-                              EF4_CHANNEL_STATE_NAPI);
-               if (unlikely(prev != old)) {
-                       /* This is likely to mean we've just entered polling
-                        * state. Go back round to set the REQ bit.
-                        */
-                       old = prev;
-                       continue;
-               }
-               return true;
-       }
-}
-
-static inline void ef4_channel_unlock_napi(struct ef4_channel *channel)
-{
-       /* Make sure write has completed from ef4_channel_lock_napi() */
-       smp_wmb();
-       WRITE_ONCE(channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE);
-}
-
-/* Called from ef4_busy_poll(). */
-static inline bool ef4_channel_try_lock_poll(struct ef4_channel *channel)
-{
-       return cmpxchg(&channel->busy_poll_state, EF4_CHANNEL_STATE_IDLE,
-                       EF4_CHANNEL_STATE_POLL) == EF4_CHANNEL_STATE_IDLE;
-}
-
-static inline void ef4_channel_unlock_poll(struct ef4_channel *channel)
-{
-       clear_bit_unlock(EF4_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
-}
-
-static inline bool ef4_channel_busy_polling(struct ef4_channel *channel)
-{
-       return test_bit(EF4_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
-}
-
-static inline void ef4_channel_enable(struct ef4_channel *channel)
-{
-       clear_bit_unlock(EF4_CHANNEL_STATE_DISABLE_BIT,
-                        &channel->busy_poll_state);
-}
-
-/* Stop further polling or napi access.
- * Returns false if the channel is currently busy polling.
- */
-static inline bool ef4_channel_disable(struct ef4_channel *channel)
-{
-       set_bit(EF4_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state);
-       /* Implicit barrier in ef4_channel_busy_polling() */
-       return !ef4_channel_busy_polling(channel);
-}
-
-#else /* CONFIG_NET_RX_BUSY_POLL */
-
-static inline void ef4_channel_busy_poll_init(struct ef4_channel *channel)
-{
-}
-
-static inline bool ef4_channel_lock_napi(struct ef4_channel *channel)
-{
-       return true;
-}
-
-static inline void ef4_channel_unlock_napi(struct ef4_channel *channel)
-{
-}
-
-static inline bool ef4_channel_try_lock_poll(struct ef4_channel *channel)
-{
-       return false;
-}
-
-static inline void ef4_channel_unlock_poll(struct ef4_channel *channel)
-{
-}
-
-static inline bool ef4_channel_busy_polling(struct ef4_channel *channel)
-{
-       return false;
-}
-
-static inline void ef4_channel_enable(struct ef4_channel *channel)
-{
-}
-
-static inline bool ef4_channel_disable(struct ef4_channel *channel)
-{
-       return true;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 /**
  * struct ef4_msi_context - Context for each MSI
  * @efx: The associated NIC
@@ -684,8 +559,8 @@ static inline bool ef4_link_state_equal(const struct ef4_link_state *left,
  * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
  * @poll: Update @link_state and report whether it changed.
  *     Serialised by the mac_lock.
- * @get_settings: Get ethtool settings. Serialised by the mac_lock.
- * @set_settings: Set ethtool settings. Serialised by the mac_lock.
+ * @get_link_ksettings: Get ethtool settings. Serialised by the mac_lock.
+ * @set_link_ksettings: Set ethtool settings. Serialised by the mac_lock.
  * @set_npage_adv: Set abilities advertised in (Extended) Next Page
  *     (only needed where AN bit is set in mmds)
  * @test_alive: Test that PHY is 'alive' (online)
@@ -700,10 +575,10 @@ struct ef4_phy_operations {
        void (*remove) (struct ef4_nic *efx);
        int (*reconfigure) (struct ef4_nic *efx);
        bool (*poll) (struct ef4_nic *efx);
-       void (*get_settings) (struct ef4_nic *efx,
-                             struct ethtool_cmd *ecmd);
-       int (*set_settings) (struct ef4_nic *efx,
-                            struct ethtool_cmd *ecmd);
+       void (*get_link_ksettings)(struct ef4_nic *efx,
+                                  struct ethtool_link_ksettings *cmd);
+       int (*set_link_ksettings)(struct ef4_nic *efx,
+                                 const struct ethtool_link_ksettings *cmd);
        void (*set_npage_adv) (struct ef4_nic *efx, u32);
        int (*test_alive) (struct ef4_nic *efx);
        const char *(*test_name) (struct ef4_nic *efx, unsigned int index);
index d293316525488692cf2bbea445cdf80b467736ef..f5e0f18d4ea8879db101d8a5d4a237826241f95c 100644 (file)
@@ -437,9 +437,10 @@ static int qt202x_phy_reconfigure(struct ef4_nic *efx)
        return 0;
 }
 
-static void qt202x_phy_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
+static void qt202x_phy_get_link_ksettings(struct ef4_nic *efx,
+                                         struct ethtool_link_ksettings *cmd)
 {
-       mdio45_ethtool_gset(&efx->mdio, ecmd);
+       mdio45_ethtool_ksettings_get(&efx->mdio, cmd);
 }
 
 static void qt202x_phy_remove(struct ef4_nic *efx)
@@ -487,8 +488,8 @@ const struct ef4_phy_operations falcon_qt202x_phy_ops = {
        .poll            = qt202x_phy_poll,
        .fini            = ef4_port_dummy_op_void,
        .remove          = qt202x_phy_remove,
-       .get_settings    = qt202x_phy_get_settings,
-       .set_settings    = ef4_mdio_set_settings,
+       .get_link_ksettings = qt202x_phy_get_link_ksettings,
+       .set_link_ksettings = ef4_mdio_set_link_ksettings,
        .test_alive      = ef4_mdio_test_alive,
        .get_module_eeprom = qt202x_phy_get_module_eeprom,
        .get_module_info = qt202x_phy_get_module_info,
index 250458cbdb4dcdd3700bdf520cee8c9865c4abdb..6a8406dc0c2b47dbdb4c2466e028ff04fd35e1c9 100644 (file)
@@ -674,8 +674,7 @@ void __ef4_rx_packet(struct ef4_channel *channel)
        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EF4_RX_PKT_CSUMMED;
 
-       if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb &&
-           !ef4_channel_busy_polling(channel))
+       if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb)
                ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
        else
                ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
index acc548a1c4d616b7431d2f4e76e420abd51d3749..ff9b4e2b590c096d5101dee2fc7621ea7903a9d5 100644 (file)
@@ -351,9 +351,6 @@ static int tenxpress_phy_reconfigure(struct ef4_nic *efx)
        return 0;
 }
 
-static void
-tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd);
-
 /* Poll for link state changes */
 static bool tenxpress_phy_poll(struct ef4_nic *efx)
 {
@@ -443,7 +440,8 @@ sfx7101_run_tests(struct ef4_nic *efx, int *results, unsigned flags)
 }
 
 static void
-tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
+tenxpress_get_link_ksettings(struct ef4_nic *efx,
+                            struct ethtool_link_ksettings *cmd)
 {
        u32 adv = 0, lpa = 0;
        int reg;
@@ -455,20 +453,22 @@ tenxpress_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
        if (reg & MDIO_AN_10GBT_STAT_LP10G)
                lpa |= ADVERTISED_10000baseT_Full;
 
-       mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
+       mdio45_ethtool_ksettings_get_npage(&efx->mdio, cmd, adv, lpa);
 
        /* In loopback, the PHY automatically brings up the correct interface,
         * but doesn't advertise the correct speed. So override it */
        if (LOOPBACK_EXTERNAL(efx))
-               ethtool_cmd_speed_set(ecmd, SPEED_10000);
+               cmd->base.speed = SPEED_10000;
 }
 
-static int tenxpress_set_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
+static int
+tenxpress_set_link_ksettings(struct ef4_nic *efx,
+                            const struct ethtool_link_ksettings *cmd)
 {
-       if (!ecmd->autoneg)
+       if (!cmd->base.autoneg)
                return -EINVAL;
 
-       return ef4_mdio_set_settings(efx, ecmd);
+       return ef4_mdio_set_link_ksettings(efx, cmd);
 }
 
 static void sfx7101_set_npage_adv(struct ef4_nic *efx, u32 advertising)
@@ -485,8 +485,8 @@ const struct ef4_phy_operations falcon_sfx7101_phy_ops = {
        .poll             = tenxpress_phy_poll,
        .fini             = sfx7101_phy_fini,
        .remove           = tenxpress_phy_remove,
-       .get_settings     = tenxpress_get_settings,
-       .set_settings     = tenxpress_set_settings,
+       .get_link_ksettings = tenxpress_get_link_ksettings,
+       .set_link_ksettings = tenxpress_set_link_ksettings,
        .set_npage_adv    = sfx7101_set_npage_adv,
        .test_alive       = ef4_mdio_test_alive,
        .test_name        = sfx7101_test_name,
index 18421f5e880f038db24ee6b7465cd878ca1ef7b6..3c55fd23c271a6420596cfa84d0740c73d500965 100644 (file)
@@ -540,9 +540,10 @@ static int txc43128_run_tests(struct ef4_nic *efx, int *results, unsigned flags)
        return rc;
 }
 
-static void txc43128_get_settings(struct ef4_nic *efx, struct ethtool_cmd *ecmd)
+static void txc43128_get_link_ksettings(struct ef4_nic *efx,
+                                       struct ethtool_link_ksettings *cmd)
 {
-       mdio45_ethtool_gset(&efx->mdio, ecmd);
+       mdio45_ethtool_ksettings_get(&efx->mdio, cmd);
 }
 
 const struct ef4_phy_operations falcon_txc_phy_ops = {
@@ -552,8 +553,8 @@ const struct ef4_phy_operations falcon_txc_phy_ops = {
        .poll           = txc43128_phy_poll,
        .fini           = txc43128_phy_fini,
        .remove         = txc43128_phy_remove,
-       .get_settings   = txc43128_get_settings,
-       .set_settings   = ef4_mdio_set_settings,
+       .get_link_ksettings = txc43128_get_link_ksettings,
+       .set_link_ksettings = ef4_mdio_set_link_ksettings,
        .test_alive     = ef4_mdio_test_alive,
        .run_tests      = txc43128_run_tests,
        .test_name      = txc43128_test_name,
index e4ca2161af7096658c1b4b05803e2b603af05ddd..ba45150f53c7a478c447d82be07f6d3174f0222d 100644 (file)
@@ -1649,6 +1649,22 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx)
        }
 }
 
+void efx_farch_rx_pull_indir_table(struct efx_nic *efx)
+{
+       size_t i = 0;
+       efx_dword_t dword;
+
+       BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+                    FR_BZ_RX_INDIRECTION_TBL_ROWS);
+
+       for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
+               efx_readd(efx, &dword,
+                          FR_BZ_RX_INDIRECTION_TBL +
+                          FR_BZ_RX_INDIRECTION_TBL_STEP * i);
+               efx->rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE);
+       }
+}
+
 /* Looks at available SRAM resources and works out how many queues we
  * can support, and where things like descriptor caches should live.
  *
index d0ed7f71ea7e25145bb4cf12395154efc587c3a0..8189a1cd973fd5ee44d5b70a1ce798301558aa7b 100644 (file)
@@ -27,6 +27,7 @@
  * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
  * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
  * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
 *     Used for RX default unicast and multicast/broadcast filters.
+ * @EFX_FILTER_MATCH_ENCAP_TYPE: Match by encapsulation type.
  *
  * Only some combinations are supported, depending on NIC type:
@@ -54,6 +55,7 @@ enum efx_filter_match_flags {
        EFX_FILTER_MATCH_OUTER_VID =    0x0100,
        EFX_FILTER_MATCH_IP_PROTO =     0x0200,
        EFX_FILTER_MATCH_LOC_MAC_IG =   0x0400,
+       EFX_FILTER_MATCH_ENCAP_TYPE =   0x0800,
 };
 
 /**
@@ -98,6 +100,27 @@ enum efx_filter_flags {
        EFX_FILTER_FLAG_TX = 0x10,
 };
 
+/**
+ * enum efx_encap_type - types of encapsulation
+ * @EFX_ENCAP_TYPE_NONE: no encapsulation
+ * @EFX_ENCAP_TYPE_VXLAN: VXLAN encapsulation
+ * @EFX_ENCAP_TYPE_NVGRE: NVGRE encapsulation
+ * @EFX_ENCAP_TYPE_GENEVE: GENEVE encapsulation
+ * @EFX_ENCAP_FLAG_IPV6: indicates IPv6 outer frame
+ *
+ * Contains both enumerated types and flags.
+ * To get just the type, AND with @EFX_ENCAP_TYPES_MASK.
+ */
+enum efx_encap_type {
+       EFX_ENCAP_TYPE_NONE = 0,
+       EFX_ENCAP_TYPE_VXLAN = 1,
+       EFX_ENCAP_TYPE_NVGRE = 2,
+       EFX_ENCAP_TYPE_GENEVE = 3,
+
+       EFX_ENCAP_TYPES_MASK = 7,
+       EFX_ENCAP_FLAG_IPV6 = 8,
+};
+
 /**
  * struct efx_filter_spec - specification for a hardware filter
  * @match_flags: Match type flags, from &enum efx_filter_match_flags
@@ -118,6 +140,8 @@ enum efx_filter_flags {
  * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
  * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
  * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
+ * @encap_type: Encapsulation type to match (from &enum efx_encap_type), if
+ *     %EFX_FILTER_MATCH_ENCAP_TYPE is set
  *
  * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
  * used to initialise the structure.  The efx_filter_set_*() functions
@@ -144,7 +168,8 @@ struct efx_filter_spec {
        __be32  rem_host[4];
        __be16  loc_port;
        __be16  rem_port;
-       /* total 64 bytes */
+       u32     encap_type:4;
+       /* total 65 bytes */
 };
 
 enum {
@@ -269,4 +294,18 @@ static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
        return 0;
 }
 
+static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec,
+                                            enum efx_encap_type encap_type)
+{
+       spec->match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+       spec->encap_type = encap_type;
+}
+
+static inline enum efx_encap_type efx_filter_get_encap_type(
+               const struct efx_filter_spec *spec)
+{
+       if (spec->match_flags & EFX_FILTER_MATCH_ENCAP_TYPE)
+               return spec->encap_type;
+       return EFX_ENCAP_TYPE_NONE;
+}
 #endif /* EFX_FILTER_H */
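
The enum packs a 3-bit type with an IPv6 flag in bit 3, so extracting the type is a masking operation (hence the AND in the comment above). A standalone worked example using stand-in names that mirror the enum:

#include <assert.h>

enum example_encap_type {
	EXAMPLE_ENCAP_TYPE_VXLAN = 1,
	EXAMPLE_ENCAP_TYPES_MASK = 7,
	EXAMPLE_ENCAP_FLAG_IPV6 = 8,
};

int main(void)
{
	unsigned int et = EXAMPLE_ENCAP_TYPE_VXLAN | EXAMPLE_ENCAP_FLAG_IPV6;

	/* AND with the mask recovers the type; the flag bit is separate */
	assert((et & EXAMPLE_ENCAP_TYPES_MASK) == EXAMPLE_ENCAP_TYPE_VXLAN);
	assert(et & EXAMPLE_ENCAP_FLAG_IPV6);
	return 0;
}
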
index 995651341b94d658f6f50f01c80356cd029ee600..b9422450deb8e91b4a34bee26e8648d6e3a6ac8b 100644 (file)
@@ -128,7 +128,7 @@ fail:
        return rc;
 }
 
-void efx_mcdi_fini(struct efx_nic *efx)
+void efx_mcdi_detach(struct efx_nic *efx)
 {
        if (!efx->mcdi)
                return;
@@ -137,6 +137,12 @@ void efx_mcdi_fini(struct efx_nic *efx)
 
        /* Relinquish the device (back to the BMC, if this is a LOM) */
        efx_mcdi_drv_attach(efx, false, NULL);
+}
+
+void efx_mcdi_fini(struct efx_nic *efx)
+{
+       if (!efx->mcdi)
+               return;
 
 #ifdef CONFIG_SFC_MCDI_LOGGING
        free_page((unsigned long)efx->mcdi->iface.logging_buffer);
@@ -716,8 +722,11 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
                if (cmd == MC_CMD_REBOOT && rc == -EIO) {
                        /* Don't reset if MC_CMD_REBOOT returns EIO */
                } else if (rc == -EIO || rc == -EINTR) {
-                       netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
-                                 -rc);
+                       netif_err(efx, hw, efx->net_dev, "MC reboot detected\n");
+                       netif_dbg(efx, hw, efx->net_dev, "MC rebooted during command %d rc %d\n",
+                                 cmd, -rc);
+                       if (efx->type->mcdi_reboot_detected)
+                               efx->type->mcdi_reboot_detected(efx);
                        efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
                } else if (proxy_handle && (rc == -EPROTO) &&
                           efx_mcdi_get_proxy_handle(efx, hdr_len, data_len,
@@ -837,11 +846,9 @@ static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
                                                  outbuf, outlen, outlen_actual,
                                                  quiet, NULL, raw_rc);
                } else {
-                       netif_printk(efx, hw,
-                                    rc == -EPERM ? KERN_DEBUG : KERN_ERR,
-                                    efx->net_dev,
-                                    "MC command 0x%x failed after proxy auth rc=%d\n",
-                                    cmd, rc);
+                       netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
+                                      "MC command 0x%x failed after proxy auth rc=%d\n",
+                                      cmd, rc);
 
                        if (rc == -EINTR || rc == -EIO)
                                efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
@@ -1084,10 +1091,9 @@ void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
                code = MCDI_DWORD(outbuf, ERR_CODE);
        if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
                err_arg = MCDI_DWORD(outbuf, ERR_ARG);
-       netif_printk(efx, hw, rc == -EPERM ? KERN_DEBUG : KERN_ERR,
-                    efx->net_dev,
-                    "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
-                    cmd, inlen, rc, code, err_arg);
+       netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
+                      "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
+                      cmd, inlen, rc, code, err_arg);
 }
 
 /* Switch to polled MCDI completions.  This can be called in various
@@ -2057,8 +2063,8 @@ fail:
        /* Older firmware lacks GET_WORKAROUNDS and this isn't especially
         * terrifying.  The call site will have to deal with it though.
         */
-       netif_printk(efx, hw, rc == -ENOSYS ? KERN_DEBUG : KERN_ERR,
-                    efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+       netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err,
+                      "%s: failed rc=%d\n", __func__, rc);
        return rc;
 }
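
netif_cond_dbg(priv, type, netdev, cond, level, fmt, ...) logs at debug level when cond is true and at the named level otherwise, collapsing the old netif_printk() pattern with its explicit KERN_DEBUG/KERN_ERR selection into one call. An illustrative use, not tied to a particular call site:

/* debug for the expected -EPERM case, error for anything else */
netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
	       "MC command 0x%x failed rc=%d\n", cmd, rc);
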
 
index 4472107ca8c144ee611a3bf9dc0b6982d45d232e..154ef41d19275ded25c7b34a84ac8769b99be013 100644 (file)
@@ -142,6 +142,7 @@ static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
 #endif
 
 int efx_mcdi_init(struct efx_nic *efx);
+void efx_mcdi_detach(struct efx_nic *efx);
 void efx_mcdi_fini(struct efx_nic *efx);
 
 int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
index 35cc3d4fa5f692a3ae58503ecd94c995b260fc0e..47ced8a898ca305885136acc4c54dd8b95786a11 100644 (file)
 #define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
 #define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
 
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define    TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT  0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT  0x17c1
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: VXLAN */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN  0x0
+/* enum: Geneve */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE  0x1
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
+
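
Each TUNNEL_ENCAP_UDP_PORT_ENTRY is a single 32-bit word: the UDP port occupies bits 0-15 and the protocol bits 16-31, per the _LBN/_WIDTH values above. A standalone sketch of packing one entry:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t udp_port = 0x12b5;	/* IANA VXLAN port, 4789 */
	uint16_t protocol = 0x0;	/* VXLAN */
	uint32_t entry = (uint32_t)udp_port | ((uint32_t)protocol << 16);

	printf("entry = 0x%08x\n", entry);	/* 0x000012b5 */
	return 0;
}
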
 
 /***********************************/
 /* MC_CMD_RX_BALANCING
index 1c62c1a00fca49679cb8a69a1ac1a5564be9868d..c0537ea06c9ac495803f19266e2cd26c5b12a097 100644 (file)
@@ -208,6 +208,12 @@ struct efx_tx_buffer {
  * @write_count: Current write pointer
  *     This is the number of buffers that have been added to the
  *     hardware ring.
+ * @packet_write_count: Completable write pointer
+ *     This is the write pointer of the last packet written.
+ *     Normally this will equal @write_count, but as option descriptors
+ *     don't produce completion events, they won't update this.
+ *     Filled in iff @efx->type->option_descriptors; only used for PIO.
+ *     Thus, this is written and used on EF10, and not at all on farch.
  * @old_read_count: The value of read_count when last checked.
  *     This is here for performance reasons.  The xmit path will
  *     only get the up-to-date value of read_count if this
@@ -255,6 +261,7 @@ struct efx_tx_queue {
        /* Members used only on the xmit path */
        unsigned int insert_count ____cacheline_aligned_in_smp;
        unsigned int write_count;
+       unsigned int packet_write_count;
        unsigned int old_read_count;
        unsigned int tso_bursts;
        unsigned int tso_long_headers;
@@ -300,6 +307,7 @@ struct efx_rx_buffer {
 #define EFX_RX_PKT_DISCARD     0x0004
 #define EFX_RX_PKT_TCP         0x0040
 #define EFX_RX_PKT_PREFIX_LEN  0x0080  /* length is in prefix only */
+#define EFX_RX_PKT_CSUM_LEVEL  0x0200
 
 /**
  * struct efx_rx_page_state - Page-based rx buffer state
@@ -462,13 +470,18 @@ struct efx_channel {
        u32 *rps_flow_id;
 #endif
 
-       unsigned n_rx_tobe_disc;
-       unsigned n_rx_ip_hdr_chksum_err;
-       unsigned n_rx_tcp_udp_chksum_err;
-       unsigned n_rx_mcast_mismatch;
-       unsigned n_rx_frm_trunc;
-       unsigned n_rx_overlength;
-       unsigned n_skbuff_leaks;
+       unsigned int n_rx_tobe_disc;
+       unsigned int n_rx_ip_hdr_chksum_err;
+       unsigned int n_rx_tcp_udp_chksum_err;
+       unsigned int n_rx_outer_ip_hdr_chksum_err;
+       unsigned int n_rx_outer_tcp_udp_chksum_err;
+       unsigned int n_rx_inner_ip_hdr_chksum_err;
+       unsigned int n_rx_inner_tcp_udp_chksum_err;
+       unsigned int n_rx_eth_crc_err;
+       unsigned int n_rx_mcast_mismatch;
+       unsigned int n_rx_frm_trunc;
+       unsigned int n_rx_overlength;
+       unsigned int n_skbuff_leaks;
        unsigned int n_rx_nodesc_trunc;
        unsigned int n_rx_merge_events;
        unsigned int n_rx_merge_packets;
@@ -484,131 +497,6 @@ struct efx_channel {
        u32 sync_timestamp_minor;
 };
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-enum efx_channel_busy_poll_state {
-       EFX_CHANNEL_STATE_IDLE = 0,
-       EFX_CHANNEL_STATE_NAPI = BIT(0),
-       EFX_CHANNEL_STATE_NAPI_REQ_BIT = 1,
-       EFX_CHANNEL_STATE_NAPI_REQ = BIT(1),
-       EFX_CHANNEL_STATE_POLL_BIT = 2,
-       EFX_CHANNEL_STATE_POLL = BIT(2),
-       EFX_CHANNEL_STATE_DISABLE_BIT = 3,
-};
-
-static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
-{
-       WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
-}
-
-/* Called from the device poll routine to get ownership of a channel. */
-static inline bool efx_channel_lock_napi(struct efx_channel *channel)
-{
-       unsigned long prev, old = READ_ONCE(channel->busy_poll_state);
-
-       while (1) {
-               switch (old) {
-               case EFX_CHANNEL_STATE_POLL:
-                       /* Ensure efx_channel_try_lock_poll() wont starve us */
-                       set_bit(EFX_CHANNEL_STATE_NAPI_REQ_BIT,
-                               &channel->busy_poll_state);
-                       /* fallthrough */
-               case EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_REQ:
-                       return false;
-               default:
-                       break;
-               }
-               prev = cmpxchg(&channel->busy_poll_state, old,
-                              EFX_CHANNEL_STATE_NAPI);
-               if (unlikely(prev != old)) {
-                       /* This is likely to mean we've just entered polling
-                        * state. Go back round to set the REQ bit.
-                        */
-                       old = prev;
-                       continue;
-               }
-               return true;
-       }
-}
-
-static inline void efx_channel_unlock_napi(struct efx_channel *channel)
-{
-       /* Make sure write has completed from efx_channel_lock_napi() */
-       smp_wmb();
-       WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
-}
-
-/* Called from efx_busy_poll(). */
-static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
-{
-       return cmpxchg(&channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE,
-                       EFX_CHANNEL_STATE_POLL) == EFX_CHANNEL_STATE_IDLE;
-}
-
-static inline void efx_channel_unlock_poll(struct efx_channel *channel)
-{
-       clear_bit_unlock(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
-}
-
-static inline bool efx_channel_busy_polling(struct efx_channel *channel)
-{
-       return test_bit(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
-}
-
-static inline void efx_channel_enable(struct efx_channel *channel)
-{
-       clear_bit_unlock(EFX_CHANNEL_STATE_DISABLE_BIT,
-                        &channel->busy_poll_state);
-}
-
-/* Stop further polling or napi access.
- * Returns false if the channel is currently busy polling.
- */
-static inline bool efx_channel_disable(struct efx_channel *channel)
-{
-       set_bit(EFX_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state);
-       /* Implicit barrier in efx_channel_busy_polling() */
-       return !efx_channel_busy_polling(channel);
-}
-
-#else /* CONFIG_NET_RX_BUSY_POLL */
-
-static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
-{
-}
-
-static inline bool efx_channel_lock_napi(struct efx_channel *channel)
-{
-       return true;
-}
-
-static inline void efx_channel_unlock_napi(struct efx_channel *channel)
-{
-}
-
-static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
-{
-       return false;
-}
-
-static inline void efx_channel_unlock_poll(struct efx_channel *channel)
-{
-}
-
-static inline bool efx_channel_busy_polling(struct efx_channel *channel)
-{
-       return false;
-}
-
-static inline void efx_channel_enable(struct efx_channel *channel)
-{
-}
-
-static inline bool efx_channel_disable(struct efx_channel *channel)
-{
-       return true;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 /**
  * struct efx_msi_context - Context for each MSI
  * @efx: The associated NIC
@@ -666,6 +554,8 @@ extern const unsigned int efx_reset_type_max;
 #define RESET_TYPE(type) \
        STRING_TABLE_LOOKUP(type, efx_reset_type)
 
+void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen);
+
 enum efx_int_mode {
        /* Be careful if altering to correct macro below */
        EFX_INT_MODE_MSIX = 0,
@@ -1105,6 +995,15 @@ struct efx_mtd_partition {
        char name[IFNAMSIZ + 20];
 };
 
+struct efx_udp_tunnel {
+       u16 type; /* TUNNEL_ENCAP_UDP_PORT_ENTRY_foo, see mcdi_pcol.h */
+       __be16 port;
+       /* Count of repeated adds of the same port.  Used only inside the list,
+        * not in request arguments.
+        */
+       u16 count;
+};
+
 /**
  * struct efx_nic_type - Efx device type definition
  * @mem_bar: Get the memory BAR
@@ -1174,6 +1073,7 @@ struct efx_mtd_partition {
  * @tx_remove: Free resources for TX queue
  * @tx_write: Write TX descriptors and doorbell
  * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
+ * @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC
  * @rx_probe: Allocate resources for RX queue
  * @rx_init: Initialise RX queue on the NIC
  * @rx_remove: Free resources for RX queue
@@ -1220,9 +1120,14 @@ struct efx_mtd_partition {
  * @ptp_set_ts_config: Set hardware timestamp configuration.  The flags
  *     and tx_type will already have been validated but this operation
  *     must validate and update rx_filter.
+ * @get_phys_port_id: Get the underlying physical port id.
  * @set_mac_address: Set the MAC address of the device
  * @tso_versions: Returns mask of firmware-assisted TSO versions supported.
  *     If %NULL, then device does not support any TSO version.
+ * @udp_tnl_push_ports: Push the list of UDP tunnel ports to the NIC if required.
+ * @udp_tnl_add_port: Add a UDP tunnel port
+ * @udp_tnl_has_port: Check if a port has been added as UDP tunnel
+ * @udp_tnl_del_port: Remove a UDP tunnel port
  * @revision: Hardware architecture revision
  * @txd_ptr_tbl_base: TX descriptor ring base address
  * @rxd_ptr_tbl_base: RX descriptor ring base address
@@ -1236,8 +1141,11 @@ struct efx_mtd_partition {
  * @rx_buffer_padding: Size of padding at end of RX packet
  * @can_rx_scatter: NIC is able to scatter packets to multiple buffers
  * @always_rx_scatter: NIC will always scatter packets to multiple buffers
+ * @option_descriptors: NIC supports TX option descriptors
+ * @min_interrupt_mode: Lowest capability interrupt mode supported
+ *     from &enum efx_int_mode.
  * @max_interrupt_mode: Highest capability interrupt mode supported
- *     from &enum efx_init_mode.
+ *     from &enum efx_int_mode.
  * @timer_period_max: Maximum period of interrupt timer (in ticks)
  * @offload_features: net_device feature flags for protocol offload
  *     features implemented in hardware
@@ -1302,7 +1210,8 @@ struct efx_nic_type {
        unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue,
                                     dma_addr_t dma_addr, unsigned int len);
        int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
-                                 const u32 *rx_indir_table);
+                                 const u32 *rx_indir_table, const u8 *key);
+       int (*rx_pull_rss_config)(struct efx_nic *efx);
        int (*rx_probe)(struct efx_rx_queue *rx_queue);
        void (*rx_init)(struct efx_rx_queue *rx_queue);
        void (*rx_remove)(struct efx_rx_queue *rx_queue);
@@ -1358,6 +1267,8 @@ struct efx_nic_type {
        int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
        int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
        int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
+       int (*get_phys_port_id)(struct efx_nic *efx,
+                               struct netdev_phys_item_id *ppid);
        int (*sriov_init)(struct efx_nic *efx);
        void (*sriov_fini)(struct efx_nic *efx);
        bool (*sriov_wanted)(struct efx_nic *efx);
@@ -1372,14 +1283,16 @@ struct efx_nic_type {
                                   struct ifla_vf_info *ivi);
        int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i,
                                       int link_state);
-       int (*sriov_get_phys_port_id)(struct efx_nic *efx,
-                                     struct netdev_phys_item_id *ppid);
        int (*vswitching_probe)(struct efx_nic *efx);
        int (*vswitching_restore)(struct efx_nic *efx);
        void (*vswitching_remove)(struct efx_nic *efx);
        int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr);
        int (*set_mac_address)(struct efx_nic *efx);
        u32 (*tso_versions)(struct efx_nic *efx);
+       int (*udp_tnl_push_ports)(struct efx_nic *efx);
+       int (*udp_tnl_add_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl);
+       bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port);
+       int (*udp_tnl_del_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl);
 
        int revision;
        unsigned int txd_ptr_tbl_base;
@@ -1394,12 +1307,15 @@ struct efx_nic_type {
        unsigned int rx_buffer_padding;
        bool can_rx_scatter;
        bool always_rx_scatter;
+       bool option_descriptors;
+       unsigned int min_interrupt_mode;
        unsigned int max_interrupt_mode;
        unsigned int timer_period_max;
        netdev_features_t offload_features;
        int mcdi_max_ver;
        unsigned int max_rx_ip_filters;
        u32 hwtstamp_filters;
+       unsigned int rx_hash_key_size;
 };
 
 /**************************************************************************
index 223774635cbabf23a439cb90a983427b0f0fef38..7b916aa21bdef18896debc4bdd7384696e50f3a3 100644 (file)
@@ -85,6 +85,17 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
        return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
 }
 
+/* Report whether the NIC considers this TX queue empty, using
+ * packet_write_count (the write count recorded for the last completable
+ * doorbell push).  May return a false negative.  EF10 only, which is OK
+ * because only EF10 supports PIO.
+ */
+static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
+{
+       EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors);
+       return __efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count);
+}
+
 /* Decide whether we can use TX PIO, ie. write packet data directly into
  * a buffer on the device.  This can reduce latency at the expense of
  * throughput, so we only do this if both hardware and software TX rings
@@ -94,9 +105,9 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
 static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue)
 {
        struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue);
-       return tx_queue->piobuf &&
-              __efx_nic_tx_is_empty(tx_queue, tx_queue->insert_count) &&
-              __efx_nic_tx_is_empty(partner, partner->insert_count);
+
+       return tx_queue->piobuf && efx_nic_tx_is_empty(tx_queue) &&
+              efx_nic_tx_is_empty(partner);
 }
 
 /* Decide whether to push a TX descriptor to the NIC vs merely writing
@@ -332,6 +343,7 @@ enum {
  * @pio_write_base: Base address for writing PIO buffers
  * @pio_write_vi_base: Relative VI number for @pio_write_base
  * @piobuf_handle: Handle of each PIO buffer allocated
+ * @piobuf_size: size of a single PIO buffer
  * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
  *     reboot
  * @rx_rss_context: Firmware handle for our RSS context
@@ -357,6 +369,10 @@ enum {
  * @vport_mac: The MAC address on the vport, only for PFs; VFs will be zero
  * @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock.
  * @vlan_lock: Lock to serialize access to vlan_list.
+ * @udp_tunnels: UDP tunnel port numbers and types.
+ * @udp_tunnels_dirty: flag indicating a reboot occurred while pushing
+ *     @udp_tunnels to hardware and thus the push must be re-done.
+ * @udp_tunnels_lock: Serialises writes to @udp_tunnels and @udp_tunnels_dirty.
  */
 struct efx_ef10_nic_data {
        struct efx_buffer mcdi_buf;
@@ -369,6 +385,7 @@ struct efx_ef10_nic_data {
        void __iomem *wc_membase, *pio_write_base;
        unsigned int pio_write_vi_base;
        unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
+       u16 piobuf_size;
        bool must_restore_piobufs;
        u32 rx_rss_context;
        bool rx_rss_context_exclusive;
@@ -392,6 +409,9 @@ struct efx_ef10_nic_data {
        u8 vport_mac[ETH_ALEN];
        struct list_head vlan_list;
        struct mutex vlan_lock;
+       struct efx_udp_tunnel udp_tunnels[16];
+       bool udp_tunnels_dirty;
+       struct mutex udp_tunnels_lock;
 };
 
 int efx_init_sriov(void);
@@ -613,6 +633,7 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
 void efx_farch_init_common(struct efx_nic *efx);
 void efx_ef10_handle_drain_event(struct efx_nic *efx);
 void efx_farch_rx_push_indir_table(struct efx_nic *efx);
+void efx_farch_rx_pull_indir_table(struct efx_nic *efx);
 
 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
                         unsigned int len, gfp_t gfp_flags);
index 5f4ad4f3518f22ad773e9af4767f065268d7e981..42443f434569b22865a450b0a3e249753fe86d76 100644 (file)
@@ -434,6 +434,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
                             PKT_HASH_TYPE_L3);
        skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
                          CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+       skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
 
        for (;;) {
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
@@ -621,8 +622,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
 
        /* Set the SKB flags */
        skb_checksum_none_assert(skb);
-       if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
+       if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
+               skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
+       }
 
        efx_rx_skb_attach_timestamp(channel, skb);
 
@@ -665,8 +668,7 @@ void __efx_rx_packet(struct efx_channel *channel)
        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
 
-       if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb &&
-           !efx_channel_busy_polling(channel))
+       if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
                efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
        else
                efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
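
For the two csum_level assignments above: with CHECKSUM_UNNECESSARY, skb->csum_level is the number of consecutive checksums the hardware verified minus one (per the skbuff.h definition), so the !! collapses the EFX_RX_PKT_CSUM_LEVEL flag to 0 or 1. Illustrative values, assuming a VXLAN-style encapsulated frame:

    skb->ip_summed  = CHECKSUM_UNNECESSARY;
    skb->csum_level = 1;    /* outer and inner checksums both verified */
    /* csum_level = 0 would mean only the outermost checksum was checked */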
index cd38b44ae23af330bca1c1b49d60a89b26438814..dab286a337a6bdf21a02387e093ed81516fb427a 100644 (file)
@@ -768,7 +768,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
        __efx_reconfigure_port(efx);
        mutex_unlock(&efx->mac_lock);
 
-       netif_device_attach(efx->net_dev);
+       efx_device_attach_if_not_resetting(efx);
 
        return rc_test;
 }
index 4e54e5dc9fcb49bf03667843d47a4a2cf1aa978b..a617f657eae35fa710e777d524c9b33708f3bdee 100644 (file)
@@ -326,18 +326,40 @@ fail5:
        efx_nic_free_buffer(efx, &efx->irq_status);
 fail4:
 fail3:
+       efx_mcdi_detach(efx);
        efx_mcdi_fini(efx);
 fail1:
        kfree(efx->nic_data);
        return rc;
 }
 
+static int siena_rx_pull_rss_config(struct efx_nic *efx)
+{
+       efx_oword_t temp;
+
+       /* Read from IPv6 RSS key as that's longer (the IPv4 key is just the
+        * first 128 bits of the same key, assuming it's been set by
+        * siena_rx_push_rss_config, below)
+        */
+       efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
+       memcpy(efx->rx_hash_key, &temp, sizeof(temp));
+       efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
+       memcpy(efx->rx_hash_key + sizeof(temp), &temp, sizeof(temp));
+       efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+       memcpy(efx->rx_hash_key + 2 * sizeof(temp), &temp,
+              FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
+       efx_farch_rx_pull_indir_table(efx);
+       return 0;
+}
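
A size check on the three reads above: an efx_oword_t is 128 bits, and FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH is presumably 64, so the pull recovers 16 + 16 + 8 = 40 bytes of Toeplitz key, of which the `.rx_hash_key_size = 16` added near the end of this file presumably exposes only the IPv4-sized prefix via ethtool. As standalone arithmetic:

    #include <stdio.h>

    /* The 128-bit oword and 64-bit HI-field widths are assumptions
     * inferred from the memcpy sizes in siena_rx_pull_rss_config().
     */
    int main(void)
    {
            unsigned oword_bytes = 128 / 8, hi_bytes = 64 / 8;

            printf("IPv6 RSS key: %u bytes\n", 2 * oword_bytes + hi_bytes);
            return 0;       /* prints 40 */
    }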
+
 static int siena_rx_push_rss_config(struct efx_nic *efx, bool user,
-                                   const u32 *rx_indir_table)
+                                   const u32 *rx_indir_table, const u8 *key)
 {
        efx_oword_t temp;
 
        /* Set hash key for IPv4 */
+       if (key)
+               memcpy(efx->rx_hash_key, key, sizeof(temp));
        memcpy(&temp, efx->rx_hash_key, sizeof(temp));
        efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
 
@@ -402,7 +424,7 @@ static int siena_init_nic(struct efx_nic *efx)
                            EFX_RX_USR_BUF_SIZE >> 5);
        efx_writeo(efx, &temp, FR_AZ_RX_CFG);
 
-       siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
+       siena_rx_push_rss_config(efx, false, efx->rx_indir_table, NULL);
        efx->rss_active = true;
 
        /* Enable event logging */
@@ -429,6 +451,7 @@ static void siena_remove_nic(struct efx_nic *efx)
 
        efx_mcdi_reset(efx, RESET_TYPE_ALL);
 
+       efx_mcdi_detach(efx);
        efx_mcdi_fini(efx);
 
        /* Tear down the private nic state */
@@ -979,6 +1002,7 @@ const struct efx_nic_type siena_a0_nic_type = {
        .tx_write = efx_farch_tx_write,
        .tx_limit_len = efx_farch_tx_limit_len,
        .rx_push_rss_config = siena_rx_push_rss_config,
+       .rx_pull_rss_config = siena_rx_pull_rss_config,
        .rx_probe = efx_farch_rx_probe,
        .rx_init = efx_farch_rx_init,
        .rx_remove = efx_farch_rx_remove,
@@ -1044,6 +1068,8 @@ const struct efx_nic_type siena_a0_nic_type = {
        .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
        .rx_buffer_padding = 0,
        .can_rx_scatter = true,
+       .option_descriptors = false,
+       .min_interrupt_mode = EFX_INT_MODE_LEGACY,
        .max_interrupt_mode = EFX_INT_MODE_MSIX,
        .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
        .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1053,4 +1079,5 @@ const struct efx_nic_type siena_a0_nic_type = {
        .hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
                             1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
                             1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
+       .rx_hash_key_size = 16,
 };
index 9abcf4aded30735924b3a63bd04f070e1be1d3a4..0b766fdbcddbbdb18d77d40d51ee3660d6454a77 100644 (file)
@@ -73,14 +73,3 @@ int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
        else
                return -EOPNOTSUPP;
 }
-
-int efx_sriov_get_phys_port_id(struct net_device *net_dev,
-                              struct netdev_phys_item_id *ppid)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-
-       if (efx->type->sriov_get_phys_port_id)
-               return efx->type->sriov_get_phys_port_id(efx, ppid);
-       else
-               return -EOPNOTSUPP;
-}
index ba1762e7f2165d49dcedfbba34430ccbf465da60..84c7984edcafa768fb856de3e07a42c7fd2623f6 100644 (file)
@@ -23,9 +23,6 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
                            struct ifla_vf_info *ivi);
 int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
                                int link_state);
-int efx_sriov_get_phys_port_id(struct net_device *net_dev,
-                              struct netdev_phys_item_id *ppid);
-
 #endif /* CONFIG_SFC_SRIOV */
 
 #endif /* EFX_SRIOV_H */
index 3c0151424d125d611d7d6f09b604bb5cfbb33519..ff88d60aa6d5650d04f46938eaf6abc63c4ff568 100644 (file)
@@ -28,7 +28,6 @@
 
 #ifdef EFX_USE_PIO
 
-#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
 #define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
 unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
 
@@ -817,6 +816,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
 
        tx_queue->insert_count = 0;
        tx_queue->write_count = 0;
+       tx_queue->packet_write_count = 0;
        tx_queue->old_write_count = 0;
        tx_queue->read_count = 0;
        tx_queue->old_read_count = 0;
index 55a95e1d69d68b45ec85a8648e85d24e3673a4a0..5f2737189c724eea48ffdd8d8a48f520a209f3e7 100644 (file)
@@ -264,7 +264,6 @@ struct epic_private {
        spinlock_t lock;                                /* Group with Tx control cache line. */
        spinlock_t napi_lock;
        struct napi_struct napi;
-       unsigned int reschedule_in_poll;
        unsigned int cur_tx, dirty_tx;
 
        unsigned int cur_rx, dirty_rx;
@@ -400,7 +399,6 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        spin_lock_init(&ep->lock);
        spin_lock_init(&ep->napi_lock);
-       ep->reschedule_in_poll = 0;
 
        /* Bring the chip out of low-power mode. */
        ew32(GENCTL, 0x4200);
@@ -1086,13 +1084,12 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
 
        handled = 1;
 
-       if ((status & EpicNapiEvent) && !ep->reschedule_in_poll) {
+       if (status & EpicNapiEvent) {
                spin_lock(&ep->napi_lock);
                if (napi_schedule_prep(&ep->napi)) {
                        epic_napi_irq_off(dev, ep);
                        __napi_schedule(&ep->napi);
-               } else
-                       ep->reschedule_in_poll++;
+               }
                spin_unlock(&ep->napi_lock);
        }
        status &= ~EpicNapiEvent;
@@ -1248,37 +1245,23 @@ static int epic_poll(struct napi_struct *napi, int budget)
 {
        struct epic_private *ep = container_of(napi, struct epic_private, napi);
        struct net_device *dev = ep->mii.dev;
-       int work_done = 0;
        void __iomem *ioaddr = ep->ioaddr;
-
-rx_action:
+       int work_done;
 
        epic_tx(dev, ep);
 
-       work_done += epic_rx(dev, budget);
+       work_done = epic_rx(dev, budget);
 
        epic_rx_err(dev, ep);
 
-       if (work_done < budget) {
+       if (work_done < budget && napi_complete_done(napi, work_done)) {
                unsigned long flags;
-               int more;
-
-               /* A bit baroque but it avoids a (space hungry) spin_unlock */
 
                spin_lock_irqsave(&ep->napi_lock, flags);
 
-               more = ep->reschedule_in_poll;
-               if (!more) {
-                       __napi_complete(napi);
-                       ew32(INTSTAT, EpicNapiEvent);
-                       epic_napi_irq_on(dev, ep);
-               } else
-                       ep->reschedule_in_poll--;
-
+               ew32(INTSTAT, EpicNapiEvent);
+               epic_napi_irq_on(dev, ep);
                spin_unlock_irqrestore(&ep->napi_lock, flags);
-
-               if (more)
-                       goto rx_action;
        }
 
        return work_done;
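
This is the now-standard NAPI completion idiom: napi_complete_done() returns true only when the instance actually came off the poll list, so interrupt re-enabling can live inside that branch and the reschedule_in_poll bookkeeping becomes redundant. A minimal sketch with hypothetical driver helpers:

    static int foo_poll(struct napi_struct *napi, int budget)
    {
            int work_done = foo_rx(napi, budget);   /* hypothetical RX handler */

            /* Re-arm interrupts only if polling genuinely finished. */
            if (work_done < budget && napi_complete_done(napi, work_done))
                    foo_irq_enable(napi);           /* hypothetical helper */

            return work_done;
    }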
index 67154621abcf9775a605f9066bfe7ff41418b56c..97280daba27f7f8349eaaabc71e4514c33d9fcf3 100644 (file)
@@ -113,6 +113,7 @@ struct smc_private {
     struct mii_if_info         mii_if;
     int                                duplex;
     int                                rx_ovrn;
+    unsigned long              last_rx;
 };
 
 /* Special definitions for Megahertz multifunction cards */
@@ -1491,6 +1492,7 @@ static void smc_rx(struct net_device *dev)
     if (!(rx_status & RS_ERRORS)) {
        /* do stuff to make a new packet */
        struct sk_buff *skb;
+       struct smc_private *smc = netdev_priv(dev);
        
        /* Note: packet_length adds 5 or 6 extra bytes here! */
        skb = netdev_alloc_skb(dev, packet_length+2);
@@ -1509,7 +1511,7 @@ static void smc_rx(struct net_device *dev)
        skb->protocol = eth_type_trans(skb, dev);
        
        netif_rx(skb);
-       dev->last_rx = jiffies;
+       smc->last_rx = jiffies;
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += packet_length;
        if (rx_status & RS_MULTICAST)
@@ -1790,7 +1792,7 @@ static void media_check(u_long arg)
     }
 
     /* Ignore collisions unless we've had no rx's recently */
-    if (time_after(jiffies, dev->last_rx + HZ)) {
+    if (time_after(jiffies, smc->last_rx + HZ)) {
        if (smc->tx_err || (smc->media_status & EPH_16COL))
            media |= EPH_16COL;
     }
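
The driver-private last_rx stamp replaces the dev->last_rx field that was being removed from struct net_device; the comparison stays correct across jiffies wrap-around because time_after() uses unsigned subtraction with a signed test. A userspace restatement of that wrap safety:

    #include <stdio.h>

    /* Kernel's definition, typechecks omitted. */
    #define time_after(a, b)  ((long)((b) - (a)) < 0)

    int main(void)
    {
            unsigned long HZ = 100;
            unsigned long jiffies = 10;                  /* counter wrapped past 0 */
            unsigned long last_rx = (unsigned long)-50;  /* stamped 60 ticks ago */

            /* only 60 < HZ ticks elapsed, so this prints 0 despite the wrap */
            printf("%d\n", time_after(jiffies, last_rx + HZ));
            return 0;
    }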
index 3174aebb322fe98e00a93466840aae2de7db33f7..2fa3c1d03abc591f70ac857f2f3665e5e0ee143b 100644 (file)
@@ -861,7 +861,7 @@ static int smsc9420_rx_poll(struct napi_struct *napi, int budget)
        smsc9420_pci_flush_write(pd);
 
        if (work_done < budget) {
-               napi_complete(&pd->napi);
+               napi_complete_done(&pd->napi, work_done);
 
                /* re-enable RX DMA interrupts */
                dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
index 1c1157d2bd40ca2ae38cb88ffd211407b88db4d8..ecd7a5edef5d7837efae50c58fd2378fef69dc1b 100644 (file)
@@ -7,7 +7,8 @@ config NET_VENDOR_STMICRO
        default y
        depends on HAS_IOMEM
        ---help---
-         If you have a network (Ethernet) card belonging to this class, say Y.
+         If you have a network (Ethernet) card based on Synopsys Ethernet IP
+         Cores, say Y.
 
          Note that the answer to this question doesn't directly affect the
          kernel: saying N will just cause the configurator to skip all
index ab66248a4b78ba47a535f9dbbe12062996684ec8..cfbe3634dfa18143c8cf4500697a3ed6f9778c07 100644 (file)
@@ -1,5 +1,5 @@
 config STMMAC_ETH
-       tristate "STMicroelectronics 10/100/1000 Ethernet driver"
+       tristate "STMicroelectronics 10/100/1000/EQOS Ethernet driver"
        depends on HAS_IOMEM && HAS_DMA
        select MII
        select PHYLIB
@@ -7,9 +7,8 @@ config STMMAC_ETH
        imply PTP_1588_CLOCK
        select RESET_CONTROLLER
        ---help---
-         This is the driver for the Ethernet IPs are built around a
-         Synopsys IP Core and only tested on the STMicroelectronics
-         platforms.
+         This is the driver for the Ethernet IPs built around a
+         Synopsys IP Core.
 
 if STMMAC_ETH
 
@@ -29,6 +28,15 @@ config STMMAC_PLATFORM
 
 if STMMAC_PLATFORM
 
+config DWMAC_DWC_QOS_ETH
+       tristate "Support for snps,dwc-qos-ethernet.txt DT binding."
+       select PHYLIB
+       select CRC32
+       select MII
+       depends on OF && HAS_DMA
+       help
+         Support for chips using the snps,dwc-qos-ethernet.txt DT binding.
+
 config DWMAC_GENERIC
        tristate "Generic driver for DWMAC"
        default STMMAC_PLATFORM
@@ -143,11 +151,11 @@ config STMMAC_PCI
        tristate "STMMAC PCI bus support"
        depends on STMMAC_ETH && PCI
        ---help---
-         This is to select the Synopsys DWMAC available on PCI devices,
-         if you have a controller with this interface, say Y or M here.
+         This selects the PCI bus support for the stmmac driver.
+         This driver was tested on XLINX XC2V3000 FF1152AMT0221
+         D1215994A VIRTEX FPGA board and SNPS QoS IPK Prototyping Kit.
 
-         This PCI support is tested on XLINX XC2V3000 FF1152AMT0221
-         D1215994A VIRTEX FPGA board.
+         If you have a controller with this interface, say Y or M here.
 
          If unsure, say N.
 endif
index 8f83a86ba13c69e052a5077f834ebc8f21302d10..700c603366748f05e1d365f60fddda693eab92c9 100644 (file)
@@ -16,6 +16,7 @@ obj-$(CONFIG_DWMAC_SOCFPGA)   += dwmac-altr-socfpga.o
 obj-$(CONFIG_DWMAC_STI)                += dwmac-sti.o
 obj-$(CONFIG_DWMAC_STM32)      += dwmac-stm32.o
 obj-$(CONFIG_DWMAC_SUNXI)      += dwmac-sunxi.o
+obj-$(CONFIG_DWMAC_DWC_QOS_ETH)        += dwmac-dwc-qos-eth.o
 obj-$(CONFIG_DWMAC_GENERIC)    += dwmac-generic.o
 stmmac-platform-objs:= stmmac_platform.o
 dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o
index 026e8e9cb9429bf60c775a531917322994946035..01a8c020d6db193a67c2a74bdb522b245ea5ac99 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
index b13a144f72ad565ae2b36b9914260800d38ce5fb..144fe84e8a531e63ae14c3f3a9c5348a7407285f 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
@@ -71,7 +67,7 @@ struct stmmac_extra_stats {
        unsigned long overflow_error;
        unsigned long ipc_csum_error;
        unsigned long rx_collision;
-       unsigned long rx_crc;
+       unsigned long rx_crc_errors;
        unsigned long dribbling_bit;
        unsigned long rx_length;
        unsigned long rx_mii;
@@ -323,6 +319,9 @@ struct dma_features {
        /* TX and RX number of channels */
        unsigned int number_rx_channel;
        unsigned int number_tx_channel;
+       /* TX and RX number of queues */
+       unsigned int number_rx_queues;
+       unsigned int number_tx_queues;
        /* Alternate (enhanced) DESC mode */
        unsigned int enh_desc;
 };
@@ -340,7 +339,7 @@ struct dma_features {
 /* Common MAC defines */
 #define MAC_CTRL_REG           0x00000000      /* MAC Control */
 #define MAC_ENABLE_TX          0x00000008      /* Transmitter Enable */
-#define MAC_RNABLE_RX          0x00000004      /* Receiver Enable */
+#define MAC_ENABLE_RX          0x00000004      /* Receiver Enable */
 
 /* Default LPI timers */
 #define STMMAC_DEFAULT_LIT_LS  0x3E8
@@ -454,6 +453,8 @@ struct stmmac_ops {
        void (*core_init)(struct mac_device_info *hw, int mtu);
        /* Enable and verify that the IPC module is supported */
        int (*rx_ipc)(struct mac_device_info *hw);
+       /* Enable RX Queues */
+       void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue);
        /* Dump MAC registers */
        void (*dump_regs)(struct mac_device_info *hw);
        /* Handle extra events on specific interrupts hw dependent */
@@ -471,7 +472,8 @@ struct stmmac_ops {
                              unsigned int reg_n);
        void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
                              unsigned int reg_n);
-       void (*set_eee_mode)(struct mac_device_info *hw);
+       void (*set_eee_mode)(struct mac_device_info *hw,
+                            bool en_tx_lpi_clockgating);
        void (*reset_eee_mode)(struct mac_device_info *hw);
        void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
        void (*set_eee_pls)(struct mac_device_info *hw, int link);
index faeeef75d7f17edbb69f483609d3b9c8e483fad3..0c2432b1ce67a7b607adf61519a167501ab0b7bb 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
index 1d181e205d6ecbb49c5d173fcb6f3ea5bfe6fe40..ca9d7e48034ceb33f5f4eb4db5b99691ed1a278f 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
new file mode 100644 (file)
index 0000000..1a3fa3d
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver
+ *
+ * Copyright (C) 2016 Joao Pinto <jpinto@synopsys.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
+                                  struct plat_stmmacenet_data *plat_dat)
+{
+       struct device_node *np = pdev->dev.of_node;
+       u32 burst_map = 0;
+       u32 bit_index = 0;
+       u32 a_index = 0;
+
+       if (!plat_dat->axi) {
+               plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL);
+
+               if (!plat_dat->axi)
+                       return -ENOMEM;
+       }
+
+       plat_dat->axi->axi_lpi_en = of_property_read_bool(np, "snps,en-lpi");
+       if (of_property_read_u32(np, "snps,write-requests",
+                                &plat_dat->axi->axi_wr_osr_lmt)) {
+               /**
+                * Since the register has a reset value of 1, if property
+                * is missing, default to 1.
+                */
+               plat_dat->axi->axi_wr_osr_lmt = 1;
+       } else {
+               /**
+                * If property exists, to keep the behavior from dwc_eth_qos,
+                * subtract one after parsing.
+                */
+               plat_dat->axi->axi_wr_osr_lmt--;
+       }
+
+       if (of_property_read_u32(np, "read,read-requests",
+                                &plat_dat->axi->axi_rd_osr_lmt)) {
+               /**
+                * Since the register has a reset value of 1, if property
+                * is missing, default to 1.
+                */
+               plat_dat->axi->axi_rd_osr_lmt = 1;
+       } else {
+               /**
+                * If property exists, to keep the behavior from dwc_eth_qos,
+                * subtract one after parsing.
+                */
+               plat_dat->axi->axi_rd_osr_lmt--;
+       }
+       of_property_read_u32(np, "snps,burst-map", &burst_map);
+
+       /* converts burst-map bitmask to burst array */
+       for (bit_index = 0; bit_index < 7; bit_index++) {
+               if (burst_map & (1 << bit_index)) {
+                       switch (bit_index) {
+                       case 0: plat_dat->axi->axi_blen[a_index] = 4;   break;
+                       case 1: plat_dat->axi->axi_blen[a_index] = 8;   break;
+                       case 2: plat_dat->axi->axi_blen[a_index] = 16;  break;
+                       case 3: plat_dat->axi->axi_blen[a_index] = 32;  break;
+                       case 4: plat_dat->axi->axi_blen[a_index] = 64;  break;
+                       case 5: plat_dat->axi->axi_blen[a_index] = 128; break;
+                       case 6: plat_dat->axi->axi_blen[a_index] = 256; break;
+                       default:
+                               break;
+                       }
+                       a_index++;
+               }
+       }
+
+       /* dwc-qos needs GMAC4, AAL, TSO and PMT */
+       plat_dat->has_gmac4 = 1;
+       plat_dat->dma_cfg->aal = 1;
+       plat_dat->tso_en = 1;
+       plat_dat->pmt = 1;
+
+       return 0;
+}
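
The burst-map loop above turns bit n of the "snps,burst-map" property into AXI burst length 4 << n (bit 0 = 4 beats, up to bit 6 = 256 beats). A standalone restatement of the conversion:

    #include <stdio.h>

    int main(void)
    {
            unsigned burst_map = 0x7;           /* example: DT sets bits 0-2 */
            unsigned blen[7] = { 0 }, a = 0, bit, i;

            for (bit = 0; bit < 7; bit++)
                    if (burst_map & (1u << bit))
                            blen[a++] = 4u << bit;

            for (i = 0; i < a; i++)
                    printf("axi_blen[%u] = %u\n", i, blen[i]); /* 4, 8, 16 */
            return 0;
    }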
+
+static int dwc_eth_dwmac_probe(struct platform_device *pdev)
+{
+       struct plat_stmmacenet_data *plat_dat;
+       struct stmmac_resources stmmac_res;
+       struct resource *res;
+       int ret;
+
+       memset(&stmmac_res, 0, sizeof(struct stmmac_resources));
+
+       /* Since stmmac_platform supports named IRQs only, basic platform
+        * resource initialization is done here in the glue logic.
+        */
+       stmmac_res.irq = platform_get_irq(pdev, 0);
+       if (stmmac_res.irq < 0) {
+               if (stmmac_res.irq != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "IRQ configuration information not found\n");
+
+               return stmmac_res.irq;
+       }
+       stmmac_res.wol_irq = stmmac_res.irq;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       stmmac_res.addr = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(stmmac_res.addr))
+               return PTR_ERR(stmmac_res.addr);
+
+       plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+       if (IS_ERR(plat_dat))
+               return PTR_ERR(plat_dat);
+
+       plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
+       if (IS_ERR(plat_dat->stmmac_clk)) {
+               dev_err(&pdev->dev, "apb_pclk clock not found.\n");
+               ret = PTR_ERR(plat_dat->stmmac_clk);
+               plat_dat->stmmac_clk = NULL;
+               goto err_remove_config_dt;
+       }
+       clk_prepare_enable(plat_dat->stmmac_clk);
+
+       plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+       if (IS_ERR(plat_dat->pclk)) {
+               dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+               ret = PTR_ERR(plat_dat->pclk);
+               plat_dat->pclk = NULL;
+               goto err_out_clk_dis_phy;
+       }
+       clk_prepare_enable(plat_dat->pclk);
+
+       ret = dwc_eth_dwmac_config_dt(pdev, plat_dat);
+       if (ret)
+               goto err_out_clk_dis_aper;
+
+       ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+       if (ret)
+               goto err_out_clk_dis_aper;
+
+       return 0;
+
+err_out_clk_dis_aper:
+       clk_disable_unprepare(plat_dat->pclk);
+err_out_clk_dis_phy:
+       clk_disable_unprepare(plat_dat->stmmac_clk);
+err_remove_config_dt:
+       stmmac_remove_config_dt(pdev, plat_dat);
+
+       return ret;
+}
+
+static int dwc_eth_dwmac_remove(struct platform_device *pdev)
+{
+       return stmmac_pltfr_remove(pdev);
+}
+
+static const struct of_device_id dwc_eth_dwmac_match[] = {
+       { .compatible = "snps,dwc-qos-ethernet-4.10", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
+
+static struct platform_driver dwc_eth_dwmac_driver = {
+       .probe  = dwc_eth_dwmac_probe,
+       .remove = dwc_eth_dwmac_remove,
+       .driver = {
+               .name           = "dwc-eth-dwmac",
+               .of_match_table = dwc_eth_dwmac_match,
+       },
+};
+module_platform_driver(dwc_eth_dwmac_driver);
+
+MODULE_AUTHOR("Joao Pinto <jpinto@synopsys.com>");
+MODULE_DESCRIPTION("Synopsys DWC Ethernet Quality-of-Service v4.10a driver");
+MODULE_LICENSE("GPL v2");
index ffaed1f35efe0754749a536c563d91937dcb4302..9685555932ea39c79182205504f6ebec4da51845 100644 (file)
 
 #define PRG_ETH0_TXDLY_SHIFT           5
 #define PRG_ETH0_TXDLY_MASK            GENMASK(6, 5)
-#define PRG_ETH0_TXDLY_OFF             (0x0 << PRG_ETH0_TXDLY_SHIFT)
-#define PRG_ETH0_TXDLY_QUARTER         (0x1 << PRG_ETH0_TXDLY_SHIFT)
-#define PRG_ETH0_TXDLY_HALF            (0x2 << PRG_ETH0_TXDLY_SHIFT)
-#define PRG_ETH0_TXDLY_THREE_QUARTERS  (0x3 << PRG_ETH0_TXDLY_SHIFT)
 
 /* divider for the result of m250_sel */
 #define PRG_ETH0_CLK_M250_DIV_SHIFT    7
@@ -69,6 +65,8 @@ struct meson8b_dwmac {
 
        struct clk_divider      m25_div;
        struct clk              *m25_div_clk;
+
+       u32                     tx_delay_ns;
 };
 
 static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
@@ -179,11 +177,19 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
 {
        int ret;
        unsigned long clk_rate;
+       u8 tx_dly_val = 0;
 
        switch (dwmac->phy_mode) {
        case PHY_INTERFACE_MODE_RGMII:
-       case PHY_INTERFACE_MODE_RGMII_ID:
        case PHY_INTERFACE_MODE_RGMII_RXID:
+               /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where
+                * 8ns is exactly one cycle of the 125MHz RGMII TX clock):
+                * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3
+                */
+               tx_dly_val = dwmac->tx_delay_ns >> 1;
+               /* fall through */
+
+       case PHY_INTERFACE_MODE_RGMII_ID:
        case PHY_INTERFACE_MODE_RGMII_TXID:
                /* Generate a 25MHz clock for the PHY */
                clk_rate = 25 * 1000 * 1000;
@@ -196,9 +202,8 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
                meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
                                        PRG_ETH0_INVERTED_RMII_CLK, 0);
 
-               /* TX clock delay - all known boards use a 1/4 cycle delay */
                meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
-                                       PRG_ETH0_TXDLY_QUARTER);
+                                       tx_dly_val << PRG_ETH0_TXDLY_SHIFT);
                break;
 
        case PHY_INTERFACE_MODE_RMII:
@@ -284,6 +289,11 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
                goto err_remove_config_dt;
        }
 
+       /* use 2ns as fallback since this value was previously hardcoded */
+       if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns",
+                                &dwmac->tx_delay_ns))
+               dwmac->tx_delay_ns = 2;
+
        ret = meson8b_init_clk(dwmac);
        if (ret)
                goto err_remove_config_dt;
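
Given the comment in the init hunk above, the delay register advances in quarter cycles of the 8ns RGMII TX clock, so tx_dly_val = tx_delay_ns >> 1 and the 2ns fallback here reproduces the old hardcoded PRG_ETH0_TXDLY_QUARTER setting. The mapping, worked through:

    #include <stdio.h>

    int main(void)
    {
            unsigned ns;

            /* 0ns -> 0x0, 2ns -> 0x1, 4ns -> 0x2, 6ns -> 0x3 */
            for (ns = 0; ns <= 6; ns += 2)
                    printf("%uns -> tx_dly_val 0x%x\n", ns, ns >> 1);
            return 0;
    }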
index 1f997027ae512eed71c02a0d5af11c852988bbe3..17d4bbaeb65cd7e8711c6cb541bc36713695235a 100644 (file)
@@ -341,7 +341,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
         * mode. Create a copy of the core reset handle so it can be used by
         * the driver later.
         */
-       dwmac->stmmac_rst = stpriv->stmmac_rst;
+       dwmac->stmmac_rst = stpriv->plat->stmmac_rst;
 
        ret = socfpga_dwmac_set_phy_mode(dwmac);
        if (ret)
index 1657acfa70c2980108ad7e84f0548aa65299305e..e149848140413c827f08757aa41da325d82c1fdd 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
index 52b9407a8a39cfa54d3fc29e7566467623d78987..c02d36629c5284993afe7c6604af11d9c1923abb 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
index 5484fd726d5af7f5f10708c57d062b9992be655d..91c8926b7479ab180fa70f66c1bf71309258d64e 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
@@ -347,11 +343,14 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
        return ret;
 }
 
-static void dwmac1000_set_eee_mode(struct mac_device_info *hw)
+static void dwmac1000_set_eee_mode(struct mac_device_info *hw,
+                                  bool en_tx_lpi_clockgating)
 {
        void __iomem *ioaddr = hw->pcsr;
        u32 value;
 
+       /* TODO: handle en_tx_lpi_clockgating here */
+
        /* Enable the link status receive on RGMII, SGMII or SMII
         * receive path and instruct the transmitter to enter LPI
         * state.
index 612d3aaac9a42cc0c2e1a705075a08cfca02abd2..fbaec0ffd9ef6638ef5406631561f3ee6964f018 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
index 9dd2987e284dcb9f96fd3a48e98e606b26326d3a..8ab518997b1b509a5cf72343ac42c9ef1993e02c 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
index e5664da382f33e9a52fefd33c007481997ab464f..d40e91e8fc7bde6352d9a5a141707cff69d8a428 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
index 3e8d4fefa5e0cb0131415bf4b6862620d4515abc..db45134fddf04e50c703254b8570b744004123f8 100644 (file)
@@ -22,6 +22,7 @@
 #define GMAC_HASH_TAB_32_63            0x00000014
 #define GMAC_RX_FLOW_CTRL              0x00000090
 #define GMAC_QX_TX_FLOW_CTRL(x)                (0x70 + x * 4)
+#define GMAC_RXQ_CTRL0                 0x000000a0
 #define GMAC_INT_STATUS                        0x000000b0
 #define GMAC_INT_EN                    0x000000b4
 #define GMAC_PCS_BASE                  0x000000e0
 
 #define GMAC_MAX_PERFECT_ADDRESSES     128
 
+/* MAC RX Queue Enable */
+#define GMAC_RX_QUEUE_CLEAR(queue)     ~(GENMASK(1, 0) << ((queue) * 2))
+#define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2)
+#define GMAC_RX_DCB_QUEUE_ENABLE(queue)        BIT(((queue) * 2) + 1)
+
 /* MAC Flow Control RX */
 #define GMAC_RX_FLOW_CTRL_RFE          BIT(0)
 
@@ -84,6 +90,19 @@ enum power_event {
        power_down = 0x00000001,
 };
 
+/* Energy Efficient Ethernet (EEE) for GMAC4
+ *
+ * LPI status, timer and control register offset
+ */
+#define GMAC4_LPI_CTRL_STATUS  0xd0
+#define GMAC4_LPI_TIMER_CTRL   0xd4
+
+/* LPI control and status defines */
+#define GMAC4_LPI_CTRL_STATUS_LPITCSE  BIT(21) /* LPI Tx Clock Stop Enable */
+#define GMAC4_LPI_CTRL_STATUS_LPITXA   BIT(19) /* Enable LPI TX Automate */
+#define GMAC4_LPI_CTRL_STATUS_PLS      BIT(17) /* PHY Link Status */
+#define GMAC4_LPI_CTRL_STATUS_LPIEN    BIT(16) /* LPI Enable */
+
 /* MAC Debug bitmap */
 #define GMAC_DEBUG_TFCSTS_MASK         GENMASK(18, 17)
 #define GMAC_DEBUG_TFCSTS_SHIFT                17
@@ -133,6 +152,8 @@ enum power_event {
 /* MAC HW features2 bitmap */
 #define GMAC_HW_FEAT_TXCHCNT           GENMASK(21, 18)
 #define GMAC_HW_FEAT_RXCHCNT           GENMASK(15, 12)
+#define GMAC_HW_FEAT_TXQCNT            GENMASK(9, 6)
+#define GMAC_HW_FEAT_RXQCNT            GENMASK(3, 0)
 
 /* MAC HW ADDR regs */
 #define GMAC_HI_DCS                    GENMASK(18, 16)
index eaed7cb2186734c7ec7af3833c433629aa690a59..202216cd6789176e1cf9164cfb3faf6b58c0ba89 100644 (file)
@@ -59,6 +59,17 @@ static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
        writel(value, ioaddr + GMAC_INT_EN);
 }
 
+static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue)
+{
+       void __iomem *ioaddr = hw->pcsr;
+       u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
+
+       value &= GMAC_RX_QUEUE_CLEAR(queue);
+       value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
+
+       writel(value, ioaddr + GMAC_RXQ_CTRL0);
+}
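
Each RX queue owns a two-bit field in GMAC_RXQ_CTRL0 (00 = disabled, 01 = enabled for AV, 10 = enabled for DCB, mirroring the macros added in dwmac4.h above); dwmac4_rx_queue_enable() clears the field and picks the AV encoding. The bit arithmetic, restated standalone:

    #include <stdio.h>

    #define RX_QUEUE_CLEAR(q)      (~(0x3u << ((q) * 2)))
    #define RX_AV_QUEUE_ENABLE(q)  (1u << ((q) * 2))

    int main(void)
    {
            unsigned value = 0xffffffffu;       /* pretend register content */
            unsigned queue = 1;

            value &= RX_QUEUE_CLEAR(queue);     /* clear bits 3:2 */
            value |= RX_AV_QUEUE_ENABLE(queue); /* set bit 2 (AV mode) */
            printf("RXQ_CTRL0 = 0x%08x\n", value);  /* 0xfffffff7 */
            return 0;
    }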
+
 static void dwmac4_dump_regs(struct mac_device_info *hw)
 {
        void __iomem *ioaddr = hw->pcsr;
@@ -126,6 +137,65 @@ static void dwmac4_get_umac_addr(struct mac_device_info *hw,
                                   GMAC_ADDR_LOW(reg_n));
 }
 
+static void dwmac4_set_eee_mode(struct mac_device_info *hw,
+                               bool en_tx_lpi_clockgating)
+{
+       void __iomem *ioaddr = hw->pcsr;
+       u32 value;
+
+       /* Enable the link status receive on RGMII, SGMII or SMII
+        * receive path and instruct the transmitter to enter LPI
+        * state.
+        */
+       value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
+       value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
+
+       if (en_tx_lpi_clockgating)
+               value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
+
+       writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
+}
+
+static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
+{
+       void __iomem *ioaddr = hw->pcsr;
+       u32 value;
+
+       value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
+       value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
+       writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
+}
+
+static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
+{
+       void __iomem *ioaddr = hw->pcsr;
+       u32 value;
+
+       value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
+
+       if (link)
+               value |= GMAC4_LPI_CTRL_STATUS_PLS;
+       else
+               value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
+
+       writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
+}
+
+static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
+{
+       void __iomem *ioaddr = hw->pcsr;
+       int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);
+
+       /* Program the timers in the LPI timer control register:
+        * LS: minimum time (ms) for which the link
+        *  status from PHY should be ok before transmitting
+        *  the LPI pattern.
+        * TW: minimum time (us) for which the core waits
+        *  after it has stopped transmitting the LPI pattern.
+        */
+       writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
+}
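
The timer register packs TW into bits 15:0 and LS into bits 25:16, as the expression in dwmac4_set_eee_timer() shows; with the STMMAC_DEFAULT_LIT_LS value of 0x3E8 (1000 ms) defined in common.h earlier in this patch and a TW of 30 us, the packed value works out as:

    #include <stdio.h>

    int main(void)
    {
            int ls = 1000;  /* ms, = 0x3e8 */
            int tw = 30;    /* us */
            unsigned value = (tw & 0xffff) | ((ls & 0x3ff) << 16);

            printf("LPI_TIMER_CTRL = 0x%08x\n", value);  /* 0x03e8001e */
            return 0;
    }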
+
 static void dwmac4_set_filter(struct mac_device_info *hw,
                              struct net_device *dev)
 {
@@ -392,12 +462,17 @@ static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
 static const struct stmmac_ops dwmac4_ops = {
        .core_init = dwmac4_core_init,
        .rx_ipc = dwmac4_rx_ipc_enable,
+       .rx_queue_enable = dwmac4_rx_queue_enable,
        .dump_regs = dwmac4_dump_regs,
        .host_irq_status = dwmac4_irq_status,
        .flow_ctrl = dwmac4_flow_ctrl,
        .pmt = dwmac4_pmt,
        .set_umac_addr = dwmac4_set_umac_addr,
        .get_umac_addr = dwmac4_get_umac_addr,
+       .set_eee_mode = dwmac4_set_eee_mode,
+       .reset_eee_mode = dwmac4_reset_eee_mode,
+       .set_eee_timer = dwmac4_set_eee_timer,
+       .set_eee_pls = dwmac4_set_eee_pls,
        .pcs_ctrl_ane = dwmac4_ctrl_ane,
        .pcs_rane = dwmac4_rane,
        .pcs_get_adv_lp = dwmac4_get_adv_lp,
index 8816515e1bbbc5c4a1fd5a443a478f2061d27c50..843ec69222eacd69f6107a0f6f1cceac83b2667f 100644 (file)
@@ -103,7 +103,7 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
                        x->rx_mii++;
 
                if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
-                       x->rx_crc++;
+                       x->rx_crc_errors++;
                        stats->rx_crc_errors++;
                }
 
index 8196ab5fc33c0b2bfedea016b3f505fff7e5ba52..377d1b44d4f2802650819b37d7e27fe78115bf41 100644 (file)
@@ -303,6 +303,11 @@ static void dwmac4_get_hw_feature(void __iomem *ioaddr,
                ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
        dma_cap->number_tx_channel =
                ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
+       /* TX and RX number of queues */
+       dma_cap->number_rx_queues =
+               ((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1;
+       dma_cap->number_tx_queues =
+               ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1;
 
        /* IEEE 1588-2002 */
        dma_cap->time_stamp = 0;
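
The HW_FEATURE2 queue-count fields decoded above are stored as count-minus-one, hence the +1 after masking; TXQCNT sits at bits 9:6 and RXQCNT at bits 3:0 per the GMAC_HW_FEAT_* masks added in dwmac4.h. A standalone decode with an example register value:

    #include <stdio.h>

    #define GMAC_HW_FEAT_TXQCNT 0x000003c0u     /* GENMASK(9, 6) */
    #define GMAC_HW_FEAT_RXQCNT 0x0000000fu     /* GENMASK(3, 0) */

    int main(void)
    {
            unsigned hw_cap = 0x000000c3u;      /* example readout */
            unsigned rxq = ((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1;
            unsigned txq = ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1;

            printf("rx queues %u, tx queues %u\n", rxq, txq);  /* 4 and 4 */
            return 0;
    }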
index 726d9d9aaf83dce9c91ab307791fc594642e389f..56e485f79077374a9e19859a7953b3f18f5c42f3 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
index 84e3e84cec7d33dddf13e0fc83e235d1e181cbda..e60bfca2a763325880215bab4592d9dbe5056fbb 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
@@ -21,6 +17,7 @@
 *******************************************************************************/
 
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include "common.h"
 #include "dwmac_dma.h"
 
 int dwmac_dma_reset(void __iomem *ioaddr)
 {
        u32 value = readl(ioaddr + DMA_BUS_MODE);
-       int limit;
+       int err;
 
        /* DMA SW reset */
        value |= DMA_BUS_MODE_SFT_RESET;
        writel(value, ioaddr + DMA_BUS_MODE);
-       limit = 10;
-       while (limit--) {
-               if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
-                       break;
-               mdelay(10);
-       }
 
-       if (limit < 0)
+       err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+                                !(value & DMA_BUS_MODE_SFT_RESET),
+                                100000, 10000);
+       if (err)
                return -EBUSY;
 
        return 0;
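
readl_poll_timeout() (linux/iopoll.h) re-reads the register into its val argument until the condition holds, taking the sleep interval first and the total timeout second, and returns 0 or -ETIMEDOUT; note that as called above the per-read sleep (100000 us) exceeds the overall timeout (10000 us), so the reset bit is effectively sampled only a couple of times. A userspace sketch of that contract (names illustrative, not the kernel implementation):

    #include <errno.h>
    #include <stdio.h>

    static unsigned reg = 0x1;                  /* bit 0 = SFT_RESET pending */

    static unsigned read_reg(void)
    {
            static int reads;

            if (++reads >= 3)
                    reg &= ~0x1u;               /* "hardware" clears the bit */
            return reg;
    }

    int main(void)
    {
            long sleep_us = 100000, timeout_us = 10000, waited = 0;
            int err = -ETIMEDOUT;

            do {
                    if (!(read_reg() & 0x1)) {
                            err = 0;
                            break;
                    }
                    waited += sleep_us;         /* usleep(sleep_us) in real code */
            } while (waited <= timeout_us);

            printf("err = %d\n", err);          /* -ETIMEDOUT; caller maps to -EBUSY */
            return 0;
    }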
@@ -102,7 +96,7 @@ static void show_tx_process_state(unsigned int status)
                pr_debug("- TX (Stopped): Reset or Stop command\n");
                break;
        case 1:
-               pr_debug("- TX (Running):Fetching the Tx desc\n");
+               pr_debug("- TX (Running): Fetching the Tx desc\n");
                break;
        case 2:
                pr_debug("- TX (Running): Waiting for end of tx\n");
@@ -136,7 +130,7 @@ static void show_rx_process_state(unsigned int status)
                pr_debug("- RX (Running): Fetching the Rx desc\n");
                break;
        case 2:
-               pr_debug("- RX (Running):Checking for end of pkt\n");
+               pr_debug("- RX (Running): Checking for end of pkt\n");
                break;
        case 3:
                pr_debug("- RX (Running): Waiting for Rx pkt\n");
@@ -246,7 +240,7 @@ void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
        unsigned long data;
 
        data = (addr[5] << 8) | addr[4];
-       /* For MAC Addr registers se have to set the Address Enable (AE)
+       /* For MAC Addr registers we have to set the Address Enable (AE)
         * bit that has no effect on the High Reg 0 where the bit 31 (MO)
         * is RO.
         */
@@ -261,9 +255,9 @@ void stmmac_set_mac(void __iomem *ioaddr, bool enable)
        u32 value = readl(ioaddr + MAC_CTRL_REG);
 
        if (enable)
-               value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
+               value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
        else
-               value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
+               value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
 
        writel(value, ioaddr + MAC_CTRL_REG);
 }
index f0d86321dfe22b0d4455436326c93a8af513764a..323b59ec74a35f4f318060f02d74c3b17af40d5d 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
@@ -225,7 +221,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
                        x->rx_mii++;
 
                if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
-                       x->rx_crc++;
+                       x->rx_crc_errors++;
                        stats->rx_crc_errors++;
                }
                ret = discard_frame;
index 38a1a5603293425d9758d1f2f0643aae375d584d..c037326331f5f7cbeb44a7e726283d6935c543fe 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
index ce9aa792857b0fbef5e9a07ceb138b01a96960f7..e9b04c28980f6242493686e07771cfa2eb3708b1 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
index fd78406e2e9afb198f0f0c6645e162be69ef713d..efb818ebd55e26c13b899c2180faf0ba0ed64609 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
@@ -115,7 +111,7 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
                        stats->collisions++;
                }
                if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
-                       x->rx_crc++;
+                       x->rx_crc_errors++;
                        stats->rx_crc_errors++;
                }
                ret = discard_frame;
index 9983ce9bd90de6a24b60ff396e04a403630fb9d3..452f256ff03f04bb6ee846966eb8961ebbc40c05 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
index eab04aeeeb95e3bfaacd36417f87fc8c2751dcce..cd8fb619b1e977cd2b51aa3cfbb9b242fe94510d 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
@@ -106,9 +102,6 @@ struct stmmac_priv {
        u32 msg_enable;
        int wolopts;
        int wol_irq;
-       struct clk *stmmac_clk;
-       struct clk *pclk;
-       struct reset_control *stmmac_rst;
        int clk_csr;
        struct timer_list eee_ctrl_timer;
        int lpi_irq;
@@ -120,8 +113,6 @@ struct stmmac_priv {
        struct ptp_clock *ptp_clock;
        struct ptp_clock_info ptp_clock_ops;
        unsigned int default_addend;
-       struct clk *clk_ptp_ref;
-       unsigned int clk_ptp_rate;
        u32 adv_ts;
        int use_riwt;
        int irq_wake;
index 699ee1d3042651e9e3d6a26c52def13a363583c6..5ff6bc4eb8f1cc7541433a6c70b25cfa71e458a9 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
@@ -65,7 +61,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
        STMMAC_STAT(overflow_error),
        STMMAC_STAT(ipc_csum_error),
        STMMAC_STAT(rx_collision),
-       STMMAC_STAT(rx_crc),
+       STMMAC_STAT(rx_crc_errors),
        STMMAC_STAT(dribbling_bit),
        STMMAC_STAT(rx_length),
        STMMAC_STAT(rx_mii),
@@ -446,24 +442,24 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
 
        memset(reg_space, 0x0, REG_SPACE_SIZE);
 
-       if (!(priv->plat->has_gmac || priv->plat->has_gmac4)) {
+       if (priv->plat->has_gmac || priv->plat->has_gmac4) {
                /* MAC registers */
-               for (i = 0; i < 12; i++)
+               for (i = 0; i < 55; i++)
                        reg_space[i] = readl(priv->ioaddr + (i * 4));
                /* DMA registers */
-               for (i = 0; i < 9; i++)
-                       reg_space[i + 12] =
+               for (i = 0; i < 22; i++)
+                       reg_space[i + 55] =
                            readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
-               reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR);
-               reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR);
        } else {
                /* MAC registers */
-               for (i = 0; i < 55; i++)
+               for (i = 0; i < 12; i++)
                        reg_space[i] = readl(priv->ioaddr + (i * 4));
                /* DMA registers */
-               for (i = 0; i < 22; i++)
-                       reg_space[i + 55] =
+               for (i = 0; i < 9; i++)
+                       reg_space[i + 12] =
                            readl(priv->ioaddr + (DMA_BUS_MODE + (i * 4)));
+               reg_space[22] = readl(priv->ioaddr + DMA_CUR_TX_BUF_ADDR);
+               reg_space[23] = readl(priv->ioaddr + DMA_CUR_RX_BUF_ADDR);
        }
 }
 
@@ -712,7 +708,7 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
 
 static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
 {
-       unsigned long clk = clk_get_rate(priv->stmmac_clk);
+       unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
 
        if (!clk)
                return 0;
@@ -722,7 +718,7 @@ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
 
 static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
 {
-       unsigned long clk = clk_get_rate(priv->stmmac_clk);
+       unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
 
        if (!clk)
                return 0;
index 10d6059b2f26555af9963812f847b68109b9c959..721b616552611aa74ea077e744ec9a0c4836a48f 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
index e3f6389e1b01c5bba89c9cf3b632113fa9b4db42..3cbe09682afe7a719be0a3b83a33215ed50adce9 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
@@ -158,7 +154,7 @@ static void stmmac_clk_csr_set(struct stmmac_priv *priv)
 {
        u32 clk_rate;
 
-       clk_rate = clk_get_rate(priv->stmmac_clk);
+       clk_rate = clk_get_rate(priv->plat->stmmac_clk);
 
        /* Platform provided default clk_csr would be assumed valid
         * for all other cases except for the below mentioned ones.
@@ -191,7 +187,7 @@ static void print_pkt(unsigned char *buf, int len)
 
 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
 {
-       unsigned avail;
+       u32 avail;
 
        if (priv->dirty_tx > priv->cur_tx)
                avail = priv->dirty_tx - priv->cur_tx - 1;
@@ -203,7 +199,7 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
 
 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
 {
-       unsigned dirty;
+       u32 dirty;
 
        if (priv->dirty_rx <= priv->cur_rx)
                dirty = priv->cur_rx - priv->dirty_rx;
@@ -216,7 +212,7 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
 /**
  * stmmac_hw_fix_mac_speed - callback for speed selection
  * @priv: driver private structure
- * Description: on some platforms (e.g. ST), some HW system configuraton
+ * Description: on some platforms (e.g. ST), some HW system configuration
  * registers have to be set according to the link speed negotiated.
  */
 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
@@ -239,7 +235,8 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
        /* Check and enter in LPI mode */
        if ((priv->dirty_tx == priv->cur_tx) &&
            (priv->tx_path_in_lpi_mode == false))
-               priv->hw->mac->set_eee_mode(priv->hw);
+               priv->hw->mac->set_eee_mode(priv->hw,
+                                           priv->plat->en_tx_lpi_clockgating);
 }
 
 /**
@@ -415,7 +412,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
 /**
  *  stmmac_hwtstamp_ioctl - control hardware timestamping.
  *  @dev: device pointer.
- *  @ifr: An IOCTL specefic structure, that can contain a pointer to
+ *  @ifr: An IOCTL specific structure, that can contain a pointer to
  *  a proprietary structure used to pass information to the driver.
  *  Description:
  *  This function configures the MAC to enable/disable both outgoing(TX)
@@ -606,7 +603,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 
                /* program Sub Second Increment reg */
                sec_inc = priv->hw->ptp->config_sub_second_increment(
-                       priv->ptpaddr, priv->clk_ptp_rate,
+                       priv->ptpaddr, priv->plat->clk_ptp_rate,
                        priv->plat->has_gmac4);
                temp = div_u64(1000000000ULL, sec_inc);
 
@@ -616,7 +613,7 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
                 * where, freq_div_ratio = 1e9ns/sec_inc
                 */
                temp = (u64)(temp << 32);
-               priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
+               priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
                priv->hw->ptp->config_addend(priv->ptpaddr,
                                             priv->default_addend);
 
@@ -644,18 +641,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
        if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
                return -EOPNOTSUPP;
 
-       /* Fall-back to main clock in case of no PTP ref is passed */
-       priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref");
-       if (IS_ERR(priv->clk_ptp_ref)) {
-               priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
-               priv->clk_ptp_ref = NULL;
-               netdev_dbg(priv->dev, "PTP uses main clock\n");
-       } else {
-               clk_prepare_enable(priv->clk_ptp_ref);
-               priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
-               netdev_dbg(priv->dev, "PTP rate %d\n", priv->clk_ptp_rate);
-       }
-
        priv->adv_ts = 0;
        /* Check if adv_ts can be enabled for dwmac 4.x core */
        if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
@@ -682,8 +667,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
 
 static void stmmac_release_ptp(struct stmmac_priv *priv)
 {
-       if (priv->clk_ptp_ref)
-               clk_disable_unprepare(priv->clk_ptp_ref);
+       if (priv->plat->clk_ptp_ref)
+               clk_disable_unprepare(priv->plat->clk_ptp_ref);
        stmmac_ptp_unregister(priv);
 }
 
@@ -704,7 +689,7 @@ static void stmmac_adjust_link(struct net_device *dev)
        int new_state = 0;
        unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
 
-       if (phydev == NULL)
+       if (!phydev)
                return;
 
        spin_lock_irqsave(&priv->lock, flags);
@@ -731,33 +716,36 @@ static void stmmac_adjust_link(struct net_device *dev)
                        new_state = 1;
                        switch (phydev->speed) {
                        case 1000:
-                               if (likely((priv->plat->has_gmac) ||
-                                          (priv->plat->has_gmac4)))
+                               if (priv->plat->has_gmac ||
+                                   priv->plat->has_gmac4)
                                        ctrl &= ~priv->hw->link.port;
-                               stmmac_hw_fix_mac_speed(priv);
                                break;
                        case 100:
+                               if (priv->plat->has_gmac ||
+                                   priv->plat->has_gmac4) {
+                                       ctrl |= priv->hw->link.port;
+                                       ctrl |= priv->hw->link.speed;
+                               } else {
+                                       ctrl &= ~priv->hw->link.port;
+                               }
+                               break;
                        case 10:
-                               if (likely((priv->plat->has_gmac) ||
-                                          (priv->plat->has_gmac4))) {
+                               if (priv->plat->has_gmac ||
+                                   priv->plat->has_gmac4) {
                                        ctrl |= priv->hw->link.port;
-                                       if (phydev->speed == SPEED_100) {
-                                               ctrl |= priv->hw->link.speed;
-                                       } else {
-                                               ctrl &= ~(priv->hw->link.speed);
-                                       }
+                                       ctrl &= ~(priv->hw->link.speed);
                                } else {
                                        ctrl &= ~priv->hw->link.port;
                                }
-                               stmmac_hw_fix_mac_speed(priv);
                                break;
                        default:
                                netif_warn(priv, link, priv->dev,
-                                          "Speed (%d) not 10/100\n",
-                                          phydev->speed);
+                                          "broken speed: %d\n", phydev->speed);
+                               phydev->speed = SPEED_UNKNOWN;
                                break;
                        }
-
+                       if (phydev->speed != SPEED_UNKNOWN)
+                               stmmac_hw_fix_mac_speed(priv);
                        priv->speed = phydev->speed;
                }
 
@@ -770,8 +758,8 @@ static void stmmac_adjust_link(struct net_device *dev)
        } else if (priv->oldlink) {
                new_state = 1;
                priv->oldlink = 0;
-               priv->speed = 0;
-               priv->oldduplex = -1;
+               priv->speed = SPEED_UNKNOWN;
+               priv->oldduplex = DUPLEX_UNKNOWN;
        }
 
        if (new_state && netif_msg_link(priv))
@@ -833,8 +821,8 @@ static int stmmac_init_phy(struct net_device *dev)
        int interface = priv->plat->interface;
        int max_speed = priv->plat->max_speed;
        priv->oldlink = 0;
-       priv->speed = 0;
-       priv->oldduplex = -1;
+       priv->speed = SPEED_UNKNOWN;
+       priv->oldduplex = DUPLEX_UNKNOWN;
 
        if (priv->plat->phy_node) {
                phydev = of_phy_connect(dev, priv->plat->phy_node,
@@ -886,9 +874,7 @@ static int stmmac_init_phy(struct net_device *dev)
        if (phydev->is_pseudo_fixed_link)
                phydev->irq = PHY_POLL;
 
-       netdev_dbg(priv->dev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
-                  __func__, phydev->phy_id, phydev->link);
-
+       phy_attached_info(phydev);
        return 0;
 }
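
phy_attached_info() is the phylib helper that replaces the hand-rolled debug line above: it logs the attached PHY's driver, id and IRQ mode in one standard format. A minimal sketch of the connect-then-report pattern, assuming a hypothetical bus id string and my_adjust_link callback (neither taken from this driver):

#include <linux/phy.h>

static void my_adjust_link(struct net_device *dev)
{
        /* hypothetical link-change hook */
}

static int my_init_phy(struct net_device *dev)
{
        struct phy_device *phydev;

        phydev = phy_connect(dev, "stmmac-0:01", my_adjust_link,
                             PHY_INTERFACE_MODE_RGMII);
        if (IS_ERR(phydev))
                return PTR_ERR(phydev);

        phy_attached_info(phydev);      /* one standard attach line */
        return 0;
}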
 
@@ -1014,7 +1000,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
  * @dev: net device structure
  * @flags: gfp flag.
  * Description: this function initializes the DMA RX/TX descriptors
- * and allocates the socket buffers. It suppors the chained and ring
+ * and allocates the socket buffers. It supports the chained and ring
  * modes.
  */
 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
@@ -1127,13 +1113,6 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
        int i;
 
        for (i = 0; i < DMA_TX_SIZE; i++) {
-               struct dma_desc *p;
-
-               if (priv->extend_desc)
-                       p = &((priv->dma_etx + i)->basic);
-               else
-                       p = priv->dma_tx + i;
-
                if (priv->tx_skbuff_dma[i].buf) {
                        if (priv->tx_skbuff_dma[i].map_as_page)
                                dma_unmap_page(priv->device,
@@ -1147,7 +1126,7 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
                                                 DMA_TO_DEVICE);
                }
 
-               if (priv->tx_skbuff[i] != NULL) {
+               if (priv->tx_skbuff[i]) {
                        dev_kfree_skb_any(priv->tx_skbuff[i]);
                        priv->tx_skbuff[i] = NULL;
                        priv->tx_skbuff_dma[i].buf = 0;
@@ -1270,6 +1249,28 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
        kfree(priv->tx_skbuff);
 }
 
+/**
+ *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
+ *  @priv: driver private structure
+ *  Description: It is used for enabling the rx queues in the MAC
+ */
+static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
+{
+       int rx_count = priv->dma_cap.number_rx_queues;
+       int queue = 0;
+
+       /* If GMAC does not have multiple queues, then this is not necessary */
+       if (rx_count == 1)
+               return;
+
+       /*
+        *  If the core is synthesized with multiple rx queues / multiple
+        *  dma channels, then rx queues will be disabled by default.
+        *  For now only rx queue 0 is enabled.
+        */
+       priv->hw->mac->rx_queue_enable(priv->hw, queue);
+}
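
The caller (see the stmmac_hw_setup hunk below) only invokes this helper when the ops table actually provides rx_queue_enable, since older cores have no per-queue enable. A minimal sketch of that optional-callback guard, with my_hw and my_mac_ops as illustrative stand-ins for the driver's structures:

struct my_hw;

struct my_mac_ops {
        /* may be NULL on cores without MTL queues */
        void (*rx_queue_enable)(struct my_hw *hw, unsigned int queue);
};

struct my_hw {
        const struct my_mac_ops *mac;
};

static void my_enable_rx_queue0(struct my_hw *hw)
{
        if (hw->mac->rx_queue_enable)           /* absent on older cores */
                hw->mac->rx_queue_enable(hw, 0);
}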
+
 /**
  *  stmmac_dma_operation_mode - HW DMA operation mode
  *  @priv: driver private structure
@@ -1671,10 +1672,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
        /* Copy the MAC addr into the HW  */
        priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
 
-       /* If required, perform hw setup of the bus. */
-       if (priv->plat->bus_setup)
-               priv->plat->bus_setup(priv->ioaddr);
-
        /* PS and related bits will be programmed according to the speed */
        if (priv->hw->pcs) {
                int speed = priv->plat->mac_port_sel_speed;
@@ -1691,6 +1688,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
        /* Initialize the MAC Core */
        priv->hw->mac->core_init(priv->hw, dev->mtu);
 
+       /* Initialize MAC RX Queues */
+       if (priv->hw->mac->rx_queue_enable)
+               stmmac_mac_enable_rx_queues(priv);
+
        ret = priv->hw->mac->rx_ipc(priv->hw);
        if (!ret) {
                netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
@@ -1711,8 +1712,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 
        if (init_ptp) {
                ret = stmmac_init_ptp(priv);
-               if (ret)
-                       netdev_warn(priv->dev, "fail to init PTP.\n");
+               if (ret == -EOPNOTSUPP)
+                       netdev_warn(priv->dev, "PTP not supported by HW\n");
+               else if (ret)
+                       netdev_warn(priv->dev, "PTP init failed\n");
        }
 
 #ifdef CONFIG_DEBUG_FS
@@ -2519,7 +2522,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
                if (unlikely(status == discard_frame)) {
                        priv->dev->stats.rx_errors++;
                        if (priv->hwts_rx_en && !priv->extend_desc) {
-                               /* DESC2 & DESC3 will be overwitten by device
+                               /* DESC2 & DESC3 will be overwritten by device
                                 * with timestamp value, hence reinitialize
                                 * them in stmmac_rx_refill() function so that
                                 * device can reuse it.
@@ -2542,7 +2545,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 
                        frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
 
-                       /*  If frame length is greather than skb buffer size
+                       /*  If frame length is greater than skb buffer size
                         *  (preallocated during init) then the packet is
                         *  ignored
                         */
@@ -2669,7 +2672,7 @@ static int stmmac_poll(struct napi_struct *napi, int budget)
 
        work_done = stmmac_rx(priv, budget);
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                stmmac_enable_dma_irq(priv);
        }
        return work_done;
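
napi_complete_done() hands the amount of work actually done back to the NAPI core, which can use it for GRO flushing and busy-poll heuristics; the older napi_complete() reports nothing. The generic shape of a poll handler after this conversion, with my_process_rx() and my_enable_rx_irq() as hypothetical helpers:

#include <linux/netdevice.h>

static int my_process_rx(struct napi_struct *napi, int budget);  /* hypothetical */
static void my_enable_rx_irq(struct napi_struct *napi);          /* hypothetical */

static int my_poll(struct napi_struct *napi, int budget)
{
        int work_done = my_process_rx(napi, budget);

        if (work_done < budget) {
                /* report the real count so the core can tune itself */
                napi_complete_done(napi, work_done);
                my_enable_rx_irq(napi);
        }
        return work_done;       /* == budget keeps us on the poll list */
}
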
@@ -2748,7 +2751,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
        /* Some GMAC devices have a bugged Jumbo frame support that
         * needs to have the Tx COE disabled for oversized frames
         * (due to limited buffer sizes). In this case we disable
-        * the TX csum insertionin the TDES and not use SF.
+        * the TX csum insertion in the TDES and not use SF.
         */
        if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
                features &= ~NETIF_F_CSUM_MASK;
@@ -2894,9 +2897,7 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
        struct dma_desc *p = (struct dma_desc *)head;
 
        for (i = 0; i < size; i++) {
-               u64 x;
                if (extend_desc) {
-                       x = *(u64 *) ep;
                        seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
                                   i, (unsigned int)virt_to_phys(ep),
                                   le32_to_cpu(ep->basic.des0),
@@ -2905,7 +2906,6 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
                                   le32_to_cpu(ep->basic.des3));
                        ep++;
                } else {
-                       x = *(u64 *) p;
                        seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
                                   i, (unsigned int)virt_to_phys(ep),
                                   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
@@ -2975,7 +2975,7 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
                   (priv->dma_cap.hash_filter) ? "Y" : "N");
        seq_printf(seq, "\tMultiple MAC address registers: %s\n",
                   (priv->dma_cap.multi_addr) ? "Y" : "N");
-       seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfatces): %s\n",
+       seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
                   (priv->dma_cap.pcs) ? "Y" : "N");
        seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
                   (priv->dma_cap.sma_mdio) ? "Y" : "N");
@@ -3251,44 +3251,8 @@ int stmmac_dvr_probe(struct device *device,
        if ((phyaddr >= 0) && (phyaddr <= 31))
                priv->plat->phy_addr = phyaddr;
 
-       priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
-       if (IS_ERR(priv->stmmac_clk)) {
-               netdev_warn(priv->dev, "%s: warning: cannot get CSR clock\n",
-                           __func__);
-               /* If failed to obtain stmmac_clk and specific clk_csr value
-                * is NOT passed from the platform, probe fail.
-                */
-               if (!priv->plat->clk_csr) {
-                       ret = PTR_ERR(priv->stmmac_clk);
-                       goto error_clk_get;
-               } else {
-                       priv->stmmac_clk = NULL;
-               }
-       }
-       clk_prepare_enable(priv->stmmac_clk);
-
-       priv->pclk = devm_clk_get(priv->device, "pclk");
-       if (IS_ERR(priv->pclk)) {
-               if (PTR_ERR(priv->pclk) == -EPROBE_DEFER) {
-                       ret = -EPROBE_DEFER;
-                       goto error_pclk_get;
-               }
-               priv->pclk = NULL;
-       }
-       clk_prepare_enable(priv->pclk);
-
-       priv->stmmac_rst = devm_reset_control_get(priv->device,
-                                                 STMMAC_RESOURCE_NAME);
-       if (IS_ERR(priv->stmmac_rst)) {
-               if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) {
-                       ret = -EPROBE_DEFER;
-                       goto error_hw_init;
-               }
-               dev_info(priv->device, "no reset control found\n");
-               priv->stmmac_rst = NULL;
-       }
-       if (priv->stmmac_rst)
-               reset_control_deassert(priv->stmmac_rst);
+       if (priv->plat->stmmac_rst)
+               reset_control_deassert(priv->plat->stmmac_rst);
 
        /* Init MAC and get the capabilities */
        ret = stmmac_hw_init(priv);
@@ -3391,10 +3355,6 @@ error_netdev_register:
 error_mdio_register:
        netif_napi_del(&priv->napi);
 error_hw_init:
-       clk_disable_unprepare(priv->pclk);
-error_pclk_get:
-       clk_disable_unprepare(priv->stmmac_clk);
-error_clk_get:
        free_netdev(ndev);
 
        return ret;
@@ -3420,10 +3380,10 @@ int stmmac_dvr_remove(struct device *dev)
        stmmac_set_mac(priv->ioaddr, false);
        netif_carrier_off(ndev);
        unregister_netdev(ndev);
-       if (priv->stmmac_rst)
-               reset_control_assert(priv->stmmac_rst);
-       clk_disable_unprepare(priv->pclk);
-       clk_disable_unprepare(priv->stmmac_clk);
+       if (priv->plat->stmmac_rst)
+               reset_control_assert(priv->plat->stmmac_rst);
+       clk_disable_unprepare(priv->plat->pclk);
+       clk_disable_unprepare(priv->plat->stmmac_clk);
        if (priv->hw->pcs != STMMAC_PCS_RGMII &&
            priv->hw->pcs != STMMAC_PCS_TBI &&
            priv->hw->pcs != STMMAC_PCS_RTBI)
@@ -3472,14 +3432,14 @@ int stmmac_suspend(struct device *dev)
                stmmac_set_mac(priv->ioaddr, false);
                pinctrl_pm_select_sleep_state(priv->device);
                /* Disable clock in case of PWM is off */
-               clk_disable(priv->pclk);
-               clk_disable(priv->stmmac_clk);
+               clk_disable(priv->plat->pclk);
+               clk_disable(priv->plat->stmmac_clk);
        }
        spin_unlock_irqrestore(&priv->lock, flags);
 
        priv->oldlink = 0;
-       priv->speed = 0;
-       priv->oldduplex = -1;
+       priv->speed = SPEED_UNKNOWN;
+       priv->oldduplex = DUPLEX_UNKNOWN;
        return 0;
 }
 EXPORT_SYMBOL_GPL(stmmac_suspend);
@@ -3512,9 +3472,9 @@ int stmmac_resume(struct device *dev)
                priv->irq_wake = 0;
        } else {
                pinctrl_pm_select_default_state(priv->device);
-               /* enable the clk prevously disabled */
-               clk_enable(priv->stmmac_clk);
-               clk_enable(priv->pclk);
+               /* enable the clk previously disabled */
+               clk_enable(priv->plat->stmmac_clk);
+               clk_enable(priv->plat->pclk);
                /* reset the phy so that it's ready */
                if (priv->mii)
                        stmmac_mdio_reset(priv->mii);
index b0344c21375292fe3489a4872400be6535785491..db157a47000c65fb588d3f1c5dfe21bbd20983fb 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
   Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
+#include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 #include <linux/of_mdio.h>
-#include <asm/io.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
 
 #include "stmmac.h"
 
 #define MII_GMAC4_WRITE                        (1 << MII_GMAC4_GOC_SHIFT)
 #define MII_GMAC4_READ                 (3 << MII_GMAC4_GOC_SHIFT)
 
-static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
-{
-       unsigned long curr;
-       unsigned long finish = jiffies + 3 * HZ;
-
-       do {
-               curr = jiffies;
-               if (readl(ioaddr + mii_addr) & MII_BUSY)
-                       cpu_relax();
-               else
-                       return 0;
-       } while (!time_after_eq(curr, finish));
-
-       return -EBUSY;
-}
-
 /**
  * stmmac_mdio_read
  * @bus: points to the mii_bus structure
@@ -74,7 +55,7 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
        struct stmmac_priv *priv = netdev_priv(ndev);
        unsigned int mii_address = priv->hw->mii.addr;
        unsigned int mii_data = priv->hw->mii.data;
-
+       u32 v;
        int data;
        u32 value = MII_BUSY;
 
@@ -86,12 +67,14 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
        if (priv->plat->has_gmac4)
                value |= MII_GMAC4_READ;
 
-       if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+       if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+                              100, 10000))
                return -EBUSY;
 
        writel(value, priv->ioaddr + mii_address);
 
-       if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+       if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+                              100, 10000))
                return -EBUSY;
 
        /* Read the data from the MII data register */
@@ -115,7 +98,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
        struct stmmac_priv *priv = netdev_priv(ndev);
        unsigned int mii_address = priv->hw->mii.addr;
        unsigned int mii_data = priv->hw->mii.data;
-
+       u32 v;
        u32 value = MII_BUSY;
 
        value |= (phyaddr << priv->hw->mii.addr_shift)
@@ -130,7 +113,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
                value |= MII_WRITE;
 
        /* Wait until any existing MII operation is complete */
-       if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+       if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+                              100, 10000))
                return -EBUSY;
 
        /* Set the MII address register to write */
@@ -138,7 +122,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
        writel(value, priv->ioaddr + mii_address);
 
        /* Wait until any existing MII operation is complete */
-       return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
+       return readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+                                 100, 10000);
 }
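
readl_poll_timeout() from <linux/iopoll.h> re-reads a register until the given condition holds or the timeout expires, returning 0 on success and -ETIMEDOUT otherwise; the 100/10000 arguments above mean a roughly 100 us poll interval with a 10 ms budget, far tighter than the 3 s jiffies loop it replaces. A minimal sketch of the idiom, with MY_MII_BUSY standing in for this driver's busy flag:

#include <linux/io.h>
#include <linux/iopoll.h>

#define MY_MII_BUSY 0x00000001          /* illustrative busy bit */

static int my_mdio_wait_idle(void __iomem *base, unsigned int reg)
{
        u32 v;

        /* poll every ~100 us, give up after 10 ms with -ETIMEDOUT */
        return readl_poll_timeout(base + reg, v, !(v & MY_MII_BUSY),
                                  100, 10000);
}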
 
 /**
@@ -156,9 +141,9 @@ int stmmac_mdio_reset(struct mii_bus *bus)
 
 #ifdef CONFIG_OF
        if (priv->device->of_node) {
-
                if (data->reset_gpio < 0) {
                        struct device_node *np = priv->device->of_node;
+
                        if (!np)
                                return 0;
 
@@ -198,7 +183,7 @@ int stmmac_mdio_reset(struct mii_bus *bus)
 
        /* This is a workaround for problems with the STE101P PHY.
         * It doesn't complete its reset until at least one clock cycle
-        * on MDC, so perform a dummy mdio read. To be upadted for GMAC4
+        * on MDC, so perform a dummy mdio read. To be updated for GMAC4
         * if needed.
         */
        if (!priv->plat->has_gmac4)
@@ -225,7 +210,7 @@ int stmmac_mdio_register(struct net_device *ndev)
                return 0;
 
        new_bus = mdiobus_alloc();
-       if (new_bus == NULL)
+       if (!new_bus)
                return -ENOMEM;
 
        if (mdio_bus_data->irqs)
@@ -262,49 +247,48 @@ int stmmac_mdio_register(struct net_device *ndev)
        found = 0;
        for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
                struct phy_device *phydev = mdiobus_get_phy(new_bus, addr);
-               if (phydev) {
-                       int act = 0;
-                       char irq_num[4];
-                       char *irq_str;
-
-                       /*
-                        * If an IRQ was provided to be assigned after
-                        * the bus probe, do it here.
-                        */
-                       if ((mdio_bus_data->irqs == NULL) &&
-                           (mdio_bus_data->probed_phy_irq > 0)) {
-                               new_bus->irq[addr] =
-                                       mdio_bus_data->probed_phy_irq;
-                               phydev->irq = mdio_bus_data->probed_phy_irq;
-                       }
-
-                       /*
-                        * If we're going to bind the MAC to this PHY bus,
-                        * and no PHY number was provided to the MAC,
-                        * use the one probed here.
-                        */
-                       if (priv->plat->phy_addr == -1)
-                               priv->plat->phy_addr = addr;
-
-                       act = (priv->plat->phy_addr == addr);
-                       switch (phydev->irq) {
-                       case PHY_POLL:
-                               irq_str = "POLL";
-                               break;
-                       case PHY_IGNORE_INTERRUPT:
-                               irq_str = "IGNORE";
-                               break;
-                       default:
-                               sprintf(irq_num, "%d", phydev->irq);
-                               irq_str = irq_num;
-                               break;
-                       }
-                       netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
-                                   phydev->phy_id, addr,
-                                   irq_str, phydev_name(phydev),
-                                   act ? " active" : "");
-                       found = 1;
+               int act = 0;
+               char irq_num[4];
+               char *irq_str;
+
+               if (!phydev)
+                       continue;
+
+               /*
+                * If an IRQ was provided to be assigned after
+                * the bus probe, do it here.
+                */
+               if (!mdio_bus_data->irqs &&
+                   (mdio_bus_data->probed_phy_irq > 0)) {
+                       new_bus->irq[addr] = mdio_bus_data->probed_phy_irq;
+                       phydev->irq = mdio_bus_data->probed_phy_irq;
+               }
+
+               /*
+                * If we're going to bind the MAC to this PHY bus,
+                * and no PHY number was provided to the MAC,
+                * use the one probed here.
+                */
+               if (priv->plat->phy_addr == -1)
+                       priv->plat->phy_addr = addr;
+
+               act = (priv->plat->phy_addr == addr);
+               switch (phydev->irq) {
+               case PHY_POLL:
+                       irq_str = "POLL";
+                       break;
+               case PHY_IGNORE_INTERRUPT:
+                       irq_str = "IGNORE";
+                       break;
+               default:
+                       sprintf(irq_num, "%d", phydev->irq);
+                       irq_str = irq_num;
+                       break;
                }
+               netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
+                           phydev->phy_id, addr, irq_str, phydev_name(phydev),
+                           act ? " active" : "");
+               found = 1;
        }
 
        if (!found && !mdio_node) {
index 3da4737620cb3fbf50e16c2cf61de3dbfd3e2355..5c9e462276b9cbc25a8b5c2748988286fe17884f 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
index 36942f5a6a53adfc20bd254b0045fad51e398c12..433a84239a687bab4ff0572978d7c0eaf849cb46 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
@@ -121,7 +117,6 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
        axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
        axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
        axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
-       axi->axi_axi_all = of_property_read_bool(np, "snps,axi_all");
        axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
        axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
        axi->axi_rb =  of_property_read_bool(np, "snps,axi_rb");
@@ -181,10 +176,19 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
                mdio = false;
        }
 
-       /* If snps,dwmac-mdio is passed from DT, always register the MDIO */
-       for_each_child_of_node(np, plat->mdio_node) {
-               if (of_device_is_compatible(plat->mdio_node, "snps,dwmac-mdio"))
-                       break;
+       /* exception for dwmac-dwc-qos-eth glue logic */
+       if (of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) {
+               plat->mdio_node = of_get_child_by_name(np, "mdio");
+       } else {
+               /*
+                * If snps,dwmac-mdio is passed from DT, always register
+                * the MDIO
+                */
+               for_each_child_of_node(np, plat->mdio_node) {
+                       if (of_device_is_compatible(plat->mdio_node,
+                                                   "snps,dwmac-mdio"))
+                               break;
+               }
        }
 
        if (plat->mdio_node) {
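
The two lookups contrasted above match on different things: of_get_child_by_name() selects a child by node name (the mdio subnode of the dwc-qos binding), while the for_each_child_of_node() loop matches children by compatible string. A sketch combining both, where find_mdio_node() is a hypothetical helper:

#include <linux/of.h>

static struct device_node *find_mdio_node(struct device_node *np)
{
        struct device_node *child;

        /* exact node-name match, e.g. an "mdio { ... };" subnode */
        child = of_get_child_by_name(np, "mdio");
        if (child)
                return child;

        /* otherwise scan the children for a compatible property */
        for_each_child_of_node(np, child)
                if (of_device_is_compatible(child, "snps,dwmac-mdio"))
                        return child;

        return NULL;
}
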
@@ -249,6 +253,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
        plat->force_sf_dma_mode =
                of_property_read_bool(np, "snps,force_sf_dma_mode");
 
+       plat->en_tx_lpi_clockgating =
+               of_property_read_bool(np, "snps,en-tx-lpi-clockgating");
+
        /* Set the maxmtu to a default of JUMBO_LEN in case the
         * parameter is not present in the device tree.
         */
@@ -333,7 +340,54 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 
        plat->axi = stmmac_axi_setup(pdev);
 
+       /* clock setup */
+       plat->stmmac_clk = devm_clk_get(&pdev->dev,
+                                       STMMAC_RESOURCE_NAME);
+       if (IS_ERR(plat->stmmac_clk)) {
+               dev_warn(&pdev->dev, "Cannot get CSR clock\n");
+               plat->stmmac_clk = NULL;
+       }
+       clk_prepare_enable(plat->stmmac_clk);
+
+       plat->pclk = devm_clk_get(&pdev->dev, "pclk");
+       if (IS_ERR(plat->pclk)) {
+               if (PTR_ERR(plat->pclk) == -EPROBE_DEFER)
+                       goto error_pclk_get;
+
+               plat->pclk = NULL;
+       }
+       clk_prepare_enable(plat->pclk);
+
+       /* Fall back to the main clock if no PTP ref clock is passed */
+       plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref");
+       if (IS_ERR(plat->clk_ptp_ref)) {
+               plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
+               plat->clk_ptp_ref = NULL;
+               dev_warn(&pdev->dev, "PTP uses main clock\n");
+       } else {
+               clk_prepare_enable(plat->clk_ptp_ref);
+               plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
+               dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
+       }
+
+       plat->stmmac_rst = devm_reset_control_get(&pdev->dev,
+                                                 STMMAC_RESOURCE_NAME);
+       if (IS_ERR(plat->stmmac_rst)) {
+               if (PTR_ERR(plat->stmmac_rst) == -EPROBE_DEFER)
+                       goto error_hw_init;
+
+               dev_info(&pdev->dev, "no reset control found\n");
+               plat->stmmac_rst = NULL;
+       }
+
        return plat;
+
+error_hw_init:
+       clk_disable_unprepare(plat->pclk);
+error_pclk_get:
+       clk_disable_unprepare(plat->stmmac_clk);
+
+       return ERR_PTR(-EPROBE_DEFER);
 }
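
The clock handling above treats missing clocks as optional, falling back to NULL (clk_prepare_enable(NULL) is a no-op), while still propagating -EPROBE_DEFER so the probe is retried once the clock provider shows up. A minimal sketch of the pattern, with "myclk" as a hypothetical clock name:

#include <linux/clk.h>
#include <linux/err.h>

static int my_get_optional_clk(struct device *dev, struct clk **out)
{
        struct clk *clk = devm_clk_get(dev, "myclk");

        if (IS_ERR(clk)) {
                if (PTR_ERR(clk) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;   /* provider not ready yet */
                clk = NULL;                     /* genuinely absent: skip */
        }
        *out = clk;
        return clk_prepare_enable(clk);         /* no-op when clk is NULL */
}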
 
 /**
@@ -357,7 +411,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev,
 struct plat_stmmacenet_data *
 stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 {
-       return ERR_PTR(-ENOSYS);
+       return ERR_PTR(-EINVAL);
 }
 
 void stmmac_remove_config_dt(struct platform_device *pdev,
index 3eb281d1db08a94ff76a3d3d21df367966a036d6..d71bd80c5b5b63a60335e371db2c38093acc0fda 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
index c06938c47af5549658c19e9c64a568595d80510a..48fb72fc423c9f7c29aa713376867c90c32932af 100644 (file)
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.
 
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
 
index a4b40e3015e57648b47740ecae7ecba1e885c4ba..b2caf5132bd2b2bd74e1895a0a80ddf25321278f 100644 (file)
@@ -70,19 +70,23 @@ config CASSINI
          <http://docs.oracle.com/cd/E19113-01/giga.ether.pci/817-4341-10/817-4341-10.pdf>.
 
 config SUNVNET_COMMON
-       bool
+       tristate "Common routines to support Sun Virtual Networking"
        depends on SUN_LDOMS
-       default y if SUN_LDOMS
+       default m
 
 config SUNVNET
        tristate "Sun Virtual Network support"
+       default m
        depends on SUN_LDOMS
+       depends on SUNVNET_COMMON
        ---help---
          Support for virtual network devices under Sun Logical Domains.
 
 config LDMVSW
        tristate "Sun4v LDoms Virtual Switch support"
+       default m
        depends on SUN_LDOMS
+       depends on SUNVNET_COMMON
        ---help---
          Support for virtual switch devices under Sun4v Logical Domains.
          This driver adds a network interface for every vsw-port node
index 335b8766063807a42ae9a9cb0b82aa7ea396a809..89952deae47fc813b66c6d856f16dd648a803a0c 100644 (file)
 static u8 vsw_port_hwaddr[ETH_ALEN] = {0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
 
 #define DRV_MODULE_NAME                "ldmvsw"
-#define DRV_MODULE_VERSION     "1.0"
-#define DRV_MODULE_RELDATE     "Jan 15, 2016"
+#define DRV_MODULE_VERSION     "1.1"
+#define DRV_MODULE_RELDATE     "February 3, 2017"
 
 static char version[] =
-       DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+       DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
 MODULE_AUTHOR("Oracle");
 MODULE_DESCRIPTION("Sun4v LDOM Virtual Switch Driver");
 MODULE_LICENSE("GPL");
@@ -234,8 +234,7 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[],
        dev->ethtool_ops = &vsw_ethtool_ops;
        dev->watchdog_timeo = VSW_TX_TIMEOUT;
 
-       dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE |
-                          NETIF_F_HW_CSUM | NETIF_F_SG;
+       dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG;
        dev->features = dev->hw_features;
 
        /* MTU range: 68 - 65535 */
@@ -259,11 +258,6 @@ static struct vio_driver_ops vsw_vio_ops = {
        .handshake_complete     = sunvnet_handshake_complete_common,
 };
 
-static void print_version(void)
-{
-       printk_once(KERN_INFO "%s", version);
-}
-
 static const char *remote_macaddr_prop = "remote-mac-address";
 static const char *id_prop = "id";
 
@@ -279,8 +273,6 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        const u64 *port_id;
        u64 handle;
 
-       print_version();
-
        hp = mdesc_grab();
 
        rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len);
@@ -327,7 +319,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        port->vp = vp;
        port->dev = dev;
        port->switch_port = 1;
-       port->tso = true;
+       port->tso = false; /* no tso in vsw, misbehaves in bridge */
        port->tsolen = 0;
 
        /* Mark the port as belonging to ldmvsw which directs the
@@ -457,6 +449,7 @@ static struct vio_driver vsw_port_driver = {
 
 static int __init vsw_init(void)
 {
+       pr_info("%s\n", version);
        return vio_register_driver(&vsw_port_driver);
 }
 
index f90d1af6d390654cec17a52572191df4ddc4b4ba..57978056b3366f0ecb0410f96dbfbde228ea44f1 100644 (file)
@@ -3786,7 +3786,7 @@ static int niu_poll(struct napi_struct *napi, int budget)
        work_done = niu_poll_core(np, lp, budget);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                niu_ldg_rearm(np, lp, 1);
        }
        return work_done;
@@ -6294,8 +6294,8 @@ no_rings:
        stats->tx_errors = errors;
 }
 
-static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
-                                              struct rtnl_link_stats64 *stats)
+static void niu_get_stats(struct net_device *dev,
+                         struct rtnl_link_stats64 *stats)
 {
        struct niu *np = netdev_priv(dev);
 
@@ -6303,8 +6303,6 @@ static struct rtnl_link_stats64 *niu_get_stats(struct net_device *dev,
                niu_get_rx_stats(np, stats);
                niu_get_tx_stats(np, stats);
        }
-
-       return stats;
 }
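
This hunk follows the tree-wide change of .ndo_get_stats64 from returning struct rtnl_link_stats64 * to void: every driver simply returned the buffer it was handed, so the return value carried no information. The new shape of such a hook, with a hypothetical private-counter layout:

#include <linux/netdevice.h>

struct my_priv { u64 rx_packets, tx_packets; }; /* hypothetical counters */

static void my_get_stats64(struct net_device *dev,
                           struct rtnl_link_stats64 *stats)
{
        struct my_priv *p = netdev_priv(dev);

        stats->rx_packets = p->rx_packets;
        stats->tx_packets = p->tx_packets;
        /* no return: the core owns *stats and reads it directly */
}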
 
 static void niu_load_hash_xmac(struct niu *np, u16 *hash)
index d277e410797694f3e8dd8cd40430cd23e1148b17..5c5952e782cd223c0aee844d414ee6c749c35804 100644 (file)
@@ -922,7 +922,7 @@ static int gem_poll(struct napi_struct *napi, int budget)
                gp->status = readl(gp->regs + GREG_STAT);
        } while (gp->status & GREG_STAT_NAPI);
 
-       napi_complete(napi);
+       napi_complete_done(napi, work_done);
        gem_enable_ints(gp);
 
        return work_done;
index 5356a7074796caafe0ce30284dc859ab75a61e05..4cc2571f71c6b65a076071789cef36f537bc0ddd 100644 (file)
 #define VNET_TX_TIMEOUT                        (5 * HZ)
 
 #define DRV_MODULE_NAME                "sunvnet"
-#define DRV_MODULE_VERSION     "1.0"
-#define DRV_MODULE_RELDATE     "June 25, 2007"
+#define DRV_MODULE_VERSION     "2.0"
+#define DRV_MODULE_RELDATE     "February 3, 2017"
 
 static char version[] =
-       DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+       DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
 MODULE_DESCRIPTION("Sun LDOM virtual network driver");
 MODULE_LICENSE("GPL");
@@ -303,11 +303,6 @@ static struct vio_driver_ops vnet_vio_ops = {
        .handshake_complete     = sunvnet_handshake_complete_common,
 };
 
-static void print_version(void)
-{
-       printk_once(KERN_INFO "%s", version);
-}
-
 const char *remote_macaddr_prop = "remote-mac-address";
 
 static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
@@ -319,8 +314,6 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        const u64 *rmac;
        int len, i, err, switch_port;
 
-       print_version();
-
        hp = mdesc_grab();
 
        vp = vnet_find_parent(hp, vdev->mp, vdev);
@@ -446,6 +439,7 @@ static struct vio_driver vnet_port_driver = {
 
 static int __init vnet_init(void)
 {
+       pr_info("%s\n", version);
        return vio_register_driver(&vnet_port_driver);
 }
 
index 8878b75d68b4ddec99fb014e0e51cd91081380d2..fa2d11ca9b81e49d84a0b66dbe262cd3977c0560 100644 (file)
  */
 #define        VNET_MAX_RETRIES        10
 
+MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
+MODULE_DESCRIPTION("Sun LDOM virtual network support library");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.1");
+
 static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
 static void vnet_port_reset(struct vnet_port *port);
 
@@ -181,6 +186,7 @@ static int handle_attr_info(struct vio_driver_state *vio,
        } else {
                pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
                pkt->ipv4_lso_maxlen = 0;
+               port->tsolen = 0;
        }
 
        /* for version >= 1.6, ACK packet mode we support */
@@ -714,12 +720,8 @@ static void maybe_tx_wakeup(struct vnet_port *port)
        txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
                                  port->q_index);
        __netif_tx_lock(txq, smp_processor_id());
-       if (likely(netif_tx_queue_stopped(txq))) {
-               struct vio_dring_state *dr;
-
-               dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+       if (likely(netif_tx_queue_stopped(txq)))
                netif_tx_wake_queue(txq);
-       }
        __netif_tx_unlock(txq);
 }
 
@@ -737,41 +739,37 @@ static int vnet_event_napi(struct vnet_port *port, int budget)
        struct vio_driver_state *vio = &port->vio;
        int tx_wakeup, err;
        int npkts = 0;
-       int event = (port->rx_event & LDC_EVENT_RESET);
-
-ldc_ctrl:
-       if (unlikely(event == LDC_EVENT_RESET ||
-                    event == LDC_EVENT_UP)) {
-               vio_link_state_change(vio, event);
-
-               if (event == LDC_EVENT_RESET) {
-                       vnet_port_reset(port);
-                       vio_port_up(vio);
-
-                       /* If the device is running but its tx queue was
-                        * stopped (due to flow control), restart it.
-                        * This is necessary since vnet_port_reset()
-                        * clears the tx drings and thus we may never get
-                        * back a VIO_TYPE_DATA ACK packet - which is
-                        * the normal mechanism to restart the tx queue.
-                        */
-                       if (netif_running(dev))
-                               maybe_tx_wakeup(port);
-               }
+
+       /* we don't expect any other bits */
+       BUG_ON(port->rx_event & ~(LDC_EVENT_DATA_READY |
+                                 LDC_EVENT_RESET |
+                                 LDC_EVENT_UP));
+
+       /* RESET takes precedence over any other event */
+       if (port->rx_event & LDC_EVENT_RESET) {
+               vio_link_state_change(vio, LDC_EVENT_RESET);
+               vnet_port_reset(port);
+               vio_port_up(vio);
+
+               /* If the device is running but its tx queue was
+                * stopped (due to flow control), restart it.
+                * This is necessary since vnet_port_reset()
+                * clears the tx drings and thus we may never get
+                * back a VIO_TYPE_DATA ACK packet - which is
+                * the normal mechanism to restart the tx queue.
+                */
+               if (netif_running(dev))
+                       maybe_tx_wakeup(port);
+
                port->rx_event = 0;
                return 0;
        }
-       /* We may have multiple LDC events in rx_event. Unroll send_events() */
-       event = (port->rx_event & LDC_EVENT_UP);
-       port->rx_event &= ~(LDC_EVENT_RESET | LDC_EVENT_UP);
-       if (event == LDC_EVENT_UP)
-               goto ldc_ctrl;
-       event = port->rx_event;
-       if (!(event & LDC_EVENT_DATA_READY))
-               return 0;
 
-       /* we dont expect any other bits than RESET, UP, DATA_READY */
-       BUG_ON(event != LDC_EVENT_DATA_READY);
+       if (port->rx_event & LDC_EVENT_UP) {
+               vio_link_state_change(vio, LDC_EVENT_UP);
+               port->rx_event = 0;
+               return 0;
+       }
 
        err = 0;
        tx_wakeup = 0;
@@ -794,25 +792,25 @@ ldc_ctrl:
                        pkt->start_idx = vio_dring_next(dr,
                                                        port->napi_stop_idx);
                        pkt->end_idx = -1;
-                       goto napi_resume;
-               }
-               err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
-               if (unlikely(err < 0)) {
-                       if (err == -ECONNRESET)
-                               vio_conn_reset(vio);
-                       break;
+               } else {
+                       err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
+                       if (unlikely(err < 0)) {
+                               if (err == -ECONNRESET)
+                                       vio_conn_reset(vio);
+                               break;
+                       }
+                       if (err == 0)
+                               break;
+                       viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
+                              msgbuf.tag.type,
+                              msgbuf.tag.stype,
+                              msgbuf.tag.stype_env,
+                              msgbuf.tag.sid);
+                       err = vio_validate_sid(vio, &msgbuf.tag);
+                       if (err < 0)
+                               break;
                }
-               if (err == 0)
-                       break;
-               viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
-                      msgbuf.tag.type,
-                      msgbuf.tag.stype,
-                      msgbuf.tag.stype_env,
-                      msgbuf.tag.sid);
-               err = vio_validate_sid(vio, &msgbuf.tag);
-               if (err < 0)
-                       break;
-napi_resume:
+
                if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
                        if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
                                if (!sunvnet_port_is_up_common(port)) {
@@ -860,7 +858,7 @@ int sunvnet_poll_common(struct napi_struct *napi, int budget)
        int processed = vnet_event_napi(port, budget);
 
        if (processed < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, processed);
                port->rx_event &= ~LDC_EVENT_DATA_READY;
                vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
        }
@@ -1256,10 +1254,8 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
 
        rcu_read_lock();
        port = vnet_tx_port(skb, dev);
-       if (unlikely(!port)) {
-               rcu_read_unlock();
+       if (unlikely(!port))
                goto out_dropped;
-       }
 
        if (skb_is_gso(skb) && skb->len > port->tsolen) {
                err = vnet_handle_offloads(port, skb, vnet_tx_port);
@@ -1284,7 +1280,6 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
                        fl4.saddr = ip_hdr(skb)->saddr;
 
                        rt = ip_route_output_key(dev_net(dev), &fl4);
-                       rcu_read_unlock();
                        if (!IS_ERR(rt)) {
                                skb_dst_set(skb, &rt->dst);
                                icmp_send(skb, ICMP_DEST_UNREACH,
@@ -1426,6 +1421,7 @@ ldc_start_done:
        dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
        if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
                netif_tx_stop_queue(txq);
+               smp_rmb();
                if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
                        netif_tx_wake_queue(txq);
        }
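
The smp_rmb() added above closes a lost-wakeup window: the queue is stopped first, then ring occupancy is re-read in case the completion path freed descriptors in the meantime, and the barrier keeps that re-read from being ordered before the stop. A sketch of the idiom with a toy ring, where ring_space() and WAKEUP_THRESH stand in for vnet_tx_dring_avail() and VNET_TX_WAKEUP_THRESH():

#include <linux/netdevice.h>

#define WAKEUP_THRESH 4                         /* illustrative */

struct my_ring { unsigned int head, tail, size; };      /* toy ring */

static unsigned int ring_space(struct my_ring *dr)
{
        return (dr->tail - dr->head - 1) & (dr->size - 1);
}

static void my_tx_maybe_stop(struct netdev_queue *txq, struct my_ring *dr)
{
        if (ring_space(dr) >= 1)
                return;

        netif_tx_stop_queue(txq);
        /* re-read the ring only after the stop is visible, so a racing
         * completion cannot leave the queue stopped with room free
         */
        smp_rmb();
        if (ring_space(dr) > WAKEUP_THRESH)
                netif_tx_wake_queue(txq);
}
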
@@ -1443,8 +1439,7 @@ out_dropped:
                                jiffies + VNET_CLEAN_TIMEOUT);
        else if (port)
                del_timer(&port->clean_timer);
-       if (port)
-               rcu_read_unlock();
+       rcu_read_unlock();
        if (skb)
                dev_kfree_skb(skb);
        vnet_free_skbs(freeskbs);
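
The unlock rework in the hunks above fixes unbalanced paths: rcu_read_lock() is taken once at entry and every exit, including the error gotos, now funnels through a single rcu_read_unlock(). The resulting shape, with my_port, lookup_port() and queue_to_port() as placeholders:

#include <linux/netdevice.h>

struct my_port;                                                 /* hypothetical */
static struct my_port *lookup_port(struct sk_buff *skb,
                                   struct net_device *dev);     /* hypothetical */
static void queue_to_port(struct my_port *port, struct sk_buff *skb);

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct my_port *port;

        rcu_read_lock();
        port = lookup_port(skb, dev);   /* RCU-protected lookup */
        if (unlikely(!port))
                goto out_dropped;

        queue_to_port(port, skb);
        rcu_read_unlock();
        return NETDEV_TX_OK;

out_dropped:
        rcu_read_unlock();              /* held on every path to here */
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}
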
@@ -1641,7 +1636,7 @@ static void vnet_port_reset(struct vnet_port *port)
        del_timer(&port->clean_timer);
        sunvnet_port_free_tx_bufs_common(port);
        port->rmtu = 0;
-       port->tso = true;
+       port->tso = (port->vsw == 0);  /* no tso in vsw, misbehaves in bridge */
        port->tsolen = 0;
 }
 
diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig
deleted file mode 100644 (file)
index 8276ee5..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# Synopsys network device configuration
-#
-
-config NET_VENDOR_SYNOPSYS
-       bool "Synopsys devices"
-       default y
-       ---help---
-         If you have a network (Ethernet) device belonging to this class, say Y.
-
-         Note that the answer to this question doesn't directly affect the
-         kernel: saying N will just cause the configurator to skip all
-         the questions about Synopsys devices. If you say Y, you will be asked
-         for your specific device in the following questions.
-
-if NET_VENDOR_SYNOPSYS
-
-config SYNOPSYS_DWC_ETH_QOS
-       tristate "Sypnopsys DWC Ethernet QOS v4.10a support"
-       select PHYLIB
-       select CRC32
-       select MII
-       depends on OF && HAS_DMA
-       ---help---
-         This driver supports the DWC Ethernet QoS from Synopsys
-
-endif # NET_VENDOR_SYNOPSYS
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
deleted file mode 100644 (file)
index 7a37572..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the Synopsys network device drivers.
-#
-
-obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
deleted file mode 100644 (file)
index 09f5a67..0000000
+++ /dev/null
@@ -1,2998 +0,0 @@
-/*  Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver
- *
- *  This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC).
- *  This version introduced a lot of changes which breaks backwards
- *  compatibility the non-QoS IP from Synopsys (used in the ST Micro drivers).
- *  Some fields differ between version 4.00a and 4.10a, mainly the interrupt
- *  bit fields. The driver could be made compatible with 4.00, if all relevant
- *  HW erratas are handled.
- *
- *  The GMAC is highly configurable at synthesis time. This driver has been
- *  developed for a subset of the total available feature set. Currently
- *  it supports:
- *  - TSO
- *  - Checksum offload for RX and TX.
- *  - Energy efficient ethernet.
- *  - GMII phy interface.
- *  - The statistics module.
- *  - Single RX and TX queue.
- *
- *  Copyright (C) 2015 Axis Communications AB.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms and conditions of the GNU General Public License,
- *  version 2, as published by the Free Software Foundation.
- */
-
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/ethtool.h>
-#include <linux/stat.h>
-#include <linux/types.h>
-
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
-
-#include <linux/phy.h>
-#include <linux/mii.h>
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-
-#include <linux/device.h>
-#include <linux/bitrev.h>
-#include <linux/crc32.h>
-
-#include <linux/of.h>
-#include <linux/interrupt.h>
-#include <linux/clocksource.h>
-#include <linux/net_tstamp.h>
-#include <linux/pm_runtime.h>
-#include <linux/of_net.h>
-#include <linux/of_address.h>
-#include <linux/of_mdio.h>
-#include <linux/timer.h>
-#include <linux/tcp.h>
-
-#define DRIVER_NAME                    "dwceqos"
-#define DRIVER_DESCRIPTION             "Synopsys DWC Ethernet QoS driver"
-#define DRIVER_VERSION                 "0.9"
-
-#define DWCEQOS_MSG_DEFAULT    (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
-       NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
-
-#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */
-
-#define DWCEQOS_LPI_TIMER_MIN      8
-#define DWCEQOS_LPI_TIMER_MAX      ((1 << 20) - 1)
-
-#define DWCEQOS_RX_BUF_SIZE 2048
-
-#define DWCEQOS_RX_DCNT 256
-#define DWCEQOS_TX_DCNT 256
-
-#define DWCEQOS_HASH_TABLE_SIZE 64
-
-/* The size field in the DMA descriptor is 14 bits */
-#define BYTES_PER_DMA_DESC 16376
-
-/* Hardware registers */
-#define START_MAC_REG_OFFSET    0x0000
-#define MAX_MAC_REG_OFFSET      0x0bd0
-#define START_MTL_REG_OFFSET    0x0c00
-#define MAX_MTL_REG_OFFSET      0x0d7c
-#define START_DMA_REG_OFFSET    0x1000
-#define MAX_DMA_REG_OFFSET      0x117C
-
-#define REG_SPACE_SIZE          0x1800
-
-/* DMA */
-#define REG_DWCEQOS_DMA_MODE             0x1000
-#define REG_DWCEQOS_DMA_SYSBUS_MODE      0x1004
-#define REG_DWCEQOS_DMA_IS               0x1008
-#define REG_DWCEQOS_DMA_DEBUG_ST0        0x100c
-
-/* DMA channel registers */
-#define REG_DWCEQOS_DMA_CH0_CTRL         0x1100
-#define REG_DWCEQOS_DMA_CH0_TX_CTRL      0x1104
-#define REG_DWCEQOS_DMA_CH0_RX_CTRL      0x1108
-#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST  0x1114
-#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST  0x111c
-#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL  0x1120
-#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL  0x1128
-#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN   0x112c
-#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN   0x1130
-#define REG_DWCEQOS_DMA_CH0_IE           0x1134
-#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC   0x1144
-#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC   0x114c
-#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF    0x1154
-#define REG_DWCEQOS_DMA_CH0_CUR_RXBUG    0x115c
-#define REG_DWCEQOS_DMA_CH0_STA          0x1160
-
-#define DWCEQOS_DMA_MODE_TXPR            BIT(11)
-#define DWCEQOS_DMA_MODE_DA              BIT(1)
-
-#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI   BIT(31)
-#define DWCEQOS_DMA_SYSBUS_MODE_FB       BIT(0)
-#define DWCEQOS_DMA_SYSBUS_MODE_AAL      BIT(12)
-
-#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \
-       (((x) << 16) & 0x000F0000)
-#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT    3
-#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK       GENMASK(19, 16)
-
-#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \
-       (((x) << 24) & 0x0F000000)
-#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT    3
-#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK       GENMASK(27, 24)
-
-#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1)
-#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \
-       (((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK)
-#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT   GENMASK(3, 1)
-
-#define DWCEQOS_DMA_CH_CTRL_PBLX8       BIT(16)
-#define DWCEQOS_DMA_CH_CTRL_DSL(x)      ((x) << 18)
-
-#define DWCEQOS_DMA_CH_CTRL_PBL(x)       ((x) << 16)
-#define DWCEQOS_DMA_CH_CTRL_START         BIT(0)
-#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x)   ((x) << 1)
-#define DWCEQOS_DMA_CH_TX_OSP            BIT(4)
-#define DWCEQOS_DMA_CH_TX_TSE            BIT(12)
-
-#define DWCEQOS_DMA_CH0_IE_NIE           BIT(15)
-#define DWCEQOS_DMA_CH0_IE_AIE           BIT(14)
-#define DWCEQOS_DMA_CH0_IE_RIE           BIT(6)
-#define DWCEQOS_DMA_CH0_IE_TIE           BIT(0)
-#define DWCEQOS_DMA_CH0_IE_FBEE          BIT(12)
-#define DWCEQOS_DMA_CH0_IE_RBUE          BIT(7)
-
-#define DWCEQOS_DMA_IS_DC0IS             BIT(0)
-#define DWCEQOS_DMA_IS_MTLIS             BIT(16)
-#define DWCEQOS_DMA_IS_MACIS             BIT(17)
-
-#define DWCEQOS_DMA_CH0_IS_TI            BIT(0)
-#define DWCEQOS_DMA_CH0_IS_RI            BIT(6)
-#define DWCEQOS_DMA_CH0_IS_RBU           BIT(7)
-#define DWCEQOS_DMA_CH0_IS_FBE           BIT(12)
-#define DWCEQOS_DMA_CH0_IS_CDE           BIT(13)
-#define DWCEQOS_DMA_CH0_IS_AIS           BIT(14)
-
-#define DWCEQOS_DMA_CH0_IS_TEB           GENMASK(18, 16)
-#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ   BIT(16)
-#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR  BIT(17)
-
-#define DWCEQOS_DMA_CH0_IS_REB           GENMASK(21, 19)
-#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ   BIT(19)
-#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR  BIT(20)
-
-/* DMA descriptor bits for RX normal descriptor (read format) */
-#define DWCEQOS_DMA_RDES3_OWN     BIT(31)
-#define DWCEQOS_DMA_RDES3_INTE    BIT(30)
-#define DWCEQOS_DMA_RDES3_BUF2V   BIT(25)
-#define DWCEQOS_DMA_RDES3_BUF1V   BIT(24)
-
-/* DMA descriptor bits for RX normal descriptor (write back format) */
-#define DWCEQOS_DMA_RDES1_IPCE    BIT(7)
-#define DWCEQOS_DMA_RDES3_ES      BIT(15)
-#define DWCEQOS_DMA_RDES3_E_JT    BIT(14)
-#define DWCEQOS_DMA_RDES3_PL(x)   ((x) & 0x7fff)
-#define DWCEQOS_DMA_RDES1_PT      0x00000007
-#define DWCEQOS_DMA_RDES1_PT_UDP  BIT(0)
-#define DWCEQOS_DMA_RDES1_PT_TCP  BIT(1)
-#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003
-
-/* DMA descriptor bits for TX normal descriptor (read format) */
-#define DWCEQOS_DMA_TDES2_IOC     BIT(31)
-#define DWCEQOS_DMA_TDES3_OWN     BIT(31)
-#define DWCEQOS_DMA_TDES3_CTXT    BIT(30)
-#define DWCEQOS_DMA_TDES3_FD      BIT(29)
-#define DWCEQOS_DMA_TDES3_LD      BIT(28)
-#define DWCEQOS_DMA_TDES3_CIPH    BIT(16)
-#define DWCEQOS_DMA_TDES3_CIPP    BIT(17)
-#define DWCEQOS_DMA_TDES3_CA      0x00030000
-#define DWCEQOS_DMA_TDES3_TSE     BIT(18)
-#define DWCEQOS_DMA_DES3_THL(x)   ((x) << 19)
-#define DWCEQOS_DMA_DES2_B2L(x)   ((x) << 16)
-
-#define DWCEQOS_DMA_TDES3_TCMSSV    BIT(26)
-
-/* DMA channel states */
-#define DMA_TX_CH_STOPPED   0
-#define DMA_TX_CH_SUSPENDED 6
-
-#define DMA_GET_TX_STATE_CH0(status0) ((status0 & 0xF000) >> 12)
-
-/* MTL */
-#define REG_DWCEQOS_MTL_OPER             0x0c00
-#define REG_DWCEQOS_MTL_DEBUG_ST         0x0c0c
-#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST    0x0d08
-#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST    0x0d38
-
-#define REG_DWCEQOS_MTL_IS               0x0c20
-#define REG_DWCEQOS_MTL_TXQ0_OPER        0x0d00
-#define REG_DWCEQOS_MTL_RXQ0_OPER        0x0d30
-#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT     0x0d34
-#define REG_DWCEQOS_MTL_RXQ0_CTRL         0x0d3c
-
-#define REG_DWCEQOS_MTL_Q0_ISCTRL         0x0d2c
-
-#define DWCEQOS_MTL_SCHALG_STRICT        0x00000060
-
-#define DWCEQOS_MTL_TXQ_TXQEN            BIT(3)
-#define DWCEQOS_MTL_TXQ_TSF              BIT(1)
-#define DWCEQOS_MTL_TXQ_FTQ              BIT(0)
-#define DWCEQOS_MTL_TXQ_TTC512           0x00000070
-
-#define DWCEQOS_MTL_TXQ_SIZE(x)          ((((x) - 256) & 0xff00) << 8)
-
-#define DWCEQOS_MTL_RXQ_SIZE(x)          ((((x) - 256) & 0xff00) << 12)
-#define DWCEQOS_MTL_RXQ_EHFC             BIT(7)
-#define DWCEQOS_MTL_RXQ_DIS_TCP_EF       BIT(6)
-#define DWCEQOS_MTL_RXQ_FEP              BIT(4)
-#define DWCEQOS_MTL_RXQ_FUP              BIT(3)
-#define DWCEQOS_MTL_RXQ_RSF              BIT(5)
-#define DWCEQOS_MTL_RXQ_RTC32            BIT(0)
-
-/* MAC */
-#define REG_DWCEQOS_MAC_CFG              0x0000
-#define REG_DWCEQOS_MAC_EXT_CFG          0x0004
-#define REG_DWCEQOS_MAC_PKT_FILT         0x0008
-#define REG_DWCEQOS_MAC_WD_TO            0x000c
-#define REG_DWCEQOS_HASTABLE_LO          0x0010
-#define REG_DWCEQOS_HASTABLE_HI          0x0014
-#define REG_DWCEQOS_MAC_IS               0x00b0
-#define REG_DWCEQOS_MAC_IE               0x00b4
-#define REG_DWCEQOS_MAC_STAT             0x00b8
-#define REG_DWCEQOS_MAC_MDIO_ADDR        0x0200
-#define REG_DWCEQOS_MAC_MDIO_DATA        0x0204
-#define REG_DWCEQOS_MAC_MAC_ADDR0_HI     0x0300
-#define REG_DWCEQOS_MAC_MAC_ADDR0_LO     0x0304
-#define REG_DWCEQOS_MAC_RXQ0_CTRL0       0x00a0
-#define REG_DWCEQOS_MAC_HW_FEATURE0      0x011c
-#define REG_DWCEQOS_MAC_HW_FEATURE1      0x0120
-#define REG_DWCEQOS_MAC_HW_FEATURE2      0x0124
-#define REG_DWCEQOS_MAC_HASHTABLE_LO     0x0010
-#define REG_DWCEQOS_MAC_HASHTABLE_HI     0x0014
-#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS  0x00d0
-#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL  0x00d4
-#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER  0x00d8
-#define REG_DWCEQOS_MAC_1US_TIC_COUNTER  0x00dc
-#define REG_DWCEQOS_MAC_RX_FLOW_CTRL     0x0090
-#define REG_DWCEQOS_MAC_Q0_TX_FLOW      0x0070
-
-#define DWCEQOS_MAC_CFG_ACS              BIT(20)
-#define DWCEQOS_MAC_CFG_JD               BIT(17)
-#define DWCEQOS_MAC_CFG_JE               BIT(16)
-#define DWCEQOS_MAC_CFG_PS               BIT(15)
-#define DWCEQOS_MAC_CFG_FES              BIT(14)
-#define DWCEQOS_MAC_CFG_DM               BIT(13)
-#define DWCEQOS_MAC_CFG_DO               BIT(10)
-#define DWCEQOS_MAC_CFG_TE               BIT(1)
-#define DWCEQOS_MAC_CFG_IPC              BIT(27)
-#define DWCEQOS_MAC_CFG_RE               BIT(0)
-
-#define DWCEQOS_ADDR_HIGH(reg)           (0x00000300 + (reg * 8))
-#define DWCEQOS_ADDR_LOW(reg)            (0x00000304 + (reg * 8))
-
-#define DWCEQOS_MAC_IS_LPI_INT           BIT(5)
-#define DWCEQOS_MAC_IS_MMC_INT           BIT(8)
-
-#define DWCEQOS_MAC_RXQ_EN               BIT(1)
-#define DWCEQOS_MAC_MAC_ADDR_HI_EN       BIT(31)
-#define DWCEQOS_MAC_PKT_FILT_RA          BIT(31)
-#define DWCEQOS_MAC_PKT_FILT_HPF         BIT(10)
-#define DWCEQOS_MAC_PKT_FILT_SAF         BIT(9)
-#define DWCEQOS_MAC_PKT_FILT_SAIF        BIT(8)
-#define DWCEQOS_MAC_PKT_FILT_DBF         BIT(5)
-#define DWCEQOS_MAC_PKT_FILT_PM          BIT(4)
-#define DWCEQOS_MAC_PKT_FILT_DAIF        BIT(3)
-#define DWCEQOS_MAC_PKT_FILT_HMC         BIT(2)
-#define DWCEQOS_MAC_PKT_FILT_HUC         BIT(1)
-#define DWCEQOS_MAC_PKT_FILT_PR          BIT(0)
-
-#define DWCEQOS_MAC_MDIO_ADDR_CR(x)      (((x) & 15) << 8)
-#define DWCEQOS_MAC_MDIO_ADDR_CR_20      2
-#define DWCEQOS_MAC_MDIO_ADDR_CR_35      3
-#define DWCEQOS_MAC_MDIO_ADDR_CR_60      0
-#define DWCEQOS_MAC_MDIO_ADDR_CR_100     1
-#define DWCEQOS_MAC_MDIO_ADDR_CR_150     4
-#define DWCEQOS_MAC_MDIO_ADDR_CR_250     5
-#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ   0x0000000c
-#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE  BIT(2)
-#define DWCEQOS_MAC_MDIO_ADDR_GB         BIT(0)
-
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN  BIT(0)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX  BIT(1)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN  BIT(2)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX  BIT(3)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST  BIT(8)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST  BIT(9)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN   BIT(16)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS     BIT(17)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN   BIT(18)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA  BIT(19)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE   BIT(20)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21)
-
-#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x)  ((x) & GENMASK(11, 0))
-
-#define DWCEQOS_LPI_CTRL_ENABLE_EEE      (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \
-                                         DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \
-                                         DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN)
-
-#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)
-
-#define DWCEQOS_MAC_Q0_TX_FLOW_TFE   BIT(1)
-#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time)        ((time) << 16)
-#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4)
-
-/* Features */
-#define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16)
-#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14)
-#define DWCEQOS_MAC_HW_FEATURE0_HDSEL    BIT(2)
-#define DWCEQOS_MAC_HW_FEATURE0_EEESEL   BIT(13)
-#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL  BIT(1)
-#define DWCEQOS_MAC_HW_FEATURE0_MIISEL   BIT(0)
-
-#define DWCEQOS_MAC_HW_FEATURE1_TSOEN    BIT(18)
-#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) (128 << (((x) & 0x7c0) >> 6))
-#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x)  (128 << ((x) & 0x1f))
-
-#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \
-       (1 + (((feature1) & 0x1fc0000) >> 18))
-
-#define DWCEQOS_MDIO_PHYADDR(x)     (((x) & 0x1f) << 21)
-#define DWCEQOS_MDIO_PHYREG(x)      (((x) & 0x1f) << 16)
-
-#define DWCEQOS_DMA_MODE_SWR            BIT(0)
-
-#define DWCEQOS_DWCEQOS_RX_BUF_SIZE 2048
-
-/* Mac Management Counters */
-#define REG_DWCEQOS_MMC_CTRL             0x0700
-#define REG_DWCEQOS_MMC_RXIRQ            0x0704
-#define REG_DWCEQOS_MMC_TXIRQ            0x0708
-#define REG_DWCEQOS_MMC_RXIRQMASK        0x070c
-#define REG_DWCEQOS_MMC_TXIRQMASK        0x0710
-
-#define DWCEQOS_MMC_CTRL_CNTRST          BIT(0)
-#define DWCEQOS_MMC_CTRL_RSTONRD         BIT(2)
-
-#define DWC_MMC_TXLPITRANSCNTR           0x07F0
-#define DWC_MMC_TXLPIUSCNTR              0x07EC
-#define DWC_MMC_TXOVERSIZE_G             0x0778
-#define DWC_MMC_TXVLANPACKETS_G          0x0774
-#define DWC_MMC_TXPAUSEPACKETS           0x0770
-#define DWC_MMC_TXEXCESSDEF              0x076C
-#define DWC_MMC_TXPACKETCOUNT_G          0x0768
-#define DWC_MMC_TXOCTETCOUNT_G           0x0764
-#define DWC_MMC_TXCARRIERERROR           0x0760
-#define DWC_MMC_TXEXCESSCOL              0x075C
-#define DWC_MMC_TXLATECOL                0x0758
-#define DWC_MMC_TXDEFERRED               0x0754
-#define DWC_MMC_TXMULTICOL_G             0x0750
-#define DWC_MMC_TXSINGLECOL_G            0x074C
-#define DWC_MMC_TXUNDERFLOWERROR         0x0748
-#define DWC_MMC_TXBROADCASTPACKETS_GB    0x0744
-#define DWC_MMC_TXMULTICASTPACKETS_GB    0x0740
-#define DWC_MMC_TXUNICASTPACKETS_GB      0x073C
-#define DWC_MMC_TX1024TOMAXOCTETS_GB     0x0738
-#define DWC_MMC_TX512TO1023OCTETS_GB     0x0734
-#define DWC_MMC_TX256TO511OCTETS_GB      0x0730
-#define DWC_MMC_TX128TO255OCTETS_GB      0x072C
-#define DWC_MMC_TX65TO127OCTETS_GB       0x0728
-#define DWC_MMC_TX64OCTETS_GB            0x0724
-#define DWC_MMC_TXMULTICASTPACKETS_G     0x0720
-#define DWC_MMC_TXBROADCASTPACKETS_G     0x071C
-#define DWC_MMC_TXPACKETCOUNT_GB         0x0718
-#define DWC_MMC_TXOCTETCOUNT_GB          0x0714
-
-#define DWC_MMC_RXLPITRANSCNTR           0x07F8
-#define DWC_MMC_RXLPIUSCNTR              0x07F4
-#define DWC_MMC_RXCTRLPACKETS_G          0x07E4
-#define DWC_MMC_RXRCVERROR               0x07E0
-#define DWC_MMC_RXWATCHDOG               0x07DC
-#define DWC_MMC_RXVLANPACKETS_GB         0x07D8
-#define DWC_MMC_RXFIFOOVERFLOW           0x07D4
-#define DWC_MMC_RXPAUSEPACKETS           0x07D0
-#define DWC_MMC_RXOUTOFRANGETYPE         0x07CC
-#define DWC_MMC_RXLENGTHERROR            0x07C8
-#define DWC_MMC_RXUNICASTPACKETS_G       0x07C4
-#define DWC_MMC_RX1024TOMAXOCTETS_GB     0x07C0
-#define DWC_MMC_RX512TO1023OCTETS_GB     0x07BC
-#define DWC_MMC_RX256TO511OCTETS_GB      0x07B8
-#define DWC_MMC_RX128TO255OCTETS_GB      0x07B4
-#define DWC_MMC_RX65TO127OCTETS_GB       0x07B0
-#define DWC_MMC_RX64OCTETS_GB            0x07AC
-#define DWC_MMC_RXOVERSIZE_G             0x07A8
-#define DWC_MMC_RXUNDERSIZE_G            0x07A4
-#define DWC_MMC_RXJABBERERROR            0x07A0
-#define DWC_MMC_RXRUNTERROR              0x079C
-#define DWC_MMC_RXALIGNMENTERROR         0x0798
-#define DWC_MMC_RXCRCERROR               0x0794
-#define DWC_MMC_RXMULTICASTPACKETS_G     0x0790
-#define DWC_MMC_RXBROADCASTPACKETS_G     0x078C
-#define DWC_MMC_RXOCTETCOUNT_G           0x0788
-#define DWC_MMC_RXOCTETCOUNT_GB          0x0784
-#define DWC_MMC_RXPACKETCOUNT_GB         0x0780
-
-static int debug = -1;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
-
-/* DMA ring descriptor. These are used as support descriptors for the HW DMA */
-struct ring_desc {
-       struct sk_buff *skb;
-       dma_addr_t mapping;
-       size_t len;
-};
-
-/* DMA hardware descriptor */
-struct dwceqos_dma_desc {
-       u32     des0;
-       u32     des1;
-       u32     des2;
-       u32     des3;
-} ____cacheline_aligned;
-
-struct dwceqos_mmc_counters {
-       __u64 txlpitranscntr;
-       __u64 txpiuscntr;
-       __u64 txoversize_g;
-       __u64 txvlanpackets_g;
-       __u64 txpausepackets;
-       __u64 txexcessdef;
-       __u64 txpacketcount_g;
-       __u64 txoctetcount_g;
-       __u64 txcarriererror;
-       __u64 txexcesscol;
-       __u64 txlatecol;
-       __u64 txdeferred;
-       __u64 txmulticol_g;
-       __u64 txsinglecol_g;
-       __u64 txunderflowerror;
-       __u64 txbroadcastpackets_gb;
-       __u64 txmulticastpackets_gb;
-       __u64 txunicastpackets_gb;
-       __u64 tx1024tomaxoctets_gb;
-       __u64 tx512to1023octets_gb;
-       __u64 tx256to511octets_gb;
-       __u64 tx128to255octets_gb;
-       __u64 tx65to127octets_gb;
-       __u64 tx64octets_gb;
-       __u64 txmulticastpackets_g;
-       __u64 txbroadcastpackets_g;
-       __u64 txpacketcount_gb;
-       __u64 txoctetcount_gb;
-
-       __u64 rxlpitranscntr;
-       __u64 rxlpiuscntr;
-       __u64 rxctrlpackets_g;
-       __u64 rxrcverror;
-       __u64 rxwatchdog;
-       __u64 rxvlanpackets_gb;
-       __u64 rxfifooverflow;
-       __u64 rxpausepackets;
-       __u64 rxoutofrangetype;
-       __u64 rxlengtherror;
-       __u64 rxunicastpackets_g;
-       __u64 rx1024tomaxoctets_gb;
-       __u64 rx512to1023octets_gb;
-       __u64 rx256to511octets_gb;
-       __u64 rx128to255octets_gb;
-       __u64 rx65to127octets_gb;
-       __u64 rx64octets_gb;
-       __u64 rxoversize_g;
-       __u64 rxundersize_g;
-       __u64 rxjabbererror;
-       __u64 rxrunterror;
-       __u64 rxalignmenterror;
-       __u64 rxcrcerror;
-       __u64 rxmulticastpackets_g;
-       __u64 rxbroadcastpackets_g;
-       __u64 rxoctetcount_g;
-       __u64 rxoctetcount_gb;
-       __u64 rxpacketcount_gb;
-};
-
-/* Ethtool statistics */
-
-struct dwceqos_stat {
-       const char stat_name[ETH_GSTRING_LEN];
-       int   offset;
-};
-
-#define STAT_ITEM(name, var) \
-       {\
-               name,\
-               offsetof(struct dwceqos_mmc_counters, var),\
-       }
-
-static const struct dwceqos_stat dwceqos_ethtool_stats[] = {
-       STAT_ITEM("tx_bytes", txoctetcount_gb),
-       STAT_ITEM("tx_packets", txpacketcount_gb),
-       STAT_ITEM("tx_unicst_packets", txunicastpackets_gb),
-       STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb),
-       STAT_ITEM("tx_multicast_packets",  txmulticastpackets_gb),
-       STAT_ITEM("tx_pause_packets", txpausepackets),
-       STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb),
-       STAT_ITEM("tx_65_to_127_byte_packets",  tx65to127octets_gb),
-       STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb),
-       STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb),
-       STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
-       STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb),
-       STAT_ITEM("tx_underflow_errors", txunderflowerror),
-       STAT_ITEM("tx_lpi_count", txlpitranscntr),
-
-       STAT_ITEM("rx_bytes", rxoctetcount_gb),
-       STAT_ITEM("rx_packets", rxpacketcount_gb),
-       STAT_ITEM("rx_unicast_packets", rxunicastpackets_g),
-       STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g),
-       STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g),
-       STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb),
-       STAT_ITEM("rx_pause_packets", rxpausepackets),
-       STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb),
-       STAT_ITEM("rx_65_to_127_byte_packets",  rx65to127octets_gb),
-       STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb),
-       STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb),
-       STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
-       STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb),
-       STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow),
-       STAT_ITEM("rx_oversize_packets", rxoversize_g),
-       STAT_ITEM("rx_undersize_packets", rxundersize_g),
-       STAT_ITEM("rx_jabbers", rxjabbererror),
-       STAT_ITEM("rx_align_errors", rxalignmenterror),
-       STAT_ITEM("rx_crc_errors", rxcrcerror),
-       STAT_ITEM("rx_lpi_count", rxlpitranscntr),
-};
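The table is laid out so that an ethtool statistics callback can copy every counter with one offsetof()-based lookup. The driver's real callbacks are outside this hunk; a minimal sketch of the pattern:

        static void get_ethtool_stats_sketch(struct net_local *lp, u64 *data)
        {
                size_t i;

                for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); i++)
                        data[i] = *(u64 *)((u8 *)&lp->mmc_counters +
                                           dwceqos_ethtool_stats[i].offset);
        }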
-
-/* Configuration of AXI bus parameters.
- * These values depend on the parameters set on the MAC core as well
- * as the AXI interconnect.
- */
-struct dwceqos_bus_cfg {
-       /* Enable AXI low-power interface. */
-       bool en_lpi;
-       /* Limit on number of outstanding AXI write requests. */
-       u32 write_requests;
-       /* Limit on number of outstanding AXI read requests. */
-       u32 read_requests;
-       /* Bitmap of allowed AXI burst lengths, 4-256 beats. */
-       u32 burst_map;
-       /* DMA programmable burst length */
-       u32 tx_pbl;
-       u32 rx_pbl;
-};
-
-struct dwceqos_flowcontrol {
-       int autoneg;
-       int rx;
-       int rx_current;
-       int tx;
-       int tx_current;
-};
-
-struct net_local {
-       void __iomem *baseaddr;
-       struct clk *phy_ref_clk;
-       struct clk *apb_pclk;
-
-       struct device_node *phy_node;
-       struct net_device *ndev;
-       struct platform_device *pdev;
-
-       u32 msg_enable;
-
-       struct tasklet_struct tx_bdreclaim_tasklet;
-       struct workqueue_struct *txtimeout_handler_wq;
-       struct work_struct txtimeout_reinit;
-
-       phy_interface_t phy_interface;
-       struct mii_bus *mii_bus;
-
-       unsigned int link;
-       unsigned int speed;
-       unsigned int duplex;
-
-       struct napi_struct napi;
-
-       /* DMA Descriptor Areas */
-       struct ring_desc *rx_skb;
-       struct ring_desc *tx_skb;
-
-       struct dwceqos_dma_desc *tx_descs;
-       struct dwceqos_dma_desc *rx_descs;
-
-       /* DMA-mapped descriptor areas */
-       dma_addr_t tx_descs_addr;
-       dma_addr_t rx_descs_addr;
-       dma_addr_t tx_descs_tail_addr;
-       dma_addr_t rx_descs_tail_addr;
-
-       size_t tx_free;
-       size_t tx_next;
-       size_t rx_cur;
-       size_t tx_cur;
-
-       /* Spinlocks for accessing DMA Descriptors */
-       spinlock_t tx_lock;
-
-       /* Spinlock for register read-modify-writes. */
-       spinlock_t hw_lock;
-
-       u32 feature0;
-       u32 feature1;
-       u32 feature2;
-
-       struct dwceqos_bus_cfg bus_cfg;
-       bool en_tx_lpi_clockgating;
-
-       int eee_enabled;
-       int eee_active;
-       int csr_val;
-       u32 gso_size;
-
-       struct dwceqos_mmc_counters mmc_counters;
-       /* Protect the mmc_counter updates. */
-       spinlock_t stats_lock;
-       u32 mmc_rx_counters_mask;
-       u32 mmc_tx_counters_mask;
-
-       struct dwceqos_flowcontrol flowcontrol;
-
-       /* Tracks the intermediate state where the PHY has been started but
-        * the hardware init has not finished yet.
-        */
-       bool phy_defer;
-};
-
-static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
-                                     u32 tx_mask);
-
-static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
-                                 unsigned int reg_n);
-static int dwceqos_stop(struct net_device *ndev);
-static int dwceqos_open(struct net_device *ndev);
-static void dwceqos_tx_poll_demand(struct net_local *lp);
-
-static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable);
-static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable);
-
-static void dwceqos_reset_state(struct net_local *lp);
-
-#define dwceqos_read(lp, reg)                                          \
-       readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg))
-#define dwceqos_write(lp, reg, val)                                    \
-       writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg))
-
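The relaxed accessors above provide no atomicity, so every read-modify-write of a shared register is bracketed by lp->hw_lock. The pattern, which dwceqos_link_down() and the IRQ enable/disable helpers below follow verbatim, reduces to this illustrative helper:

        static void set_reg_bits_sketch(struct net_local *lp, u32 reg, u32 bits)
        {
                unsigned long flags;

                spin_lock_irqsave(&lp->hw_lock, flags);
                dwceqos_write(lp, reg, dwceqos_read(lp, reg) | bits);
                spin_unlock_irqrestore(&lp->hw_lock, flags);
        }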
-static void dwceqos_reset_state(struct net_local *lp)
-{
-       lp->link    = 0;
-       lp->speed   = 0;
-       lp->duplex  = DUPLEX_UNKNOWN;
-       lp->flowcontrol.rx_current = 0;
-       lp->flowcontrol.tx_current = 0;
-       lp->eee_active = 0;
-       lp->eee_enabled = 0;
-}
-
-static void print_descriptor(struct net_local *lp, int index, int tx)
-{
-       struct dwceqos_dma_desc *dd;
-
-       if (tx)
-               dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index];
-       else
-               dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index];
-
-       pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX",
-               index, dd);
-       pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2,
-               dd->des3);
-}
-
-static void print_status(struct net_local *lp)
-{
-       size_t desci, i;
-
-       pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free,
-               lp->tx_cur, lp->tx_next);
-
-       print_descriptor(lp, lp->rx_cur, 0);
-
-       for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0;
-                i < DWCEQOS_TX_DCNT;
-                ++i) {
-               print_descriptor(lp, desci, 1);
-               desci = (desci + 1) % DWCEQOS_TX_DCNT;
-       }
-
-       pr_info("DMA_Debug_Status0:          0x%08x\n",
-               dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0));
-       pr_info("DMA_CH0_Status:             0x%08x\n",
-               dwceqos_read(lp, REG_DWCEQOS_DMA_IS));
-       pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n",
-               dwceqos_read(lp, 0x1144));
-       pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n",
-               dwceqos_read(lp, 0x1154));
-       pr_info("MTL_Debug_Status:      0x%08x\n",
-               dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST));
-       pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n",
-               dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST));
-       pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n",
-               dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST));
-       pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n",
-               dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC),
-               dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC));
-}
-
-static void dwceqos_mdio_set_csr(struct net_local *lp)
-{
-       int rate = clk_get_rate(lp->apb_pclk);
-
-       if (rate <= 20000000)
-               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20;
-       else if (rate <= 35000000)
-               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35;
-       else if (rate <= 60000000)
-               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60;
-       else if (rate <= 100000000)
-               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100;
-       else if (rate <= 150000000)
-               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150;
-       else if (rate <= 250000000)
-               lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250;
-}
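The csr_val selected here is folded into every MDIO command word through DWCEQOS_MAC_MDIO_ADDR_CR() in the bus helpers below; each CR code picks the divider that keeps MDC at or below the 2.5 MHz ceiling the MDIO specification allows for the given CSR clock range.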
-
-/* Simple MDIO functions implementing mii_bus */
-static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg)
-{
-       struct net_local *lp = bus->priv;
-       u32 regval;
-       int i;
-       int data;
-
-       regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
-               DWCEQOS_MDIO_PHYREG(phyreg) |
-               DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
-               DWCEQOS_MAC_MDIO_ADDR_GB |
-               DWCEQOS_MAC_MDIO_ADDR_GOC_READ;
-       dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
-
-       for (i = 0; i < 5; ++i) {
-               usleep_range(64, 128);
-               if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
-                     DWCEQOS_MAC_MDIO_ADDR_GB))
-                       break;
-       }
-
-       data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA);
-       if (i == 5) {
-               netdev_warn(lp->ndev, "MDIO read timed out\n");
-               data = 0xffff;
-       }
-
-       return data & 0xffff;
-}
-
-static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
-                             u16 value)
-{
-       struct net_local *lp = bus->priv;
-       u32 regval;
-       int i;
-
-       dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value);
-
-       regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
-               DWCEQOS_MDIO_PHYREG(phyreg) |
-               DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
-               DWCEQOS_MAC_MDIO_ADDR_GB |
-               DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE;
-       dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
-
-       for (i = 0; i < 5; ++i) {
-               usleep_range(64, 128);
-               if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
-                     DWCEQOS_MAC_MDIO_ADDR_GB))
-                       break;
-       }
-       if (i == 5)
-               netdev_warn(lp->ndev, "MDIO write timed out\n");
-       return 0;
-}
-
-static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
-       struct net_local *lp = netdev_priv(ndev);
-       struct phy_device *phydev = ndev->phydev;
-
-       if (!netif_running(ndev))
-               return -EINVAL;
-
-       if (!phydev)
-               return -ENODEV;
-
-       switch (cmd) {
-       case SIOCGMIIPHY:
-       case SIOCGMIIREG:
-       case SIOCSMIIREG:
-               return phy_mii_ioctl(phydev, rq, cmd);
-       default:
-               dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd);
-               return -EOPNOTSUPP;
-       }
-}
-
-static void dwceqos_link_down(struct net_local *lp)
-{
-       u32 regval;
-       unsigned long flags;
-
-       /* Indicate link down to the LPI state machine */
-       spin_lock_irqsave(&lp->hw_lock, flags);
-       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
-       regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
-       dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
-       spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_link_up(struct net_local *lp)
-{
-       struct net_device *ndev = lp->ndev;
-       u32 regval;
-       unsigned long flags;
-
-       /* Indicate link up to the LPI state machine */
-       spin_lock_irqsave(&lp->hw_lock, flags);
-       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
-       regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
-       dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
-       spin_unlock_irqrestore(&lp->hw_lock, flags);
-
-       lp->eee_active = !phy_init_eee(ndev->phydev, 0);
-
-       /* Check for changed EEE capability */
-       if (!lp->eee_active && lp->eee_enabled) {
-               lp->eee_enabled = 0;
-
-               spin_lock_irqsave(&lp->hw_lock, flags);
-               regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
-               regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
-               dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
-               spin_unlock_irqrestore(&lp->hw_lock, flags);
-       }
-}
-
-static void dwceqos_set_speed(struct net_local *lp)
-{
-       struct net_device *ndev = lp->ndev;
-       struct phy_device *phydev = ndev->phydev;
-       u32 regval;
-
-       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
-       regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES |
-                   DWCEQOS_MAC_CFG_DM);
-
-       if (phydev->duplex)
-               regval |= DWCEQOS_MAC_CFG_DM;
-       if (phydev->speed == SPEED_10) {
-               regval |= DWCEQOS_MAC_CFG_PS;
-       } else if (phydev->speed == SPEED_100) {
-               regval |= DWCEQOS_MAC_CFG_PS |
-                       DWCEQOS_MAC_CFG_FES;
-       } else if (phydev->speed != SPEED_1000) {
-               netdev_err(lp->ndev,
-                          "unknown PHY speed %d\n",
-                          phydev->speed);
-               return;
-       }
-
-       dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval);
-}
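The PS/FES encoding written here follows the usual Synopsys scheme: PS=1 with FES=0 selects 10 Mbit, PS=1 with FES=1 selects 100 Mbit, and PS=0 selects the gigabit interface, which is why SPEED_1000 leaves both bits clear and any other speed is rejected before the register write.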
-
-static void dwceqos_adjust_link(struct net_device *ndev)
-{
-       struct net_local *lp = netdev_priv(ndev);
-       struct phy_device *phydev = ndev->phydev;
-       int status_change = 0;
-
-       if (lp->phy_defer)
-               return;
-
-       if (phydev->link) {
-               if ((lp->speed != phydev->speed) ||
-                   (lp->duplex != phydev->duplex)) {
-                       dwceqos_set_speed(lp);
-
-                       lp->speed = phydev->speed;
-                       lp->duplex = phydev->duplex;
-                       status_change = 1;
-               }
-
-               if (lp->flowcontrol.autoneg) {
-                       lp->flowcontrol.rx = phydev->pause ||
-                                            phydev->asym_pause;
-                       lp->flowcontrol.tx = phydev->pause ||
-                                            phydev->asym_pause;
-               }
-
-               if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) {
-                       if (netif_msg_link(lp))
-                               netdev_dbg(ndev, "set rx flow to %d\n",
-                                          lp->flowcontrol.rx);
-                       dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx);
-                       lp->flowcontrol.rx_current = lp->flowcontrol.rx;
-               }
-               if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) {
-                       if (netif_msg_link(lp))
-                               netdev_dbg(ndev, "set tx flow to %d\n",
-                                          lp->flowcontrol.tx);
-                       dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx);
-                       lp->flowcontrol.tx_current = lp->flowcontrol.tx;
-               }
-       }
-
-       if (phydev->link != lp->link) {
-               lp->link = phydev->link;
-               status_change = 1;
-       }
-
-       if (status_change) {
-               if (phydev->link) {
-                       netif_trans_update(lp->ndev);
-                       dwceqos_link_up(lp);
-               } else {
-                       dwceqos_link_down(lp);
-               }
-               phy_print_status(phydev);
-       }
-}
-
-static int dwceqos_mii_probe(struct net_device *ndev)
-{
-       struct net_local *lp = netdev_priv(ndev);
-       struct phy_device *phydev = NULL;
-
-       if (lp->phy_node) {
-               phydev = of_phy_connect(lp->ndev,
-                                       lp->phy_node,
-                                       &dwceqos_adjust_link,
-                                       0,
-                                       lp->phy_interface);
-
-               if (!phydev) {
-                       netdev_err(ndev, "no PHY found\n");
-                       return -ENODEV;
-               }
-       } else {
-               netdev_err(ndev, "no PHY configured\n");
-               return -ENODEV;
-       }
-
-       if (netif_msg_probe(lp))
-               phy_attached_info(phydev);
-
-       phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
-                            SUPPORTED_Asym_Pause;
-
-       lp->link    = 0;
-       lp->speed   = 0;
-       lp->duplex  = DUPLEX_UNKNOWN;
-       lp->flowcontrol.autoneg = AUTONEG_ENABLE;
-
-       return 0;
-}
-
-static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index)
-{
-       struct sk_buff *new_skb;
-       dma_addr_t new_skb_baddr = 0;
-
-       new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
-       if (!new_skb) {
-               netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index);
-               goto err_out;
-       }
-
-       new_skb_baddr = dma_map_single(lp->ndev->dev.parent,
-                                      new_skb->data, DWCEQOS_RX_BUF_SIZE,
-                                      DMA_FROM_DEVICE);
-       if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
-               netdev_err(lp->ndev, "DMA map error\n");
-               dev_kfree_skb(new_skb);
-               new_skb = NULL;
-               goto err_out;
-       }
-
-       lp->rx_descs[index].des0 = new_skb_baddr;
-       lp->rx_descs[index].des1 = 0;
-       lp->rx_descs[index].des2 = 0;
-       lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE |
-                                  DWCEQOS_DMA_RDES3_BUF1V |
-                                  DWCEQOS_DMA_RDES3_OWN;
-
-       lp->rx_skb[index].mapping = new_skb_baddr;
-       lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE;
-
-err_out:
-       lp->rx_skb[index].skb = new_skb;
-}
-
-static void dwceqos_clean_rings(struct net_local *lp)
-{
-       int i;
-
-       if (lp->rx_skb) {
-               for (i = 0; i < DWCEQOS_RX_DCNT; i++) {
-                       if (lp->rx_skb[i].skb) {
-                               dma_unmap_single(lp->ndev->dev.parent,
-                                                lp->rx_skb[i].mapping,
-                                                lp->rx_skb[i].len,
-                                                DMA_FROM_DEVICE);
-
-                               dev_kfree_skb(lp->rx_skb[i].skb);
-                               lp->rx_skb[i].skb = NULL;
-                               lp->rx_skb[i].mapping = 0;
-                       }
-               }
-       }
-
-       if (lp->tx_skb) {
-               for (i = 0; i < DWCEQOS_TX_DCNT; i++) {
-                       if (lp->tx_skb[i].skb) {
-                               dev_kfree_skb(lp->tx_skb[i].skb);
-                               lp->tx_skb[i].skb = NULL;
-                       }
-                       if (lp->tx_skb[i].mapping) {
-                               dma_unmap_single(lp->ndev->dev.parent,
-                                                lp->tx_skb[i].mapping,
-                                                lp->tx_skb[i].len,
-                                                DMA_TO_DEVICE);
-                               lp->tx_skb[i].mapping = 0;
-                       }
-               }
-       }
-}
-
-static void dwceqos_descriptor_free(struct net_local *lp)
-{
-       int size;
-
-       dwceqos_clean_rings(lp);
-
-       kfree(lp->tx_skb);
-       lp->tx_skb = NULL;
-       kfree(lp->rx_skb);
-       lp->rx_skb = NULL;
-
-       size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
-       if (lp->rx_descs) {
-               dma_free_coherent(lp->ndev->dev.parent, size,
-                                 (void *)(lp->rx_descs), lp->rx_descs_addr);
-               lp->rx_descs = NULL;
-       }
-
-       size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
-       if (lp->tx_descs) {
-               dma_free_coherent(lp->ndev->dev.parent, size,
-                                 (void *)(lp->tx_descs), lp->tx_descs_addr);
-               lp->tx_descs = NULL;
-       }
-}
-
-static int dwceqos_descriptor_init(struct net_local *lp)
-{
-       int size;
-       u32 i;
-
-       lp->gso_size = 0;
-
-       lp->tx_skb = NULL;
-       lp->rx_skb = NULL;
-       lp->rx_descs = NULL;
-       lp->tx_descs = NULL;
-
-       /* Reset the DMA indexes */
-       lp->rx_cur = 0;
-       lp->tx_cur = 0;
-       lp->tx_next = 0;
-       lp->tx_free = DWCEQOS_TX_DCNT;
-
-       /* Allocate Ring descriptors */
-       size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc);
-       lp->rx_skb = kzalloc(size, GFP_KERNEL);
-       if (!lp->rx_skb)
-               goto err_out;
-
-       size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc);
-       lp->tx_skb = kzalloc(size, GFP_KERNEL);
-       if (!lp->tx_skb)
-               goto err_out;
-
-       /* Allocate DMA descriptors */
-       size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
-       lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
-                       &lp->rx_descs_addr, GFP_KERNEL);
-       if (!lp->rx_descs)
-               goto err_out;
-       lp->rx_descs_tail_addr = lp->rx_descs_addr +
-               sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT;
-
-       size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
-       lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
-                       &lp->tx_descs_addr, GFP_KERNEL);
-       if (!lp->tx_descs)
-               goto err_out;
-       lp->tx_descs_tail_addr = lp->tx_descs_addr +
-               sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT;
-
-       /* Initialize RX Ring Descriptors and buffers */
-       for (i = 0; i < DWCEQOS_RX_DCNT; ++i) {
-               dwceqos_alloc_rxring_desc(lp, i);
-               if (!(lp->rx_skb[lp->rx_cur].skb))
-                       goto err_out;
-       }
-
-       /* Initialize TX Descriptors */
-       for (i = 0; i < DWCEQOS_TX_DCNT; ++i) {
-               lp->tx_descs[i].des0 = 0;
-               lp->tx_descs[i].des1 = 0;
-               lp->tx_descs[i].des2 = 0;
-               lp->tx_descs[i].des3 = 0;
-       }
-
-       /* Make descriptor writes visible to the DMA. */
-       wmb();
-
-       return 0;
-
-err_out:
-       dwceqos_descriptor_free(lp);
-       return -ENOMEM;
-}
-
-static int dwceqos_packet_avail(struct net_local *lp)
-{
-       return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN);
-}
-
-static void dwceqos_get_hwfeatures(struct net_local *lp)
-{
-       lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0);
-       lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1);
-       lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2);
-}
-
-static void dwceqos_dma_enable_txirq(struct net_local *lp)
-{
-       u32 regval;
-       unsigned long flags;
-
-       spin_lock_irqsave(&lp->hw_lock, flags);
-       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
-       regval |= DWCEQOS_DMA_CH0_IE_TIE;
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
-       spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_dma_disable_txirq(struct net_local *lp)
-{
-       u32 regval;
-       unsigned long flags;
-
-       spin_lock_irqsave(&lp->hw_lock, flags);
-       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
-       regval &= ~DWCEQOS_DMA_CH0_IE_TIE;
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
-       spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_dma_enable_rxirq(struct net_local *lp)
-{
-       u32 regval;
-       unsigned long flags;
-
-       spin_lock_irqsave(&lp->hw_lock, flags);
-       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
-       regval |= DWCEQOS_DMA_CH0_IE_RIE;
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
-       spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_dma_disable_rxirq(struct net_local *lp)
-{
-       u32 regval;
-       unsigned long flags;
-
-       spin_lock_irqsave(&lp->hw_lock, flags);
-       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
-       regval &= ~DWCEQOS_DMA_CH0_IE_RIE;
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
-       spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_enable_mmc_interrupt(struct net_local *lp)
-{
-       dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0);
-       dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0);
-}
-
-static int dwceqos_mii_init(struct net_local *lp)
-{
-       int ret = -ENXIO;
-       struct resource res;
-       struct device_node *mdionode;
-
-       mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio");
-
-       if (!mdionode)
-               return 0;
-
-       lp->mii_bus = mdiobus_alloc();
-       if (!lp->mii_bus) {
-               ret = -ENOMEM;
-               goto err_out;
-       }
-
-       lp->mii_bus->name  = "DWCEQOS MII bus";
-       lp->mii_bus->read  = &dwceqos_mdio_read;
-       lp->mii_bus->write = &dwceqos_mdio_write;
-       lp->mii_bus->priv = lp;
-       lp->mii_bus->parent = &lp->pdev->dev;
-
-       of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
-       snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
-                (unsigned long long)res.start);
-       if (of_mdiobus_register(lp->mii_bus, mdionode))
-               goto err_out_free_mdiobus;
-
-       return 0;
-
-err_out_free_mdiobus:
-       mdiobus_free(lp->mii_bus);
-err_out:
-       of_node_put(mdionode);
-       return ret;
-}
-
-/* DMA reset. When issued, this also resets all MTL and MAC registers. */
-static void dwceqos_reset_hw(struct net_local *lp)
-{
-       /* Wait (at most) 0.5 seconds for the DMA reset to complete. */
-       int i = 5000;
-       u32 reg;
-
-       /* Force gigabit to guarantee a TX clock for GMII. */
-       reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
-       reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES);
-       reg |= DWCEQOS_MAC_CFG_DM;
-       dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg);
-
-       dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR);
-
-       do {
-               udelay(100);
-               i--;
-               reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE);
-       } while ((reg & DWCEQOS_DMA_MODE_SWR) && i);
-       /* We might experience a timeout if the chip clock mux is broken */
-       if (!i)
-               netdev_err(lp->ndev, "DMA reset timed out!\n");
-}
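The open-coded udelay() loop predates the now-common iopoll helpers; the same half-second poll could be expressed with readl_poll_timeout(), sketched here as an aside rather than as the driver's actual code:

        #include <linux/iopoll.h>

        static int wait_swr_clear_sketch(struct net_local *lp)
        {
                u32 reg;

                /* Poll roughly every 100 us, give up after 500 ms. */
                return readl_poll_timeout(lp->baseaddr + REG_DWCEQOS_DMA_MODE,
                                          reg, !(reg & DWCEQOS_DMA_MODE_SWR),
                                          100, 500 * USEC_PER_MSEC);
        }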
-
-static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status)
-{
-       if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) {
-               netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n",
-                          dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ?
-                               "read" : "write",
-                          dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ?
-                               "descr" : "data",
-                          dma_status);
-
-               print_status(lp);
-       }
-       if (dma_status & DWCEQOS_DMA_CH0_IS_REB) {
-               netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n",
-                          dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ?
-                               "read" : "write",
-                          dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ?
-                               "descr" : "data",
-                          dma_status);
-
-               print_status(lp);
-       }
-}
-
-static void dwceqos_mmc_interrupt(struct net_local *lp)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&lp->stats_lock, flags);
-
-       /* A latched MMC interrupt cannot be masked; we must read all
-        * the counters that have an interrupt pending.
-        */
-       dwceqos_read_mmc_counters(lp,
-                                 dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ),
-                                 dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ));
-
-       spin_unlock_irqrestore(&lp->stats_lock, flags);
-}
-
-static void dwceqos_mac_interrupt(struct net_local *lp)
-{
-       u32 cause;
-
-       cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS);
-
-       if (cause & DWCEQOS_MAC_IS_MMC_INT)
-               dwceqos_mmc_interrupt(lp);
-}
-
-static irqreturn_t dwceqos_interrupt(int irq, void *dev_id)
-{
-       struct net_device *ndev = dev_id;
-       struct net_local *lp = netdev_priv(ndev);
-
-       u32 cause;
-       u32 dma_status;
-       irqreturn_t ret = IRQ_NONE;
-
-       cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS);
-       /* DMA Channel 0 Interrupt */
-       if (cause & DWCEQOS_DMA_IS_DC0IS) {
-               dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA);
-
-               /* Transmit Interrupt */
-               if (dma_status & DWCEQOS_DMA_CH0_IS_TI) {
-                       tasklet_schedule(&lp->tx_bdreclaim_tasklet);
-                       dwceqos_dma_disable_txirq(lp);
-               }
-
-               /* Receive Interrupt */
-               if (dma_status & DWCEQOS_DMA_CH0_IS_RI) {
-                       /* Disable RX IRQs */
-                       dwceqos_dma_disable_rxirq(lp);
-                       napi_schedule(&lp->napi);
-               }
-
-               /* Fatal Bus Error interrupt */
-               if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) {
-                       dwceqos_fatal_bus_error(lp, dma_status);
-
-                       /* errata 9000831707 */
-                       dma_status |= DWCEQOS_DMA_CH0_IS_TEB |
-                                     DWCEQOS_DMA_CH0_IS_REB;
-               }
-
-               /* Ack all DMA Channel 0 IRQs */
-               dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status);
-               ret = IRQ_HANDLED;
-       }
-
-       if (cause & DWCEQOS_DMA_IS_MTLIS) {
-               u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL);
-
-               dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val);
-               ret = IRQ_HANDLED;
-       }
-
-       if (cause & DWCEQOS_DMA_IS_MACIS) {
-               dwceqos_mac_interrupt(lp);
-               ret = IRQ_HANDLED;
-       }
-       return ret;
-}
-
-static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable)
-{
-       u32 regval;
-       unsigned long flags;
-
-       spin_lock_irqsave(&lp->hw_lock, flags);
-
-       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL);
-       if (enable)
-               regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
-       else
-               regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
-       dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval);
-
-       spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable)
-{
-       u32 regval;
-       unsigned long flags;
-
-       spin_lock_irqsave(&lp->hw_lock, flags);
-
-       /* MTL flow control */
-       regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
-       if (enable)
-               regval |= DWCEQOS_MTL_RXQ_EHFC;
-       else
-               regval &= ~DWCEQOS_MTL_RXQ_EHFC;
-
-       dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
-
-       /* MAC flow control */
-       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW);
-       if (enable)
-               regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE;
-       else
-               regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE;
-       dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
-
-       spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_configure_flow_control(struct net_local *lp)
-{
-       u32 regval;
-       unsigned long flags;
-       int RQS, RFD, RFA;
-
-       spin_lock_irqsave(&lp->hw_lock, flags);
-
-       regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
-
-       /* The queue size is in units of 256 bytes. We want 512-byte units
-        * for the threshold fields.
-        */
-       RQS = ((regval >> 20) & 0x3FF) + 1;
-       RQS /= 2;
-
-       /* The thresholds are relative to a full queue, with a bias
-        * of 1 KiByte below full.
-        */
-       RFD = RQS / 2 - 2;
-       RFA = RQS / 8 - 2;
-
-       regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8);
-
-       if (RFD >= 0 && RFA >= 0) {
-               dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
-       } else {
-               netdev_warn(lp->ndev,
-                           "FIFO too small for flow control\n");
-       }
-
-       regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) |
-                DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS;
-
-       dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
-
-       spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
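A worked example, assuming the usual EQOS encoding where the RFA/RFD fields count 512-byte steps below a full queue: with an 8 KiB RX FIFO the RQS field reads 31, so RQS = 32 blocks of 256 bytes, halved to 16 in 512-byte units; RFD = 16/2 - 2 = 6 and RFA = 16/8 - 2 = 0. Together with the 1 KiB bias, that means pause frames start when free space drops to about 1 KiB and stop once the queue drains back to half full. A 4 KiB FIFO gives RFA = 8/8 - 2 < 0 and takes the "FIFO too small" warning path instead.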
-
-static void dwceqos_configure_clock(struct net_local *lp)
-{
-       unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000;
-
-       BUG_ON(!rate_mhz);
-
-       dwceqos_write(lp,
-                     REG_DWCEQOS_MAC_1US_TIC_COUNTER,
-                     DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1));
-}
-
-static void dwceqos_configure_bus(struct net_local *lp)
-{
-       u32 sysbus_reg;
-
-       /* N.B. We do not support the Fixed Burst mode because it
-        * opens a race window by making HW access to DMA descriptors
-        * non-atomic.
-        */
-
-       sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL;
-
-       if (lp->bus_cfg.en_lpi)
-               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI;
-
-       if (lp->bus_cfg.burst_map)
-               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
-                       lp->bus_cfg.burst_map);
-       else
-               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
-                       DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT);
-
-       if (lp->bus_cfg.read_requests)
-               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
-                       lp->bus_cfg.read_requests - 1);
-       else
-               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
-                       DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT);
-
-       if (lp->bus_cfg.write_requests)
-               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
-                       lp->bus_cfg.write_requests - 1);
-       else
-               sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
-                       DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT);
-
-       if (netif_msg_hw(lp))
-               netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg);
-
-       dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg);
-}
-
-static void dwceqos_init_hw(struct net_local *lp)
-{
-       struct net_device *ndev = lp->ndev;
-       u32 regval;
-       u32 buswidth;
-       u32 dma_skip;
-
-       /* Software reset */
-       dwceqos_reset_hw(lp);
-
-       dwceqos_configure_bus(lp);
-
-       /* Probe data bus width, 32/64/128 bits. */
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF);
-       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL);
-       buswidth = (regval ^ 0xF) + 1;
-
-       /* Cache-align DMA descriptors: the struct is padded to a cache line
-        * by ____cacheline_aligned, so program the descriptor skip length
-        * (DSL) to make the DMA step over the padding.
-        */
-       dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth;
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL,
-                     DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) |
-                     DWCEQOS_DMA_CH_CTRL_PBLX8);
-
-       /* Initialize DMA Channel 0 */
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1);
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1);
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST,
-                     (u32)lp->tx_descs_addr);
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST,
-                     (u32)lp->rx_descs_addr);
-
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
-                     lp->tx_descs_tail_addr);
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
-                     lp->rx_descs_tail_addr);
-
-       if (lp->bus_cfg.tx_pbl)
-               regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl);
-       else
-               regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
-
-       /* Enable TSO if the HW supports it */
-       if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
-               regval |= DWCEQOS_DMA_CH_TX_TSE;
-
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval);
-
-       if (lp->bus_cfg.rx_pbl)
-               regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl);
-       else
-               regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
-
-       regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE);
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
-
-       regval |= DWCEQOS_DMA_CH_CTRL_START;
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
-
-       /* Initialize MTL Queues */
-       regval = DWCEQOS_MTL_SCHALG_STRICT;
-       dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval);
-
-       regval = DWCEQOS_MTL_TXQ_SIZE(
-                       DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) |
-               DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF |
-               DWCEQOS_MTL_TXQ_TTC512;
-       dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval);
-
-       regval = DWCEQOS_MTL_RXQ_SIZE(
-                       DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) |
-               DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF;
-       dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
-
-       dwceqos_configure_flow_control(lp);
-
-       /* Initialize MAC */
-       dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
-
-       lp->eee_enabled = 0;
-
-       dwceqos_configure_clock(lp);
-
-       /* MMC counters */
-
-       /* Probe implemented counters: unimplemented mask bits read back as 0 */
-       dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u);
-       dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u);
-       lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK);
-       lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK);
-
-       dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST |
-               DWCEQOS_MMC_CTRL_RSTONRD);
-       dwceqos_enable_mmc_interrupt(lp);
-
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0);
-       dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
-
-       dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
-               DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
-
-       /* Start TX DMA */
-       regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL);
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL,
-                     regval | DWCEQOS_DMA_CH_CTRL_START);
-
-       /* Enable MAC TX/RX */
-       regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
-       dwceqos_write(lp, REG_DWCEQOS_MAC_CFG,
-                     regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
-
-       lp->phy_defer = false;
-       mutex_lock(&ndev->phydev->lock);
-       phy_read_status(ndev->phydev);
-       dwceqos_adjust_link(lp->ndev);
-       mutex_unlock(&ndev->phydev->lock);
-}
-
-static void dwceqos_tx_reclaim(unsigned long data)
-{
-       struct net_device *ndev = (struct net_device *)data;
-       struct net_local *lp = netdev_priv(ndev);
-       unsigned int tx_bytes = 0;
-       unsigned int tx_packets = 0;
-
-       spin_lock(&lp->tx_lock);
-
-       while (lp->tx_free < DWCEQOS_TX_DCNT) {
-               struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur];
-               struct ring_desc *rd = &lp->tx_skb[lp->tx_cur];
-
-               /* Is the descriptor still held by the DMA? */
-               if (dd->des3 & DWCEQOS_DMA_TDES3_OWN)
-                       break;
-
-               if (rd->mapping)
-                       dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len,
-                                        DMA_TO_DEVICE);
-
-               if (unlikely(rd->skb)) {
-                       ++tx_packets;
-                       tx_bytes += rd->skb->len;
-                       dev_consume_skb_any(rd->skb);
-               }
-
-               rd->skb = NULL;
-               rd->mapping = 0;
-               lp->tx_free++;
-               lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT;
-
-               if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) &&
-                   (dd->des3 & DWCEQOS_DMA_RDES3_ES)) {
-                       if (netif_msg_tx_err(lp))
-                               netdev_err(ndev, "TX Error, TDES3 = 0x%x\n",
-                                          dd->des3);
-                       if (netif_msg_hw(lp))
-                               print_status(lp);
-               }
-       }
-       spin_unlock(&lp->tx_lock);
-
-       netdev_completed_queue(ndev, tx_packets, tx_bytes);
-
-       dwceqos_dma_enable_txirq(lp);
-       netif_wake_queue(ndev);
-}
-
-static int dwceqos_rx(struct net_local *lp, int budget)
-{
-       struct sk_buff *skb;
-       u32 tot_size = 0;
-       unsigned int n_packets = 0;
-       unsigned int n_descs = 0;
-       u32 len;
-
-       struct dwceqos_dma_desc *dd;
-       struct sk_buff *new_skb;
-       dma_addr_t new_skb_baddr = 0;
-
-       while (n_descs < budget) {
-               if (!dwceqos_packet_avail(lp))
-                       break;
-
-               new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
-               if (!new_skb) {
-                       netdev_err(lp->ndev, "no memory for new sk_buff\n");
-                       break;
-               }
-
-               /* Get dma handle of skb->data */
-               new_skb_baddr = dma_map_single(lp->ndev->dev.parent,
-                                              new_skb->data,
-                                              DWCEQOS_RX_BUF_SIZE,
-                                              DMA_FROM_DEVICE);
-               if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
-                       netdev_err(lp->ndev, "DMA map error\n");
-                       dev_kfree_skb(new_skb);
-                       break;
-               }
-
-               /* Read descriptor data after reading owner bit. */
-               dma_rmb();
-
-               dd = &lp->rx_descs[lp->rx_cur];
-               len = DWCEQOS_DMA_RDES3_PL(dd->des3);
-               skb = lp->rx_skb[lp->rx_cur].skb;
-
-               /* Unmap old buffer */
-               dma_unmap_single(lp->ndev->dev.parent,
-                                lp->rx_skb[lp->rx_cur].mapping,
-                                lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE);
-
-               /* Discard packet on reception error or bad checksum */
-               if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) ||
-                   (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) {
-                       dev_kfree_skb(skb);
-                       skb = NULL;
-               } else {
-                       skb_put(skb, len);
-                       skb->protocol = eth_type_trans(skb, lp->ndev);
-                       switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) {
-                       case DWCEQOS_DMA_RDES1_PT_UDP:
-                       case DWCEQOS_DMA_RDES1_PT_TCP:
-                       case DWCEQOS_DMA_RDES1_PT_ICMP:
-                               skb->ip_summed = CHECKSUM_UNNECESSARY;
-                               break;
-                       default:
-                               skb->ip_summed = CHECKSUM_NONE;
-                               break;
-                       }
-               }
-
-               if (unlikely(!skb)) {
-                       if (netif_msg_rx_err(lp))
-                               netdev_dbg(lp->ndev, "rx error: des3=%X\n",
-                                          lp->rx_descs[lp->rx_cur].des3);
-               } else {
-                       tot_size += skb->len;
-                       n_packets++;
-
-                       netif_receive_skb(skb);
-               }
-
-               lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr;
-               lp->rx_descs[lp->rx_cur].des1 = 0;
-               lp->rx_descs[lp->rx_cur].des2 = 0;
-               /* The DMA must observe des0/1/2 written before des3. */
-               wmb();
-               lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE |
-                                               DWCEQOS_DMA_RDES3_OWN  |
-                                               DWCEQOS_DMA_RDES3_BUF1V;
-
-               lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr;
-               lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE;
-               lp->rx_skb[lp->rx_cur].skb = new_skb;
-
-               n_descs++;
-               lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT;
-       }
-
-       /* Make sure any ownership update is written to the descriptors before
-        * DMA wakeup.
-        */
-       wmb();
-
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI);
-       /* Wake up RX by writing tail pointer */
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
-                     lp->rx_descs_tail_addr);
-
-       return n_descs;
-}
-
-static int dwceqos_rx_poll(struct napi_struct *napi, int budget)
-{
-       struct net_local *lp = container_of(napi, struct net_local, napi);
-       int work_done;
-
-       work_done = dwceqos_rx(lp, budget);
-
-       if (!dwceqos_packet_avail(lp) && work_done < budget) {
-               napi_complete(napi);
-               dwceqos_dma_enable_rxirq(lp);
-       } else {
-               work_done = budget;
-       }
-
-       return work_done;
-}
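Note the handshake with the interrupt path above: RX interrupts stay disabled from dwceqos_interrupt() until napi_complete() runs here, and returning the full budget whenever dwceqos_packet_avail() still sees a ready descriptor keeps NAPI scheduled, so a packet that lands between the final ring check and interrupt re-enable cannot be stranded.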
-
-/* Reinitialize the hardware if a TX timed out */
-static void dwceqos_reinit_for_txtimeout(struct work_struct *data)
-{
-       struct net_local *lp = container_of(data, struct net_local,
-               txtimeout_reinit);
-
-       netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n",
-                  DWCEQOS_TX_TIMEOUT);
-
-       if (netif_msg_hw(lp))
-               print_status(lp);
-
-       rtnl_lock();
-       dwceqos_stop(lp->ndev);
-       dwceqos_open(lp->ndev);
-       rtnl_unlock();
-}
-
-/* DT Probing function called by main probe */
-static inline int dwceqos_probe_config_dt(struct platform_device *pdev)
-{
-       struct net_device *ndev;
-       struct net_local *lp;
-       const void *mac_address;
-       struct dwceqos_bus_cfg *bus_cfg;
-       struct device_node *np = pdev->dev.of_node;
-
-       ndev = platform_get_drvdata(pdev);
-       lp = netdev_priv(ndev);
-       bus_cfg = &lp->bus_cfg;
-
-       /* Set the MAC address. */
-       mac_address = of_get_mac_address(pdev->dev.of_node);
-       if (mac_address)
-               ether_addr_copy(ndev->dev_addr, mac_address);
-
-       /* These are all optional parameters */
-       lp->en_tx_lpi_clockgating = of_property_read_bool(np,
-               "snps,en-tx-lpi-clockgating");
-       bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi");
-       of_property_read_u32(np, "snps,write-requests",
-                            &bus_cfg->write_requests);
-       of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests);
-       of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map);
-       of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl);
-       of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl);
-
-       netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%d\n",
-                  bus_cfg->en_lpi,
-                  bus_cfg->write_requests,
-                  bus_cfg->read_requests,
-                  bus_cfg->burst_map,
-                  bus_cfg->rx_pbl,
-                  bus_cfg->tx_pbl);
-
-       return 0;
-}
-
-static int dwceqos_open(struct net_device *ndev)
-{
-       struct net_local *lp = netdev_priv(ndev);
-       int res;
-
-       dwceqos_reset_state(lp);
-       res = dwceqos_descriptor_init(lp);
-       if (res) {
-               netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res);
-               return res;
-       }
-       netdev_reset_queue(ndev);
-
-       /* The dwceqos reset state machine requires all PHY clocks to be
-        * running for the reset to complete, hence the unusual init order
-        * with phy_start() first.
-        */
-       lp->phy_defer = true;
-       phy_start(ndev->phydev);
-       dwceqos_init_hw(lp);
-       napi_enable(&lp->napi);
-
-       netif_start_queue(ndev);
-       tasklet_enable(&lp->tx_bdreclaim_tasklet);
-
-       /* Enable Interrupts -- do this only after we enable NAPI and the
-        * tasklet.
-        */
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
-                     DWCEQOS_DMA_CH0_IE_NIE |
-                     DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
-                     DWCEQOS_DMA_CH0_IE_AIE |
-                     DWCEQOS_DMA_CH0_IE_FBEE);
-
-       return 0;
-}
-
-static bool dwceqos_is_tx_dma_suspended(struct net_local *lp)
-{
-       u32 reg;
-
-       reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0);
-       reg = DMA_GET_TX_STATE_CH0(reg);
-
-       return reg == DMA_TX_CH_SUSPENDED;
-}
-
-static void dwceqos_drain_dma(struct net_local *lp)
-{
-       /* Wait for all pending TX buffers to be sent. Upper limit based
-        * on max frame size on a 10 Mbit link.
-        */
-       size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100;
-
-       while (!dwceqos_is_tx_dma_suspended(lp) && limit--)
-               usleep_range(100, 200);
-}
-
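The loop bound in dwceqos_drain_dma() is worst-case arithmetic: each iteration sleeps at least 100 us, so (DWCEQOS_TX_DCNT * 1250) / 100 iterations budget about 1.25 ms per queued descriptor, roughly the wire time of one maximum-size frame (1518 bytes, ~1.21 ms) at 10 Mbit/s. A runnable check of that arithmetic, assuming a ring of 256 descriptors (the actual DWCEQOS_TX_DCNT value is defined outside this excerpt):

	#include <stdio.h>

	int main(void)
	{
		const unsigned tx_dcnt = 256;          /* assumed ring size */
		const unsigned iters = (tx_dcnt * 1250) / 100; /* loop bound above */
		const double min_sleep_us = 100.0;     /* usleep_range() lower bound */
		/* 1518-byte frame at 10 Mbit/s: 1518 * 8 / 10e6 s */
		const double frame_ms = 1518 * 8 / 10e6 * 1e3;

		printf("minimum drain budget: %.1f ms (%.2f ms per descriptor)\n",
		       iters * min_sleep_us / 1000.0,
		       iters * min_sleep_us / 1000.0 / tx_dcnt);
		printf("one max frame at 10 Mbit/s: %.2f ms\n", frame_ms);
		return 0;
	}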
-static int dwceqos_stop(struct net_device *ndev)
-{
-       struct net_local *lp = netdev_priv(ndev);
-
-       tasklet_disable(&lp->tx_bdreclaim_tasklet);
-       napi_disable(&lp->napi);
-
-       /* Stop all tx before we drain the tx dma. */
-       netif_tx_lock_bh(lp->ndev);
-       netif_stop_queue(ndev);
-       netif_tx_unlock_bh(lp->ndev);
-
-       dwceqos_drain_dma(lp);
-       dwceqos_reset_hw(lp);
-       phy_stop(ndev->phydev);
-
-       dwceqos_descriptor_free(lp);
-
-       return 0;
-}
-
-static void dwceqos_dmadesc_set_ctx(struct net_local *lp,
-                                   unsigned short gso_size)
-{
-       struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next];
-
-       dd->des0 = 0;
-       dd->des1 = 0;
-       dd->des2 = gso_size;
-       dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV;
-
-       lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
-}
-
-static void dwceqos_tx_poll_demand(struct net_local *lp)
-{
-       dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
-                     lp->tx_descs_tail_addr);
-}
-
-struct dwceqos_tx {
-       size_t nr_descriptors;
-       size_t initial_descriptor;
-       size_t last_descriptor;
-       size_t prev_gso_size;
-       size_t network_header_len;
-};
-
-static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp,
-                              struct dwceqos_tx *tx)
-{
-       size_t n = 1;
-       size_t i;
-
-       if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size)
-               ++n;
-
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
-               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-               n +=  (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) /
-                      BYTES_PER_DMA_DESC;
-       }
-
-       tx->nr_descriptors = n;
-       tx->initial_descriptor = lp->tx_next;
-       tx->last_descriptor = lp->tx_next;
-       tx->prev_gso_size = lp->gso_size;
-
-       tx->network_header_len = skb_transport_offset(skb);
-       if (skb_is_gso(skb))
-               tx->network_header_len += tcp_hdrlen(skb);
-}
-
-static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp,
-                            struct dwceqos_tx *tx)
-{
-       struct ring_desc *rd;
-       struct dwceqos_dma_desc *dd;
-       size_t payload_len;
-       dma_addr_t dma_handle;
-
-       if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) {
-               dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size);
-               lp->gso_size = skb_shinfo(skb)->gso_size;
-       }
-
-       dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data,
-                                   skb_headlen(skb), DMA_TO_DEVICE);
-
-       if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
-               netdev_err(lp->ndev, "TX DMA Mapping error\n");
-               return -ENOMEM;
-       }
-
-       rd = &lp->tx_skb[lp->tx_next];
-       dd = &lp->tx_descs[lp->tx_next];
-
-       rd->skb = NULL;
-       rd->len = skb_headlen(skb);
-       rd->mapping = dma_handle;
-
-       /* Set up DMA Descriptor */
-       dd->des0 = dma_handle;
-
-       if (skb_is_gso(skb)) {
-               payload_len = skb_headlen(skb) - tx->network_header_len;
-
-               if (payload_len)
-                       dd->des1 = dma_handle + tx->network_header_len;
-               dd->des2 = tx->network_header_len |
-                       DWCEQOS_DMA_DES2_B2L(payload_len);
-               dd->des3 = DWCEQOS_DMA_TDES3_TSE |
-                       DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) |
-                       (skb->len - tx->network_header_len);
-       } else {
-               dd->des1 = 0;
-               dd->des2 = skb_headlen(skb);
-               dd->des3 = skb->len;
-
-               switch (skb->ip_summed) {
-               case CHECKSUM_PARTIAL:
-                       dd->des3 |= DWCEQOS_DMA_TDES3_CA;
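-                       /* fall through */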
-               case CHECKSUM_NONE:
-               case CHECKSUM_UNNECESSARY:
-               case CHECKSUM_COMPLETE:
-               default:
-                       break;
-               }
-       }
-
-       dd->des3 |= DWCEQOS_DMA_TDES3_FD;
-       if (lp->tx_next != tx->initial_descriptor)
-               dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
-
-       tx->last_descriptor = lp->tx_next;
-       lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
-
-       return 0;
-}
-
-static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
-                           struct dwceqos_tx *tx)
-{
-       struct ring_desc *rd = NULL;
-       struct dwceqos_dma_desc *dd;
-       dma_addr_t dma_handle;
-       size_t i;
-
-       /* Setup more ring and DMA descriptor if the packet is fragmented */
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
-               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-               size_t frag_size;
-               size_t consumed_size;
-
-               /* Map DMA Area */
-               dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0,
-                                             skb_frag_size(frag),
-                                             DMA_TO_DEVICE);
-               if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
-                       netdev_err(lp->ndev, "DMA Mapping error\n");
-                       return -ENOMEM;
-               }
-
-               /* order-3 fragments span more than one descriptor. */
-               frag_size = skb_frag_size(frag);
-               consumed_size = 0;
-               while (consumed_size < frag_size) {
-                       size_t dma_size = min_t(size_t, 16376,
-                                               frag_size - consumed_size);
-
-                       rd = &lp->tx_skb[lp->tx_next];
-                       memset(rd, 0, sizeof(*rd));
-
-                       dd = &lp->tx_descs[lp->tx_next];
-
-                       /* Set DMA Descriptor fields */
-                       dd->des0 = dma_handle + consumed_size;
-                       dd->des1 = 0;
-                       dd->des2 = dma_size;
-
-                       if (skb_is_gso(skb))
-                               dd->des3 = (skb->len - tx->network_header_len);
-                       else
-                               dd->des3 = skb->len;
-
-                       dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
-
-                       tx->last_descriptor = lp->tx_next;
-                       lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
-                       consumed_size += dma_size;
-               }
-
-               rd->len = skb_frag_size(frag);
-               rd->mapping = dma_handle;
-       }
-
-       return 0;
-}
-
-static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp,
-                               struct dwceqos_tx *tx)
-{
-       lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD;
-       lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC;
-
-       lp->tx_skb[tx->last_descriptor].skb = skb;
-
-       /* Make all descriptor updates visible to the DMA before setting the
-        * owner bit.
-        */
-       wmb();
-
-       lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN;
-
-       /* Make the owner bit visible before TX wakeup. */
-       wmb();
-
-       dwceqos_tx_poll_demand(lp);
-}
-
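The finalize step above is the second half of a two-phase ownership handoff: dwceqos_tx_linear() and dwceqos_tx_frags() already set TDES3_OWN on every descriptor except the first, so the DMA cannot observe a half-built chain until the barrier-ordered write flips OWN on the initial descriptor. A standalone model of the idiom; the types, flag values, and release fence are simplified stand-ins, not the driver's definitions:

	#include <stdatomic.h>
	#include <stdint.h>

	#define OWN (1u << 31)  /* stand-ins for DWCEQOS_DMA_TDES3_OWN / _LD */
	#define LD  (1u << 28)

	struct desc { uint32_t des0, des1, des2, des3; };

	static void publish_chain(struct desc *d, unsigned first, unsigned last)
	{
		d[last].des3 |= LD;             /* mark the last descriptor */
		atomic_thread_fence(memory_order_release);  /* plays wmb() */
		d[first].des3 |= OWN;           /* hand the whole chain over */
		atomic_thread_fence(memory_order_release);  /* OWN before doorbell */
		/* a real driver would now write the tail-pointer doorbell */
	}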
-static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx)
-{
-       size_t i = tx->initial_descriptor;
-
-       while (i != lp->tx_next) {
-               if (lp->tx_skb[i].mapping)
-                       dma_unmap_single(lp->ndev->dev.parent,
-                                        lp->tx_skb[i].mapping,
-                                        lp->tx_skb[i].len,
-                                        DMA_TO_DEVICE);
-
-               lp->tx_skb[i].mapping = 0;
-               lp->tx_skb[i].skb = NULL;
-
-               memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i]));
-
-               i = (i + 1) % DWCEQOS_TX_DCNT;
-       }
-
-       lp->tx_next = tx->initial_descriptor;
-       lp->gso_size = tx->prev_gso_size;
-}
-
-static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
-{
-       struct net_local *lp = netdev_priv(ndev);
-       struct dwceqos_tx trans;
-       int err;
-
-       dwceqos_tx_prepare(skb, lp, &trans);
-       if (lp->tx_free < trans.nr_descriptors) {
-               netif_stop_queue(ndev);
-               return NETDEV_TX_BUSY;
-       }
-
-       err = dwceqos_tx_linear(skb, lp, &trans);
-       if (err)
-               goto tx_error;
-
-       err = dwceqos_tx_frags(skb, lp, &trans);
-       if (err)
-               goto tx_error;
-
-       WARN_ON(lp->tx_next !=
-               ((trans.initial_descriptor + trans.nr_descriptors) %
-                DWCEQOS_TX_DCNT));
-
-       spin_lock_bh(&lp->tx_lock);
-       lp->tx_free -= trans.nr_descriptors;
-       dwceqos_tx_finalize(skb, lp, &trans);
-       netdev_sent_queue(ndev, skb->len);
-       spin_unlock_bh(&lp->tx_lock);
-
-       netif_trans_update(ndev);
-       return 0;
-
-tx_error:
-       dwceqos_tx_rollback(lp, &trans);
-       dev_kfree_skb_any(skb);
-       return 0;
-}
-
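dwceqos_start_xmit() follows the usual ndo_start_xmit contract: when the ring is full it stops the queue and returns NETDEV_TX_BUSY without consuming the skb, so the stack requeues it; on a mapping error it rolls the ring back and frees the skb itself, returning success because a dropped packet still counts as consumed. A compressed sketch; ring_has_room() and map_and_queue() are invented placeholders:

	static netdev_tx_t example_xmit(struct sk_buff *skb,
					struct net_device *ndev)
	{
		if (!ring_has_room(ndev)) {
			netif_stop_queue(ndev); /* stack retries this skb */
			return NETDEV_TX_BUSY;  /* skb NOT consumed */
		}
		if (map_and_queue(ndev, skb) < 0)
			dev_kfree_skb_any(skb); /* consumed: dropped */
		return NETDEV_TX_OK;            /* consumed either way */
	}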
-/* Set MAC address and then update HW accordingly */
-static int dwceqos_set_mac_address(struct net_device *ndev, void *addr)
-{
-       struct net_local *lp = netdev_priv(ndev);
-       struct sockaddr *hwaddr = (struct sockaddr *)addr;
-
-       if (netif_running(ndev))
-               return -EBUSY;
-
-       if (!is_valid_ether_addr(hwaddr->sa_data))
-               return -EADDRNOTAVAIL;
-
-       memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len);
-
-       dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
-       return 0;
-}
-
-static void dwceqos_tx_timeout(struct net_device *ndev)
-{
-       struct net_local *lp = netdev_priv(ndev);
-
-       queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit);
-}
-
-static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
-                                 unsigned int reg_n)
-{
-       unsigned long data;
-
-       data = (addr[5] << 8) | addr[4];
-       dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n),
-                     data | DWCEQOS_MAC_MAC_ADDR_HI_EN);
-       data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
-       dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data);
-}
-
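The helper above splits a 6-byte MAC address across two little-endian registers: bytes 0-3 go into ADDR_LOW, bytes 4-5 plus an enable bit into ADDR_HIGH. A runnable worked example; the enable-bit macro is a local stand-in:

	#include <stdio.h>
	#include <stdint.h>

	#define ADDR_HI_EN (1u << 31) /* stand-in for DWCEQOS_MAC_MAC_ADDR_HI_EN */

	int main(void)
	{
		const uint8_t a[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
		uint32_t hi = ((uint32_t)a[5] << 8 | a[4]) | ADDR_HI_EN;
		uint32_t lo = (uint32_t)a[3] << 24 | (uint32_t)a[2] << 16 |
			      (uint32_t)a[1] << 8  | a[0];

		/* 00:11:22:33:44:55 -> HIGH 0x80005544, LOW 0x33221100 */
		printf("HIGH=0x%08x LOW=0x%08x\n", hi, lo);
		return 0;
	}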
-static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n)
-{
-       /* Do not disable MAC address 0 */
-       if (reg_n != 0)
-               dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0);
-}
-
-static void dwceqos_set_rx_mode(struct net_device *ndev)
-{
-       struct net_local *lp = netdev_priv(ndev);
-       u32 regval = 0;
-       u32 mc_filter[2];
-       int reg = 1;
-       struct netdev_hw_addr *ha;
-       unsigned int max_mac_addr;
-
-       max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1);
-
-       if (ndev->flags & IFF_PROMISC) {
-               regval = DWCEQOS_MAC_PKT_FILT_PR;
-       } else if ((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) ||
-                  (ndev->flags & IFF_ALLMULTI)) {
-               regval = DWCEQOS_MAC_PKT_FILT_PM;
-               dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff);
-               dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff);
-       } else if (!netdev_mc_empty(ndev)) {
-               regval = DWCEQOS_MAC_PKT_FILT_HMC;
-               memset(mc_filter, 0, sizeof(mc_filter));
-               netdev_for_each_mc_addr(ha, ndev) {
-                       /* The upper 6 bits of the calculated CRC are used to
-                        * index the contents of the hash table
-                        */
-                       int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
-                       /* The most significant bit determines the register
-                        * to use (H/L) while the other 5 bits determine
-                        * the bit within the register.
-                        */
-                       mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
-               }
-               dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]);
-               dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]);
-       }
-       if (netdev_uc_count(ndev) > max_mac_addr) {
-               regval |= DWCEQOS_MAC_PKT_FILT_PR;
-       } else {
-               netdev_for_each_uc_addr(ha, ndev) {
-                       dwceqos_set_umac_addr(lp, ha->addr, reg);
-                       reg++;
-               }
-               for (; reg < DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); reg++)
-                       dwceqos_disable_umac_addr(lp, reg);
-       }
-       dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval);
-}
-
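The hash filter above can be reproduced in userspace: the little-endian CRC-32 of the 6 address bytes is inverted, bit-reversed, and its top 6 bits select one of 64 hash-table bits, with bit 5 choosing the HI or LO register. A self-contained worked example; crc32_le() and bitrev32() below are local stand-ins for the kernel helpers:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t crc32_le(uint32_t crc, const uint8_t *p, int len)
	{
		while (len--) {
			crc ^= *p++;
			for (int i = 0; i < 8; i++)
				crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
		}
		return crc;
	}

	static uint32_t bitrev32(uint32_t x)
	{
		uint32_t r = 0;
		for (int i = 0; i < 32; i++, x >>= 1)
			r = (r << 1) | (x & 1);
		return r;
	}

	int main(void)
	{
		const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
		uint32_t mc_filter[2] = { 0, 0 };
		int bit_nr = bitrev32(~crc32_le(~0u, mc, 6)) >> 26;

		mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
		printf("bit_nr=%d -> HASTABLE_%s bit %d\n",
		       bit_nr, (bit_nr >> 5) ? "HI" : "LO", bit_nr & 31);
		return 0;
	}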
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void dwceqos_poll_controller(struct net_device *ndev)
-{
-       disable_irq(ndev->irq);
-       dwceqos_interrupt(ndev->irq, ndev);
-       enable_irq(ndev->irq);
-}
-#endif
-
-static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
-                                     u32 tx_mask)
-{
-       if (tx_mask & BIT(27))
-               lp->mmc_counters.txlpitranscntr +=
-                       dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR);
-       if (tx_mask & BIT(26))
-               lp->mmc_counters.txpiuscntr +=
-                       dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR);
-       if (tx_mask & BIT(25))
-               lp->mmc_counters.txoversize_g +=
-                       dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G);
-       if (tx_mask & BIT(24))
-               lp->mmc_counters.txvlanpackets_g +=
-                       dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G);
-       if (tx_mask & BIT(23))
-               lp->mmc_counters.txpausepackets +=
-                       dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS);
-       if (tx_mask & BIT(22))
-               lp->mmc_counters.txexcessdef +=
-                       dwceqos_read(lp, DWC_MMC_TXEXCESSDEF);
-       if (tx_mask & BIT(21))
-               lp->mmc_counters.txpacketcount_g +=
-                       dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G);
-       if (tx_mask & BIT(20))
-               lp->mmc_counters.txoctetcount_g +=
-                       dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G);
-       if (tx_mask & BIT(19))
-               lp->mmc_counters.txcarriererror +=
-                       dwceqos_read(lp, DWC_MMC_TXCARRIERERROR);
-       if (tx_mask & BIT(18))
-               lp->mmc_counters.txexcesscol +=
-                       dwceqos_read(lp, DWC_MMC_TXEXCESSCOL);
-       if (tx_mask & BIT(17))
-               lp->mmc_counters.txlatecol +=
-                       dwceqos_read(lp, DWC_MMC_TXLATECOL);
-       if (tx_mask & BIT(16))
-               lp->mmc_counters.txdeferred +=
-                       dwceqos_read(lp, DWC_MMC_TXDEFERRED);
-       if (tx_mask & BIT(15))
-               lp->mmc_counters.txmulticol_g +=
-                       dwceqos_read(lp, DWC_MMC_TXMULTICOL_G);
-       if (tx_mask & BIT(14))
-               lp->mmc_counters.txsinglecol_g +=
-                       dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G);
-       if (tx_mask & BIT(13))
-               lp->mmc_counters.txunderflowerror +=
-                       dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR);
-       if (tx_mask & BIT(12))
-               lp->mmc_counters.txbroadcastpackets_gb +=
-                       dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB);
-       if (tx_mask & BIT(11))
-               lp->mmc_counters.txmulticastpackets_gb +=
-                       dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB);
-       if (tx_mask & BIT(10))
-               lp->mmc_counters.txunicastpackets_gb +=
-                       dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB);
-       if (tx_mask & BIT(9))
-               lp->mmc_counters.tx1024tomaxoctets_gb +=
-                       dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB);
-       if (tx_mask & BIT(8))
-               lp->mmc_counters.tx512to1023octets_gb +=
-                       dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB);
-       if (tx_mask & BIT(7))
-               lp->mmc_counters.tx256to511octets_gb +=
-                       dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB);
-       if (tx_mask & BIT(6))
-               lp->mmc_counters.tx128to255octets_gb +=
-                       dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB);
-       if (tx_mask & BIT(5))
-               lp->mmc_counters.tx65to127octets_gb +=
-                       dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB);
-       if (tx_mask & BIT(4))
-               lp->mmc_counters.tx64octets_gb +=
-                       dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB);
-       if (tx_mask & BIT(3))
-               lp->mmc_counters.txmulticastpackets_g +=
-                       dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G);
-       if (tx_mask & BIT(2))
-               lp->mmc_counters.txbroadcastpackets_g +=
-                       dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G);
-       if (tx_mask & BIT(1))
-               lp->mmc_counters.txpacketcount_gb +=
-                       dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB);
-       if (tx_mask & BIT(0))
-               lp->mmc_counters.txoctetcount_gb +=
-                       dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB);
-
-       if (rx_mask & BIT(27))
-               lp->mmc_counters.rxlpitranscntr +=
-                       dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR);
-       if (rx_mask & BIT(26))
-               lp->mmc_counters.rxlpiuscntr +=
-                       dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR);
-       if (rx_mask & BIT(25))
-               lp->mmc_counters.rxctrlpackets_g +=
-                       dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G);
-       if (rx_mask & BIT(24))
-               lp->mmc_counters.rxrcverror +=
-                       dwceqos_read(lp, DWC_MMC_RXRCVERROR);
-       if (rx_mask & BIT(23))
-               lp->mmc_counters.rxwatchdog +=
-                       dwceqos_read(lp, DWC_MMC_RXWATCHDOG);
-       if (rx_mask & BIT(22))
-               lp->mmc_counters.rxvlanpackets_gb +=
-                       dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB);
-       if (rx_mask & BIT(21))
-               lp->mmc_counters.rxfifooverflow +=
-                       dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW);
-       if (rx_mask & BIT(20))
-               lp->mmc_counters.rxpausepackets +=
-                       dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS);
-       if (rx_mask & BIT(19))
-               lp->mmc_counters.rxoutofrangetype +=
-                       dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE);
-       if (rx_mask & BIT(18))
-               lp->mmc_counters.rxlengtherror +=
-                       dwceqos_read(lp, DWC_MMC_RXLENGTHERROR);
-       if (rx_mask & BIT(17))
-               lp->mmc_counters.rxunicastpackets_g +=
-                       dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G);
-       if (rx_mask & BIT(16))
-               lp->mmc_counters.rx1024tomaxoctets_gb +=
-                       dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB);
-       if (rx_mask & BIT(15))
-               lp->mmc_counters.rx512to1023octets_gb +=
-                       dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB);
-       if (rx_mask & BIT(14))
-               lp->mmc_counters.rx256to511octets_gb +=
-                       dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB);
-       if (rx_mask & BIT(13))
-               lp->mmc_counters.rx128to255octets_gb +=
-                       dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB);
-       if (rx_mask & BIT(12))
-               lp->mmc_counters.rx65to127octets_gb +=
-                       dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB);
-       if (rx_mask & BIT(11))
-               lp->mmc_counters.rx64octets_gb +=
-                       dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB);
-       if (rx_mask & BIT(10))
-               lp->mmc_counters.rxoversize_g +=
-                       dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G);
-       if (rx_mask & BIT(9))
-               lp->mmc_counters.rxundersize_g +=
-                       dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G);
-       if (rx_mask & BIT(8))
-               lp->mmc_counters.rxjabbererror +=
-                       dwceqos_read(lp, DWC_MMC_RXJABBERERROR);
-       if (rx_mask & BIT(7))
-               lp->mmc_counters.rxrunterror +=
-                       dwceqos_read(lp, DWC_MMC_RXRUNTERROR);
-       if (rx_mask & BIT(6))
-               lp->mmc_counters.rxalignmenterror +=
-                       dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR);
-       if (rx_mask & BIT(5))
-               lp->mmc_counters.rxcrcerror +=
-                       dwceqos_read(lp, DWC_MMC_RXCRCERROR);
-       if (rx_mask & BIT(4))
-               lp->mmc_counters.rxmulticastpackets_g +=
-                       dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G);
-       if (rx_mask & BIT(3))
-               lp->mmc_counters.rxbroadcastpackets_g +=
-                       dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G);
-       if (rx_mask & BIT(2))
-               lp->mmc_counters.rxoctetcount_g +=
-                       dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G);
-       if (rx_mask & BIT(1))
-               lp->mmc_counters.rxoctetcount_gb +=
-                       dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB);
-       if (rx_mask & BIT(0))
-               lp->mmc_counters.rxpacketcount_gb +=
-                       dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB);
-}
-
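The long if-chain above reads and accumulates one MMC register per mask bit; the same logic can be written table-driven, which is easier to audit. A sketch, not the driver's code, under the assumption that every field in struct dwceqos_mmc_counters is a u64 (as the ethtool stats path below implies), with an illustrative two-entry table:

	struct mmc_counter {
		int bit;        /* bit in the rx/tx counter mask */
		size_t offset;  /* offsetof() into the counter struct */
		unsigned reg;   /* MMC register to read */
	};

	static const struct mmc_counter tx_tbl[] = {
		{ 27, offsetof(struct dwceqos_mmc_counters, txlpitranscntr),
		  DWC_MMC_TXLPITRANSCNTR },
		{ 26, offsetof(struct dwceqos_mmc_counters, txpiuscntr),
		  DWC_MMC_TXLPIUSCNTR },
		/* ...one entry per counter... */
	};

	static void accumulate(struct net_local *lp, u32 mask,
			       const struct mmc_counter *tbl, size_t n)
	{
		size_t i;

		for (i = 0; i < n; i++)
			if (mask & BIT(tbl[i].bit))
				*(u64 *)((u8 *)&lp->mmc_counters + tbl[i].offset) +=
					dwceqos_read(lp, tbl[i].reg);
	}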
-static struct rtnl_link_stats64*
-dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
-{
-       unsigned long flags;
-       struct net_local *lp = netdev_priv(ndev);
-       struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters;
-
-       spin_lock_irqsave(&lp->stats_lock, flags);
-       dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
-                                 lp->mmc_tx_counters_mask);
-       spin_unlock_irqrestore(&lp->stats_lock, flags);
-
-       s->rx_packets = hwstats->rxpacketcount_gb;
-       s->rx_bytes = hwstats->rxoctetcount_gb;
-       s->rx_errors = hwstats->rxpacketcount_gb -
-               hwstats->rxbroadcastpackets_g -
-               hwstats->rxmulticastpackets_g -
-               hwstats->rxunicastpackets_g;
-       s->multicast = hwstats->rxmulticastpackets_g;
-       s->rx_length_errors = hwstats->rxlengtherror;
-       s->rx_crc_errors = hwstats->rxcrcerror;
-       s->rx_fifo_errors = hwstats->rxfifooverflow;
-
-       s->tx_packets = hwstats->txpacketcount_gb;
-       s->tx_bytes = hwstats->txoctetcount_gb;
-
-       if (lp->mmc_tx_counters_mask & BIT(21))
-               s->tx_errors = hwstats->txpacketcount_gb -
-                       hwstats->txpacketcount_g;
-       else
-               s->tx_errors = hwstats->txunderflowerror +
-                       hwstats->txcarriererror;
-
-       return s;
-}
-
-static void
-dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
-{
-       const struct net_local *lp = netdev_priv(ndev);
-
-       strcpy(ed->driver, lp->pdev->dev.driver->name);
-       strcpy(ed->version, DRIVER_VERSION);
-}
-
-static void dwceqos_get_pauseparam(struct net_device *ndev,
-                                  struct ethtool_pauseparam *pp)
-{
-       const struct net_local *lp = netdev_priv(ndev);
-
-       pp->autoneg = lp->flowcontrol.autoneg;
-       pp->tx_pause = lp->flowcontrol.tx;
-       pp->rx_pause = lp->flowcontrol.rx;
-}
-
-static int dwceqos_set_pauseparam(struct net_device *ndev,
-                                 struct ethtool_pauseparam *pp)
-{
-       struct net_local *lp = netdev_priv(ndev);
-       int ret = 0;
-
-       lp->flowcontrol.autoneg = pp->autoneg;
-       if (pp->autoneg) {
-               ndev->phydev->advertising |= ADVERTISED_Pause;
-               ndev->phydev->advertising |= ADVERTISED_Asym_Pause;
-       } else {
-               ndev->phydev->advertising &= ~ADVERTISED_Pause;
-               ndev->phydev->advertising &= ~ADVERTISED_Asym_Pause;
-               lp->flowcontrol.rx = pp->rx_pause;
-               lp->flowcontrol.tx = pp->tx_pause;
-       }
-
-       if (netif_running(ndev))
-               ret = phy_start_aneg(ndev->phydev);
-
-       return ret;
-}
-
-static void dwceqos_get_strings(struct net_device *ndev, u32 stringset,
-                               u8 *data)
-{
-       size_t i;
-
-       if (stringset != ETH_SS_STATS)
-               return;
-
-       for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
-               memcpy(data, dwceqos_ethtool_stats[i].stat_name,
-                      ETH_GSTRING_LEN);
-               data += ETH_GSTRING_LEN;
-       }
-}
-
-static void dwceqos_get_ethtool_stats(struct net_device *ndev,
-                                     struct ethtool_stats *stats, u64 *data)
-{
-       struct net_local *lp = netdev_priv(ndev);
-       unsigned long flags;
-       size_t i;
-       u8 *mmcstat = (u8 *)&lp->mmc_counters;
-
-       spin_lock_irqsave(&lp->stats_lock, flags);
-       dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
-                                 lp->mmc_tx_counters_mask);
-       spin_unlock_irqrestore(&lp->stats_lock, flags);
-
-       for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
-               memcpy(data,
-                      mmcstat + dwceqos_ethtool_stats[i].offset,
-                      sizeof(u64));
-               data++;
-       }
-}
-
-static int dwceqos_get_sset_count(struct net_device *ndev, int sset)
-{
-       if (sset == ETH_SS_STATS)
-               return ARRAY_SIZE(dwceqos_ethtool_stats);
-
-       return -EOPNOTSUPP;
-}
-
-static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs,
-                            void *space)
-{
-       const struct net_local *lp = netdev_priv(dev);
-       u32 *reg_space = (u32 *)space;
-       int reg_offset;
-       int reg_ix = 0;
-
-       /* MAC registers */
-       for (reg_offset = START_MAC_REG_OFFSET;
-               reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
-               reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
-               reg_ix++;
-       }
-       /* MTL registers */
-       for (reg_offset = START_MTL_REG_OFFSET;
-               reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
-               reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
-               reg_ix++;
-       }
-
-       /* DMA registers */
-       for (reg_offset = START_DMA_REG_OFFSET;
-               reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
-               reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
-               reg_ix++;
-       }
-
-       BUG_ON(4 * reg_ix > REG_SPACE_SIZE);
-}
-
-static int dwceqos_get_regs_len(struct net_device *dev)
-{
-       return REG_SPACE_SIZE;
-}
-
-static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl)
-{
-       return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off";
-}
-
-static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl)
-{
-       return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off";
-}
-
-static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
-{
-       struct net_local *lp = netdev_priv(ndev);
-       u32 lpi_status;
-       u32 lpi_enabled;
-
-       if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
-               return -EOPNOTSUPP;
-
-       edata->eee_active  = lp->eee_active;
-       edata->eee_enabled = lp->eee_enabled;
-       edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER);
-       lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
-       lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA);
-       edata->tx_lpi_enabled = lpi_enabled;
-
-       if (netif_msg_hw(lp)) {
-               u32 regval;
-
-               regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
-
-               netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n",
-                           dwceqos_get_rx_lpi_state(regval),
-                           dwceqos_get_tx_lpi_state(regval));
-       }
-
-       return phy_ethtool_get_eee(ndev->phydev, edata);
-}
-
-static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
-{
-       struct net_local *lp = netdev_priv(ndev);
-       u32 regval;
-       unsigned long flags;
-
-       if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
-               return -EOPNOTSUPP;
-
-       if (edata->eee_enabled && !lp->eee_active)
-               return -EOPNOTSUPP;
-
-       if (edata->tx_lpi_enabled) {
-               if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN ||
-                   edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX)
-                       return -EINVAL;
-       }
-
-       lp->eee_enabled = edata->eee_enabled;
-
-       if (edata->eee_enabled && edata->tx_lpi_enabled) {
-               dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER,
-                             edata->tx_lpi_timer);
-
-               spin_lock_irqsave(&lp->hw_lock, flags);
-               regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
-               regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE;
-               if (lp->en_tx_lpi_clockgating)
-                       regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE;
-               dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
-               spin_unlock_irqrestore(&lp->hw_lock, flags);
-       } else {
-               spin_lock_irqsave(&lp->hw_lock, flags);
-               regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
-               regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
-               dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
-               spin_unlock_irqrestore(&lp->hw_lock, flags);
-       }
-
-       return phy_ethtool_set_eee(ndev->phydev, edata);
-}
-
-static u32 dwceqos_get_msglevel(struct net_device *ndev)
-{
-       const struct net_local *lp = netdev_priv(ndev);
-
-       return lp->msg_enable;
-}
-
-static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
-{
-       struct net_local *lp = netdev_priv(ndev);
-
-       lp->msg_enable = msglevel;
-}
-
-static const struct ethtool_ops dwceqos_ethtool_ops = {
-       .get_drvinfo    = dwceqos_get_drvinfo,
-       .get_link       = ethtool_op_get_link,
-       .get_pauseparam = dwceqos_get_pauseparam,
-       .set_pauseparam = dwceqos_set_pauseparam,
-       .get_strings    = dwceqos_get_strings,
-       .get_ethtool_stats = dwceqos_get_ethtool_stats,
-       .get_sset_count = dwceqos_get_sset_count,
-       .get_regs       = dwceqos_get_regs,
-       .get_regs_len   = dwceqos_get_regs_len,
-       .get_eee        = dwceqos_get_eee,
-       .set_eee        = dwceqos_set_eee,
-       .get_msglevel   = dwceqos_get_msglevel,
-       .set_msglevel   = dwceqos_set_msglevel,
-       .get_link_ksettings = phy_ethtool_get_link_ksettings,
-       .set_link_ksettings = phy_ethtool_set_link_ksettings,
-};
-
-static const struct net_device_ops netdev_ops = {
-       .ndo_open               = dwceqos_open,
-       .ndo_stop               = dwceqos_stop,
-       .ndo_start_xmit         = dwceqos_start_xmit,
-       .ndo_set_rx_mode        = dwceqos_set_rx_mode,
-       .ndo_set_mac_address    = dwceqos_set_mac_address,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = dwceqos_poll_controller,
-#endif
-       .ndo_do_ioctl           = dwceqos_ioctl,
-       .ndo_tx_timeout         = dwceqos_tx_timeout,
-       .ndo_get_stats64        = dwceqos_get_stats64,
-};
-
-static const struct of_device_id dwceq_of_match[] = {
-       { .compatible = "snps,dwc-qos-ethernet-4.10", },
-       {}
-};
-MODULE_DEVICE_TABLE(of, dwceq_of_match);
-
-static int dwceqos_probe(struct platform_device *pdev)
-{
-       struct resource *r_mem = NULL;
-       struct net_device *ndev;
-       struct net_local *lp;
-       int ret = -ENXIO;
-
-       r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r_mem) {
-               dev_err(&pdev->dev, "no IO resource defined.\n");
-               return -ENXIO;
-       }
-
-       ndev = alloc_etherdev(sizeof(*lp));
-       if (!ndev) {
-               dev_err(&pdev->dev, "etherdev allocation failed.\n");
-               return -ENOMEM;
-       }
-
-       SET_NETDEV_DEV(ndev, &pdev->dev);
-
-       lp = netdev_priv(ndev);
-       lp->ndev = ndev;
-       lp->pdev = pdev;
-       lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT);
-
-       spin_lock_init(&lp->tx_lock);
-       spin_lock_init(&lp->hw_lock);
-       spin_lock_init(&lp->stats_lock);
-
-       lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
-       if (IS_ERR(lp->apb_pclk)) {
-               dev_err(&pdev->dev, "apb_pclk clock not found.\n");
-               ret = PTR_ERR(lp->apb_pclk);
-               goto err_out_free_netdev;
-       }
-
-       ret = clk_prepare_enable(lp->apb_pclk);
-       if (ret) {
-               dev_err(&pdev->dev, "Unable to enable APER clock.\n");
-               goto err_out_free_netdev;
-       }
-
-       lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem);
-       if (IS_ERR(lp->baseaddr)) {
-               dev_err(&pdev->dev, "failed to map baseaddress.\n");
-               ret = PTR_ERR(lp->baseaddr);
-               goto err_out_clk_dis_aper;
-       }
-
-       ndev->irq = platform_get_irq(pdev, 0);
-       ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ;
-       ndev->netdev_ops = &netdev_ops;
-       ndev->ethtool_ops = &dwceqos_ethtool_ops;
-       ndev->base_addr = r_mem->start;
-
-       dwceqos_get_hwfeatures(lp);
-       dwceqos_mdio_set_csr(lp);
-
-       ndev->hw_features = NETIF_F_SG;
-
-       if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
-               ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
-
-       if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL)
-               ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-
-       if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL)
-               ndev->hw_features |= NETIF_F_RXCSUM;
-
-       ndev->features = ndev->hw_features;
-
-       lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
-       if (IS_ERR(lp->phy_ref_clk)) {
-               dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
-               ret = PTR_ERR(lp->phy_ref_clk);
-               goto err_out_clk_dis_aper;
-       }
-
-       ret = clk_prepare_enable(lp->phy_ref_clk);
-       if (ret) {
-               dev_err(&pdev->dev, "Unable to enable device clock.\n");
-               goto err_out_clk_dis_aper;
-       }
-
-       lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
-                                               "phy-handle", 0);
-       if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) {
-               ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
-               if (ret < 0) {
-                       dev_err(&pdev->dev, "invalid fixed-link\n");
-                       goto err_out_clk_dis_phy;
-               }
-
-               lp->phy_node = of_node_get(lp->pdev->dev.of_node);
-       }
-
-       ret = of_get_phy_mode(lp->pdev->dev.of_node);
-       if (ret < 0) {
-               dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
-               goto err_out_deregister_fixed_link;
-       }
-
-       lp->phy_interface = ret;
-
-       ret = dwceqos_mii_init(lp);
-       if (ret) {
-               dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
-               goto err_out_deregister_fixed_link;
-       }
-
-       ret = dwceqos_mii_probe(ndev);
-       if (ret != 0) {
-               netdev_err(ndev, "mii_probe fail.\n");
-               ret = -ENXIO;
-               goto err_out_deregister_fixed_link;
-       }
-
-       dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
-
-       tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim,
-                    (unsigned long)ndev);
-       tasklet_disable(&lp->tx_bdreclaim_tasklet);
-
-       lp->txtimeout_handler_wq = alloc_workqueue(DRIVER_NAME,
-                                                  WQ_MEM_RECLAIM, 0);
-       INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout);
-
-       platform_set_drvdata(pdev, ndev);
-       ret = dwceqos_probe_config_dt(pdev);
-       if (ret) {
-               dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
-                       ret);
-               goto err_out_deregister_fixed_link;
-       }
-       dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
-                pdev->id, ndev->base_addr, ndev->irq);
-
-       ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0,
-                              ndev->name, ndev);
-       if (ret) {
-               dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
-                       ndev->irq, ret);
-               goto err_out_deregister_fixed_link;
-       }
-
-       if (netif_msg_probe(lp))
-               netdev_dbg(ndev, "net_local@%p\n", lp);
-
-       netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
-
-       ret = register_netdev(ndev);
-       if (ret) {
-               dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
-               goto err_out_deregister_fixed_link;
-       }
-
-       return 0;
-
-err_out_deregister_fixed_link:
-       if (of_phy_is_fixed_link(pdev->dev.of_node))
-               of_phy_deregister_fixed_link(pdev->dev.of_node);
-err_out_clk_dis_phy:
-       clk_disable_unprepare(lp->phy_ref_clk);
-err_out_clk_dis_aper:
-       clk_disable_unprepare(lp->apb_pclk);
-err_out_free_netdev:
-       of_node_put(lp->phy_node);
-       free_netdev(ndev);
-       platform_set_drvdata(pdev, NULL);
-       return ret;
-}
-
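The probe function above uses the kernel's goto-ladder unwind: each failure jumps to a label that releases only what was acquired before the failing step, and the labels fall through in reverse acquisition order. The shape of the pattern, with placeholder acquire/release pairs standing in for clocks, mappings, and the like:

	static int acquire_a(void);
	static int acquire_b(void);
	static int acquire_c(void);
	static void release_a(void);
	static void release_b(void);

	static int example_probe(void)
	{
		int ret;

		ret = acquire_a();
		if (ret)
			return ret;
		ret = acquire_b();
		if (ret)
			goto err_release_a;
		ret = acquire_c();
		if (ret)
			goto err_release_b;
		return 0;

	err_release_b:          /* labels fall through: reverse order */
		release_b();
	err_release_a:
		release_a();
		return ret;
	}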
-static int dwceqos_remove(struct platform_device *pdev)
-{
-       struct net_device *ndev = platform_get_drvdata(pdev);
-       struct net_local *lp;
-
-       if (ndev) {
-               lp = netdev_priv(ndev);
-
-               if (ndev->phydev) {
-                       phy_disconnect(ndev->phydev);
-                       if (of_phy_is_fixed_link(pdev->dev.of_node))
-                               of_phy_deregister_fixed_link(pdev->dev.of_node);
-               }
-               mdiobus_unregister(lp->mii_bus);
-               mdiobus_free(lp->mii_bus);
-
-               unregister_netdev(ndev);
-
-               clk_disable_unprepare(lp->phy_ref_clk);
-               clk_disable_unprepare(lp->apb_pclk);
-
-               free_netdev(ndev);
-       }
-
-       return 0;
-}
-
-static struct platform_driver dwceqos_driver = {
-       .probe   = dwceqos_probe,
-       .remove  = dwceqos_remove,
-       .driver  = {
-               .name  = DRIVER_NAME,
-               .of_match_table = dwceq_of_match,
-       },
-};
-
-module_platform_driver(dwceqos_driver);
-
-MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>");
-MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>");
index baa3e4a5731c7388c3f97a4e2e7be31a7df60706..f864fd0663dbf830bd94b1db4daa7a7b5ad3ce1d 100644 (file)
@@ -303,7 +303,7 @@ static int bdx_poll(struct napi_struct *napi, int budget)
                 * device lock and allow waiting tasks (eg rmmod) to advance) */
                priv->napi_stop = 0;
 
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                bdx_enable_interrupts(priv);
        }
        return work_done;
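This hunk is one instance of a tree-wide conversion: napi_complete_done(napi, work_done) tells the NAPI core how much work the poll actually did, information that napi_complete() discards and that later kernels feed into busy-polling and GRO flush-timeout decisions. The conversion is mechanical wherever work_done is already tracked; simplified from the surrounding bdx_poll() logic:

	/* before */
	if (work_done < budget) {
		napi_complete(napi);
		bdx_enable_interrupts(priv);
	}

	/* after: same control flow, richer accounting */
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		bdx_enable_interrupts(priv);
	}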
index 65088224c207d9b6155192281b551cc823045b48..9f3d9c67e3fe0f50b2d1119e74b7eac4b93e8bae 100644 (file)
@@ -145,6 +145,7 @@ do {                                                                \
                cpsw->data.active_slave)
 #define IRQ_NUM                        2
 #define CPSW_MAX_QUEUES                8
+#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
 
 static int debug_level;
 module_param(debug_level, int, 0);
@@ -158,6 +159,10 @@ static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
 module_param(rx_packet_max, int, 0);
 MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
 
+static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
+module_param(descs_pool_size, int, 0444);
+MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
+
 struct cpsw_wr_regs {
        u32     id_ver;
        u32     soft_reset;
@@ -352,7 +357,6 @@ struct cpsw_slave {
        struct phy_device               *phy;
        struct net_device               *ndev;
        u32                             port_vlan;
-       u32                             open_stat;
 };
 
 static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
@@ -395,6 +399,7 @@ struct cpsw_common {
        struct cpts                     *cpts;
        int                             rx_ch_num, tx_ch_num;
        int                             speed;
+       int                             usage_count;
 };
 
 struct cpsw_priv {
@@ -699,18 +704,9 @@ static void cpsw_rx_handler(void *token, int len, int status)
        cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb);
 
        if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
-               bool ndev_status = false;
-               struct cpsw_slave *slave = cpsw->slaves;
-               int n;
-
-               if (cpsw->data.dual_emac) {
-                       /* In dual emac mode check for all interfaces */
-                       for (n = cpsw->data.slaves; n; n--, slave++)
-                               if (netif_running(slave->ndev))
-                                       ndev_status = true;
-               }
-
-               if (ndev_status && (status >= 0)) {
+               /* In dual emac mode check for all interfaces */
+               if (cpsw->data.dual_emac && cpsw->usage_count &&
+                   (status >= 0)) {
                        /* The packet received is for the interface which
                         * is already down and the other interface is up
                         * and running, instead of freeing which results
@@ -934,7 +930,7 @@ static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
        }
 
        if (num_rx < budget) {
-               napi_complete(napi_rx);
+               napi_complete_done(napi_rx, num_rx);
                writel(0xff, &cpsw->wr_regs->rx_en);
                if (cpsw->quirk_irq && cpsw->rx_irq_disabled) {
                        cpsw->rx_irq_disabled = false;
@@ -1230,21 +1226,6 @@ static void cpsw_get_ethtool_stats(struct net_device *ndev,
        }
 }
 
-static int cpsw_common_res_usage_state(struct cpsw_common *cpsw)
-{
-       u32 i;
-       u32 usage_count = 0;
-
-       if (!cpsw->data.dual_emac)
-               return 0;
-
-       for (i = 0; i < cpsw->data.slaves; i++)
-               if (cpsw->slaves[i].open_stat)
-                       usage_count++;
-
-       return usage_count;
-}
-
 static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
                                        struct sk_buff *skb,
                                        struct cpdma_chan *txch)
@@ -1478,8 +1459,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
                return ret;
        }
 
-       if (!cpsw_common_res_usage_state(cpsw))
-               cpsw_intr_disable(cpsw);
        netif_carrier_off(ndev);
 
        /* Notify the stack of the actual queue counts. */
@@ -1501,8 +1480,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
                 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
                 CPSW_RTL_VERSION(reg));
 
-       /* initialize host and slave ports */
-       if (!cpsw_common_res_usage_state(cpsw))
+       /* Initialize host and slave ports */
+       if (!cpsw->usage_count)
                cpsw_init_host_port(priv);
        for_each_slave(priv, cpsw_slave_open, priv);
 
@@ -1513,7 +1492,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
                cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
                                  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
 
-       if (!cpsw_common_res_usage_state(cpsw)) {
+       /* initialize shared resources for every ndev */
+       if (!cpsw->usage_count) {
                /* disable priority elevation */
                __raw_writel(0, &cpsw->regs->ptype);
 
@@ -1555,9 +1535,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
 
        cpdma_ctlr_start(cpsw->dma);
        cpsw_intr_enable(cpsw);
-
-       if (cpsw->data.dual_emac)
-               cpsw->slaves[priv->emac_port].open_stat = true;
+       cpsw->usage_count++;
 
        return 0;
 
@@ -1578,7 +1556,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
        netif_tx_stop_all_queues(priv->ndev);
        netif_carrier_off(priv->ndev);
 
-       if (cpsw_common_res_usage_state(cpsw) <= 1) {
+       if (cpsw->usage_count <= 1) {
                napi_disable(&cpsw->napi_rx);
                napi_disable(&cpsw->napi_tx);
                cpts_unregister(cpsw->cpts);
@@ -1591,9 +1569,8 @@ static int cpsw_ndo_stop(struct net_device *ndev)
        if (cpsw_need_resplit(cpsw))
                cpsw_split_res(ndev);
 
+       cpsw->usage_count--;
        pm_runtime_put_sync(cpsw->dev);
-       if (cpsw->data.dual_emac)
-               cpsw->slaves[priv->emac_port].open_stat = false;
        return 0;
 }
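The hunks above and below replace cpsw_common_res_usage_state(), which rescanned every slave's open_stat flag, with a plain reference count on the shared cpsw_common: ndo_open takes a reference once shared resources are up, ndo_stop drops it, and "first open" / "last close" become simple comparisons. A minimal model; init_shared() and teardown_shared() are placeholders:

	struct shared { int usage_count; };

	static void init_shared(struct shared *s);
	static void teardown_shared(struct shared *s);

	static int dev_open(struct shared *s)
	{
		if (!s->usage_count)      /* first user sets up shared state */
			init_shared(s);
		s->usage_count++;
		return 0;
	}

	static int dev_stop(struct shared *s)
	{
		if (s->usage_count <= 1)  /* last user tears it down */
			teardown_shared(s);
		s->usage_count--;
		return 0;
	}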
 
@@ -1606,12 +1583,10 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
        struct cpdma_chan *txch;
        int ret, q_idx;
 
-       netif_trans_update(ndev);
-
        if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
                cpsw_err(priv, tx_err, "packet pad failed\n");
                ndev->stats.tx_dropped++;
-               return NETDEV_TX_OK;
+               return NET_XMIT_DROP;
        }
 
        if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
@@ -2363,17 +2338,11 @@ static int cpsw_update_channels(struct cpsw_priv *priv,
        return 0;
 }
 
-static int cpsw_set_channels(struct net_device *ndev,
-                            struct ethtool_channels *chs)
+static void cpsw_suspend_data_pass(struct net_device *ndev)
 {
-       struct cpsw_priv *priv = netdev_priv(ndev);
-       struct cpsw_common *cpsw = priv->cpsw;
+       struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
        struct cpsw_slave *slave;
-       int i, ret;
-
-       ret = cpsw_check_ch_settings(cpsw, chs);
-       if (ret < 0)
-               return ret;
+       int i;
 
        /* Disable NAPI scheduling */
        cpsw_intr_disable(cpsw);
@@ -2391,6 +2360,51 @@ static int cpsw_set_channels(struct net_device *ndev,
 
        /* Handle rest of tx packets and stop cpdma channels */
        cpdma_ctlr_stop(cpsw->dma);
+}
+
+static int cpsw_resume_data_pass(struct net_device *ndev)
+{
+       struct cpsw_priv *priv = netdev_priv(ndev);
+       struct cpsw_common *cpsw = priv->cpsw;
+       struct cpsw_slave *slave;
+       int i, ret;
+
+       /* Allow rx packets handling */
+       for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
+               if (slave->ndev && netif_running(slave->ndev))
+                       netif_dormant_off(slave->ndev);
+
+       /* After this receive is started */
+       if (cpsw->usage_count) {
+               ret = cpsw_fill_rx_channels(priv);
+               if (ret)
+                       return ret;
+
+               cpdma_ctlr_start(cpsw->dma);
+               cpsw_intr_enable(cpsw);
+       }
+
+       /* Resume transmit for every affected interface */
+       for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
+               if (slave->ndev && netif_running(slave->ndev))
+                       netif_tx_start_all_queues(slave->ndev);
+
+       return 0;
+}
+
+static int cpsw_set_channels(struct net_device *ndev,
+                            struct ethtool_channels *chs)
+{
+       struct cpsw_priv *priv = netdev_priv(ndev);
+       struct cpsw_common *cpsw = priv->cpsw;
+       struct cpsw_slave *slave;
+       int i, ret;
+
+       ret = cpsw_check_ch_settings(cpsw, chs);
+       if (ret < 0)
+               return ret;
+
+       cpsw_suspend_data_pass(ndev);
        ret = cpsw_update_channels(priv, chs);
        if (ret)
                goto err;
@@ -2413,30 +2427,14 @@ static int cpsw_set_channels(struct net_device *ndev,
                        dev_err(priv->dev, "cannot set real number of rx queues\n");
                        goto err;
                }
-
-               /* Enable rx packets handling */
-               netif_dormant_off(slave->ndev);
        }
 
-       if (cpsw_common_res_usage_state(cpsw)) {
-               ret = cpsw_fill_rx_channels(priv);
-               if (ret)
-                       goto err;
-
+       if (cpsw->usage_count)
                cpsw_split_res(ndev);
 
-               /* After this receive is started */
-               cpdma_ctlr_start(cpsw->dma);
-               cpsw_intr_enable(cpsw);
-       }
-
-       /* Resume transmit for every affected interface */
-       for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
-               if (!(slave->ndev && netif_running(slave->ndev)))
-                       continue;
-               netif_tx_start_all_queues(slave->ndev);
-       }
-       return 0;
+       ret = cpsw_resume_data_pass(ndev);
+       if (!ret)
+               return 0;
 err:
        dev_err(priv->dev, "cannot update channels number, closing device\n");
        dev_close(ndev);
@@ -2479,6 +2477,52 @@ static int cpsw_nway_reset(struct net_device *ndev)
                return -EOPNOTSUPP;
 }
 
+static void cpsw_get_ringparam(struct net_device *ndev,
+                              struct ethtool_ringparam *ering)
+{
+       struct cpsw_priv *priv = netdev_priv(ndev);
+       struct cpsw_common *cpsw = priv->cpsw;
+
+       /* not supported */
+       ering->tx_max_pending = 0;
+       ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
+       ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
+       ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
+}
+
+static int cpsw_set_ringparam(struct net_device *ndev,
+                             struct ethtool_ringparam *ering)
+{
+       struct cpsw_priv *priv = netdev_priv(ndev);
+       struct cpsw_common *cpsw = priv->cpsw;
+       int ret;
+
+       /* ignore ering->tx_pending - only rx_pending adjustment is supported */
+
+       if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
+           ering->rx_pending < CPSW_MAX_QUEUES ||
+           ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES))
+               return -EINVAL;
+
+       if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
+               return 0;
+
+       cpsw_suspend_data_pass(ndev);
+
+       cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
+
+       if (cpsw->usage_count)
+               cpdma_chan_split_pool(cpsw->dma);
+
+       ret = cpsw_resume_data_pass(ndev);
+       if (!ret)
+               return 0;
+
+       dev_err(&ndev->dev, "cannot set ring params, closing device\n");
+       dev_close(ndev);
+       return ret;
+}
+
 static const struct ethtool_ops cpsw_ethtool_ops = {
        .get_drvinfo    = cpsw_get_drvinfo,
        .get_msglevel   = cpsw_get_msglevel,
@@ -2505,6 +2549,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
        .get_eee        = cpsw_get_eee,
        .set_eee        = cpsw_set_eee,
        .nway_reset     = cpsw_nway_reset,
+       .get_ringparam = cpsw_get_ringparam,
+       .set_ringparam = cpsw_set_ringparam,
 };
 
 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
@@ -2969,6 +3015,7 @@ static int cpsw_probe(struct platform_device *pdev)
        dma_params.has_ext_regs         = true;
        dma_params.desc_hw_addr         = dma_params.desc_mem_phys;
        dma_params.bus_freq_mhz         = cpsw->bus_freq_mhz;
+       dma_params.descs_pool_size      = descs_pool_size;
 
        cpsw->dma = cpdma_ctlr_create(&dma_params);
        if (!cpsw->dma) {
@@ -2985,7 +3032,7 @@ static int cpsw_probe(struct platform_device *pdev)
                goto clean_dma_ret;
        }
 
-       ale_params.dev                  = &ndev->dev;
+       ale_params.dev                  = &pdev->dev;
        ale_params.ale_ageout           = ale_ageout;
        ale_params.ale_entries          = data->ale_entries;
        ale_params.ale_ports            = data->slaves;
@@ -3072,9 +3119,9 @@ static int cpsw_probe(struct platform_device *pdev)
                goto clean_ale_ret;
        }
 
-       cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
-                   &ss_res->start, ndev->irq);
-
+       cpsw_notice(priv, probe,
+                   "initialized device (regs %pa, irq %d, pool size %d)\n",
+                   &ss_res->start, ndev->irq, dma_params.descs_pool_size);
        if (cpsw->data.dual_emac) {
                ret = cpsw_probe_dual_emac(priv);
                if (ret) {
index 43b061bd8e0724f228939a5e7e0e38eb23cc30e5..ddd43e09111e2510558a84a2725946d563e162b1 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine
+ * Texas Instruments N-Port Ethernet Switch Address Lookup Engine
  *
  * Copyright (C) 2012 Texas Instruments
  *
 
 #define BITMASK(bits)          (BIT(bits) - 1)
 
-#define ALE_VERSION_MAJOR(rev) ((rev >> 8) & 0xff)
+#define ALE_VERSION_MAJOR(rev, mask) (((rev) >> 8) & (mask))
 #define ALE_VERSION_MINOR(rev) (rev & 0xff)
+#define ALE_VERSION_1R3                0x0103
+#define ALE_VERSION_1R4                0x0104
 
 /* ALE Registers */
 #define ALE_IDVER              0x00
+#define ALE_STATUS             0x04
 #define ALE_CONTROL            0x08
 #define ALE_PRESCALE           0x10
 #define ALE_UNKNOWNVLAN                0x18
 #define ALE_TABLE              0x34
 #define ALE_PORTCTL            0x40
 
+/* ALE NetCP NU switch specific Registers */
+#define ALE_UNKNOWNVLAN_MEMBER                 0x90
+#define ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD      0x94
+#define ALE_UNKNOWNVLAN_REG_MCAST_FLOOD                0x98
+#define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS     0x9C
+#define ALE_VLAN_MASK_MUX(reg)                 (0xc0 + (0x4 * (reg)))
+
 #define ALE_TABLE_WRITE                BIT(31)
 
 #define ALE_TYPE_FREE                  0
 #define ALE_UCAST_OUI                  2
 #define ALE_UCAST_TOUCHED              3
 
+#define ALE_TABLE_SIZE_MULTIPLIER      1024
+#define ALE_STATUS_SIZE_MASK           0x1f
+#define ALE_TABLE_SIZE_DEFAULT         64
+
 static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
 {
        int idx;
@@ -84,20 +98,34 @@ static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value)   \
        cpsw_ale_set_field(ale_entry, start, bits, value);              \
 }
 
+#define DEFINE_ALE_FIELD1(name, start)                                 \
+static inline int cpsw_ale_get_##name(u32 *ale_entry, u32 bits)                \
+{                                                                      \
+       return cpsw_ale_get_field(ale_entry, start, bits);              \
+}                                                                      \
+static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value,      \
+               u32 bits)                                               \
+{                                                                      \
+       cpsw_ale_set_field(ale_entry, start, bits, value);              \
+}
+
 DEFINE_ALE_FIELD(entry_type,           60,     2)
 DEFINE_ALE_FIELD(vlan_id,              48,     12)
 DEFINE_ALE_FIELD(mcast_state,          62,     2)
-DEFINE_ALE_FIELD(port_mask,            66,     3)
+DEFINE_ALE_FIELD1(port_mask,           66)
 DEFINE_ALE_FIELD(super,                        65,     1)
 DEFINE_ALE_FIELD(ucast_type,           62,     2)
-DEFINE_ALE_FIELD(port_num,             66,     2)
+DEFINE_ALE_FIELD1(port_num,            66)
 DEFINE_ALE_FIELD(blocked,              65,     1)
 DEFINE_ALE_FIELD(secure,               64,     1)
-DEFINE_ALE_FIELD(vlan_untag_force,     24,     3)
-DEFINE_ALE_FIELD(vlan_reg_mcast,       16,     3)
-DEFINE_ALE_FIELD(vlan_unreg_mcast,     8,      3)
-DEFINE_ALE_FIELD(vlan_member_list,     0,      3)
+DEFINE_ALE_FIELD1(vlan_untag_force,    24)
+DEFINE_ALE_FIELD1(vlan_reg_mcast,      16)
+DEFINE_ALE_FIELD1(vlan_unreg_mcast,    8)
+DEFINE_ALE_FIELD1(vlan_member_list,    0)
 DEFINE_ALE_FIELD(mcast,                        40,     1)
+/* ALE NetCP NU switch specific */
+DEFINE_ALE_FIELD(vlan_unreg_mcast_idx, 20,     3)
+DEFINE_ALE_FIELD(vlan_reg_mcast_idx,   44,     3)
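The DEFINE_ALE_FIELD1() variant generates accessors that take the field width at run time instead of baking it into the macro, since the width of these fields now depends on the ALE flavor (see port_mask_bits et al. below). A minimal usage sketch; the width of 5 is an illustrative assumption, in practice it comes from ale->port_mask_bits:

static void example_toggle_port(u32 *ale_entry, int port)
{
	int mask;

	/* read the port mask, assuming a 5-bit wide field */
	mask = cpsw_ale_get_port_mask(ale_entry, 5);
	mask ^= BIT(port);			/* toggle one port */
	/* write it back with the same width */
	cpsw_ale_set_port_mask(ale_entry, mask, 5);
}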
 
 /* The MAC address field in the ALE entry cannot be macroized as above */
 static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
@@ -223,14 +251,16 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
 {
        int mask;
 
-       mask = cpsw_ale_get_port_mask(ale_entry);
+       mask = cpsw_ale_get_port_mask(ale_entry,
+                                     ale->port_mask_bits);
        if ((mask & port_mask) == 0)
                return; /* ports don't intersect, not interested */
        mask &= ~port_mask;
 
        /* free if only remaining port is host port */
        if (mask)
-               cpsw_ale_set_port_mask(ale_entry, mask);
+               cpsw_ale_set_port_mask(ale_entry, mask,
+                                      ale->port_mask_bits);
        else
                cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
 }
@@ -291,7 +321,7 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
        cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
        cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
        cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
-       cpsw_ale_set_port_num(ale_entry, port);
+       cpsw_ale_set_port_num(ale_entry, port, ale->port_num_bits);
 
        idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
        if (idx < 0)
@@ -338,9 +368,11 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
        cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
        cpsw_ale_set_mcast_state(ale_entry, mcast_state);
 
-       mask = cpsw_ale_get_port_mask(ale_entry);
+       mask = cpsw_ale_get_port_mask(ale_entry,
+                                     ale->port_mask_bits);
        port_mask |= mask;
-       cpsw_ale_set_port_mask(ale_entry, port_mask);
+       cpsw_ale_set_port_mask(ale_entry, port_mask,
+                              ale->port_mask_bits);
 
        if (idx < 0)
                idx = cpsw_ale_match_free(ale);
@@ -367,7 +399,8 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
        cpsw_ale_read(ale, idx, ale_entry);
 
        if (port_mask)
-               cpsw_ale_set_port_mask(ale_entry, port_mask);
+               cpsw_ale_set_port_mask(ale_entry, port_mask,
+                                      ale->port_mask_bits);
        else
                cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
 
@@ -376,6 +409,21 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
 }
 EXPORT_SYMBOL_GPL(cpsw_ale_del_mcast);
 
+/* ALE NetCP NU switch specific vlan functions */
+static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+                                   int reg_mcast, int unreg_mcast)
+{
+       int idx;
+
+       /* Set VLAN registered multicast flood mask */
+       idx = cpsw_ale_get_vlan_reg_mcast_idx(ale_entry);
+       writel(reg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
+
+       /* Set VLAN unregistered multicast flood mask */
+       idx = cpsw_ale_get_vlan_unreg_mcast_idx(ale_entry);
+       writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
+}
+
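On the NU switch the per-VLAN multicast flood masks are not stored in the ALE entry itself: the entry holds only a 3-bit index, and the mask lives in one of the ALE_VLAN_MASK_MUX registers. A hypothetical read-back counterpart to cpsw_ale_set_vlan_mcast() above, not part of this patch:

static void cpsw_ale_get_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry,
				    int *reg_mcast, int *unreg_mcast)
{
	int idx;

	/* Look up the registered multicast flood mask via its index */
	idx = cpsw_ale_get_vlan_reg_mcast_idx(ale_entry);
	*reg_mcast = readl(ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));

	/* Likewise for the unregistered multicast flood mask */
	idx = cpsw_ale_get_vlan_unreg_mcast_idx(ale_entry);
	*unreg_mcast = readl(ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
}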
 int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
                      int reg_mcast, int unreg_mcast)
 {
@@ -389,10 +437,16 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
        cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN);
        cpsw_ale_set_vlan_id(ale_entry, vid);
 
-       cpsw_ale_set_vlan_untag_force(ale_entry, untag);
-       cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast);
-       cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
-       cpsw_ale_set_vlan_member_list(ale_entry, port);
+       cpsw_ale_set_vlan_untag_force(ale_entry, untag, ale->vlan_field_bits);
+       if (!ale->params.nu_switch_ale) {
+               cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast,
+                                           ale->vlan_field_bits);
+               cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
+                                             ale->vlan_field_bits);
+       } else {
+               cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast);
+       }
+       cpsw_ale_set_vlan_member_list(ale_entry, port, ale->vlan_field_bits);
 
        if (idx < 0)
                idx = cpsw_ale_match_free(ale);
@@ -418,7 +472,8 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
        cpsw_ale_read(ale, idx, ale_entry);
 
        if (port_mask)
-               cpsw_ale_set_vlan_member_list(ale_entry, port_mask);
+               cpsw_ale_set_vlan_member_list(ale_entry, port_mask,
+                                             ale->vlan_field_bits);
        else
                cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
 
@@ -446,12 +501,15 @@ void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
                if (type != ALE_TYPE_VLAN)
                        continue;
 
-               unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry);
+               unreg_mcast =
+                       cpsw_ale_get_vlan_unreg_mcast(ale_entry,
+                                                     ale->vlan_field_bits);
                if (allmulti)
                        unreg_mcast |= 1;
                else
                        unreg_mcast &= ~1;
-               cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
+               cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
+                                             ale->vlan_field_bits);
                cpsw_ale_write(ale, idx, ale_entry);
        }
 }
@@ -464,7 +522,7 @@ struct ale_control_info {
        int             bits;
 };
 
-static const struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
+static struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
        [ALE_ENABLE]            = {
                .name           = "enable",
                .offset         = ALE_CONTROL,
@@ -721,11 +779,83 @@ static void cpsw_ale_timer(unsigned long arg)
 
 void cpsw_ale_start(struct cpsw_ale *ale)
 {
-       u32 rev;
+       u32 rev, ale_entries;
 
        rev = __raw_readl(ale->params.ale_regs + ALE_IDVER);
-       dev_dbg(ale->params.dev, "initialized cpsw ale revision %d.%d\n",
-               ALE_VERSION_MAJOR(rev), ALE_VERSION_MINOR(rev));
+       if (!ale->params.major_ver_mask)
+               ale->params.major_ver_mask = 0xff;
+       ale->version =
+               (ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask) << 8) |
+                ALE_VERSION_MINOR(rev);
+       dev_info(ale->params.dev, "initialized cpsw ale version %d.%d\n",
+                ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask),
+                ALE_VERSION_MINOR(rev));
+
+       if (!ale->params.ale_entries) {
+               ale_entries =
+                       __raw_readl(ale->params.ale_regs + ALE_STATUS) &
+                                   ALE_STATUS_SIZE_MASK;
+               /* Newer NetCP switches provide an ALE_STATUS register
+                * that reports the size of the ALE table as a multiple
+                * of 1024 entries. For these, params.ale_entries will
+                * be set to zero, so read the register and compute
+                * ale_entries from it. The ALE table on NetCP lite is
+                * much smaller and is indicated by a value of zero in
+                * ALE_STATUS, so use ALE_TABLE_SIZE_DEFAULT for it.
+                * The caller is expected to set ale_entries for all
+                * other versions of ALE.
+                */
+               if (!ale_entries)
+                       ale_entries = ALE_TABLE_SIZE_DEFAULT;
+               else
+                       ale_entries *= ALE_TABLE_SIZE_MULTIPLIER;
+               ale->params.ale_entries = ale_entries;
+       }
+       dev_info(ale->params.dev,
+                "ALE Table size %ld\n", ale->params.ale_entries);
+
+       /* set default bits for existing h/w */
+       ale->port_mask_bits = 3;
+       ale->port_num_bits = 2;
+       ale->vlan_field_bits = 3;
+
+       /* Set defaults override for ALE on NetCP NU switch and for version
+        * 1R3
+        */
+       if (ale->params.nu_switch_ale) {
+               /* The NU switch has separate registers for the unknown
+                * VLAN configuration; each field is N bits wide, where
+                * N is the number of ALE ports, and the shift value
+                * should be 0.
+                */
+               ale_controls[ALE_PORT_UNKNOWN_VLAN_MEMBER].bits =
+                                       ale->params.ale_ports;
+               ale_controls[ALE_PORT_UNKNOWN_VLAN_MEMBER].offset =
+                                       ALE_UNKNOWNVLAN_MEMBER;
+               ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].bits =
+                                       ale->params.ale_ports;
+               ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].shift = 0;
+               ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].offset =
+                                       ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD;
+               ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].bits =
+                                       ale->params.ale_ports;
+               ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].shift = 0;
+               ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].offset =
+                                       ALE_UNKNOWNVLAN_REG_MCAST_FLOOD;
+               ale_controls[ALE_PORT_UNTAGGED_EGRESS].bits =
+                                       ale->params.ale_ports;
+               ale_controls[ALE_PORT_UNTAGGED_EGRESS].shift = 0;
+               ale_controls[ALE_PORT_UNTAGGED_EGRESS].offset =
+                                       ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS;
+               ale->port_mask_bits = ale->params.ale_ports;
+               ale->port_num_bits = ale->params.ale_ports - 1;
+               ale->vlan_field_bits = ale->params.ale_ports;
+       } else if (ale->version == ALE_VERSION_1R3) {
+               ale->port_mask_bits = ale->params.ale_ports;
+               ale->port_num_bits = 3;
+               ale->vlan_field_bits = ale->params.ale_ports;
+       }
+
        cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
        cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
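The table-size probing above boils down to decoding the low five bits of ALE_STATUS. As a standalone sketch using the same constants as the patch:

/* Decode the ALE table size from ALE_STATUS: a zero field means
 * NetCP lite (64 entries), otherwise the field gives the size in
 * units of 1024 entries.
 */
static unsigned long ale_table_size(u32 status)
{
	u32 sz = status & ALE_STATUS_SIZE_MASK;

	return sz ? sz * ALE_TABLE_SIZE_MULTIPLIER : ALE_TABLE_SIZE_DEFAULT;
}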
 
index a7001894f3daef3369050714c38e001de88346aa..25d24e8d0904b5135e303aa09c72a65cf81b19de 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Texas Instruments 3-Port Ethernet Switch Address Lookup Engine APIs
+ * Texas Instruments N-Port Ethernet Switch Address Lookup Engine APIs
  *
  * Copyright (C) 2012 Texas Instruments
  *
@@ -21,6 +21,16 @@ struct cpsw_ale_params {
        unsigned long           ale_ageout;     /* in secs */
        unsigned long           ale_entries;
        unsigned long           ale_ports;
+       /* The NU switch needs specific handling: the fields in its ALE
+        * entries have different bit widths than in other versions of
+        * ALE, and it has dedicated registers for the unknown-VLAN
+        * fields. Use nu_switch_ale to identify this hardware.
+        */
+       bool                    nu_switch_ale;
+       /* The major version mask in the NU switch ALE is 3 bits wide
+        * instead of 8, so the caller must pass it in.
+        */
+       u32                     major_ver_mask;
 };
 
 struct cpsw_ale {
@@ -28,6 +38,11 @@ struct cpsw_ale {
        struct timer_list       timer;
        unsigned long           ageout;
        int                     allmulti;
+       u32                     version;
+       /* These bits are different on NetCP NU Switch ALE */
+       u32                     port_mask_bits;
+       u32                     port_num_bits;
+       u32                     vlan_field_bits;
 };
 
 enum cpsw_ale_control {
index 36518fc5c7cc822a98a949b9d3ac47b5aaa89f1b..7ecc6b70e7e898a5b0bd052cf60423d942905131 100644 (file)
@@ -108,6 +108,8 @@ struct cpdma_ctlr {
        spinlock_t              lock;
        struct cpdma_chan       *channels[2 * CPDMA_MAX_CHANNELS];
        int chan_num;
+       int                     num_rx_desc; /* RX descriptors number */
+       int                     num_tx_desc; /* TX descriptors number */
 };
 
 struct cpdma_chan {
@@ -166,12 +168,12 @@ static struct cpdma_control_info controls[] = {
 #define num_chan       params.num_chan
 
 /* various accessors */
-#define dma_reg_read(ctlr, ofs)                __raw_readl((ctlr)->dmaregs + (ofs))
-#define chan_read(chan, fld)           __raw_readl((chan)->fld)
-#define desc_read(desc, fld)           __raw_readl(&(desc)->fld)
-#define dma_reg_write(ctlr, ofs, v)    __raw_writel(v, (ctlr)->dmaregs + (ofs))
-#define chan_write(chan, fld, v)       __raw_writel(v, (chan)->fld)
-#define desc_write(desc, fld, v)       __raw_writel((u32)(v), &(desc)->fld)
+#define dma_reg_read(ctlr, ofs)                readl((ctlr)->dmaregs + (ofs))
+#define chan_read(chan, fld)           readl((chan)->fld)
+#define desc_read(desc, fld)           readl(&(desc)->fld)
+#define dma_reg_write(ctlr, ofs, v)    writel(v, (ctlr)->dmaregs + (ofs))
+#define chan_write(chan, fld, v)       writel(v, (chan)->fld)
+#define desc_write(desc, fld, v)       writel((u32)(v), &(desc)->fld)
 
 #define cpdma_desc_to_port(chan, mode, directed)                       \
        do {                                                            \
@@ -181,8 +183,10 @@ static struct cpdma_control_info controls[] = {
                                 (directed << CPDMA_TO_PORT_SHIFT));    \
        } while (0)
 
-static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
+static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
 {
+       struct cpdma_desc_pool *pool = ctlr->pool;
+
        if (!pool)
                return;
 
@@ -191,10 +195,8 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
             gen_pool_size(pool->gen_pool),
             gen_pool_avail(pool->gen_pool));
        if (pool->cpumap)
-               dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
+               dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
                                  pool->phys);
-       else
-               iounmap(pool->iomap);
 }
 
 /*
@@ -203,37 +205,50 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
  * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
  * abstract out these details
  */
-static struct cpdma_desc_pool *
-cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
-                               int size, int align)
+int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
 {
+       struct cpdma_params *cpdma_params = &ctlr->params;
        struct cpdma_desc_pool *pool;
-       int ret;
+       int ret = -ENOMEM;
 
-       pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
+       pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
        if (!pool)
                goto gen_pool_create_fail;
+       ctlr->pool = pool;
+
+       pool->mem_size  = cpdma_params->desc_mem_size;
+       pool->desc_size = ALIGN(sizeof(struct cpdma_desc),
+                               cpdma_params->desc_align);
+       pool->num_desc  = pool->mem_size / pool->desc_size;
+
+       if (cpdma_params->descs_pool_size) {
+               /* Recalculate the memory size required for the cpdma
+                * descriptor pool based on the number of descriptors
+                * specified by the user; if that size exceeds the CPPI
+                * internal RAM size (desc_mem_size), switch to DDR.
+                */
+               pool->num_desc = cpdma_params->descs_pool_size;
+               pool->mem_size = pool->desc_size * pool->num_desc;
+               if (pool->mem_size > cpdma_params->desc_mem_size)
+                       cpdma_params->desc_mem_phys = 0;
+       }
 
-       pool->dev       = dev;
-       pool->mem_size  = size;
-       pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
-       pool->num_desc  = size / pool->desc_size;
-
-       pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
-                                             "cpdma");
+       pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
+                                             -1, "cpdma");
        if (IS_ERR(pool->gen_pool)) {
-               dev_err(dev, "pool create failed %ld\n",
-                       PTR_ERR(pool->gen_pool));
+               ret = PTR_ERR(pool->gen_pool);
+               dev_err(ctlr->dev, "pool create failed %d\n", ret);
                goto gen_pool_create_fail;
        }
 
-       if (phys) {
-               pool->phys  = phys;
-               pool->iomap = ioremap(phys, size); /* should be memremap? */
-               pool->hw_addr = hw_addr;
+       if (cpdma_params->desc_mem_phys) {
+               pool->phys  = cpdma_params->desc_mem_phys;
+               pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
+                                          pool->mem_size);
+               pool->hw_addr = cpdma_params->desc_hw_addr;
        } else {
-               pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
-                                                 GFP_KERNEL);
+               pool->cpumap = dma_alloc_coherent(ctlr->dev,  pool->mem_size,
+                                                 &pool->hw_addr, GFP_KERNEL);
                pool->iomap = (void __iomem __force *)pool->cpumap;
                pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
        }
@@ -244,16 +259,17 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
        ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
                                pool->phys, pool->mem_size, -1);
        if (ret < 0) {
-               dev_err(dev, "pool add failed %d\n", ret);
+               dev_err(ctlr->dev, "pool add failed %d\n", ret);
                goto gen_pool_add_virt_fail;
        }
 
-       return pool;
+       return 0;
 
 gen_pool_add_virt_fail:
-       cpdma_desc_pool_destroy(pool);
+       cpdma_desc_pool_destroy(ctlr);
 gen_pool_create_fail:
-       return NULL;
+       ctlr->pool = NULL;
+       return ret;
 }
 
 static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
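When descs_pool_size is set, the pool geometry is derived from the requested descriptor count rather than from the fixed CPPI RAM size. A worked example of the override math; the concrete numbers are assumptions, not taken from this patch:

/*
 * desc_size = ALIGN(sizeof(struct cpdma_desc), desc_align), say 64 B
 * descs_pool_size = 1024 (user-requested)
 *   => mem_size = 64 * 1024 = 65536 B (64 KiB)
 * If the CPPI internal RAM (desc_mem_size) is only 8 KiB, then
 * mem_size > desc_mem_size, desc_mem_phys is cleared, and the pool
 * is allocated from coherent DDR via dma_alloc_coherent() instead.
 */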
@@ -502,13 +518,11 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
        ctlr->chan_num = 0;
        spin_lock_init(&ctlr->lock);
 
-       ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
-                                           ctlr->params.desc_mem_phys,
-                                           ctlr->params.desc_hw_addr,
-                                           ctlr->params.desc_mem_size,
-                                           ctlr->params.desc_align);
-       if (!ctlr->pool)
+       if (cpdma_desc_pool_create(ctlr))
                return NULL;
+       /* split pool equally between RX/TX by default */
+       ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
+       ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;
 
        if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
                ctlr->num_chan = CPDMA_MAX_CHANNELS;
@@ -542,10 +556,10 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
        }
 
        for (i = 0; i < ctlr->num_chan; i++) {
-               __raw_writel(0, ctlr->params.txhdp + 4 * i);
-               __raw_writel(0, ctlr->params.rxhdp + 4 * i);
-               __raw_writel(0, ctlr->params.txcp + 4 * i);
-               __raw_writel(0, ctlr->params.rxcp + 4 * i);
+               writel(0, ctlr->params.txhdp + 4 * i);
+               writel(0, ctlr->params.rxhdp + 4 * i);
+               writel(0, ctlr->params.txcp + 4 * i);
+               writel(0, ctlr->params.rxcp + 4 * i);
        }
 
        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
@@ -623,7 +637,7 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
                cpdma_chan_destroy(ctlr->channels[i]);
 
-       cpdma_desc_pool_destroy(ctlr->pool);
+       cpdma_desc_pool_destroy(ctlr);
        return ret;
 }
 EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
@@ -708,22 +722,22 @@ static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
                }
        }
        /* use remains */
-       most_chan->desc_num += desc_cnt;
+       if (most_chan)
+               most_chan->desc_num += desc_cnt;
 }
 
 /**
  * cpdma_chan_split_pool - Splits ctrl pool between all channels.
  * Has to be called under ctlr lock
  */
-static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
+int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
 {
        int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
-       struct cpdma_desc_pool *pool = ctlr->pool;
        int free_rx_num = 0, free_tx_num = 0;
        int rx_weight = 0, tx_weight = 0;
        int tx_desc_num, rx_desc_num;
        struct cpdma_chan *chan;
-       int i, tx_num = 0;
+       int i;
 
        if (!ctlr->chan_num)
                return 0;
@@ -741,15 +755,14 @@ static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
                        if (!chan->weight)
                                free_tx_num++;
                        tx_weight += chan->weight;
-                       tx_num++;
                }
        }
 
        if (rx_weight > 100 || tx_weight > 100)
                return -EINVAL;
 
-       tx_desc_num = (tx_num * pool->num_desc) / ctlr->chan_num;
-       rx_desc_num = pool->num_desc - tx_desc_num;
+       tx_desc_num = ctlr->num_tx_desc;
+       rx_desc_num = ctlr->num_rx_desc;
 
        if (free_tx_num) {
                tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
@@ -765,6 +778,8 @@ static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(cpdma_chan_split_pool);
+
 
 /* cpdma_chan_set_weight - set weight of a channel in percentage.
  * Tx and Rx channels have separate weights. That is 100% for RX
@@ -820,8 +835,8 @@ EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate);
  */
 int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
 {
-       struct cpdma_ctlr *ctlr = ch->ctlr;
        unsigned long flags, ch_flags;
+       struct cpdma_ctlr *ctlr;
        int ret, prio_mode;
        u32 rmask;
 
@@ -831,6 +846,7 @@ int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
        if (ch->rate == rate)
                return rate;
 
+       ctlr = ch->ctlr;
        spin_lock_irqsave(&ctlr->lock, flags);
        spin_lock_irqsave(&ch->lock, ch_flags);
 
@@ -898,7 +914,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
        chan->chan_num  = chan_num;
        chan->handler   = handler;
        chan->rate      = 0;
-       chan->desc_num = ctlr->pool->num_desc / 2;
        chan->weight    = 0;
 
        if (is_rx_chan(chan)) {
@@ -1061,13 +1076,17 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
        mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
        cpdma_desc_to_port(chan, mode, directed);
 
-       desc_write(desc, hw_next,   0);
-       desc_write(desc, hw_buffer, buffer);
-       desc_write(desc, hw_len,    len);
-       desc_write(desc, hw_mode,   mode | len);
-       desc_write(desc, sw_token,  token);
-       desc_write(desc, sw_buffer, buffer);
-       desc_write(desc, sw_len,    len);
+       /* Relaxed IO accessors can be used here as there is a readback
+        * of the last written word at the end of the write sequence.
+        */
+       writel_relaxed(0, &desc->hw_next);
+       writel_relaxed(buffer, &desc->hw_buffer);
+       writel_relaxed(len, &desc->hw_len);
+       writel_relaxed(mode | len, &desc->hw_mode);
+       writel_relaxed(token, &desc->sw_token);
+       writel_relaxed(buffer, &desc->sw_buffer);
+       writel_relaxed(len, &desc->sw_len);
+       desc_read(desc, sw_len);
 
        __cpdma_chan_submit(chan, desc);
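The shape used here, several relaxed writes closed by one read of the last word written, is a generic way to batch descriptor field writes before handing the descriptor to the hardware. A sketch of the pattern; the descriptor type and field names are placeholders, not from this patch:

static void example_fill_desc(struct example_desc __iomem *d,
			      u32 buf, u32 len)
{
	/* batch the field writes with relaxed accessors */
	writel_relaxed(0, &d->next);
	writel_relaxed(buf, &d->buffer);
	writel_relaxed(len, &d->len);
	/* read one field back so all writes have reached the device
	 * before the descriptor is queued to the hardware
	 */
	readl(&d->len);
}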
 
@@ -1136,7 +1155,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
        }
        desc_dma = desc_phys(pool, desc);
 
-       status  = __raw_readl(&desc->hw_mode);
+       status  = desc_read(desc, hw_mode);
        outlen  = status & 0x7ff;
        if (status & CPDMA_DESC_OWNER) {
                chan->stats.busy_dequeue++;
@@ -1155,7 +1174,7 @@ static int __cpdma_chan_process(struct cpdma_chan *chan)
        chan->count--;
        chan->stats.good_dequeue++;
 
-       if (status & CPDMA_DESC_EOQ) {
+       if ((status & CPDMA_DESC_EOQ) && chan->head) {
                chan->stats.requeue++;
                chan_write(chan, hdp, desc_phys(pool, chan->head));
        }
@@ -1316,4 +1335,23 @@ int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
 }
 EXPORT_SYMBOL_GPL(cpdma_control_set);
 
+int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
+{
+       return ctlr->num_rx_desc;
+}
+EXPORT_SYMBOL_GPL(cpdma_get_num_rx_descs);
+
+int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
+{
+       return ctlr->num_tx_desc;
+}
+EXPORT_SYMBOL_GPL(cpdma_get_num_tx_descs);
+
+void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
+{
+       ctlr->num_rx_desc = num_rx_desc;
+       ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
+}
+EXPORT_SYMBOL_GPL(cpdma_set_num_rx_descs);
+
 MODULE_LICENSE("GPL");
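The new accessors let a caller rebias the RX/TX descriptor split after the pool is created; the per-channel budgets then need to be redistributed with cpdma_chan_split_pool(). A usage sketch, with locking and error handling elided:

static void example_rebias_pool(struct cpdma_ctlr *ctlr)
{
	int total = cpdma_get_num_rx_descs(ctlr) +
		    cpdma_get_num_tx_descs(ctlr);

	/* give RX 75% of the pool; TX gets the remainder */
	cpdma_set_num_rx_descs(ctlr, (total * 3) / 4);
	cpdma_chan_split_pool(ctlr);
}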
index 4a167db2ababfed5adad4d9f8448dd78bc8971e8..fd65ce2b83deb0d715cb397cad1b9c59c3f59d1d 100644 (file)
@@ -37,6 +37,7 @@ struct cpdma_params {
        int                     desc_mem_size;
        int                     desc_align;
        u32                     bus_freq_mhz;
+       u32                     descs_pool_size;
 
        /*
         * Some instances of embedded cpdma controllers have extra control and
@@ -113,5 +114,9 @@ enum cpdma_control {
 
 int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
 int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
+int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr);
+void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc);
+int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr);
+int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr);
 
 #endif
index 481c7bf0395bfcdc3add56da8b01b61432415d81..64d5527feb2ac20c3229c667d806039050f03a0a 100644 (file)
@@ -1295,7 +1295,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
                                        &emac_rxhost_errcodes[cause][0], ch);
                }
        } else if (num_rx_pkts < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, num_rx_pkts);
                emac_int_enable(priv);
        }
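This hunk, and the many similar ones below, switch poll handlers from napi_complete() to napi_complete_done(), which additionally reports how much of the budget was consumed so the core can drive busy polling and interrupt moderation. The canonical poll shape after the conversion; names are placeholders, not from any one driver in this patch:

static int example_poll(struct napi_struct *napi, int budget)
{
	int work = example_rx_process(napi, budget);	/* hypothetical */

	if (work < budget) {
		/* done for now: report work and re-enable interrupts */
		napi_complete_done(napi, work);
		example_enable_rx_irq(napi);		/* hypothetical */
	}
	return work;
}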
 
index 0f58c584ae09cd04f89de518ba3b4f7432049456..8900a6fad318c497b2957210939854b01bc8b631 100644 (file)
@@ -23,6 +23,7 @@
 
 #include <linux/netdevice.h>
 #include <linux/soc/ti/knav_dma.h>
+#include <linux/u64_stats_sync.h>
 
 /* Maximum Ethernet frame size supported by Keystone switch */
 #define NETCP_MAX_FRAME_SIZE           9504
@@ -68,6 +69,20 @@ struct netcp_addr {
        struct list_head        node;
 };
 
+struct netcp_stats {
+       struct u64_stats_sync   syncp_rx ____cacheline_aligned_in_smp;
+       u64                     rx_packets;
+       u64                     rx_bytes;
+       u32                     rx_errors;
+       u32                     rx_dropped;
+
+       struct u64_stats_sync   syncp_tx ____cacheline_aligned_in_smp;
+       u64                     tx_packets;
+       u64                     tx_bytes;
+       u32                     tx_errors;
+       u32                     tx_dropped;
+};
+
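Keeping separate syncp_rx and syncp_tx seqcounts lets the RX and TX paths update their 64-bit counters concurrently without sharing a lock; on 64-bit architectures the seqcount compiles away entirely. A minimal writer/reader pair for the RX side, matching how the driver code below uses it:

static void example_rx_account(struct netcp_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp_rx);
	s->rx_packets++;
	s->rx_bytes += len;
	u64_stats_update_end(&s->syncp_rx);
}

static u64 example_read_rx_packets(struct netcp_stats *s)
{
	unsigned int start;
	u64 pkts;

	/* retry if a writer updated the counters mid-read */
	do {
		start = u64_stats_fetch_begin_irq(&s->syncp_rx);
		pkts = s->rx_packets;
	} while (u64_stats_fetch_retry_irq(&s->syncp_rx, start));

	return pkts;
}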
 struct netcp_intf {
        struct device           *dev;
        struct device           *ndev_dev;
@@ -87,6 +102,11 @@ struct netcp_intf {
        void                    *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
        struct napi_struct      rx_napi;
        struct napi_struct      tx_napi;
+#define ETH_SW_CAN_REMOVE_ETH_FCS      BIT(0)
+       u32                     hw_cap;
+
+       /* 64-bit netcp stats */
+       struct netcp_stats      stats;
 
        void                    *rx_channel;
        const char              *dma_chan_name;
@@ -115,6 +135,7 @@ struct netcp_packet {
        struct sk_buff          *skb;
        __le32                  *epib;
        u32                     *psdata;
+       u32                     eflags;
        unsigned int            psdata_len;
        struct netcp_intf       *netcp;
        struct netcp_tx_pipe    *tx_pipe;
index c243335ed6496546aa8b0c23547ed7d560f4ba96..7c7ae0890e90c450c2228e44cf618cdfdedcceec 100644 (file)
@@ -122,6 +122,13 @@ static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
        *ndesc = le32_to_cpu(desc->next_desc);
 }
 
+static void get_desc_info(u32 *desc_info, u32 *pkt_info,
+                         struct knav_dma_desc *desc)
+{
+       *desc_info = le32_to_cpu(desc->desc_info);
+       *pkt_info = le32_to_cpu(desc->packet_info);
+}
+
 static u32 get_sw_data(int index, struct knav_dma_desc *desc)
 {
        /* No Endian conversion needed as this data is untouched by hw */
@@ -622,6 +629,7 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
 
 static void netcp_empty_rx_queue(struct netcp_intf *netcp)
 {
+       struct netcp_stats *rx_stats = &netcp->stats;
        struct knav_dma_desc *desc;
        unsigned int dma_sz;
        dma_addr_t dma;
@@ -635,16 +643,17 @@ static void netcp_empty_rx_queue(struct netcp_intf *netcp)
                if (unlikely(!desc)) {
                        dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
                                __func__);
-                       netcp->ndev->stats.rx_errors++;
+                       rx_stats->rx_errors++;
                        continue;
                }
                netcp_free_rx_desc_chain(netcp, desc);
-               netcp->ndev->stats.rx_dropped++;
+               rx_stats->rx_dropped++;
        }
 }
 
 static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 {
+       struct netcp_stats *rx_stats = &netcp->stats;
        unsigned int dma_sz, buf_len, org_buf_len;
        struct knav_dma_desc *desc, *ndesc;
        unsigned int pkt_sz = 0, accum_sz;
@@ -653,6 +662,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
        struct netcp_packet p_info;
        struct sk_buff *skb;
        void *org_buf_ptr;
+       u32 tmp;
 
        dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
        if (!dma_desc)
@@ -724,21 +734,27 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
                knav_pool_desc_put(netcp->rx_pool, ndesc);
        }
 
-       /* Free the primary descriptor */
-       knav_pool_desc_put(netcp->rx_pool, desc);
-
        /* check for packet len and warn */
        if (unlikely(pkt_sz != accum_sz))
                dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
                        pkt_sz, accum_sz);
 
-       /* Remove ethernet FCS from the packet */
-       __pskb_trim(skb, skb->len - ETH_FCS_LEN);
+       /* Newer versions of the Ethernet switch can trim the Ethernet
+        * FCS from the packet themselves, as indicated in hw_cap, so
+        * trim it here only for older h/w.
+        */
+       if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS))
+               __pskb_trim(skb, skb->len - ETH_FCS_LEN);
 
        /* Call each of the RX hooks */
        p_info.skb = skb;
        skb->dev = netcp->ndev;
        p_info.rxtstamp_complete = false;
+       get_desc_info(&tmp, &p_info.eflags, desc);
+       p_info.epib = desc->epib;
+       p_info.psdata = (u32 __force *)desc->psdata;
+       p_info.eflags = ((p_info.eflags >> KNAV_DMA_DESC_EFLAGS_SHIFT) &
+                        KNAV_DMA_DESC_EFLAGS_MASK);
        list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
                int ret;
 
@@ -747,14 +763,20 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
                if (unlikely(ret)) {
                        dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
                                rx_hook->order, ret);
-                       netcp->ndev->stats.rx_errors++;
+                       /* Free the primary descriptor */
+                       rx_stats->rx_dropped++;
+                       knav_pool_desc_put(netcp->rx_pool, desc);
                        dev_kfree_skb(skb);
                        return 0;
                }
        }
+       /* Free the primary descriptor */
+       knav_pool_desc_put(netcp->rx_pool, desc);
 
-       netcp->ndev->stats.rx_packets++;
-       netcp->ndev->stats.rx_bytes += skb->len;
+       u64_stats_update_begin(&rx_stats->syncp_rx);
+       rx_stats->rx_packets++;
+       rx_stats->rx_bytes += skb->len;
+       u64_stats_update_end(&rx_stats->syncp_rx);
 
        /* push skb up the stack */
        skb->protocol = eth_type_trans(skb, netcp->ndev);
@@ -763,7 +785,7 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
 
 free_desc:
        netcp_free_rx_desc_chain(netcp, desc);
-       netcp->ndev->stats.rx_errors++;
+       rx_stats->rx_errors++;
        return 0;
 }
 
@@ -947,7 +969,7 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget)
 
        netcp_rxpool_refill(netcp);
        if (packets < budget) {
-               napi_complete(&netcp->rx_napi);
+               napi_complete_done(&netcp->rx_napi, packets);
                knav_queue_enable_notify(netcp->rx_queue);
        }
 
@@ -994,6 +1016,7 @@ static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
 static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
                                          unsigned int budget)
 {
+       struct netcp_stats *tx_stats = &netcp->stats;
        struct knav_dma_desc *desc;
        struct netcp_tx_cb *tx_cb;
        struct sk_buff *skb;
@@ -1008,7 +1031,7 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
                desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
                if (unlikely(!desc)) {
                        dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
-                       netcp->ndev->stats.tx_errors++;
+                       tx_stats->tx_errors++;
                        continue;
                }
 
@@ -1019,7 +1042,7 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
                netcp_free_tx_desc_chain(netcp, desc, dma_sz);
                if (!skb) {
                        dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
-                       netcp->ndev->stats.tx_errors++;
+                       tx_stats->tx_errors++;
                        continue;
                }
 
@@ -1036,8 +1059,10 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
                        netif_wake_subqueue(netcp->ndev, subqueue);
                }
 
-               netcp->ndev->stats.tx_packets++;
-               netcp->ndev->stats.tx_bytes += skb->len;
+               u64_stats_update_begin(&tx_stats->syncp_tx);
+               tx_stats->tx_packets++;
+               tx_stats->tx_bytes += skb->len;
+               u64_stats_update_end(&tx_stats->syncp_tx);
                dev_kfree_skb(skb);
                pkts++;
        }
@@ -1212,9 +1237,9 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
                /* psdata points to both native-endian and device-endian data */
                __le32 *psdata = (void __force *)p_info.psdata;
 
-               memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
-                       p_info.psdata_len);
-               set_words(p_info.psdata, p_info.psdata_len, psdata);
+               set_words((u32 *)psdata +
+                         (KNAV_DMA_NUM_PS_WORDS - p_info.psdata_len),
+                         p_info.psdata_len, psdata);
                tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
                        KNAV_DMA_DESC_PSLEN_SHIFT;
        }
@@ -1258,6 +1283,7 @@ out:
 static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
        struct netcp_intf *netcp = netdev_priv(ndev);
+       struct netcp_stats *tx_stats = &netcp->stats;
        int subqueue = skb_get_queue_mapping(skb);
        struct knav_dma_desc *desc;
        int desc_count, ret = 0;
@@ -1273,7 +1299,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                        /* If we get here, the skb has already been dropped */
                        dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
                                 ret);
-                       ndev->stats.tx_dropped++;
+                       tx_stats->tx_dropped++;
                        return ret;
                }
                skb->len = NETCP_MIN_PACKET_SIZE;
@@ -1290,8 +1316,6 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        if (ret)
                goto drop;
 
-       netif_trans_update(ndev);
-
        /* Check Tx pool count & stop subqueue if needed */
        desc_count = knav_pool_count(netcp->tx_pool);
        if (desc_count < netcp->tx_pause_threshold) {
@@ -1301,7 +1325,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        return NETDEV_TX_OK;
 
 drop:
-       ndev->stats.tx_dropped++;
+       tx_stats->tx_dropped++;
        if (desc)
                netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
        dev_kfree_skb(skb);
@@ -1883,12 +1907,44 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
        return 0;
 }
 
+static void
+netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
+{
+       struct netcp_intf *netcp = netdev_priv(ndev);
+       struct netcp_stats *p = &netcp->stats;
+       u64 rxpackets, rxbytes, txpackets, txbytes;
+       unsigned int start;
+
+       do {
+               start = u64_stats_fetch_begin_irq(&p->syncp_rx);
+               rxpackets       = p->rx_packets;
+               rxbytes         = p->rx_bytes;
+       } while (u64_stats_fetch_retry_irq(&p->syncp_rx, start));
+
+       do {
+               start = u64_stats_fetch_begin_irq(&p->syncp_tx);
+               txpackets       = p->tx_packets;
+               txbytes         = p->tx_bytes;
+       } while (u64_stats_fetch_retry_irq(&p->syncp_tx, start));
+
+       stats->rx_packets = rxpackets;
+       stats->rx_bytes = rxbytes;
+       stats->tx_packets = txpackets;
+       stats->tx_bytes = txbytes;
+
+       /* The following are stored as 32 bit */
+       stats->rx_errors = p->rx_errors;
+       stats->rx_dropped = p->rx_dropped;
+       stats->tx_dropped = p->tx_dropped;
+}
+
 static const struct net_device_ops netcp_netdev_ops = {
        .ndo_open               = netcp_ndo_open,
        .ndo_stop               = netcp_ndo_stop,
        .ndo_start_xmit         = netcp_ndo_start_xmit,
        .ndo_set_rx_mode        = netcp_set_rx_mode,
        .ndo_do_ioctl           = netcp_ndo_ioctl,
+       .ndo_get_stats64        = netcp_get_stats,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_add_vid    = netcp_rx_add_vid,
@@ -1935,6 +1991,8 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
        INIT_LIST_HEAD(&netcp->txhook_list_head);
        INIT_LIST_HEAD(&netcp->rxhook_list_head);
        INIT_LIST_HEAD(&netcp->addr_list);
+       u64_stats_init(&netcp->stats.syncp_rx);
+       u64_stats_init(&netcp->stats.syncp_tx);
        netcp->netcp_device = netcp_device;
        netcp->dev = netcp_device->device;
        netcp->ndev = ndev;
index 7d9e36f66735cfd7da35f5df0f17c4d003dc4e0c..eece3e2eec14199809b1b55219835662bad1d012 100644 (file)
@@ -81,7 +81,6 @@
 #define GBENU_CPTS_OFFSET              0x1d000
 #define GBENU_ALE_OFFSET               0x1e000
 #define GBENU_HOST_PORT_NUM            0
-#define GBENU_NUM_ALE_ENTRIES          1024
 #define GBENU_SGMII_MODULE_SIZE                0x100
 
 /* 10G Ethernet SS defines */
 #define XGBE10_ALE_OFFSET              0x700
 #define XGBE10_HW_STATS_OFFSET         0x800
 #define XGBE10_HOST_PORT_NUM           0
-#define XGBE10_NUM_ALE_ENTRIES         1024
+#define XGBE10_NUM_ALE_ENTRIES         2048
 
 #define        GBE_TIMER_INTERVAL                      (HZ / 2)
 
 #define MACSL_FULLDUPLEX                       BIT(0)
 
 #define GBE_CTL_P0_ENABLE                      BIT(2)
+#define ETH_SW_CTL_P0_TX_CRC_REMOVE            BIT(13)
 #define GBE13_REG_VAL_STAT_ENABLE_ALL          0xff
 #define XGBE_REG_VAL_STAT_ENABLE_ALL           0xf
 #define GBE_STATS_CD_SEL                       BIT(28)
@@ -2313,7 +2313,6 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
                dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
                        phydev_name(slave->phy));
                phy_start(slave->phy);
-               phy_read_status(slave->phy);
        }
        return 0;
 }
@@ -2821,7 +2820,7 @@ static int gbe_open(void *intf_priv, struct net_device *ndev)
        struct netcp_intf *netcp = netdev_priv(ndev);
        struct gbe_slave *slave = gbe_intf->slave;
        int port_num = slave->port_num;
-       u32 reg;
+       u32 reg, val;
        int ret;
 
        reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
@@ -2851,7 +2850,12 @@ static int gbe_open(void *intf_priv, struct net_device *ndev)
        writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
 
        /* Control register */
-       writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));
+       val = GBE_CTL_P0_ENABLE;
+       if (IS_SS_ID_MU(gbe_dev)) {
+               val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
+               netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
+       }
+       writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
 
        /* All statistics enabled and STAT AB visible by default */
        writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
@@ -2930,7 +2934,9 @@ static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
        }
 
        slave->open = false;
-       slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
+       if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
+           (slave->link_interface == XGMII_LINK_MAC_PHY))
+               slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
        slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
 
        if (slave->link_interface >= XGMII_LINK_MAC_PHY)
@@ -3112,7 +3118,6 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev,
                        dev_dbg(dev, "phy found: id is: 0x%s\n",
                                phydev_name(slave->phy));
                        phy_start(slave->phy);
-                       phy_read_status(slave->phy);
                }
        }
 }
@@ -3433,7 +3438,6 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
        gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
        gbe_dev->ale_ports = gbe_dev->max_num_ports;
        gbe_dev->host_port = GBENU_HOST_PORT_NUM;
-       gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
        gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
 
        /* Subsystem registers */
@@ -3601,7 +3605,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
        ale_params.ale_ageout   = GBE_DEFAULT_ALE_AGEOUT;
        ale_params.ale_entries  = gbe_dev->ale_entries;
        ale_params.ale_ports    = gbe_dev->ale_ports;
-
+       if (IS_SS_ID_MU(gbe_dev)) {
+               ale_params.major_ver_mask = 0x7;
+               ale_params.nu_switch_ale = true;
+       }
        gbe_dev->ale = cpsw_ale_create(&ale_params);
        if (!gbe_dev->ale) {
                dev_err(gbe_dev->dev, "error initializing ale engine\n");
index 2255f9a6f3bc28ee0d712b464137ecc7784e1c2f..7c634bc75615870e33c50ad0c514c9339333097f 100644 (file)
@@ -681,7 +681,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
        }
 
        /* There are no packets left. */
-       napi_complete(&info_mpipe->napi);
+       napi_complete_done(&info_mpipe->napi, work);
 
        md = &mpipe_data[instance];
        /* Re-enable hypervisor interrupts. */
index 0a3b7dafa3ba4e14e12b977030f1e93657d622db..49ccee4b9aeccc3b6cd165b461d448f0aba7a6bd 100644 (file)
@@ -842,7 +842,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
                }
        }
 
-       napi_complete(&info->napi);
+       napi_complete_done(&info->napi, work);
 
        if (!priv->active)
                goto done;
@@ -2047,8 +2047,8 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  *
  * Returns the address of the device statistics structure.
  */
-static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
-               struct rtnl_link_stats64 *stats)
+static void tile_net_get_stats64(struct net_device *dev,
+                                struct rtnl_link_stats64 *stats)
 {
        struct tile_net_priv *priv = netdev_priv(dev);
        u64 rx_packets = 0, tx_packets = 0;
@@ -2090,12 +2090,8 @@ static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
        stats->tx_bytes   = tx_bytes;
        stats->rx_errors  = rx_errors;
        stats->rx_dropped = rx_dropped;
-
-       return stats;
 }
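As with rhine_get_stats64() below, the .ndo_get_stats64 callback no longer returns the stats pointer: the core hands in a zeroed struct rtnl_link_stats64 and the driver simply fills in the fields it tracks. The minimal form after the conversion; the priv type is a placeholder:

static void example_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *stats)
{
	struct example_priv *priv = netdev_priv(dev);	/* hypothetical */

	stats->rx_packets = priv->rx_packets;
	stats->tx_packets = priv->tx_packets;
	/* fields left unwritten keep the caller-provided zeroes */
}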
 
-
-
 /*
  * Change the Ethernet Address of the NIC.
  *
index 345316c749e761a168d4364362770f90441e5242..72013314bba81fbbbb0f7d37da6143a9b0754b0c 100644 (file)
@@ -1109,7 +1109,7 @@ static int gelic_net_poll(struct napi_struct *napi, int budget)
        }
 
        if (packets_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, packets_done);
                gelic_card_rx_irq_on(card);
        }
        return packets_done;
index cb341dfe65ad562d78ad9ac4ad0bade83e2bffb2..cec9e70ab9955d4c3f7cef2219aa56593ac39fd1 100644 (file)
@@ -1270,7 +1270,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
        /* if all packets are in the stack, enable interrupts and return 0 */
        /* if not, return 1 */
        if (packets_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, packets_done);
                spider_net_rx_irq_on(card);
                card->ignore_rx_ramfull = 0;
        }
index 3be61ed28741ae7d763322c7d28c81cc69f2eeb5..a45f98fa4aa70a6ce0c693bef2fda248754a313b 100644 (file)
@@ -1638,7 +1638,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
        spin_unlock(&lp->rx_lock);
 
        if (received < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, received);
                /* enable interrupts */
                tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
        }
index f153ad729ce5b20cefe8c98ffb6f82761062d935..c5583991da4aa462652b5236992c7731529422db 100644 (file)
@@ -887,7 +887,7 @@ static int tsi108_poll(struct napi_struct *napi, int budget)
 
        if (num_received < budget) {
                data->rxpending = 0;
-               napi_complete(napi);
+               napi_complete_done(napi, num_received);
 
                TSI_WRITE(TSI108_EC_INTMASK,
                                     TSI_READ(TSI108_EC_INTMASK)
index 0a6c4e804eeda9dbad5911626b3157d70da95f1b..c068c58428f7611ddcd526010cb07f8ada61b760 100644 (file)
@@ -513,8 +513,8 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
 static void rhine_tx(struct net_device *dev);
 static int rhine_rx(struct net_device *dev, int limit);
 static void rhine_set_rx_mode(struct net_device *dev);
-static struct rtnl_link_stats64 *rhine_get_stats64(struct net_device *dev,
-              struct rtnl_link_stats64 *stats);
+static void rhine_get_stats64(struct net_device *dev,
+                             struct rtnl_link_stats64 *stats);
 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static const struct ethtool_ops netdev_ethtool_ops;
 static int  rhine_close(struct net_device *dev);
@@ -861,7 +861,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
        }
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                iowrite16(enable_mask, ioaddr + IntrEnable);
                mmiowb();
        }
@@ -2221,7 +2221,7 @@ out_unlock:
        mutex_unlock(&rp->task_lock);
 }
 
-static struct rtnl_link_stats64 *
+static void
 rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct rhine_private *rp = netdev_priv(dev);
@@ -2244,8 +2244,6 @@ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
                stats->tx_packets = rp->tx_stats.packets;
                stats->tx_bytes = rp->tx_stats.bytes;
        } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
-
-       return stats;
 }
 
 static void rhine_set_rx_mode(struct net_device *dev)
index 4716e60e2ccbbc7c9aae9c168638a70c07ed2c1e..d088788b27a751286f7556b7f478b210c0ab68a5 100644 (file)
@@ -2160,7 +2160,7 @@ static int velocity_poll(struct napi_struct *napi, int budget)
        velocity_tx_srv(vptr);
        /* If budget not fully consumed, exit the polling mode */
        if (rx_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rx_done);
                mac_enable_int(vptr->mac_regs);
        }
        spin_unlock_irqrestore(&vptr->lock, flags);
index e1296ef2cf66183d7c8596184c69f122f7d54331..f90267f0519feebb7c9d944535cb81f53c6bb9fe 100644 (file)
@@ -915,7 +915,7 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget)
        }
 
        if (rx_count < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rx_count);
                w5100_enable_intr(priv);
        }
 
index 724fabd38a23edd6473cdf7ab712c0c54e4a0e7e..56ae573001e8e76c8cbd5300ad4fca4873cbc2ae 100644 (file)
@@ -417,7 +417,7 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget)
        }
 
        if (rx_count < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rx_count);
                w5300_write(priv, W5300_IMR, IR_S0);
                mmiowb();
        }
index aa02a03a6d8db22996941cd53d4d12968a4e0ee2..69e31ceccfae4d90a56f6e49312112ec372d1d48 100644 (file)
@@ -1040,20 +1040,6 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
        return 0;
 }
 
-/**
- * xemaclite_remove_ndev - Free the network device
- * @ndev:      Pointer to the network device to be freed
- *
- * This function un maps the IO region of the Emaclite device and frees the net
- * device.
- */
-static void xemaclite_remove_ndev(struct net_device *ndev)
-{
-       if (ndev) {
-               free_netdev(ndev);
-       }
-}
-
 /**
  * get_bool - Get a parameter from the OF device
  * @ofdev:     Pointer to OF device structure
@@ -1077,7 +1063,7 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
        }
 }
 
-static struct net_device_ops xemaclite_netdev_ops;
+static const struct net_device_ops xemaclite_netdev_ops;
 
 /**
  * xemaclite_of_probe - Probe method for the Emaclite device.
@@ -1184,7 +1170,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
        return 0;
 
 error:
-       xemaclite_remove_ndev(ndev);
+       free_netdev(ndev);
        return rc;
 }
 
@@ -1216,7 +1202,7 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
        of_node_put(lp->phy_node);
        lp->phy_node = NULL;
 
-       xemaclite_remove_ndev(ndev);
+       free_netdev(ndev);
 
        return 0;
 }
@@ -1231,7 +1217,7 @@ xemaclite_poll_controller(struct net_device *ndev)
 }
 #endif
 
-static struct net_device_ops xemaclite_netdev_ops = {
+static const struct net_device_ops xemaclite_netdev_ops = {
        .ndo_open               = xemaclite_open,
        .ndo_stop               = xemaclite_close,
        .ndo_start_xmit         = xemaclite_send,
index e395ace3120b1e53ac36079ea9de015aed8f631b..648ff9fdb90959f8960bc7d6f9983abc6daa3058 100644 (file)
@@ -52,7 +52,6 @@ static const char ID_sccs[] = "@(#)cfm.c      2.18 98/10/06 (C) SK " ;
 #define ACTIONS_DONE() (smc->mib.fddiSMTCF_State &= ~AFLAG)
 #define ACTIONS(x)     (x|AFLAG)
 
-#ifdef DEBUG
 /*
  * symbolic state names
  */
@@ -68,7 +67,6 @@ static const char * const cfm_states[] = {
 static const char * const cfm_events[] = {
        "NONE","CF_LOOP_A","CF_LOOP_B","CF_JOIN_A","CF_JOIN_B"
 } ;
-#endif
 
 /*
  * map from state to downstream port type
@@ -230,10 +228,10 @@ void cfm(struct s_smc *smc, int event)
 
        oldstate = smc->mib.fddiSMTCF_State ;
        do {
-               DB_CFM("CFM : state %s%s",
-                       (smc->mib.fddiSMTCF_State & AFLAG) ? "ACTIONS " : "",
-                       cfm_states[smc->mib.fddiSMTCF_State & ~AFLAG]) ;
-               DB_CFM(" event %s\n",cfm_events[event],0) ;
+               DB_CFM("CFM : state %s%s event %s",
+                      smc->mib.fddiSMTCF_State & AFLAG ? "ACTIONS " : "",
+                      cfm_states[smc->mib.fddiSMTCF_State & ~AFLAG],
+                      cfm_events[event]);
                state = smc->mib.fddiSMTCF_State ;
                cfm_fsm(smc,event) ;
                event = 0 ;
@@ -297,7 +295,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd)
                queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
                /* Don't do the WC-Flag changing here */
                ACTIONS_DONE() ;
-               DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+               DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
                break;
        case SC0_ISOLATED :
                /*SC07*/
@@ -338,7 +336,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd)
                        queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
                }
                ACTIONS_DONE() ;
-               DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+               DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
                break ;
        case SC9_C_WRAP_A :
                /*SC10*/
@@ -403,7 +401,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd)
                        queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
                }
                ACTIONS_DONE() ;
-               DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+               DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
                break ;
        case SC10_C_WRAP_B :
                /*SC20*/
@@ -448,7 +446,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd)
                smc->r.rm_join = TRUE ;
                queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
                ACTIONS_DONE() ;
-               DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+               DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
                break ;
        case SC4_THRU_A :
                /*SC41*/
@@ -481,7 +479,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd)
                smc->r.rm_join = TRUE ;
                queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
                ACTIONS_DONE() ;
-               DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+               DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
                break ;
        case SC5_THRU_B :
                /*SC51*/
@@ -519,7 +517,7 @@ static void cfm_fsm(struct s_smc *smc, int cmd)
                        queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
                }
                ACTIONS_DONE() ;
-               DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
+               DB_CFMN(1, "CFM : %s", cfm_states[smc->mib.fddiSMTCF_State]);
                break ;
        case SC11_C_WRAP_S :
                /*SC70*/
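These skfp hunks replace the old fixed two-argument debug calls (hence the trailing ,0 padding being dropped) with printf-style varargs, and the state/event name tables are now referenced unconditionally, which is why their #ifdef DEBUG guards go away. A hypothetical macro definition consistent with the new call sites; the real one lives in the skfp headers, which are not shown in this diff:

#define DB_CFM(fmt, ...)						\
	pr_debug("cfm: " fmt "\n", ##__VA_ARGS__)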
index 07da97c303d6893858ad661afcbdb4434c897787..fed3a92d3df49e79ec45eef3a4a6835edb8891b0 100644 (file)
@@ -343,8 +343,8 @@ void init_board(struct s_smc *smc, u_char *mac_addr)
  */
 void sm_pm_bypass_req(struct s_smc *smc, int mode)
 {
-       DB_ECMN(1,"ECM : sm_pm_bypass_req(%s)\n",(mode == BP_INSERT) ?
-                                       "BP_INSERT" : "BP_DEINSERT",0) ;
+       DB_ECMN(1, "ECM : sm_pm_bypass_req(%s)",
+               mode == BP_INSERT ? "BP_INSERT" : "BP_DEINSERT");
 
        if (smc->s.sas != SMT_DAS)
                return ;
index 47d922cb3c08cada1c76e2422326ef9b2cbddf15..eee9ba91346ac43d29fb74da020b8754e52aba8c 100644 (file)
@@ -66,7 +66,6 @@ static const char ID_sccs[] = "@(#)ecm.c      2.7 99/08/05 (C) SK " ;
 #define EC6_CHECK      6                       /* checking bypass */
 #define EC7_DEINSERT   7                       /* bypass being turned off */
 
-#ifdef DEBUG
 /*
  * symbolic state names
  */
@@ -83,7 +82,6 @@ static const char * const ecm_events[] = {
        "EC_TIMEOUT_TD","EC_TIMEOUT_TMAX",
        "EC_TIMEOUT_IMAX","EC_TIMEOUT_INMAX","EC_TEST_DONE"
 } ;
-#endif
 
 /*
  * all Globals  are defined in smc.h
@@ -126,10 +124,10 @@ void ecm(struct s_smc *smc, int event)
        int     state ;
 
        do {
-               DB_ECM("ECM : state %s%s",
-                       (smc->mib.fddiSMTECMState & AFLAG) ? "ACTIONS " : "",
-                       ecm_states[smc->mib.fddiSMTECMState & ~AFLAG]) ;
-               DB_ECM(" event %s\n",ecm_events[event],0) ;
+               DB_ECM("ECM : state %s%s event %s",
+                      smc->mib.fddiSMTECMState & AFLAG ? "ACTIONS " : "",
+                      ecm_states[smc->mib.fddiSMTECMState & ~AFLAG],
+                      ecm_events[event]);
                state = smc->mib.fddiSMTECMState ;
                ecm_fsm(smc,event) ;
                event = 0 ;
@@ -379,7 +377,7 @@ static void ecm_fsm(struct s_smc *smc, int cmd)
                         (((ls_a == PC_ILS) && (ls_b == PC_QLS)) ||
                          ((ls_a == PC_QLS) && (ls_b == PC_ILS)))){
                        smc->e.sb_flag = TRUE ;
-                       DB_ECMN(1,"ECM : EC6_CHECK - stuck bypass\n",0,0) ;
+                       DB_ECMN(1, "ECM : EC6_CHECK - stuck bypass");
                        AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
                                FDDI_SMT_ERROR, (u_long) FDDI_BYPASS_STUCK,
                                smt_get_error_word(smc));
@@ -443,29 +441,29 @@ static void prop_actions(struct s_smc *smc)
                return ;
        }
 
-       DB_ECM("ECM : prop_actions - trace_prop %d\n", smc->e.trace_prop,0) ;
-       DB_ECM("ECM : prop_actions - in %d out %d\n", port_in,port_out) ;
+       DB_ECM("ECM : prop_actions - trace_prop %lu", smc->e.trace_prop);
+       DB_ECM("ECM : prop_actions - in %d out %d", port_in, port_out);
 
        if (smc->e.trace_prop & ENTITY_BIT(ENTITY_MAC)) {
                /* trace initiator */
-               DB_ECM("ECM : initiate TRACE on PHY %c\n",'A'+port_in-PA,0) ;
+               DB_ECM("ECM : initiate TRACE on PHY %c", 'A' + port_in - PA);
                queue_event(smc,EVENT_PCM+port_in,PC_TRACE) ;
        }
        else if ((smc->e.trace_prop & ENTITY_BIT(ENTITY_PHY(PA))) &&
                port_out != PA) {
                /* trace propagate upstream */
-               DB_ECM("ECM : propagate TRACE on PHY B\n",0,0) ;
+               DB_ECM("ECM : propagate TRACE on PHY B");
                queue_event(smc,EVENT_PCMB,PC_TRACE) ;
        }
        else if ((smc->e.trace_prop & ENTITY_BIT(ENTITY_PHY(PB))) &&
                port_out != PB) {
                /* trace propagate upstream */
-               DB_ECM("ECM : propagate TRACE on PHY A\n",0,0) ;
+               DB_ECM("ECM : propagate TRACE on PHY A");
                queue_event(smc,EVENT_PCMA,PC_TRACE) ;
        }
        else {
                /* signal trace termination */
-               DB_ECM("ECM : TRACE terminated\n",0,0) ;
+               DB_ECM("ECM : TRACE terminated");
                smc->e.path_test = PT_PENDING ;
        }
        smc->e.trace_prop = 0 ;
@@ -482,13 +480,13 @@ static void prop_actions(struct s_smc *smc)
 
        RS_SET(smc,RS_EVENT) ;
        while (smc->e.trace_prop) {
-               DB_ECM("ECM : prop_actions - trace_prop %d\n",
-                       smc->e.trace_prop,0) ;
+               DB_ECM("ECM : prop_actions - trace_prop %d",
+                      smc->e.trace_prop);
 
                if (smc->e.trace_prop & ENTITY_BIT(ENTITY_MAC)) {
                        initiator = ENTITY_MAC ;
                        smc->e.trace_prop &= ~ENTITY_BIT(ENTITY_MAC) ;
-                       DB_ECM("ECM: MAC initiates trace\n",0,0) ;
+                       DB_ECM("ECM: MAC initiates trace");
                }
                else {
                        for (p = NUMPHYS-1 ; p >= 0 ; p--) {
@@ -503,12 +501,12 @@ static void prop_actions(struct s_smc *smc)
 
                if (upstream == ENTITY_MAC) {
                        /* signal trace termination */
-                       DB_ECM("ECM : TRACE terminated\n",0,0) ;
+                       DB_ECM("ECM : TRACE terminated");
                        smc->e.path_test = PT_PENDING ;
                }
                else {
                        /* trace propagate upstream */
-                       DB_ECM("ECM : propagate TRACE on PHY %d\n",upstream,0) ;
+                       DB_ECM("ECM : propagate TRACE on PHY %d", upstream);
                        queue_event(smc,EVENT_PCM+upstream,PC_TRACE) ;
                }
        }
index 2fc5987b41dc2ec072cc92b41ec4026cc887d2b1..325e2c525e35346c7eb81166f22c7b32f963bb03 100644 (file)
@@ -134,7 +134,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
         * get the resource type
         */
        if (!(p = (void *) sm_to_para(smc,sm,SMT_P0015))) {
-               DB_ESS("ESS: RAF frame error, parameter type not found\n",0,0) ;
+               DB_ESS("ESS: RAF frame error, parameter type not found");
                return fs;
        }
        msg_res_type = ((struct smt_p_0015 *)p)->res_type ;
@@ -146,16 +146,16 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
                /*
                 * error in frame: para ESS command was not found
                 */
-                DB_ESS("ESS: RAF frame error, parameter command not found\n",0,0);
+                DB_ESS("ESS: RAF frame error, parameter command not found");
                 return fs;
        }
 
-       DB_ESSN(2,"fc %x        ft %x\n",sm->smt_class,sm->smt_type) ;
-       DB_ESSN(2,"ver %x       tran %lx\n",sm->smt_version,sm->smt_tid) ;
-       DB_ESSN(2,"stn_id %s\n",addr_to_string(&sm->smt_source),0) ;
+       DB_ESSN(2, "fc %x       ft %x", sm->smt_class, sm->smt_type);
+       DB_ESSN(2, "ver %x      tran %x", sm->smt_version, sm->smt_tid);
+       DB_ESSN(2, "stn_id %s", addr_to_string(&sm->smt_source));
 
-       DB_ESSN(2,"infolen %x   res %x\n",sm->smt_len, msg_res_type) ;
-       DB_ESSN(2,"sbacmd %x\n",cmd->sba_cmd,0) ;
+       DB_ESSN(2, "infolen %x  res %lx", sm->smt_len, msg_res_type);
+       DB_ESSN(2, "sbacmd %x", cmd->sba_cmd);
 
        /*
         * evaluate the ESS command
@@ -189,7 +189,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
                         *       The ESS does not send the Frame to the network!
                         */
                        smc->ess.alloc_trans_id = sm->smt_tid ;
-                       DB_ESS("ESS: save Alloc Req Trans ID %lx\n",sm->smt_tid,0);
+                       DB_ESS("ESS: save Alloc Req Trans ID %x", sm->smt_tid);
                        p = (void *) sm_to_para(smc,sm,SMT_P320F) ;
                        ((struct smt_p_320f *)p)->mib_payload =
                                smc->mib.a[PATH0].fddiPATHSbaPayload ;
@@ -220,7 +220,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
                 * check the parameters
                 */
                if (smt_check_para(smc,sm,plist_raf_alc_res)) {
-                       DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ;
+                       DB_ESS("ESS: RAF with para problem, ignoring");
                        return fs;
                }
 
@@ -241,7 +241,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
                        != SMT_RDF_SUCCESS) ||
                        (sm->smt_tid != smc->ess.alloc_trans_id)) {
 
-                       DB_ESS("ESS: Allocation Response not accepted\n",0,0) ;
+                       DB_ESS("ESS: Allocation Response not accepted");
                        return fs;
                }
 
@@ -261,7 +261,8 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
                 }       
                overhead = ((struct smt_p_3210 *)p)->mib_overhead ;
 
-               DB_ESSN(2,"payload= %lx overhead= %lx\n",payload,overhead) ;
+               DB_ESSN(2, "payload= %lx        overhead= %lx",
+                       payload, overhead);
 
                /*
                 * process the bandwidth allocation
@@ -279,7 +280,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
                 * except only replies
                 */
                if (sm->smt_type != SMT_REQUEST) {
-                       DB_ESS("ESS: Do not process Change Responses\n",0,0) ;
+                       DB_ESS("ESS: Do not process Change Responses");
                        return fs;
                }
 
@@ -287,7 +288,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
                 * check the para for the Change Request
                 */
                if (smt_check_para(smc,sm,plist_raf_chg_req)) {
-                       DB_ESS("ESS: RAF with para problem, ignoring\n",0,0) ;
+                       DB_ESS("ESS: RAF with para problem, ignoring");
                        return fs;
                }
 
@@ -299,7 +300,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
                 */
                if ((((struct smt_p_320b *)sm_to_para(smc,sm,SMT_P320B))->path_index
                        != PRIMARY_RING) || (msg_res_type != SYNC_BW)) {
-                       DB_ESS("ESS: RAF frame with para problem, ignoring\n",0,0) ;
+                       DB_ESS("ESS: RAF frame with para problem, ignoring");
                        return fs;
                }
 
@@ -311,9 +312,10 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
                p = (void *) sm_to_para(smc,sm,SMT_P3210) ;
                overhead = ((struct smt_p_3210 *)p)->mib_overhead ;
 
-               DB_ESSN(2,"ESS: Change Request from %s\n",
-                       addr_to_string(&sm->smt_source),0) ;
-               DB_ESSN(2,"payload= %lx overhead= %lx\n",payload,overhead) ;
+               DB_ESSN(2, "ESS: Change Request from %s",
+                       addr_to_string(&sm->smt_source));
+               DB_ESSN(2, "payload= %lx        overhead= %lx",
+                       payload, overhead);
 
                /*
                 * process the bandwidth allocation
@@ -337,18 +339,18 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
                 * except only requests
                 */
                if (sm->smt_type != SMT_REQUEST) {
-                       DB_ESS("ESS: Do not process a Report Reply\n",0,0) ;
+                       DB_ESS("ESS: Do not process a Report Reply");
                        return fs;
                }
 
-               DB_ESSN(2,"ESS: Report Request from %s\n",
-                       addr_to_string(&(sm->smt_source)),0) ;
+               DB_ESSN(2, "ESS: Report Request from %s",
+                       addr_to_string(&sm->smt_source));
 
                /*
                 * verify that the resource type is sync bw only
                 */
                if (msg_res_type != SYNC_BW) {
-                       DB_ESS("ESS: ignoring RAF with para problem\n",0,0) ;
+                       DB_ESS("ESS: ignoring RAF with para problem");
                        return fs;
                }
 
@@ -364,7 +366,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
                /*
                 * error in frame
                 */
-               DB_ESS("ESS: ignoring RAF with bad sba_cmd\n",0,0) ;
+               DB_ESS("ESS: ignoring RAF with bad sba_cmd");
                break ;
        }
 
@@ -417,17 +419,17 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe
         * set the mib attributes fddiPATHSbaOverhead, fddiPATHSbaPayload
         */
 /*     if (smt_set_obj(smc,SMT_P320F,payload,S_SET)) {
-               DB_ESS("ESS: SMT does not accept the payload value\n",0,0) ;
+               DB_ESS("ESS: SMT does not accept the payload value");
                return FALSE;
        }
        if (smt_set_obj(smc,SMT_P3210,overhead,S_SET)) {
-               DB_ESS("ESS: SMT does not accept the overhead value\n",0,0) ;
+               DB_ESS("ESS: SMT does not accept the overhead value");
                return FALSE;
        } */
 
        /* preliminary */
        if (payload > MAX_PAYLOAD || overhead > 5000) {
-               DB_ESS("ESS: payload / overhead not accepted\n",0,0) ;
+               DB_ESS("ESS: payload / overhead not accepted");
                return FALSE;
        }
 
@@ -446,7 +448,7 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe
         * evaluate the Payload
         */
        if (payload) {
-               DB_ESSN(2,"ESS: turn SMT_ST_SYNC_SERVICE bit on\n",0,0) ;
+               DB_ESSN(2, "ESS: turn SMT_ST_SYNC_SERVICE bit on");
                smc->ess.sync_bw_available = TRUE ;
 
                smc->ess.sync_bw = overhead -
@@ -454,7 +456,7 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe
                        payload / 1562 ;
        }
        else {
-               DB_ESSN(2,"ESS: turn SMT_ST_SYNC_SERVICE bit off\n",0,0) ;
+               DB_ESSN(2, "ESS: turn SMT_ST_SYNC_SERVICE bit off");
                smc->ess.sync_bw_available = FALSE ;
                smc->ess.sync_bw = 0 ;
                overhead = 0 ;
@@ -464,7 +466,7 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe
        smc->mib.a[PATH0].fddiPATHSbaOverhead = overhead ;
 
 
-       DB_ESSN(2,"tsync = %lx\n",smc->ess.sync_bw,0) ;
+       DB_ESSN(2, "tsync = %lx", smc->ess.sync_bw);
 
        ess_config_fifo(smc) ;
        set_formac_tsync(smc,smc->ess.sync_bw) ;
@@ -541,7 +543,7 @@ void ess_timer_poll(struct s_smc *smc)
        if (!smc->ess.raf_act_timer_poll)
                return ;
 
-       DB_ESSN(2,"ESS: timer_poll\n",0,0) ;
+       DB_ESSN(2, "ESS: timer_poll");
 
        smc->ess.timer_count++ ;
        if (smc->ess.timer_count == 10) {
@@ -667,11 +669,11 @@ static void ess_send_frame(struct s_smc *smc, SMbuf *mb)
                /*
                 * Send the Change Reply to the local SBA
                 */
-               DB_ESS("ESS:Send to the local SBA\n",0,0) ;
+               DB_ESS("ESS:Send to the local SBA");
                if (!smc->ess.sba_reply_pend)
                        smc->ess.sba_reply_pend = mb ;
                else {
-                       DB_ESS("Frame is lost - another frame was pending\n",0,0);
+                       DB_ESS("Frame is lost - another frame was pending");
                        smt_free_mbuf(smc,mb) ;
                }
        }
@@ -679,7 +681,7 @@ static void ess_send_frame(struct s_smc *smc, SMbuf *mb)
                /*
                 * Send the SBA RAF Change Reply to the network
                 */
-               DB_ESS("ESS:Send to the network\n",0,0) ;
+               DB_ESS("ESS:Send to the network");
                smt_send_frame(smc,mb,FC_SMT_INFO,0) ;
        }
 }
index 7d3779ae73771e0e9b908a09aa5a7ebe080bb4b4..24aed28b982cee740740055aeb48d096938621b7 100644 (file)
@@ -726,7 +726,7 @@ void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l)
        if (code_s2u & FM_SMYBEC)
                queue_event(smc,EVENT_RMT,RM_MY_BEACON) ;
        if (change_s2u & code_s2u & FM_SLOCLM) {
-               DB_RMTN(2,"RMT : lower claim received\n",0,0) ;
+               DB_RMTN(2, "RMT : lower claim received");
        }
        if ((code_s2u & FM_SMYCLM) && !(code_s2l & FM_SDUPCLM)) {
                /*
@@ -746,7 +746,7 @@ void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l)
                queue_event(smc,EVENT_RMT,RM_VALID_CLAIM) ;
        }
        if (change_s2u & code_s2u & FM_SHICLM) {
-               DB_RMTN(2,"RMT : higher claim received\n",0,0) ;
+               DB_RMTN(2, "RMT : higher claim received");
        }
        if ( (code_s2l & FM_STRTEXP) ||
             (code_s2l & FM_STRTEXR) )
@@ -1334,7 +1334,7 @@ void rtm_irq(struct s_smc *smc)
        outpw(ADDR(B2_RTM_CRTL),TIM_CL_IRQ) ;           /* clear IRQ */
        if (inpw(ADDR(B2_RTM_CRTL)) & TIM_RES_TOK) {
                outpw(FM_A(FM_CMDREG1),FM_ICL) ;        /* force claim */
-               DB_RMT("RMT: fddiPATHT_Rmode expired\n",0,0) ;
+               DB_RMT("RMT: fddiPATHT_Rmode expired");
                AIX_EVENT(smc, (u_long) FDDI_RING_STATUS,
                                (u_long) FDDI_SMT_EVENT,
                                (u_long) FDDI_RTT, smt_get_event_word(smc));
@@ -1353,8 +1353,8 @@ void rtm_set_timer(struct s_smc *smc)
        /*
         * MIB timer and hardware timer have the same resolution of 80nS
         */
-       DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns\n",
-               (int) smc->mib.a[PATH0].fddiPATHT_Rmode,0) ;
+       DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns",
+              (int)smc->mib.a[PATH0].fddiPATHT_Rmode);
        outpd(ADDR(B2_RTM_INI),smc->mib.a[PATH0].fddiPATHT_Rmode) ;
 }
 
@@ -1469,13 +1469,13 @@ static void smt_split_up_fifo(struct s_smc *smc)
        smc->hw.fp.fifo.rx2_fifo_start = smc->hw.fp.fifo.tx_a0_start +
                smc->hw.fp.fifo.tx_a0_size ;
 
-       DB_SMT("FIFO split: mode = %x\n",smc->hw.fp.fifo.fifo_config_mode,0) ;
-       DB_SMT("rbc_ram_start = %x       rbc_ram_end =  %x\n",
-               smc->hw.fp.fifo.rbc_ram_start, smc->hw.fp.fifo.rbc_ram_end) ;
-       DB_SMT("rx1_fifo_start = %x      tx_s_start =   %x\n",
-               smc->hw.fp.fifo.rx1_fifo_start, smc->hw.fp.fifo.tx_s_start) ;
-       DB_SMT("tx_a0_start =   %x       rx2_fifo_start =       %x\n",
-               smc->hw.fp.fifo.tx_a0_start, smc->hw.fp.fifo.rx2_fifo_start) ;
+       DB_SMT("FIFO split: mode = %x", smc->hw.fp.fifo.fifo_config_mode);
+       DB_SMT("rbc_ram_start = %x       rbc_ram_end =  %x",
+              smc->hw.fp.fifo.rbc_ram_start, smc->hw.fp.fifo.rbc_ram_end);
+       DB_SMT("rx1_fifo_start = %x      tx_s_start =   %x",
+              smc->hw.fp.fifo.rx1_fifo_start, smc->hw.fp.fifo.tx_s_start);
+       DB_SMT("tx_a0_start =   %x       rx2_fifo_start =       %x",
+              smc->hw.fp.fifo.tx_a0_start, smc->hw.fp.fifo.rx2_fifo_start);
 }
 
 void formac_reinit_tx(struct s_smc *smc)
index f5bc90ff2a2a1e714fbe7b9cb199417beb3d45d7..5d6891154367e54b4283263cd5193d30286a87cd 100644 (file)
 #endif
 
 #ifdef DEBUG
-#define        DB_PR(flag,a,b,c)       { if (flag) printf(a,b,c) ; }
+#define        DB_PR(flag, fmt, ...)                                           \
+       do { if (flag) printf(fmt "\n", ##__VA_ARGS__); } while (0)
 #else
-#define        DB_PR(flag,a,b,c)
+#define        DB_PR(flag, fmt, ...)   no_printk(fmt "\n", ##__VA_ARGS__)
+
 #endif
 
 #ifdef DEBUG_BRD
-#define DB_ECM(a,b,c)          DB_PR((smc->debug.d_smt&1),a,b,c)
-#define DB_ECMN(n,a,b,c)       DB_PR((smc->debug.d_ecm >=(n)),a,b,c)
-#define DB_RMT(a,b,c)          DB_PR((smc->debug.d_smt&2),a,b,c)
-#define DB_RMTN(n,a,b,c)       DB_PR((smc->debug.d_rmt >=(n)),a,b,c)
-#define DB_CFM(a,b,c)          DB_PR((smc->debug.d_smt&4),a,b,c)
-#define DB_CFMN(n,a,b,c)       DB_PR((smc->debug.d_cfm >=(n)),a,b,c)
-#define DB_PCM(a,b,c)          DB_PR((smc->debug.d_smt&8),a,b,c)
-#define DB_PCMN(n,a,b,c)       DB_PR((smc->debug.d_pcm >=(n)),a,b,c)
-#define DB_SMT(a,b,c)          DB_PR((smc->debug.d_smtf),a,b,c)
-#define DB_SMTN(n,a,b,c)       DB_PR((smc->debug.d_smtf >=(n)),a,b,c)
-#define DB_SBA(a,b,c)          DB_PR((smc->debug.d_sba),a,b,c)
-#define DB_SBAN(n,a,b,c)       DB_PR((smc->debug.d_sba >=(n)),a,b,c)
-#define DB_ESS(a,b,c)          DB_PR((smc->debug.d_ess),a,b,c)
-#define DB_ESSN(n,a,b,c)       DB_PR((smc->debug.d_ess >=(n)),a,b,c)
+#define DB_TEST (smc->debug)
 #else
-#define DB_ECM(a,b,c)          DB_PR((debug.d_smt&1),a,b,c)
-#define DB_ECMN(n,a,b,c)       DB_PR((debug.d_ecm >=(n)),a,b,c)
-#define DB_RMT(a,b,c)          DB_PR((debug.d_smt&2),a,b,c)
-#define DB_RMTN(n,a,b,c)       DB_PR((debug.d_rmt >=(n)),a,b,c)
-#define DB_CFM(a,b,c)          DB_PR((debug.d_smt&4),a,b,c)
-#define DB_CFMN(n,a,b,c)       DB_PR((debug.d_cfm >=(n)),a,b,c)
-#define DB_PCM(a,b,c)          DB_PR((debug.d_smt&8),a,b,c)
-#define DB_PCMN(n,a,b,c)       DB_PR((debug.d_pcm >=(n)),a,b,c)
-#define DB_SMT(a,b,c)          DB_PR((debug.d_smtf),a,b,c)
-#define DB_SMTN(n,a,b,c)       DB_PR((debug.d_smtf >=(n)),a,b,c)
-#define DB_SBA(a,b,c)          DB_PR((debug.d_sba),a,b,c)
-#define DB_SBAN(n,a,b,c)       DB_PR((debug.d_sba >=(n)),a,b,c)
-#define DB_ESS(a,b,c)          DB_PR((debug.d_ess),a,b,c)
-#define DB_ESSN(n,a,b,c)       DB_PR((debug.d_ess >=(n)),a,b,c)
+#define DB_TEST (debug)
 #endif
 
+#define DB_ECM(fmt, ...)                                               \
+       DB_PR((DB_TEST).d_smt & 1, fmt, ##__VA_ARGS__)
+#define DB_ECMN(n, fmt, ...)                                           \
+       DB_PR((DB_TEST).d_ecm >= (n), fmt, ##__VA_ARGS__)
+#define DB_RMT(fmt, ...)                                               \
+       DB_PR((DB_TEST).d_smt & 2, fmt, ##__VA_ARGS__)
+#define DB_RMTN(n, fmt, ...)                                           \
+       DB_PR((DB_TEST).d_rmt >= (n), fmt, ##__VA_ARGS__)
+#define DB_CFM(fmt, ...)                                               \
+       DB_PR((DB_TEST).d_smt & 4, fmt, ##__VA_ARGS__)
+#define DB_CFMN(n, fmt, ...)                                           \
+       DB_PR((DB_TEST).d_cfm >= (n), fmt, ##__VA_ARGS__)
+#define DB_PCM(fmt, ...)                                               \
+       DB_PR((DB_TEST).d_smt & 8, fmt, ##__VA_ARGS__)
+#define DB_PCMN(n, fmt, ...)                                           \
+       DB_PR((DB_TEST).d_pcm >= (n), fmt, ##__VA_ARGS__)
+#define DB_SMT(fmt, ...)                                               \
+       DB_PR((DB_TEST).d_smtf, fmt, ##__VA_ARGS__)
+#define DB_SMTN(n, fmt, ...)                                           \
+       DB_PR((DB_TEST).d_smtf >= (n), fmt, ##__VA_ARGS__)
+#define DB_SBA(fmt, ...)                                               \
+       DB_PR((DB_TEST).d_sba, fmt, ##__VA_ARGS__)
+#define DB_SBAN(n, fmt, ...)                                           \
+       DB_PR((DB_TEST).d_sba >= (n), fmt, ##__VA_ARGS__)
+#define DB_ESS(fmt, ...)                                               \
+       DB_PR((DB_TEST).d_ess, fmt, ##__VA_ARGS__)
+#define DB_ESSN(n, fmt, ...)                                           \
+       DB_PR((DB_TEST).d_ess >= (n), fmt, ##__VA_ARGS__)
+
 #ifndef        SS_NOT_DS
 #define        SK_LOC_DECL(type,var)   type var
 #else
@@ -640,8 +645,8 @@ void dump_smt(struct s_smc *smc, struct smt_header *sm, char *text);
 #define        dump_smt(smc,sm,text)
 #endif
 
-#ifdef DEBUG
 char* addr_to_string(struct fddi_addr *addr);
+#ifdef DEBUG
 void dump_hex(char *p, int len);
 #endif
 
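This hunk is the core of the conversion: DB_PR becomes a true variadic macro that appends the trailing newline itself, and the !DEBUG build routes the arguments through no_printk(), a no-op that keeps the compiler's printf-format checking alive even when nothing is printed. A self-contained sketch of the same pattern, with DB_PRINT and ENABLE_DB as stand-in names (not part of the patch):

    #include <stdio.h>

    /* User-space stand-in for the kernel's no_printk(): does nothing
     * useful at run time, but the format attribute preserves
     * printf-style type checking of the format string and arguments.
     */
    static inline __attribute__((format(printf, 1, 2)))
    int no_printk(const char *fmt, ...)
    {
            return 0;
    }

    #ifdef ENABLE_DB
    #define DB_PRINT(flag, fmt, ...) \
            do { if (flag) printf(fmt "\n", ##__VA_ARGS__); } while (0)
    #else
    #define DB_PRINT(flag, fmt, ...) no_printk(fmt "\n", ##__VA_ARGS__)
    #endif

The do { } while (0) wrapper keeps the enabled form safe to use as a single statement inside unbraced if/else bodies, and the string-literal concatenation (fmt "\n") is why every call site in this commit drops its trailing \n.
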
index 4ca2341d7f067a7cd979defac3fea8c0be1aef87..123cfa09c354750078f678dda536d8e8f9eded4d 100644 (file)
@@ -168,13 +168,25 @@ struct os_debug {
 #define DB_P   debug
 #endif
 
-#define DB_RX(a,b,c,lev) if (DB_P.d_os.hwm_rx >= (lev))        printf(a,b,c)
-#define DB_TX(a,b,c,lev) if (DB_P.d_os.hwm_tx >= (lev))        printf(a,b,c)
-#define DB_GEN(a,b,c,lev) if (DB_P.d_os.hwm_gen >= (lev)) printf(a,b,c)
+#define DB_RX(lev, fmt, ...)                                           \
+do {                                                                   \
+       if (DB_P.d_os.hwm_rx >= (lev))                                  \
+               printf(fmt "\n", ##__VA_ARGS__);                        \
+} while (0)
+#define DB_TX(lev, fmt, ...)                                           \
+do {                                                                   \
+       if (DB_P.d_os.hwm_tx >= (lev))                                  \
+               printf(fmt "\n", ##__VA_ARGS__);                        \
+} while (0)
+#define DB_GEN(lev, fmt, ...)                                          \
+do {                                                                   \
+       if (DB_P.d_os.hwm_gen >= (lev))                                 \
+               printf(fmt "\n", ##__VA_ARGS__);                        \
+} while (0)
 #else  /* DEBUG */
-#define DB_RX(a,b,c,lev)
-#define DB_TX(a,b,c,lev)
-#define DB_GEN(a,b,c,lev)
+#define DB_RX(lev, fmt, ...)   no_printk(fmt "\n", ##__VA_ARGS__)
+#define DB_TX(lev, fmt, ...)   no_printk(fmt "\n", ##__VA_ARGS__)
+#define DB_GEN(lev, fmt, ...)  no_printk(fmt "\n", ##__VA_ARGS__)
 #endif /* DEBUG */
 
 #ifndef        SK_BREAK
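
The DB_RX/DB_TX/DB_GEN conversion above also reorders the arguments: the verbosity level moves from the trailing slot to the front, so the dummy padding zeros disappear and call sites read level-first. Before/after, taken from a call site later in this commit:

    /* before: two dummy zeros, level hidden in the last position */
    DB_RX("Frame aborted by the FORMAC",0,0,2) ;

    /* after: level first, only real arguments follow the format */
    DB_RX(2, "Frame aborted by the FORMAC");
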
index d0a68bdd5f63b5934fd8dbf227929a1291fb848c..abbe309051d9ff46c27d62f10f5b5b0bbd05d980 100644 (file)
@@ -158,7 +158,7 @@ u_int mac_drv_check_space(void);
 SMbuf* smt_get_mbuf(struct s_smc *smc);
 
 #ifdef DEBUG
-       void mac_drv_debug_lev(void);
+       void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev);
 #endif
 
 /*
@@ -330,7 +330,7 @@ static u_long init_descr_ring(struct s_smc *smc,
        union s_fp_descr volatile *d2 ;
        u_long  phys ;
 
-       DB_GEN("descr ring starts at = %x ",(void *)start,0,3) ;
+       DB_GEN(3, "descr ring starts at = %p", start);
        for (i=count-1, d1=start; i ; i--) {
                d2 = d1 ;
                d1++ ;          /* descr is owned by the host */
@@ -339,7 +339,7 @@ static u_long init_descr_ring(struct s_smc *smc,
                phys = mac_drv_virt2phys(smc,(void *)d1) ;
                d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
        }
-       DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ;
+       DB_GEN(3, "descr ring ends at = %p", d1);
        d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
        d1->r.rxd_next = &start->r ;
        phys = mac_drv_virt2phys(smc,(void *)start) ;
@@ -364,7 +364,7 @@ static void init_txd_ring(struct s_smc *smc)
        ds = (struct s_smt_fp_txd volatile *) ((char *)smc->os.hwm.descr_p +
                SMT_R1_RXD_COUNT*sizeof(struct s_smt_fp_rxd)) ;
        queue = smc->hw.fp.tx[QUEUE_A0] ;
-       DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ;
+       DB_GEN(3, "Init async TxD ring, %d TxDs", HWM_ASYNC_TXD_COUNT);
        (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
                HWM_ASYNC_TXD_COUNT) ;
        phys = le32_to_cpu(ds->txd_ntdadr) ;
@@ -378,7 +378,7 @@ static void init_txd_ring(struct s_smc *smc)
        ds = (struct s_smt_fp_txd volatile *) ((char *)ds +
                HWM_ASYNC_TXD_COUNT*sizeof(struct s_smt_fp_txd)) ;
        queue = smc->hw.fp.tx[QUEUE_S] ;
-       DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ;
+       DB_GEN(3, "Init sync TxD ring, %d TxDs", HWM_SYNC_TXD_COUNT);
        (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
                HWM_SYNC_TXD_COUNT) ;
        phys = le32_to_cpu(ds->txd_ntdadr) ;
@@ -400,7 +400,7 @@ static void init_rxd_ring(struct s_smc *smc)
         */
        ds = (struct s_smt_fp_rxd volatile *) smc->os.hwm.descr_p ;
        queue = smc->hw.fp.rx[QUEUE_R1] ;
-       DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ;
+       DB_GEN(3, "Init RxD ring, %d RxDs", SMT_R1_RXD_COUNT);
        (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
                SMT_R1_RXD_COUNT) ;
        phys = le32_to_cpu(ds->rxd_nrdadr) ;
@@ -469,11 +469,11 @@ void init_fddi_driver(struct s_smc *smc, u_char *mac_addr)
         */
        i = 16 - ((long)smc->os.hwm.descr_p & 0xf) ;
        if (i != 16) {
-               DB_GEN("i = %d",i,0,3) ;
+               DB_GEN(3, "i = %d", i);
                smc->os.hwm.descr_p = (union s_fp_descr volatile *)
                        ((char *)smc->os.hwm.descr_p+i) ;
        }
-       DB_GEN("pt to descr area = %x",(void *)smc->os.hwm.descr_p,0,3) ;
+       DB_GEN(3, "pt to descr area = %p", smc->os.hwm.descr_p);
 
        init_txd_ring(smc) ;
        init_rxd_ring(smc) ;
@@ -501,7 +501,7 @@ SMbuf *smt_get_mbuf(struct s_smc *smc)
                mb->sm_off = 8 ;
                mb->sm_use_count = 1 ;
        }
-       DB_GEN("get SMbuf: mb = %x",(void *)mb,0,3) ;
+       DB_GEN(3, "get SMbuf: mb = %p", mb);
        return mb;      /* May be NULL */
 }
 
@@ -510,14 +510,14 @@ void smt_free_mbuf(struct s_smc *smc, SMbuf *mb)
 
        if (mb) {
                mb->sm_use_count-- ;
-               DB_GEN("free_mbuf: sm_use_count = %d",mb->sm_use_count,0,3) ;
+               DB_GEN(3, "free_mbuf: sm_use_count = %d", mb->sm_use_count);
                /*
                 * If the use_count is != zero the MBuf is queued
                 * more than once and must not be queued into the
                 * free MBuf queue
                 */
                if (!mb->sm_use_count) {
-                       DB_GEN("free SMbuf: mb = %x",(void *)mb,0,3) ;
+                       DB_GEN(3, "free SMbuf: mb = %p", mb);
 #ifndef        COMMON_MB_POOL
                        mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ;
                        smc->os.hwm.mbuf_pool.mb_free = mb ;
@@ -741,7 +741,7 @@ void fddi_isr(struct s_smc *smc)
 
        while ((is = GET_ISR() & ISR_MASK)) {
                NDD_TRACE("CH0B",is,0,0) ;
-               DB_GEN("ISA = 0x%x",is,0,7) ;
+               DB_GEN(7, "ISA = 0x%lx", is);
 
                if (is & IMASK_SLOW) {
                        NDD_TRACE("CH1b",is,0,0) ;
@@ -754,20 +754,20 @@ void fddi_isr(struct s_smc *smc)
                        if (is & IS_MINTR1) {   /* FORMAC+ STU1(U/L) */
                                stu = inpw(FM_A(FM_ST1U)) ;
                                stl = inpw(FM_A(FM_ST1L)) ;
-                               DB_GEN("Slow transmit complete",0,0,6) ;
+                               DB_GEN(6, "Slow transmit complete");
                                mac1_irq(smc,stu,stl) ;
                        }
                        if (is & IS_MINTR2) {   /* FORMAC+ STU2(U/L) */
                                stu= inpw(FM_A(FM_ST2U)) ;
                                stl= inpw(FM_A(FM_ST2L)) ;
-                               DB_GEN("Slow receive complete",0,0,6) ;
-                               DB_GEN("stl = %x : stu = %x",stl,stu,7) ;
+                               DB_GEN(6, "Slow receive complete");
+                               DB_GEN(7, "stl = %x : stu = %x", stl, stu);
                                mac2_irq(smc,stu,stl) ;
                        }
                        if (is & IS_MINTR3) {   /* FORMAC+ STU3(U/L) */
                                stu= inpw(FM_A(FM_ST3U)) ;
                                stl= inpw(FM_A(FM_ST3L)) ;
-                               DB_GEN("FORMAC Mode Register 3",0,0,6) ;
+                               DB_GEN(6, "FORMAC Mode Register 3");
                                mac3_irq(smc,stu,stl) ;
                        }
                        if (is & IS_TIMINT) {   /* Timer 82C54-2 */
@@ -814,7 +814,7 @@ void fddi_isr(struct s_smc *smc)
                 *      Fast Tx complete Async/Sync Queue (BMU service)
                 */
                if (is & (IS_XS_F|IS_XA_F)) {
-                       DB_GEN("Fast tx complete queue",0,0,6) ;
+                       DB_GEN(6, "Fast tx complete queue");
                        /*
                         * clear IRQ, Note: no IRQ is lost, because
                         *      we always service both queues
@@ -829,7 +829,7 @@ void fddi_isr(struct s_smc *smc)
                 *      Fast Rx Complete (BMU service)
                 */
                if (is & IS_R1_F) {
-                       DB_GEN("Fast receive complete",0,0,6) ;
+                       DB_GEN(6, "Fast receive complete");
                        /* clear IRQ */
 #ifndef USE_BREAK_ISR
                        outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
@@ -1083,13 +1083,13 @@ void process_receive(struct s_smc *smc)
 #endif
                n = 0 ;
                do {
-                       DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ;
+                       DB_RX(5, "Check RxD %p for OWN and EOF", r);
                        DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
                        rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));
 
                        if (rbctrl & BMU_OWN) {
                                NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
-                               DB_RX("End of RxDs",0,0,4) ;
+                               DB_RX(4, "End of RxDs");
                                goto rx_end ;
                        }
                        /*
@@ -1136,19 +1136,19 @@ void process_receive(struct s_smc *smc)
                        rx_used-- ;
                } while (!(rbctrl & BMU_EOF)) ;
                used_frags = frag_count ;
-               DB_RX("EOF set in RxD, used_frags = %d ",used_frags,0,5) ;
+               DB_RX(5, "EOF set in RxD, used_frags = %d", used_frags);
 
                /* maybe the next 2 DRV_BUF_FLUSH() can be skipped, because */
                /* BMU_ST_BUF will not be changed by the ASIC */
                DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
                while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
-                       DB_RX("Check STF bit in %x",(void *)r,0,5) ;
+                       DB_RX(5, "Check STF bit in %p", r);
                        r = r->rxd_next ;
                        DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
                        frag_count++ ;
                        rx_used-- ;
                }
-               DB_RX("STF bit found",0,0,5) ;
+               DB_RX(5, "STF bit found");
 
                /*
                 * The received frame is finished for the process receive
@@ -1164,7 +1164,7 @@ void process_receive(struct s_smc *smc)
                rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ;
 
                for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
-                       DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
+                       DB_RX(5, "dma_complete for RxD %p", r);
                        dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
                }
                smc->hw.fp.err_stats.err_valid++ ;
@@ -1173,34 +1173,34 @@ void process_receive(struct s_smc *smc)
                /* the length of the data including the FC */
                len = (rfsw & RD_LENGTH) - 4 ;
 
-               DB_RX("frame length = %d",len,0,4) ;
+               DB_RX(4, "frame length = %d", len);
                /*
                 * check the frame_length and all error flags
                 */
                if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){
                        if (rfsw & RD_S_MSRABT) {
-                               DB_RX("Frame aborted by the FORMAC",0,0,2) ;
+                               DB_RX(2, "Frame aborted by the FORMAC");
                                smc->hw.fp.err_stats.err_abort++ ;
                        }
                        /*
                         * check frame status
                         */
                        if (rfsw & RD_S_SEAC2) {
-                               DB_RX("E-Indicator set",0,0,2) ;
+                               DB_RX(2, "E-Indicator set");
                                smc->hw.fp.err_stats.err_e_indicator++ ;
                        }
                        if (rfsw & RD_S_SFRMERR) {
-                               DB_RX("CRC error",0,0,2) ;
+                               DB_RX(2, "CRC error");
                                smc->hw.fp.err_stats.err_crc++ ;
                        }
                        if (rfsw & RX_FS_IMPL) {
-                               DB_RX("Implementer frame",0,0,2) ;
+                               DB_RX(2, "Implementer frame");
                                smc->hw.fp.err_stats.err_imp_frame++ ;
                        }
                        goto abort_frame ;
                }
                if (len > FDDI_RAW_MTU-4) {
-                       DB_RX("Frame too long error",0,0,2) ;
+                       DB_RX(2, "Frame too long error");
                        smc->hw.fp.err_stats.err_too_long++ ;
                        goto abort_frame ;
                }
@@ -1209,12 +1209,12 @@ void process_receive(struct s_smc *smc)
                 * of aborted frames to the BMU
                 */
                if (len <= 4) {
-                       DB_RX("Frame length = 0",0,0,2) ;
+                       DB_RX(2, "Frame length = 0");
                        goto abort_frame ;
                }
 
                if (len != (n-4)) {
-                       DB_RX("BMU: rx len differs: [%d:%d]",len,n,4);
+                       DB_RX(4, "BMU: rx len differs: [%d:%d]", len, n);
                        smc->os.hwm.rx_len_error++ ;
                        goto abort_frame ;
                }
@@ -1223,7 +1223,7 @@ void process_receive(struct s_smc *smc)
                 * Check SA == MA
                 */
                virt = (u_char far *) rxd->rxd_virt ;
-               DB_RX("FC = %x",*virt,0,2) ;
+               DB_RX(2, "FC = %x", *virt);
                if (virt[12] == MA[5] &&
                    virt[11] == MA[4] &&
                    virt[10] == MA[3] &&
@@ -1250,7 +1250,7 @@ void process_receive(struct s_smc *smc)
                                            virt[3] != MA[2] ||
                                            virt[2] != MA[1] ||
                                            virt[1] != MA[0]) {
-                                               DB_RX("DA != MA and not multi- or broadcast",0,0,2) ;
+                                               DB_RX(2, "DA != MA and not multi- or broadcast");
                                                goto abort_frame ;
                                        }
                                }
@@ -1259,13 +1259,13 @@ void process_receive(struct s_smc *smc)
                        /*
                         * LLC frame received
                         */
-                       DB_RX("LLC - receive",0,0,4) ;
+                       DB_RX(4, "LLC - receive");
                        mac_drv_rx_complete(smc,rxd,frag_count,len) ;
                }
                else {
                        if (!(mb = smt_get_mbuf(smc))) {
                                smc->hw.fp.err_stats.err_no_buf++ ;
-                               DB_RX("No SMbuf; receive terminated",0,0,4) ;
+                               DB_RX(4, "No SMbuf; receive terminated");
                                goto abort_frame ;
                        }
                        data = smtod(mb,char *) - 1 ;
@@ -1278,7 +1278,7 @@ void process_receive(struct s_smc *smc)
 #else
                        for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
                                n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
-                               DB_RX("cp SMT frame to mb: len = %d",n,0,6) ;
+                               DB_RX(6, "cp SMT frame to mb: len = %d", n);
                                memcpy(data,r->rxd_virt,n) ;
                                data += n ;
                        }
@@ -1294,15 +1294,15 @@ void process_receive(struct s_smc *smc)
                        switch(fc) {
                        case FC_SMT_INFO :
                                smc->hw.fp.err_stats.err_smt_frame++ ;
-                               DB_RX("SMT frame received ",0,0,5) ;
+                               DB_RX(5, "SMT frame received");
 
                                if (smc->os.hwm.pass_SMT) {
-                                       DB_RX("pass SMT frame ",0,0,5) ;
+                                       DB_RX(5, "pass SMT frame");
                                        mac_drv_rx_complete(smc, rxd,
                                                frag_count,len) ;
                                }
                                else {
-                                       DB_RX("requeue RxD",0,0,5) ;
+                                       DB_RX(5, "requeue RxD");
                                        mac_drv_requeue_rxd(smc,rxd,frag_count);
                                }
 
@@ -1310,7 +1310,7 @@ void process_receive(struct s_smc *smc)
                                break ;
                        case FC_SMT_NSA :
                                smc->hw.fp.err_stats.err_smt_frame++ ;
-                               DB_RX("SMT frame received ",0,0,5) ;
+                               DB_RX(5, "SMT frame received");
 
                                /* if pass_NSA set pass the NSA frame or */
                                /* pass_SMT set and the A-Indicator */
@@ -1318,12 +1318,12 @@ void process_receive(struct s_smc *smc)
                                if (smc->os.hwm.pass_NSA ||
                                        (smc->os.hwm.pass_SMT &&
                                        !(rfsw & A_INDIC))) {
-                                       DB_RX("pass SMT frame ",0,0,5) ;
+                                       DB_RX(5, "pass SMT frame");
                                        mac_drv_rx_complete(smc, rxd,
                                                frag_count,len) ;
                                }
                                else {
-                                       DB_RX("requeue RxD",0,0,5) ;
+                                       DB_RX(5, "requeue RxD");
                                        mac_drv_requeue_rxd(smc,rxd,frag_count);
                                }
 
@@ -1331,12 +1331,12 @@ void process_receive(struct s_smc *smc)
                                break ;
                        case FC_BEACON :
                                if (smc->os.hwm.pass_DB) {
-                                       DB_RX("pass DB frame ",0,0,5) ;
+                                       DB_RX(5, "pass DB frame");
                                        mac_drv_rx_complete(smc, rxd,
                                                frag_count,len) ;
                                }
                                else {
-                                       DB_RX("requeue RxD",0,0,5) ;
+                                       DB_RX(5, "requeue RxD");
                                        mac_drv_requeue_rxd(smc,rxd,frag_count);
                                }
                                smt_free_mbuf(smc,mb) ;
@@ -1345,9 +1345,9 @@ void process_receive(struct s_smc *smc)
                                /*
                                 * unknown FC, abort the frame
                                 */
-                               DB_RX("unknown FC error",0,0,2) ;
+                               DB_RX(2, "unknown FC error");
                                smt_free_mbuf(smc,mb) ;
-                               DB_RX("requeue RxD",0,0,5) ;
+                               DB_RX(5, "requeue RxD");
                                mac_drv_requeue_rxd(smc,rxd,frag_count) ;
                                if ((fc & 0xf0) == FC_MAC)
                                        smc->hw.fp.err_stats.err_mac_frame++ ;
@@ -1358,16 +1358,16 @@ void process_receive(struct s_smc *smc)
                        }
                }
 
-               DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ;
+               DB_RX(3, "next RxD is %p", queue->rx_curr_get);
                NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ;
 
                continue ;
        /*--------------------------------------------------------------------*/
 abort_frame:
-               DB_RX("requeue RxD",0,0,5) ;
+               DB_RX(5, "requeue RxD");
                mac_drv_requeue_rxd(smc,rxd,frag_count) ;
 
-               DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ;
+               DB_RX(3, "next RxD is %p", queue->rx_curr_get);
                NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ;
        }
 rx_end:
@@ -1381,7 +1381,7 @@ static void smt_to_llc(struct s_smc *smc, SMbuf *mb)
 {
        u_char  fc ;
 
-       DB_RX("send a queued frame to the llc layer",0,0,4) ;
+       DB_RX(4, "send a queued frame to the llc layer");
        smc->os.hwm.r.len = mb->sm_len ;
        smc->os.hwm.r.mb_pos = smtod(mb,char *) ;
        fc = *smc->os.hwm.r.mb_pos ;
@@ -1419,7 +1419,7 @@ void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
        __le32  rbctrl;
 
        NDD_TRACE("RHfB",virt,len,frame_status) ;
-       DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ;
+       DB_RX(2, "hwm_rx_frag: len = %d, frame_status = %x", len, frame_status);
        r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
        r->rxd_virt = virt ;
        r->rxd_rbadr = cpu_to_le32(phys) ;
@@ -1475,7 +1475,7 @@ void mac_drv_clear_rx_queue(struct s_smc *smc)
        }
 
        queue = smc->hw.fp.rx[QUEUE_R1] ;
-       DB_RX("clear_rx_queue",0,0,5) ;
+       DB_RX(5, "clear_rx_queue");
 
        /*
         * dma_complete and mac_drv_clear_rxd for all RxDs / receive buffers
@@ -1483,7 +1483,7 @@ void mac_drv_clear_rx_queue(struct s_smc *smc)
        r = queue->rx_curr_get ;
        while (queue->rx_used) {
                DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
-               DB_RX("switch OWN bit of RxD 0x%p ",r,0,5) ;
+               DB_RX(5, "switch OWN bit of RxD 0x%p", r);
                r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
                frag_count = 1 ;
                DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
@@ -1491,23 +1491,23 @@ void mac_drv_clear_rx_queue(struct s_smc *smc)
                DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
                while (r != queue->rx_curr_put &&
                        !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
-                       DB_RX("Check STF bit in %x",(void *)r,0,5) ;
+                       DB_RX(5, "Check STF bit in %p", r);
                        r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
                        DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
                        r = r->rxd_next ;
                        DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
                        frag_count++ ;
                }
-               DB_RX("STF bit found",0,0,5) ;
+               DB_RX(5, "STF bit found");
                next_rxd = r ;
 
                for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){
-                       DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
+                       DB_RX(5, "dma_complete for RxD %p", r);
                        dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
                }
 
-               DB_RX("mac_drv_clear_rxd: RxD %x frag_count %d ",
-                       (void *)queue->rx_curr_get,frag_count,5) ;
+               DB_RX(5, "mac_drv_clear_rxd: RxD %p frag_count %d",
+                     queue->rx_curr_get, frag_count);
                mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ;
 
                queue->rx_curr_get = next_rxd ;
@@ -1554,7 +1554,7 @@ int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
        smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ;
        smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ;
        smc->os.hwm.tx_len = frame_len ;
-       DB_TX("hwm_tx_init: fc = %x, len = %d",fc,frame_len,3) ;
+       DB_TX(3, "hwm_tx_init: fc = %x, len = %d", fc, frame_len);
        if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC) {
                frame_status |= LAN_TX ;
        }
@@ -1577,23 +1577,23 @@ int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
        if (!smc->hw.mac_ring_is_up) {
                frame_status &= ~LAN_TX ;
                frame_status |= RING_DOWN ;
-               DB_TX("Ring is down: terminate LAN_TX",0,0,2) ;
+               DB_TX(2, "Ring is down: terminate LAN_TX");
        }
        if (frag_count > smc->os.hwm.tx_p->tx_free) {
 #ifndef        NDIS_OS2
                mac_drv_clear_txd(smc) ;
                if (frag_count > smc->os.hwm.tx_p->tx_free) {
-                       DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ;
+                       DB_TX(2, "Out of TxDs, terminate LAN_TX");
                        frame_status &= ~LAN_TX ;
                        frame_status |= OUT_OF_TXD ;
                }
 #else
-               DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ;
+               DB_TX(2, "Out of TxDs, terminate LAN_TX");
                frame_status &= ~LAN_TX ;
                frame_status |= OUT_OF_TXD ;
 #endif
        }
-       DB_TX("frame_status = %x",frame_status,0,3) ;
+       DB_TX(3, "frame_status = %x", frame_status);
        NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ;
        return frame_status;
 }
@@ -1642,10 +1642,10 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
         */
        t = queue->tx_curr_put ;
 
-       DB_TX("hwm_tx_frag: len = %d, frame_status = %x ",len,frame_status,2) ;
+       DB_TX(2, "hwm_tx_frag: len = %d, frame_status = %x", len, frame_status);
        if (frame_status & LAN_TX) {
                /* '*t' is already defined */
-               DB_TX("LAN_TX: TxD = %p, virt = %p ",t,virt,3) ;
+               DB_TX(3, "LAN_TX: TxD = %p, virt = %p", t, virt);
                t->txd_virt = virt ;
                t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
                t->txd_tbadr = cpu_to_le32(phys) ;
@@ -1674,11 +1674,11 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
                }
        }
        if (frame_status & LOC_TX) {
-               DB_TX("LOC_TX: ",0,0,3) ;
+               DB_TX(3, "LOC_TX:");
                if (frame_status & FIRST_FRAG) {
                        if(!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) {
                                smc->hw.fp.err_stats.err_no_buf++ ;
-                               DB_TX("No SMbuf; transmit terminated",0,0,4) ;
+                               DB_TX(4, "No SMbuf; transmit terminated");
                        }
                        else {
                                smc->os.hwm.tx_data =
@@ -1693,7 +1693,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
                }
                if (smc->os.hwm.tx_mb) {
 #ifndef        USE_OS_CPY
-                       DB_TX("copy fragment into MBuf ",0,0,3) ;
+                       DB_TX(3, "copy fragment into MBuf");
                        memcpy(smc->os.hwm.tx_data,virt,len) ;
                        smc->os.hwm.tx_data += len ;
 #endif
@@ -1718,7 +1718,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
                                smc->os.hwm.tx_data++ ;
                                smc->os.hwm.tx_mb->sm_len =
                                        smc->os.hwm.tx_len - 1 ;
-                               DB_TX("pass LLC frame to SMT ",0,0,3) ;
+                               DB_TX(3, "pass LLC frame to SMT");
                                smt_received_pack(smc,smc->os.hwm.tx_mb,
                                                RD_FS_LOCAL) ;
                        }
@@ -1733,7 +1733,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
  */
 static void queue_llc_rx(struct s_smc *smc, SMbuf *mb)
 {
-       DB_GEN("queue_llc_rx: mb = %x",(void *)mb,0,4) ;
+       DB_GEN(4, "queue_llc_rx: mb = %p", mb);
        smc->os.hwm.queued_rx_frames++ ;
        mb->sm_next = (SMbuf *)NULL ;
        if (smc->os.hwm.llc_rx_pipe == NULL) {
@@ -1763,7 +1763,7 @@ static SMbuf *get_llc_rx(struct s_smc *smc)
                smc->os.hwm.queued_rx_frames-- ;
                smc->os.hwm.llc_rx_pipe = mb->sm_next ;
        }
-       DB_GEN("get_llc_rx: mb = 0x%x",(void *)mb,0,4) ;
+       DB_GEN(4, "get_llc_rx: mb = 0x%p", mb);
        return mb;
 }
 
@@ -1773,7 +1773,7 @@ static SMbuf *get_llc_rx(struct s_smc *smc)
  */
 static void queue_txd_mb(struct s_smc *smc, SMbuf *mb)
 {
-       DB_GEN("_rx: queue_txd_mb = %x",(void *)mb,0,4) ;
+       DB_GEN(4, "_rx: queue_txd_mb = %p", mb);
        smc->os.hwm.queued_txd_mb++ ;
        mb->sm_next = (SMbuf *)NULL ;
        if (smc->os.hwm.txd_tx_pipe == NULL) {
@@ -1796,7 +1796,7 @@ static SMbuf *get_txd_mb(struct s_smc *smc)
                smc->os.hwm.queued_txd_mb-- ;
                smc->os.hwm.txd_tx_pipe = mb->sm_next ;
        }
-       DB_GEN("get_txd_mb: mb = 0x%x",(void *)mb,0,4) ;
+       DB_GEN(4, "get_txd_mb: mb = 0x%p", mb);
        return mb;
 }
 
@@ -1819,7 +1819,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
        __le32  tbctrl;
 
        NDD_TRACE("THSB",mb,fc,0) ;
-       DB_TX("smt_send_mbuf: mb = 0x%p, fc = 0x%x",mb,fc,4) ;
+       DB_TX(4, "smt_send_mbuf: mb = 0x%p, fc = 0x%x", mb, fc);
 
        mb->sm_off-- ;  /* set to fc */
        mb->sm_len++ ;  /* + fc */
@@ -1838,7 +1838,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
                if (n >= len) {
                        n = len ;
                }
-               DB_TX("frag: virt/len = 0x%x/%d ",(void *)data,n,5) ;
+               DB_TX(5, "frag: virt/len = 0x%p/%d", data, n);
                virt[frag_count] = data ;
                frag_len[frag_count] = n ;
                frag_count++ ;
@@ -1863,15 +1863,15 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
        if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) {
                frame_status &= ~LAN_TX;
                if (frame_status) {
-                       DB_TX("Ring is down: terminate LAN_TX",0,0,2) ;
+                       DB_TX(2, "Ring is down: terminate LAN_TX");
                }
                else {
-                       DB_TX("Ring is down: terminate transmission",0,0,2) ;
+                       DB_TX(2, "Ring is down: terminate transmission");
                        smt_free_mbuf(smc,mb) ;
                        return ;
                }
        }
-       DB_TX("frame_status = 0x%x ",frame_status,0,5) ;
+       DB_TX(5, "frame_status = 0x%x", frame_status);
 
        if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) {
                mb->sm_use_count = 2 ;
@@ -1881,7 +1881,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
                t = queue->tx_curr_put ;
                frame_status |= FIRST_FRAG ;
                for (i = 0; i < frag_count; i++) {
-                       DB_TX("init TxD = 0x%x",(void *)t,0,5) ;
+                       DB_TX(5, "init TxD = 0x%p", t);
                        if (i == frag_count-1) {
                                frame_status |= LAST_FRAG ;
                                t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR |
@@ -1912,7 +1912,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
        }
 
        if (frame_status & LOC_TX) {
-               DB_TX("pass Mbuf to LLC queue",0,0,5) ;
+               DB_TX(5, "pass Mbuf to LLC queue");
                queue_llc_rx(smc,mb) ;
        }
 
@@ -1953,18 +1953,18 @@ static void mac_drv_clear_txd(struct s_smc *smc)
        for (i = QUEUE_S; i <= QUEUE_A0; i++) {
                queue = smc->hw.fp.tx[i] ;
                t1 = queue->tx_curr_get ;
-               DB_TX("clear_txd: QUEUE = %d (0=sync/1=async)",i,0,5) ;
+               DB_TX(5, "clear_txd: QUEUE = %d (0=sync/1=async)", i);
 
                for ( ; ; ) {
                        frag_count = 0 ;
 
                        do {
                                DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
-                               DB_TX("check OWN/EOF bit of TxD 0x%p",t1,0,5) ;
+                               DB_TX(5, "check OWN/EOF bit of TxD 0x%p", t1);
                                tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));
 
                                if (tbctrl & BMU_OWN || !queue->tx_used){
-                                       DB_TX("End of TxDs queue %d",i,0,4) ;
+                                       DB_TX(4, "End of TxDs queue %d", i);
                                        goto free_next_queue ;  /* next queue */
                                }
                                t1 = t1->txd_next ;
@@ -1988,11 +1988,11 @@ static void mac_drv_clear_txd(struct s_smc *smc)
                        }
                        else {
 #ifndef PASS_1ST_TXD_2_TX_COMP
-                               DB_TX("mac_drv_tx_comp for TxD 0x%p",t2,0,4) ;
+                               DB_TX(4, "mac_drv_tx_comp for TxD 0x%p", t2);
                                mac_drv_tx_complete(smc,t2) ;
 #else
-                               DB_TX("mac_drv_tx_comp for TxD 0x%x",
-                                       queue->tx_curr_get,0,4) ;
+                               DB_TX(4, "mac_drv_tx_comp for TxD 0x%x",
+                                     queue->tx_curr_get);
                                mac_drv_tx_complete(smc,queue->tx_curr_get) ;
 #endif
                        }
@@ -2043,7 +2043,7 @@ void mac_drv_clear_tx_queue(struct s_smc *smc)
 
        for (i = QUEUE_S; i <= QUEUE_A0; i++) {
                queue = smc->hw.fp.tx[i] ;
-               DB_TX("clear_tx_queue: QUEUE = %d (0=sync/1=async)",i,0,5) ;
+               DB_TX(5, "clear_tx_queue: QUEUE = %d (0=sync/1=async)", i);
 
                /*
                 * switch the OWN bit of all pending frames to the host
@@ -2052,7 +2052,7 @@ void mac_drv_clear_tx_queue(struct s_smc *smc)
                tx_used = queue->tx_used ;
                while (tx_used) {
                        DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
-                       DB_TX("switch OWN bit of TxD 0x%p ",t,0,5) ;
+                       DB_TX(5, "switch OWN bit of TxD 0x%p", t);
                        t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
                        DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
                        t = t->txd_next ;
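
The hwmtm.c hunks above convert the driver's fixed-arity debug macros (format string first, exactly two value slots, trailing debug level) into a printf-style form with the level up front, which is why the padding zeros in calls like DB_TX(...,0,0,5) disappear. Below is a minimal userspace sketch of the two macro shapes; the kernel's actual DB_TX definition lives in the skfp headers and may differ, so this only models the calling convention. It needs GNU C for ##__VA_ARGS__ (gcc/clang).

/*
 * Sketch only: old fixed-arity debug macro vs. the new varargs form.
 */
#include <stdio.h>

#define DB_LEVEL 5

/* old shape: fixed arity, level last, unused slots padded with 0 */
#define DB_TX_OLD(fmt, a, b, lev) \
	do { if ((lev) <= DB_LEVEL) printf(fmt "\n", (a), (b)); } while (0)

/* new shape: level first, printf-style varargs, no padding needed */
#define DB_TX(lev, fmt, ...) \
	do { if ((lev) <= DB_LEVEL) printf(fmt "\n", ##__VA_ARGS__); } while (0)

int main(void)
{
	int frame_status = 0x20;

	DB_TX_OLD("frame_status = 0x%x ", frame_status, 0, 5); /* dummy 0 */
	DB_TX(5, "frame_status = 0x%x", frame_status);         /* clean */
	return 0;
}
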
index 88d02d0a42c4ac1e93d576b1eb802c15adadd10c..a9ecf923f63d5c56dbcfd7947d81569658e470d4 100644 (file)
@@ -91,7 +91,6 @@ int p
 #define PC8_ACTIVE             8
 #define PC9_MAINT              9
 
-#ifdef DEBUG
 /*
  * symbolic state names
  */
@@ -113,7 +112,6 @@ static const char * const pcm_events[] = {
        "PC_TIMEOUT_TL_MIN","PC_TIMEOUT_T_NEXT","PC_TIMEOUT_LCT",
        "PC_NSE","PC_LEM"
 } ;
-#endif
 
 #ifdef MOT_ELM
 /*
@@ -610,12 +608,11 @@ void pcm(struct s_smc *smc, const int np, int event)
        mib = phy->mib ;
        oldstate = mib->fddiPORTPCMState ;
        do {
-               DB_PCM("PCM %c: state %s",
-                       phy->phy_name,
-                       (mib->fddiPORTPCMState & AFLAG) ? "ACTIONS " : "") ;
-               DB_PCM("%s, event %s\n",
-                       pcm_states[mib->fddiPORTPCMState & ~AFLAG],
-                       pcm_events[event]) ;
+               DB_PCM("PCM %c: state %s%s, event %s",
+                      phy->phy_name,
+                      mib->fddiPORTPCMState & AFLAG ? "ACTIONS " : "",
+                      pcm_states[mib->fddiPORTPCMState & ~AFLAG],
+                      pcm_events[event]);
                state = mib->fddiPORTPCMState ;
                pcm_fsm(smc,phy,event) ;
                event = 0 ;
@@ -1017,7 +1014,7 @@ static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd)
                ACTIONS_DONE() ;
                break ;
        case PC9_MAINT :
-               DB_PCMN(1,"PCM %c : MAINT\n",phy->phy_name,0) ;
+               DB_PCMN(1, "PCM %c : MAINT", phy->phy_name);
                /*PC90*/
                if (cmd == PC_ENABLE) {
                        GO_STATE(PC0_OFF) ;
@@ -1126,13 +1123,12 @@ static void lem_evaluate(struct s_smc *smc, struct s_phy *phy)
        }
 
        if (lem->lem_errors) {
-               DB_PCMN(1,"LEM %c :\n",phy->np == PB? 'B' : 'A',0) ;
-               DB_PCMN(1,"errors      : %ld\n",lem->lem_errors,0) ;
-               DB_PCMN(1,"sum_errors  : %ld\n",mib->fddiPORTLem_Ct,0) ;
-               DB_PCMN(1,"current BER : 10E-%d\n",ber/100,0) ;
-               DB_PCMN(1,"float BER   : 10E-(%d/100)\n",lem->lem_float_ber,0) ;
-               DB_PCMN(1,"avg. BER    : 10E-%d\n",
-                       mib->fddiPORTLer_Estimate,0) ;
+               DB_PCMN(1, "LEM %c :", phy->np == PB ? 'B' : 'A');
+               DB_PCMN(1, "errors      : %ld", lem->lem_errors);
+               DB_PCMN(1, "sum_errors  : %ld", mib->fddiPORTLem_Ct);
+               DB_PCMN(1, "current BER : 10E-%d", ber / 100);
+               DB_PCMN(1, "float BER   : 10E-(%d/100)", lem->lem_float_ber);
+               DB_PCMN(1, "avg. BER    : 10E-%d", mib->fddiPORTLer_Estimate);
        }
 
        lem->lem_errors = 0L ;
@@ -1160,8 +1156,8 @@ static void lem_evaluate(struct s_smc *smc, struct s_phy *phy)
 
                /*PC81b*/
 #ifdef CONCENTRATOR
-               DB_PCMN(1,"PCM: LER cutoff on port %d cutoff %d\n",
-                       phy->np, mib->fddiPORTLer_Cutoff) ;
+               DB_PCMN(1, "PCM: LER cutoff on port %d cutoff %d",
+                       phy->np, mib->fddiPORTLer_Cutoff);
 #endif
 #ifdef SMT_EXT_CUTOFF
                smt_port_off_event(smc,phy->np);
@@ -1213,7 +1209,7 @@ static void lem_check_lct(struct s_smc *smc, struct s_phy *phy)
                                phy->pc_lem_fail = TRUE ;
                        break ;
                }
-               DB_PCMN(1," >>errors : %d\n",lem->lem_errors,0) ;
+               DB_PCMN(1, " >>errors : %lu", lem->lem_errors);
        }
        if (phy->pc_lem_fail) {
                mib->fddiPORTLCTFail_Ct++ ;
@@ -1277,7 +1273,7 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
 
        mib = phy->mib ;
 
-       DB_PCMN(1,"SIG rec %x %x:\n", bit,phy->r_val[bit] ) ;
+       DB_PCMN(1, "SIG rec %x %x:", bit, phy->r_val[bit]);
        bit++ ;
 
        switch(bit) {
@@ -1298,8 +1294,8 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
        case 4:
                if (mib->fddiPORTMy_Type == TM &&
                        mib->fddiPORTNeighborType == TM) {
-                       DB_PCMN(1,"PCM %c : E100 withhold M-M\n",
-                               phy->phy_name,0) ;
+                       DB_PCMN(1, "PCM %c : E100 withhold M-M",
+                               phy->phy_name);
                        mib->fddiPORTPC_Withhold = PC_WH_M_M ;
                        RS_SET(smc,RS_EVENT) ;
                }
@@ -1321,16 +1317,16 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
                else {
                        mib->fddiPORTPC_Withhold = PC_WH_OTHER ;
                        RS_SET(smc,RS_EVENT) ;
-                       DB_PCMN(1,"PCM %c : E101 withhold other\n",
-                               phy->phy_name,0) ;
+                       DB_PCMN(1, "PCM %c : E101 withhold other",
+                               phy->phy_name);
                }
                phy->twisted = ((mib->fddiPORTMy_Type != TS) &&
                                (mib->fddiPORTMy_Type != TM) &&
                                (mib->fddiPORTNeighborType ==
                                mib->fddiPORTMy_Type)) ;
                if (phy->twisted) {
-                       DB_PCMN(1,"PCM %c : E102 !!! TWISTED !!!\n",
-                               phy->phy_name,0) ;
+                       DB_PCMN(1, "PCM %c : E102 !!! TWISTED !!!",
+                               phy->phy_name);
                }
                break ;
        case 5 :
@@ -1368,7 +1364,7 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
                if (phy->t_next[7] > smc->s.pcm_lc_medium) {
                        start_pcm_timer0(smc,phy->t_next[7],PC_TIMEOUT_LCT,phy);
                }
-               DB_PCMN(1,"LCT timer = %ld us\n", phy->t_next[7], 0) ;
+               DB_PCMN(1, "LCT timer = %ld us", phy->t_next[7]);
                phy->t_next[9] = smc->s.pcm_t_next_9 ;
                break ;
        case 7:
@@ -1379,8 +1375,9 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
                break ;
        case 8:
                if (phy->t_val[7] || phy->r_val[7]) {
-                       DB_PCMN(1,"PCM %c : E103 LCT fail %s\n",
-                               phy->phy_name,phy->t_val[7]? "local":"remote") ;
+                       DB_PCMN(1, "PCM %c : E103 LCT fail %s",
+                               phy->phy_name,
+                               phy->t_val[7] ? "local" : "remote");
                        queue_event(smc,(int)(EVENT_PCM+phy->np),PC_START) ;
                }
                break ;
@@ -1529,8 +1526,7 @@ static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy
                phy->cf_loop = FALSE ;
                lem_check_lct(smc,phy) ;
                if (phy->pc_lem_fail) {
-                       DB_PCMN(1,"PCM %c : E104 LCT failed\n",
-                               phy->phy_name,0) ;
+                       DB_PCMN(1, "PCM %c : E104 LCT failed", phy->phy_name);
                        phy->t_val[7] = 1 ;
                }
                else
@@ -1580,7 +1576,7 @@ static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy
                mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ;
                break ;
        }
-       DB_PCMN(1,"SIG snd %x %x:\n", bit,phy->t_val[bit] ) ;
+       DB_PCMN(1, "SIG snd %x %x:", bit, phy->t_val[bit]);
 }
 
 /*
@@ -1783,13 +1779,14 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd)
                }
 
                /*jd 05-Aug-1999 changed: Bug #10419 */
-               DB_PCMN(1,"PLC %d: MDcF = %x\n", np, smc->e.DisconnectFlag);
+               DB_PCMN(1, "PLC %d: MDcF = %x", np, smc->e.DisconnectFlag);
                if (smc->e.DisconnectFlag == FALSE) {
-                       DB_PCMN(1,"PLC %d: restart (reason %x)\n", np, reason);
+                       DB_PCMN(1, "PLC %d: restart (reason %x)", np, reason);
                        queue_event(smc,EVENT_PCM+np,PC_START) ;
                }
                else {
-                       DB_PCMN(1,"PLC %d: NO!! restart (reason %x)\n", np, reason);
+                       DB_PCMN(1, "PLC %d: NO!! restart (reason %x)",
+                               np, reason);
                }
                return ;
        }
@@ -1810,8 +1807,8 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd)
        if (cmd & PL_TRACE_PROP) {      /* MLS while PC8_ACTIV || PC2_TRACE */
                /*PC22b*/
                if (!phy->tr_flag) {
-                       DB_PCMN(1,"PCM : irq TRACE_PROP %d %d\n",
-                               np,smc->mib.fddiSMTECMState) ;
+                       DB_PCMN(1, "PCM : irq TRACE_PROP %d %d",
+                               np, smc->mib.fddiSMTECMState);
                        phy->tr_flag = TRUE ;
                        smc->e.trace_prop |= ENTITY_BIT(ENTITY_PHY(np)) ;
                        queue_event(smc,EVENT_ECM,EC_TRACE_PROP) ;
@@ -1824,8 +1821,9 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd)
        if ((cmd & PL_SELF_TEST) && (phy->mib->fddiPORTPCMState == PC2_TRACE)) {
                /*PC22a*/
                if (smc->e.path_test == PT_PASSED) {
-                       DB_PCMN(1,"PCM : state = %s %d\n", get_pcmstate(smc,np),
-                               phy->mib->fddiPORTPCMState) ;
+                       DB_PCMN(1, "PCM : state = %s %d",
+                               get_pcmstate(smc, np),
+                               phy->mib->fddiPORTPCMState);
 
                        smc->e.path_test = PT_PENDING ;
                        queue_event(smc,EVENT_ECM,EC_PATH_TEST) ;
@@ -1835,9 +1833,10 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd)
                /* break_required (TNE > NS_Max) */
                if (phy->mib->fddiPORTPCMState == PC8_ACTIVE) {
                        if (!phy->tr_flag) {
-                          DB_PCMN(1,"PCM %c : PC81 %s\n",phy->phy_name,"NSE");
-                          queue_event(smc,EVENT_PCM+np,PC_START) ;
-                          return ;
+                               DB_PCMN(1, "PCM %c : PC81 %s",
+                                       phy->phy_name, "NSE");
+                               queue_event(smc, EVENT_PCM + np, PC_START);
+                               return;
                        }
                }
        }
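
The pcmplc.c diff also drops the #ifdef DEBUG guards around the pcm_states[]/pcm_events[] name tables, and the merged DB_PCM call now references them unconditionally. The sketch below shows why the tables must then exist in every build; the macro shape (a runtime-gated printf) is an assumption, not the driver's exact definition.

/*
 * Sketch: a debug macro that is compiled in every configuration but
 * gated at runtime forces its string tables to be compiled in too.
 */
#include <stdio.h>

static int debug_enabled = 1;	/* runtime switch instead of #ifdef DEBUG */

#define DB_PCM(fmt, ...) \
	do { if (debug_enabled) printf(fmt "\n", ##__VA_ARGS__); } while (0)

/* must now be present in non-debug builds as well */
static const char *const pcm_states[] = { "OFF", "BREAK", "TRACE" };

int main(void)
{
	int state = 1;

	DB_PCM("PCM: state %s", pcm_states[state]);
	return 0;
}
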
index 52fa162a31e097af411cd20592a05fc501c5bbed..eee447315e328fa57549467199e8f3b2a741b87a 100644 (file)
@@ -284,7 +284,7 @@ void smt_pmf_received_pack(struct s_smc *smc, SMbuf *mb, int local)
        SMbuf           *reply ;
 
        sm = smtod(mb,struct smt_header *) ;
-       DB_SMT("SMT: processing PMF frame at %p len %d\n",sm,mb->sm_len) ;
+       DB_SMT("SMT: processing PMF frame at %p len %d", sm, mb->sm_len);
 #ifdef DEBUG
        dump_smt(smc,sm,"PMF Received") ;
 #endif
@@ -1585,7 +1585,7 @@ void dump_smt(struct s_smc *smc, struct smt_header *sm, char *text)
        dump_hex((char *) &sm->smt_source,6) ;
        printf(" Class %x Type %x Version %x\n",
                sm->smt_class,sm->smt_type,sm->smt_version)  ;
-       printf("TID %lx\t\tSID ",sm->smt_tid) ;
+       printf("TID %x\t\tSID ", sm->smt_tid);
        dump_hex((char *) &sm->smt_sid,8) ;
        printf(" LEN %x\n",sm->smt_len) ;
 
index ef8d5672d9e8dad4e146332fee901f56b58a8ef2..52b22095273a2259e1c26436f4b6b9705862cbb9 100644 (file)
@@ -70,7 +70,6 @@ static const char ID_sccs[] = "@(#)rmt.c      2.13 99/07/02 (C) SK " ;
 #define RM6_DIRECTED   6               /* sending directed beacons */
 #define RM7_TRACE      7               /* trace initiated */
 
-#ifdef DEBUG
 /*
  * symbolic state names
  */
@@ -91,7 +90,6 @@ static const char * const rmt_events[] = {
        "RM_TIMEOUT_ANNOUNCE","RM_TIMEOUT_T_DIRECT",
        "RM_TIMEOUT_D_MAX","RM_TIMEOUT_POLL","RM_TX_STATE_CHANGE"
 } ;
-#endif
 
 /*
  * Globals
@@ -149,10 +147,10 @@ void rmt(struct s_smc *smc, int event)
        int     state ;
 
        do {
-               DB_RMT("RMT : state %s%s",
-                       (smc->mib.m[MAC0].fddiMACRMTState & AFLAG) ? "ACTIONS " : "",
-                       rmt_states[smc->mib.m[MAC0].fddiMACRMTState & ~AFLAG]) ;
-               DB_RMT(" event %s\n",rmt_events[event],0) ;
+               DB_RMT("RMT : state %s%s event %s",
+                      smc->mib.m[MAC0].fddiMACRMTState & AFLAG ? "ACTIONS " : "",
+                      rmt_states[smc->mib.m[MAC0].fddiMACRMTState & ~AFLAG],
+                      rmt_events[event]);
                state = smc->mib.m[MAC0].fddiMACRMTState ;
                rmt_fsm(smc,event) ;
                event = 0 ;
@@ -191,7 +189,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
                smc->r.loop_avail = FALSE ;
                smc->r.sm_ma_avail = FALSE ;
                smc->r.no_flag = TRUE ;
-               DB_RMTN(1,"RMT : ISOLATED\n",0,0) ;
+               DB_RMTN(1, "RMT : ISOLATED");
                ACTIONS_DONE() ;
                break ;
        case RM0_ISOLATED :
@@ -213,7 +211,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
                stop_rmt_timer1(smc) ;
                stop_rmt_timer2(smc) ;
                sm_ma_control(smc,MA_BEACON) ;
-               DB_RMTN(1,"RMT : RING DOWN\n",0,0) ;
+               DB_RMTN(1, "RMT : RING DOWN");
                RS_SET(smc,RS_NORINGOP) ;
                smc->r.sm_ma_avail = FALSE ;
                rmt_indication(smc,0) ;
@@ -248,7 +246,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
                                else
                        smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
                }
-               DB_RMTN(1,"RMT : RING UP\n",0,0) ;
+               DB_RMTN(1, "RMT : RING UP");
                RS_CLEAR(smc,RS_NORINGOP) ;
                RS_SET(smc,RS_RINGOPCHANGE) ;
                rmt_indication(smc,1) ;
@@ -285,7 +283,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
                start_rmt_timer1(smc,smc->s.rmt_t_stuck,RM_TIMEOUT_T_STUCK) ;
                start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ;
                sm_mac_check_beacon_claim(smc) ;
-               DB_RMTN(1,"RMT : RM3_DETECT\n",0,0) ;
+               DB_RMTN(1, "RMT : RM3_DETECT");
                ACTIONS_DONE() ;
                break ;
        case RM3_DETECT :
@@ -327,7 +325,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
                         * trace !
                         */
                        if ((tx =  sm_mac_get_tx_state(smc)) == 4 || tx == 5) {
-                       DB_RMTN(2,"RMT : DETECT && TRT_EXPIRED && T4/T5\n",0,0);
+                       DB_RMTN(2, "RMT : DETECT && TRT_EXPIRED && T4/T5");
                                smc->r.bn_flag = TRUE ;
                                /*
                                 * If one of the upstream stations beaconed
@@ -344,9 +342,8 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
                         * must be cleared in order to get in this condition.
                         */
 
-                       DB_RMTN(2,
-                       "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)\n",
-                       tx,smc->r.bn_flag) ;
+                       DB_RMTN(2, "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)",
+                               tx, smc->r.bn_flag);
                }
                /*RM34a*/
                else if (cmd == RM_MY_CLAIM && smc->r.timer0_exp) {
@@ -378,7 +375,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
                start_rmt_timer1(smc,smc->s.rmt_t_stuck,RM_TIMEOUT_T_STUCK) ;
                start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ;
                sm_mac_check_beacon_claim(smc) ;
-               DB_RMTN(1,"RMT : RM4_NON_OP_DUP\n",0,0) ;
+               DB_RMTN(1, "RMT : RM4_NON_OP_DUP");
                ACTIONS_DONE() ;
                break ;
        case RM4_NON_OP_DUP :
@@ -406,7 +403,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
                         * trace !
                         */
                        if ((tx =  sm_mac_get_tx_state(smc)) == 4 || tx == 5) {
-                       DB_RMTN(2,"RMT : NOPDUP && TRT_EXPIRED && T4/T5\n",0,0);
+                       DB_RMTN(2, "RMT : NOPDUP && TRT_EXPIRED && T4/T5");
                                smc->r.bn_flag = TRUE ;
                                /*
                                 * If one of the upstream stations beaconed
@@ -423,9 +420,8 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
                         * must be cleared in order to get in this condition.
                         */
 
-                       DB_RMTN(2,
-                       "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)\n",
-                       tx,smc->r.bn_flag) ;
+                       DB_RMTN(2, "RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)",
+                               tx, smc->r.bn_flag);
                }
                /*RM44c*/
                else if (cmd == RM_TIMEOUT_ANNOUNCE && !smc->r.bn_flag) {
@@ -448,7 +444,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
                stop_rmt_timer0(smc) ;
                stop_rmt_timer1(smc) ;
                stop_rmt_timer2(smc) ;
-               DB_RMTN(1,"RMT : RM5_RING_OP_DUP\n",0,0) ;
+               DB_RMTN(1, "RMT : RM5_RING_OP_DUP");
                ACTIONS_DONE() ;
                break;
        case RM5_RING_OP_DUP :
@@ -472,7 +468,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
                start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ;
                sm_ma_control(smc,MA_DIRECTED) ;
                RS_SET(smc,RS_BEACON) ;
-               DB_RMTN(1,"RMT : RM6_DIRECTED\n",0,0) ;
+               DB_RMTN(1, "RMT : RM6_DIRECTED");
                ACTIONS_DONE() ;
                break ;
        case RM6_DIRECTED :
@@ -515,7 +511,7 @@ static void rmt_fsm(struct s_smc *smc, int cmd)
                stop_rmt_timer2(smc) ;
                smc->e.trace_prop |= ENTITY_BIT(ENTITY_MAC) ;
                queue_event(smc,EVENT_ECM,EC_TRACE_PROP) ;
-               DB_RMTN(1,"RMT : RM7_TRACE\n",0,0) ;
+               DB_RMTN(1, "RMT : RM7_TRACE");
                ACTIONS_DONE() ;
                break ;
        case RM7_TRACE :
index e80a08903fcf413ed27a11c04fc3eb8a8683cba6..ab939ae7e5b5c27417f79d635bb38adcd19faa5e 100644 (file)
@@ -35,7 +35,6 @@ static const char ID_sccs[] = "@(#)smt.c      2.43 98/11/23 (C) SK " ;
 
 #define SMT_TID_MAGIC  0x1f0a7b3c
 
-#ifdef DEBUG
 static const char *const smt_type_name[] = {
        "SMT_00??", "SMT_INFO", "SMT_02??", "SMT_03??",
        "SMT_04??", "SMT_05??", "SMT_06??", "SMT_07??",
@@ -47,7 +46,7 @@ static const char *const smt_class_name[] = {
        "UNKNOWN","NIF","SIF_CONFIG","SIF_OPER","ECF","RAF","RDF",
        "SRF","PMF_GET","PMF_SET","ESF"
 } ;
-#endif
+
 #define LAST_CLASS     (SMT_PMF_SET)
 
 static const struct fddi_addr SMT_Unknown = {
@@ -203,7 +202,7 @@ void smt_agent_task(struct s_smc *smc)
 {
        smt_timer_start(smc,&smc->sm.smt_timer, (u_long)1000000L,
                EV_TOKEN(EVENT_SMT,SM_TIMER)) ;
-       DB_SMT("SMT agent task\n",0,0) ;
+       DB_SMT("SMT agent task");
 }
 
 #ifndef SMT_REAL_TOKEN_CT
@@ -396,7 +395,7 @@ void smt_event(struct s_smc *smc, int event)
         */
        if (smc->sm.smt_tvu &&
            time - smc->sm.smt_tvu > 228*TICKS_PER_SECOND) {
-               DB_SMT("SMT : UNA expired\n",0,0) ;
+               DB_SMT("SMT : UNA expired");
                smc->sm.smt_tvu = 0 ;
 
                if (!is_equal(&smc->mib.m[MAC0].fddiMACUpstreamNbr,
@@ -419,7 +418,7 @@ void smt_event(struct s_smc *smc, int event)
        }
        if (smc->sm.smt_tvd &&
            time - smc->sm.smt_tvd > 228*TICKS_PER_SECOND) {
-               DB_SMT("SMT : DNA expired\n",0,0) ;
+               DB_SMT("SMT : DNA expired");
                smc->sm.smt_tvd = 0 ;
                if (!is_equal(&smc->mib.m[MAC0].fddiMACDownstreamNbr,
                        &SMT_Unknown)){
@@ -504,10 +503,11 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
 #endif
 
        smt_swap_para(sm,(int) mb->sm_len,1) ;
-       DB_SMT("SMT : received packet [%s] at 0x%p\n",
-               smt_type_name[m_fc(mb) & 0xf],sm) ;
-       DB_SMT("SMT : version %d, class %s\n",sm->smt_version,
-               smt_class_name[(sm->smt_class>LAST_CLASS)?0 : sm->smt_class]) ;
+       DB_SMT("SMT : received packet [%s] at 0x%p",
+              smt_type_name[m_fc(mb) & 0xf], sm);
+       DB_SMT("SMT : version %d, class %s",
+              sm->smt_version,
+              smt_class_name[sm->smt_class > LAST_CLASS ? 0 : sm->smt_class]);
 
 #ifdef SBA
        /*
@@ -524,8 +524,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
         * ignore any packet with NSA and A-indicator set
         */
        if ( (fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) {
-               DB_SMT("SMT : ignoring NSA with A-indicator set from %s\n",
-                       addr_to_string(&sm->smt_source),0) ;
+               DB_SMT("SMT : ignoring NSA with A-indicator set from %s",
+                      addr_to_string(&sm->smt_source));
                smt_free_mbuf(smc,mb) ;
                return ;
        }
@@ -556,15 +556,15 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
                break ;
        }
        if (illegal) {
-               DB_SMT("SMT : version = %d, dest = %s\n",
-                       sm->smt_version,addr_to_string(&sm->smt_source)) ;
+               DB_SMT("SMT : version = %d, dest = %s",
+                      sm->smt_version, addr_to_string(&sm->smt_source));
                smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_VERSION,local) ;
                smt_free_mbuf(smc,mb) ;
                return ;
        }
        if ((sm->smt_len > mb->sm_len - sizeof(struct smt_header)) ||
            ((sm->smt_len & 3) && (sm->smt_class != SMT_ECF))) {
-               DB_SMT("SMT: info length error, len = %d\n",sm->smt_len,0) ;
+               DB_SMT("SMT: info length error, len = %d", sm->smt_len);
                smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_LENGTH,local) ;
                smt_free_mbuf(smc,mb) ;
                return ;
@@ -572,7 +572,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
        switch (sm->smt_class) {
        case SMT_NIF :
                if (smt_check_para(smc,sm,plist_nif)) {
-                       DB_SMT("SMT: NIF with para problem, ignoring\n",0,0) ;
+                       DB_SMT("SMT: NIF with para problem, ignoring");
                        break ;
                }
                switch (sm->smt_type) {
@@ -586,8 +586,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
                                if (!is_equal(
                                        &smc->mib.m[MAC0].fddiMACUpstreamNbr,
                                        &sm->smt_source)) {
-                                       DB_SMT("SMT : updated my UNA = %s\n",
-                                       addr_to_string(&sm->smt_source),0) ;
+                                       DB_SMT("SMT : updated my UNA = %s",
+                                              addr_to_string(&sm->smt_source));
                                        if (!is_equal(&smc->mib.m[MAC0].
                                            fddiMACUpstreamNbr,&SMT_Unknown)){
                                         /* Do not update unknown address */
@@ -616,8 +616,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
                            is_individual(&sm->smt_source) &&
                            ((!(fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) ||
                             (m_fc(mb) != FC_SMT_NSA))) {
-                               DB_SMT("SMT : replying to NIF request %s\n",
-                                       addr_to_string(&sm->smt_source),0) ;
+                               DB_SMT("SMT : replying to NIF request %s",
+                                      addr_to_string(&sm->smt_source));
                                smt_send_nif(smc,&sm->smt_source,
                                        FC_SMT_INFO,
                                        sm->smt_tid,
@@ -625,11 +625,11 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
                        }
                        break ;
                case SMT_REPLY :
-                       DB_SMT("SMT : received NIF response from %s\n",
-                               addr_to_string(&sm->smt_source),0) ;
+                       DB_SMT("SMT : received NIF response from %s",
+                              addr_to_string(&sm->smt_source));
                        if (fs & A_INDICATOR) {
                                smc->sm.pend[SMT_TID_NIF] = 0 ;
-                               DB_SMT("SMT : duplicate address\n",0,0) ;
+                               DB_SMT("SMT : duplicate address");
                                smc->mib.m[MAC0].fddiMACDupAddressTest =
                                        DA_FAILED ;
                                smc->r.dup_addr_test = DA_FAILED ;
@@ -644,7 +644,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
                                if (!is_equal(
                                        &smc->mib.m[MAC0].fddiMACDownstreamNbr,
                                        &sm->smt_source)) {
-                                       DB_SMT("SMT : updated my DNA\n",0,0) ;
+                                       DB_SMT("SMT : updated my DNA");
                                        if (!is_equal(&smc->mib.m[MAC0].
                                         fddiMACDownstreamNbr, &SMT_Unknown)){
                                         /* Do not update unknown address */
@@ -671,11 +671,11 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
                        }
                        else if (sm->smt_tid ==
                                smc->sm.pend[SMT_TID_NIF_TEST]) {
-                               DB_SMT("SMT : NIF test TID ok\n",0,0) ;
+                               DB_SMT("SMT : NIF test TID ok");
                        }
                        else {
-                               DB_SMT("SMT : expected TID %lx, got %lx\n",
-                               smc->sm.pend[SMT_TID_NIF],sm->smt_tid) ;
+                               DB_SMT("SMT : expected TID %lx, got %x",
+                                      smc->sm.pend[SMT_TID_NIF], sm->smt_tid);
                        }
                        break ;
                default :
@@ -686,53 +686,53 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
        case SMT_SIF_CONFIG :   /* station information */
                if (sm->smt_type != SMT_REQUEST)
                        break ;
-               DB_SMT("SMT : replying to SIF Config request from %s\n",
-                       addr_to_string(&sm->smt_source),0) ;
+               DB_SMT("SMT : replying to SIF Config request from %s",
+                      addr_to_string(&sm->smt_source));
                smt_send_sif_config(smc,&sm->smt_source,sm->smt_tid,local) ;
                break ;
        case SMT_SIF_OPER :     /* station information */
                if (sm->smt_type != SMT_REQUEST)
                        break ;
-               DB_SMT("SMT : replying to SIF Operation request from %s\n",
-                       addr_to_string(&sm->smt_source),0) ;
+               DB_SMT("SMT : replying to SIF Operation request from %s",
+                      addr_to_string(&sm->smt_source));
                smt_send_sif_operation(smc,&sm->smt_source,sm->smt_tid,local) ;
                break ;
        case SMT_ECF :          /* echo frame */
                switch (sm->smt_type) {
                case SMT_REPLY :
                        smc->mib.priv.fddiPRIVECF_Reply_Rx++ ;
-                       DB_SMT("SMT: received ECF reply from %s\n",
-                               addr_to_string(&sm->smt_source),0) ;
+                       DB_SMT("SMT: received ECF reply from %s",
+                              addr_to_string(&sm->smt_source));
                        if (sm_to_para(smc,sm,SMT_P_ECHODATA) == NULL) {
-                               DB_SMT("SMT: ECHODATA missing\n",0,0) ;
+                               DB_SMT("SMT: ECHODATA missing");
                                break ;
                        }
                        if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF]) {
-                               DB_SMT("SMT : ECF test TID ok\n",0,0) ;
+                               DB_SMT("SMT : ECF test TID ok");
                        }
                        else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_UNA]) {
-                               DB_SMT("SMT : ECF test UNA ok\n",0,0) ;
+                               DB_SMT("SMT : ECF test UNA ok");
                        }
                        else if (sm->smt_tid == smc->sm.pend[SMT_TID_ECF_DNA]) {
-                               DB_SMT("SMT : ECF test DNA ok\n",0,0) ;
+                               DB_SMT("SMT : ECF test DNA ok");
                        }
                        else {
-                               DB_SMT("SMT : expected TID %lx, got %lx\n",
-                                       smc->sm.pend[SMT_TID_ECF],
-                                       sm->smt_tid) ;
+                               DB_SMT("SMT : expected TID %lx, got %x",
+                                      smc->sm.pend[SMT_TID_ECF],
+                                      sm->smt_tid);
                        }
                        break ;
                case SMT_REQUEST :
                        smc->mib.priv.fddiPRIVECF_Req_Rx++ ;
                        {
                        if (sm->smt_len && !sm_to_para(smc,sm,SMT_P_ECHODATA)) {
-                       DB_SMT("SMT: ECF with para problem,sending RDF\n",0,0) ;
+                               DB_SMT("SMT: ECF with para problem,sending RDF");
                                smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_LENGTH,
                                        local) ;
                                break ;
                        }
-                       DB_SMT("SMT - sending ECF reply to %s\n",
-                               addr_to_string(&sm->smt_source),0) ;
+                       DB_SMT("SMT - sending ECF reply to %s",
+                              addr_to_string(&sm->smt_source));
 
                        /* set destination addr.  & reply */
                        sm->smt_dest = sm->smt_source ;
@@ -750,7 +750,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
 #ifndef        BOOT
        case SMT_RAF :          /* resource allocation */
 #ifdef ESS
-               DB_ESSN(2,"ESS: RAF frame received\n",0,0) ;
+               DB_ESSN(2, "ESS: RAF frame received");
                fs = ess_raf_received_pack(smc,mb,sm,fs) ;
 #endif
 
@@ -764,7 +764,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
                break ;
        case SMT_ESF :          /* extended service - not supported */
                if (sm->smt_type == SMT_REQUEST) {
-                       DB_SMT("SMT - received ESF, sending RDF\n",0,0) ;
+                       DB_SMT("SMT - received ESF, sending RDF");
                        smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ;
                }
                break ;
@@ -782,7 +782,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
                 */
                if ((sm->smt_class == SMT_PMF_SET) &&
                        !is_individual(&sm->smt_dest)) {
-                       DB_SMT("SMT: ignoring PMF-SET with I/G set\n",0,0) ;
+                       DB_SMT("SMT: ignoring PMF-SET with I/G set");
                        break ;
                }
                smt_pmf_received_pack(smc,mb, local) ;
@@ -798,16 +798,15 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
                 * we need to send a RDF frame according to 8.1.3.1.1,
                 * only if it is a REQUEST.
                 */
-               DB_SMT("SMT : class = %d, send RDF to %s\n",
-                       sm->smt_class, addr_to_string(&sm->smt_source)) ;
+               DB_SMT("SMT : class = %d, send RDF to %s",
+                      sm->smt_class, addr_to_string(&sm->smt_source));
 
                smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ;
                break ;
 #endif
        }
        if (illegal) {
-               DB_SMT("SMT: discarding invalid frame, reason = %d\n",
-                       illegal,0) ;
+               DB_SMT("SMT: discarding invalid frame, reason = %d", illegal);
        }
        smt_free_mbuf(smc,mb) ;
 }
@@ -869,8 +868,8 @@ static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason,
        if (sm->smt_type != SMT_REQUEST)
                return ;
 
-       DB_SMT("SMT: sending RDF to %s,reason = 0x%x\n",
-               addr_to_string(&sm->smt_source),reason) ;
+       DB_SMT("SMT: sending RDF to %s,reason = 0x%x",
+              addr_to_string(&sm->smt_source), reason);
 
 
        /*
@@ -1653,7 +1652,7 @@ int smt_check_para(struct s_smc *smc, struct smt_header   *sm,
        const u_short           *p = list ;
        while (*p) {
                if (!sm_to_para(smc,sm,(int) *p)) {
-                       DB_SMT("SMT: smt_check_para - missing para %x\n",*p,0);
+                       DB_SMT("SMT: smt_check_para - missing para %hx", *p);
                        return -1;
                }
                p++ ;
@@ -1679,11 +1678,11 @@ void *sm_to_para(struct s_smc *smc, struct smt_header *sm, int para)
                p += plen ;
                len -= plen ;
                if (len < 0) {
-                       DB_SMT("SMT : sm_to_para - length error %d\n",plen,0) ;
+                       DB_SMT("SMT : sm_to_para - length error %d", plen);
                        return NULL;
                }
                if ((plen & 3) && (para != SMT_P_ECHODATA)) {
-                       DB_SMT("SMT : sm_to_para - odd length %d\n",plen,0) ;
+                       DB_SMT("SMT : sm_to_para - odd length %d", plen);
                        return NULL;
                }
                if (found)
@@ -1937,7 +1936,7 @@ int smt_action(struct s_smc *smc, int class, int code, int index)
 {
        int     event ;
        int     port ;
-       DB_SMT("SMT: action %d code %d\n",class,code) ;
+       DB_SMT("SMT: action %d code %d", class, code);
        switch(class) {
        case SMT_STATION_ACTION :
                switch(code) {
index 9956680402de74bbd5ccf04761d797f09489e992..4e286c1ba9cd2d58f1bc1a42d027bb8c6b7dc1af 100644 (file)
@@ -173,7 +173,6 @@ static struct s_srf_evc *smt_get_evc(struct s_smc *smc, int code, int index)
 #define THRESHOLD_2    (2*TICKS_PER_SECOND)
 #define THRESHOLD_32   (32*TICKS_PER_SECOND)
 
-#ifdef DEBUG
 static const char * const srf_names[] = {
        "None","MACPathChangeEvent",    "MACNeighborChangeEvent",
        "PORTPathChangeEvent",          "PORTUndesiredConnectionAttemptEvent",
@@ -182,7 +181,6 @@ static const char * const srf_names[] = {
        "MACNotCopiedCondition",        "PORTEBErrorCondition",
        "PORTLerCondition"
 } ;
-#endif
 
 void smt_srf_event(struct s_smc *smc, int code, int index, int cond)
 {
@@ -198,10 +196,10 @@ void smt_srf_event(struct s_smc *smc, int code, int index, int cond)
        }
 
        if (code) {
-               DB_SMT("SRF: %s index %d\n",srf_names[code],index) ;
+               DB_SMT("SRF: %s index %d", srf_names[code], index);
 
                if (!(evc = smt_get_evc(smc,code,index))) {
-                       DB_SMT("SRF : smt_get_evc() failed\n",0,0) ;
+                       DB_SMT("SRF : smt_get_evc() failed");
                        return ;
                }
                /*
@@ -217,7 +215,7 @@ void smt_srf_event(struct s_smc *smc, int code, int index, int cond)
                 */
                smt_set_timestamp(smc,smc->mib.fddiSMTTransitionTimeStamp) ;
                if (SMT_IS_CONDITION(code)) {
-                       DB_SMT("SRF: condition is %s\n",cond ? "ON":"OFF",0) ;
+                       DB_SMT("SRF: condition is %s", cond ? "ON" : "OFF");
                        if (cond) {
                                *evc->evc_cond_state = TRUE ;
                                evc->evc_rep_required = TRUE ;
@@ -414,9 +412,9 @@ static void smt_send_srf(struct s_smc *smc)
        smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
        mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
 
-       DB_SMT("SRF: sending SRF at %p, len %d\n",smt,mb->sm_len) ;
-       DB_SMT("SRF: state SR%d Threshold %d\n",
-               smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ;
+       DB_SMT("SRF: sending SRF at %p, len %d", smt, mb->sm_len);
+       DB_SMT("SRF: state SR%d Threshold %lu",
+              smc->srf.sr_state, smc->srf.SRThreshold / TICKS_PER_SECOND);
 #ifdef DEBUG
        dump_smt(smc,smt,"SRF Send") ;
 #endif
index b77e4ecf3cf2f5836bfe35c5686ef188859fd9d0..b75d9cdcfb0c415c7abeaa97cebeacbc917e1abd 100644 (file)
@@ -57,8 +57,7 @@ static void fjes_raise_intr_rxdata_task(struct work_struct *);
 static void fjes_tx_stall_task(struct work_struct *);
 static void fjes_force_close_task(struct work_struct *);
 static irqreturn_t fjes_intr(int, void*);
-static struct rtnl_link_stats64 *
-fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
+static void fjes_get_stats64(struct net_device *, struct rtnl_link_stats64 *);
 static int fjes_change_mtu(struct net_device *, int);
 static int fjes_vlan_rx_add_vid(struct net_device *, __be16 proto, u16);
 static int fjes_vlan_rx_kill_vid(struct net_device *, __be16 proto, u16);
@@ -782,14 +781,12 @@ static void fjes_tx_retry(struct net_device *netdev)
        netif_tx_wake_queue(queue);
 }
 
-static struct rtnl_link_stats64 *
+static void
 fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
 {
        struct fjes_adapter *adapter = netdev_priv(netdev);
 
        memcpy(stats, &adapter->stats64, sizeof(struct rtnl_link_stats64));
-
-       return stats;
 }
 
 static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
@@ -1158,7 +1155,7 @@ static int fjes_poll(struct napi_struct *napi, int budget)
        }
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                if (adapter->unset_rx_last) {
                        adapter->rx_last_jiffies = jiffies;
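
Two API shifts show up in the fjes hunks: ndo_get_stats64 now returns void (the returned pointer was always the buffer the caller passed in, so it carried no information), and fjes_poll reports its actual work count via napi_complete_done() instead of plain napi_complete(), letting the core make a better decision about re-enabling interrupts. A plain-C model of the signature change, with illustrative names rather than the driver's:

/*
 * Model of the ndo_get_stats64 change: fill the caller's buffer,
 * return nothing.
 */
#include <stdio.h>
#include <string.h>

struct link_stats { unsigned long rx_packets, tx_packets; };
struct device { struct link_stats stats64; };

static void get_stats64(const struct device *dev, struct link_stats *stats)
{
	/* snapshot the counters into the caller-provided structure */
	memcpy(stats, &dev->stats64, sizeof(*stats));
}

int main(void)
{
	struct device dev = { .stats64 = { .rx_packets = 7, .tx_packets = 3 } };
	struct link_stats snap;

	get_stats64(&dev, &snap);
	printf("rx=%lu tx=%lu\n", snap.rx_packets, snap.tx_packets);
	return 0;
}
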
index 99d3df788ce81e6f423c7458466812607df68ed1..bda0c64134508cf5dd786d3f698bd45884dcce57 100644 (file)
@@ -183,7 +183,6 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
                              sizeof(struct gtp0_header);
        struct gtp0_header *gtp0;
        struct pdp_ctx *pctx;
-       int ret = 0;
 
        if (!pskb_may_pull(skb, hdrlen))
                return -1;
@@ -196,26 +195,19 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
        if (gtp0->type != GTP_TPDU)
                return 1;
 
-       rcu_read_lock();
        pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
        if (!pctx) {
                netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
-               ret = -1;
-               goto out_rcu;
+               return 1;
        }
 
        if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
                netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
-               ret = -1;
-               goto out_rcu;
+               return 1;
        }
-       rcu_read_unlock();
 
        /* Get rid of the GTP + UDP headers. */
        return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
-out_rcu:
-       rcu_read_unlock();
-       return ret;
 }
 
 static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
@@ -225,7 +217,6 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
                              sizeof(struct gtp1_header);
        struct gtp1_header *gtp1;
        struct pdp_ctx *pctx;
-       int ret = 0;
 
        if (!pskb_may_pull(skb, hdrlen))
                return -1;
@@ -253,26 +244,19 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
 
        gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
 
-       rcu_read_lock();
        pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
        if (!pctx) {
                netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
-               ret = -1;
-               goto out_rcu;
+               return 1;
        }
 
        if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
                netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
-               ret = -1;
-               goto out_rcu;
+               return 1;
        }
-       rcu_read_unlock();
 
        /* Get rid of the GTP + UDP headers. */
        return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
-out_rcu:
-       rcu_read_unlock();
-       return ret;
 }
 
 static void gtp_encap_disable(struct gtp_dev *gtp)
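
The gtp.c hunks delete the function-local rcu_read_lock()/rcu_read_unlock() pairs and the out_rcu unwind labels, and return 1 directly when the PDP context lookup fails. A plausible reading -- an assumption, not stated in the diff -- is that the UDP encapsulation receive path already runs inside an RCU read-side critical section, making the nested locking redundant; once no local cleanup remains, the gotos collapse to plain returns, as this toy model shows:

/*
 * Toy model of the control-flow simplification. Names and return
 * values are illustrative; in the driver, returning 1 hands the
 * packet back to the stack instead of flagging an error.
 */
#include <stdio.h>

struct ctx { int id; };

static struct ctx table[] = { { 1 }, { 2 } };

static struct ctx *lookup(int id)
{
	for (unsigned i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].id == id)
			return &table[i];
	return NULL;	/* no context for this id */
}

static int recv_packet(int id)
{
	/* caller is assumed to already hold the read-side lock */
	struct ctx *c = lookup(id);

	if (!c)
		return 1;	/* not ours: pass the packet along */
	return 0;		/* consumed */
}

int main(void)
{
	printf("known=%d unknown=%d\n", recv_packet(1), recv_packet(9));
	return 0;
}
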
index 3958adade7eb681d78d8baf301f9f74c30a40cce..d3e73ac158aee6d3958b18618b85f0180f22abf7 100644 (file)
@@ -34,6 +34,7 @@
 
 #define NDIS_OBJECT_TYPE_RSS_CAPABILITIES 0x88
 #define NDIS_OBJECT_TYPE_RSS_PARAMETERS 0x89
+#define NDIS_OBJECT_TYPE_OFFLOAD       0xa7
 
 #define NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2 2
 #define NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2 2
@@ -118,6 +119,7 @@ struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
 
 /* Fwd declaration */
 struct ndis_tcp_ip_checksum_info;
+struct ndis_pkt_8021q_info;
 
 /*
  * Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
@@ -135,8 +137,10 @@ struct hv_netvsc_packet {
        u8 page_buf_cnt;
 
        u16 q_idx;
-       u32 send_buf_index;
+       u16 total_packets;
 
+       u32 total_bytes;
+       u32 send_buf_index;
        u32 total_data_buflen;
 };
 
@@ -155,6 +159,8 @@ enum rndis_device_state {
        RNDIS_DEV_DATAINITIALIZED,
 };
 
+#define NETVSC_HASH_KEYLEN 40
+
 struct rndis_device {
        struct net_device *ndev;
 
@@ -165,14 +171,17 @@ struct rndis_device {
        spinlock_t request_lock;
        struct list_head req_list;
 
-       unsigned char hw_mac_adr[ETH_ALEN];
+       u8 hw_mac_adr[ETH_ALEN];
+       u8 rss_key[NETVSC_HASH_KEYLEN];
+       u16 ind_table[ITAB_NUM];
 };
 
 
 /* Interface */
 struct rndis_message;
 struct netvsc_device;
-int netvsc_device_add(struct hv_device *device, void *additional_info);
+int netvsc_device_add(struct hv_device *device,
+                     const struct netvsc_device_info *info);
 void netvsc_device_remove(struct hv_device *device);
 int netvsc_send(struct hv_device *device,
                struct hv_netvsc_packet *packet,
@@ -181,22 +190,25 @@ int netvsc_send(struct hv_device *device,
                struct sk_buff *skb);
 void netvsc_linkstatus_callback(struct hv_device *device_obj,
                                struct rndis_message *resp);
-int netvsc_recv_callback(struct hv_device *device_obj,
-                       struct hv_netvsc_packet *packet,
-                       void **data,
-                       struct ndis_tcp_ip_checksum_info *csum_info,
-                       struct vmbus_channel *channel,
-                       u16 vlan_tci);
+int netvsc_recv_callback(struct net_device *net,
+                        struct vmbus_channel *channel,
+                        void  *data, u32 len,
+                        const struct ndis_tcp_ip_checksum_info *csum_info,
+                        const struct ndis_pkt_8021q_info *vlan);
 void netvsc_channel_cb(void *context);
 int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
 int rndis_filter_device_add(struct hv_device *dev,
-                       void *additional_info);
-void rndis_filter_device_remove(struct hv_device *dev);
-int rndis_filter_receive(struct hv_device *dev,
-                       struct hv_netvsc_packet *pkt,
-                       void **data,
-                       struct vmbus_channel *channel);
+                           struct netvsc_device_info *info);
+void rndis_filter_device_remove(struct hv_device *dev,
+                               struct netvsc_device *nvdev);
+int rndis_filter_set_rss_param(struct rndis_device *rdev,
+                              const u8 *key, int num_queue);
+int rndis_filter_receive(struct net_device *ndev,
+                        struct netvsc_device *net_dev,
+                        struct hv_device *dev,
+                        struct vmbus_channel *channel,
+                        void *data, u32 buflen);
 
 int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
 int rndis_filter_set_device_mac(struct net_device *ndev, char *mac);
@@ -622,6 +634,7 @@ struct nvsp_message {
 
 #define VRSS_SEND_TAB_SIZE 16
 #define VRSS_CHANNEL_MAX 64
+#define VRSS_CHANNEL_DEFAULT 8
 
 #define RNDIS_MAX_PKT_DEFAULT 8
 #define RNDIS_PKT_ALIGN_DEFAULT 8
@@ -685,8 +698,7 @@ struct net_device_context {
        struct work_struct work;
        u32 msg_enable; /* debug level */
 
-       struct netvsc_stats __percpu *tx_stats;
-       struct netvsc_stats __percpu *rx_stats;
+       u32 tx_checksum_mask;
 
        /* Ethtool settings */
        u8 duplex;
@@ -705,11 +717,21 @@ struct net_device_context {
        u32 vf_serial;
 };
 
+/* Per channel data */
+struct netvsc_channel {
+       struct vmbus_channel *channel;
+       struct multi_send_data msd;
+       struct multi_recv_comp mrc;
+       atomic_t queue_sends;
+
+       struct netvsc_stats tx_stats;
+       struct netvsc_stats rx_stats;
+};
+
 /* Per netvsc device */
 struct netvsc_device {
        u32 nvsp_version;
 
-       atomic_t num_outstanding_sends;
        wait_queue_head_t wait_drain;
        bool destroy;
 
@@ -735,32 +757,25 @@ struct netvsc_device {
 
        struct nvsp_message revoke_packet;
 
-       struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX];
        u32 send_table[VRSS_SEND_TAB_SIZE];
        u32 max_chn;
        u32 num_chn;
        spinlock_t sc_lock; /* Protects num_sc_offered variable */
        u32 num_sc_offered;
-       atomic_t queue_sends[VRSS_CHANNEL_MAX];
 
        /* Holds rndis device info */
        void *extension;
 
        int ring_size;
 
-       /* The primary channel callback buffer */
-       unsigned char *cb_buffer;
-       /* The sub channel callback buffer */
-       unsigned char *sub_cb_buf;
-
-       struct multi_send_data msd[VRSS_CHANNEL_MAX];
        u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
        u32 pkt_align; /* alignment bytes, e.g. 8 */
 
-       struct multi_recv_comp mrc[VRSS_CHANNEL_MAX];
        atomic_t num_outstanding_recvs;
 
        atomic_t open_cnt;
+
+       struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
 };
 
 static inline struct netvsc_device *
@@ -939,7 +954,7 @@ struct ndis_pkt_8021q_info {
        };
 };
 
-struct ndis_oject_header {
+struct ndis_object_header {
        u8 type;
        u8 revision;
        u16 size;
@@ -947,6 +962,9 @@ struct ndis_oject_header {
 
 #define NDIS_OBJECT_TYPE_DEFAULT       0x80
 #define NDIS_OFFLOAD_PARAMETERS_REVISION_3 3
+#define NDIS_OFFLOAD_PARAMETERS_REVISION_2 2
+#define NDIS_OFFLOAD_PARAMETERS_REVISION_1 1
+
 #define NDIS_OFFLOAD_PARAMETERS_NO_CHANGE 0
 #define NDIS_OFFLOAD_PARAMETERS_LSOV2_DISABLED 1
 #define NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED  2
@@ -973,8 +991,135 @@ struct ndis_oject_header {
 #define OID_TCP_CONNECTION_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020F /* query */
 #define OID_OFFLOAD_ENCAPSULATION 0x0101010A /* set/query */
 
+/*
+ * OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES
+ * ndis_type: NDIS_OBJTYPE_OFFLOAD
+ */
+
+#define        NDIS_OFFLOAD_ENCAP_NONE         0x0000
+#define        NDIS_OFFLOAD_ENCAP_NULL         0x0001
+#define        NDIS_OFFLOAD_ENCAP_8023         0x0002
+#define        NDIS_OFFLOAD_ENCAP_8023PQ       0x0004
+#define        NDIS_OFFLOAD_ENCAP_8023PQ_OOB   0x0008
+#define        NDIS_OFFLOAD_ENCAP_RFC1483      0x0010
+
+struct ndis_csum_offload {
+       u32     ip4_txenc;
+       u32     ip4_txcsum;
+#define        NDIS_TXCSUM_CAP_IP4OPT          0x001
+#define        NDIS_TXCSUM_CAP_TCP4OPT         0x004
+#define        NDIS_TXCSUM_CAP_TCP4            0x010
+#define        NDIS_TXCSUM_CAP_UDP4            0x040
+#define        NDIS_TXCSUM_CAP_IP4             0x100
+
+#define NDIS_TXCSUM_ALL_TCP4   (NDIS_TXCSUM_CAP_TCP4 | NDIS_TXCSUM_CAP_TCP4OPT)
+
+       u32     ip4_rxenc;
+       u32     ip4_rxcsum;
+#define        NDIS_RXCSUM_CAP_IP4OPT          0x001
+#define        NDIS_RXCSUM_CAP_TCP4OPT         0x004
+#define        NDIS_RXCSUM_CAP_TCP4            0x010
+#define        NDIS_RXCSUM_CAP_UDP4            0x040
+#define        NDIS_RXCSUM_CAP_IP4             0x100
+       u32     ip6_txenc;
+       u32     ip6_txcsum;
+#define        NDIS_TXCSUM_CAP_IP6EXT          0x001
+#define        NDIS_TXCSUM_CAP_TCP6OPT         0x004
+#define        NDIS_TXCSUM_CAP_TCP6            0x010
+#define        NDIS_TXCSUM_CAP_UDP6            0x040
+       u32     ip6_rxenc;
+       u32     ip6_rxcsum;
+#define        NDIS_RXCSUM_CAP_IP6EXT          0x001
+#define        NDIS_RXCSUM_CAP_TCP6OPT         0x004
+#define        NDIS_RXCSUM_CAP_TCP6            0x010
+#define        NDIS_RXCSUM_CAP_UDP6            0x040
+
+#define NDIS_TXCSUM_ALL_TCP6   (NDIS_TXCSUM_CAP_TCP6 |         \
+                                NDIS_TXCSUM_CAP_TCP6OPT |      \
+                                NDIS_TXCSUM_CAP_IP6EXT)
+};
+
+struct ndis_lsov1_offload {
+       u32     encap;
+       u32     maxsize;
+       u32     minsegs;
+       u32     opts;
+};
+
+struct ndis_ipsecv1_offload {
+       u32     encap;
+       u32     ah_esp;
+       u32     xport_tun;
+       u32     ip4_opts;
+       u32     flags;
+       u32     ip4_ah;
+       u32     ip4_esp;
+};
+
+struct ndis_lsov2_offload {
+       u32     ip4_encap;
+       u32     ip4_maxsz;
+       u32     ip4_minsg;
+       u32     ip6_encap;
+       u32     ip6_maxsz;
+       u32     ip6_minsg;
+       u32     ip6_opts;
+#define        NDIS_LSOV2_CAP_IP6EXT           0x001
+#define        NDIS_LSOV2_CAP_TCP6OPT          0x004
+
+#define NDIS_LSOV2_CAP_IP6             (NDIS_LSOV2_CAP_IP6EXT | \
+                                        NDIS_LSOV2_CAP_TCP6OPT)
+};
+
+struct ndis_ipsecv2_offload {
+       u32     encap;
+       u16     ip6;
+       u16     ip4opt;
+       u16     ip6ext;
+       u16     ah;
+       u16     esp;
+       u16     ah_esp;
+       u16     xport;
+       u16     tun;
+       u16     xport_tun;
+       u16     lso;
+       u16     extseq;
+       u32     udp_esp;
+       u32     auth;
+       u32     crypto;
+       u32     sa_caps;
+};
+
+struct ndis_rsc_offload {
+       u16     ip4;
+       u16     ip6;
+};
+
+struct ndis_encap_offload {
+       u32     flags;
+       u32     maxhdr;
+};
+
+struct ndis_offload {
+       struct ndis_object_header       header;
+       struct ndis_csum_offload        csum;
+       struct ndis_lsov1_offload       lsov1;
+       struct ndis_ipsecv1_offload     ipsecv1;
+       struct ndis_lsov2_offload       lsov2;
+       u32                             flags;
+       /* NDIS >= 6.1 */
+       struct ndis_ipsecv2_offload     ipsecv2;
+       /* NDIS >= 6.30 */
+       struct ndis_rsc_offload         rsc;
+       struct ndis_encap_offload       encap_gre;
+};
+
+#define        NDIS_OFFLOAD_SIZE               sizeof(struct ndis_offload)
+#define        NDIS_OFFLOAD_SIZE_6_0           offsetof(struct ndis_offload, ipsecv2)
+#define        NDIS_OFFLOAD_SIZE_6_1           offsetof(struct ndis_offload, rsc)
+
 struct ndis_offload_params {
-       struct ndis_oject_header header;
+       struct ndis_object_header header;
        u8 ip_v4_csum;
        u8 tcp_ip_v4_csum;
        u8 udp_ip_v4_csum;
@@ -1301,15 +1446,10 @@ struct rndis_message {
 #define NDIS_PACKET_TYPE_FUNCTIONAL    0x00000400
 #define NDIS_PACKET_TYPE_MAC_FRAME     0x00000800
 
-#define INFO_IPV4       2
-#define INFO_IPV6       4
-#define INFO_TCP        2
-#define INFO_UDP        4
-
 #define TRANSPORT_INFO_NOT_IP   0
-#define TRANSPORT_INFO_IPV4_TCP ((INFO_IPV4 << 16) | INFO_TCP)
-#define TRANSPORT_INFO_IPV4_UDP ((INFO_IPV4 << 16) | INFO_UDP)
-#define TRANSPORT_INFO_IPV6_TCP ((INFO_IPV6 << 16) | INFO_TCP)
-#define TRANSPORT_INFO_IPV6_UDP ((INFO_IPV6 << 16) | INFO_UDP)
+#define TRANSPORT_INFO_IPV4_TCP 0x01
+#define TRANSPORT_INFO_IPV4_UDP 0x02
+#define TRANSPORT_INFO_IPV6_TCP 0x10
+#define TRANSPORT_INFO_IPV6_UDP 0x20
 
 #endif /* _HYPERV_NET_H */
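
The hyperv_net.h changes fold several parallel per-channel arrays (chn_table[], queue_sends[], msd[], mrc[], plus the separate callback buffers and device-wide stats) into a single struct netvsc_channel chan_table[VRSS_CHANNEL_MAX], so everything one channel owns sits in one struct. A self-contained model of the pattern; field names are illustrative, not the driver's exact layout:

/*
 * Array-of-structs consolidation: one struct per channel instead of
 * one array per field.
 */
#include <stdio.h>

#define CHANNEL_MAX 64

struct chan_stats { unsigned long long packets, bytes; };

struct channel {
	void *ring;			/* was: chn_table[i] */
	int queue_sends;		/* was: queue_sends[i] */
	struct chan_stats tx_stats;	/* was: device-wide stats */
	struct chan_stats rx_stats;
};

struct device_state {
	struct channel chan_table[CHANNEL_MAX];	/* one struct per channel */
};

int main(void)
{
	static struct device_state dev;	/* zero-initialized */

	dev.chan_table[0].queue_sends++;
	dev.chan_table[0].tx_stats.packets += 2;
	printf("q0 sends=%d packets=%llu\n",
	       dev.chan_table[0].queue_sends,
	       dev.chan_table[0].tx_stats.packets);
	return 0;
}
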
index 86e5749226ef4cf65d6070bca1ab0d4be35bf2e0..fd6ebbefd919344e0c7dab94316488a43bc3e2bc 100644 (file)
@@ -67,14 +67,8 @@ static struct netvsc_device *alloc_net_device(void)
        if (!net_device)
                return NULL;
 
-       net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
-       if (!net_device->cb_buffer) {
-               kfree(net_device);
-               return NULL;
-       }
-
-       net_device->mrc[0].buf = vzalloc(NETVSC_RECVSLOT_MAX *
-                                        sizeof(struct recv_comp_data));
+       net_device->chan_table[0].mrc.buf
+               = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
 
        init_waitqueue_head(&net_device->wait_drain);
        net_device->destroy = false;
@@ -91,35 +85,28 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
        int i;
 
        for (i = 0; i < VRSS_CHANNEL_MAX; i++)
-               vfree(nvdev->mrc[i].buf);
+               vfree(nvdev->chan_table[i].mrc.buf);
 
-       kfree(nvdev->cb_buffer);
        kfree(nvdev);
 }
 
-static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
-{
-       struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
 
-       if (net_device && net_device->destroy)
-               net_device = NULL;
+static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
+                                      u16 q_idx)
+{
+       const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
 
-       return net_device;
+       return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
+               atomic_read(&nvchan->queue_sends) == 0;
 }
 
-static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
+static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
 {
        struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
 
-       if (!net_device)
-               goto get_in_err;
-
-       if (net_device->destroy &&
-           atomic_read(&net_device->num_outstanding_sends) == 0 &&
-           atomic_read(&net_device->num_outstanding_recvs) == 0)
+       if (net_device && net_device->destroy)
                net_device = NULL;
 
-get_in_err:
        return net_device;
 }
 
@@ -584,7 +571,6 @@ void netvsc_device_remove(struct hv_device *device)
        vmbus_close(device->channel);
 
        /* Release all resources */
-       vfree(net_device->sub_cb_buf);
        free_netvsc_device(net_device);
 }
 
@@ -620,29 +606,35 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
        struct net_device *ndev = hv_get_drvdata(device);
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct vmbus_channel *channel = device->channel;
-       int num_outstanding_sends;
        u16 q_idx = 0;
        int queue_sends;
 
        /* Notify the layer above us */
        if (likely(skb)) {
-               struct hv_netvsc_packet *nvsc_packet
+               const struct hv_netvsc_packet *packet
                        = (struct hv_netvsc_packet *)skb->cb;
-               u32 send_index = nvsc_packet->send_buf_index;
+               u32 send_index = packet->send_buf_index;
+               struct netvsc_stats *tx_stats;
 
                if (send_index != NETVSC_INVALID_INDEX)
                        netvsc_free_send_slot(net_device, send_index);
-               q_idx = nvsc_packet->q_idx;
+               q_idx = packet->q_idx;
                channel = incoming_channel;
 
+               tx_stats = &net_device->chan_table[q_idx].tx_stats;
+
+               u64_stats_update_begin(&tx_stats->syncp);
+               tx_stats->packets += packet->total_packets;
+               tx_stats->bytes += packet->total_bytes;
+               u64_stats_update_end(&tx_stats->syncp);
+
                dev_consume_skb_any(skb);
        }
 
-       num_outstanding_sends =
-               atomic_dec_return(&net_device->num_outstanding_sends);
-       queue_sends = atomic_dec_return(&net_device->queue_sends[q_idx]);
+       queue_sends =
+               atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
 
-       if (net_device->destroy && num_outstanding_sends == 0)
+       if (net_device->destroy && queue_sends == 0)
                wake_up(&net_device->wait_drain);
 
        if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
@@ -688,27 +680,15 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
 
 static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
 {
-       unsigned long index;
-       u32 max_words = net_device->map_words;
-       unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
-       u32 section_cnt = net_device->send_section_cnt;
-       int ret_val = NETVSC_INVALID_INDEX;
-       int i;
-       int prev_val;
-
-       for (i = 0; i < max_words; i++) {
-               if (!~(map_addr[i]))
-                       continue;
-               index = ffz(map_addr[i]);
-               prev_val = sync_test_and_set_bit(index, &map_addr[i]);
-               if (prev_val)
-                       continue;
-               if ((index + (i * BITS_PER_LONG)) >= section_cnt)
-                       break;
-               ret_val = (index + (i * BITS_PER_LONG));
-               break;
+       unsigned long *map_addr = net_device->send_section_map;
+       unsigned int i;
+
+       for_each_clear_bit(i, map_addr, net_device->map_words) {
+               if (sync_test_and_set_bit(i, map_addr) == 0)
+                       return i;
        }
-       return ret_val;
+
+       return NETVSC_INVALID_INDEX;
 }
 
 static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
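
The rewritten netvsc_get_next_send_section() is the standard lock-free bitmap allocator: scan optimistically for a clear bit, then claim it with an atomic test-and-set, moving on if another CPU raced to the same slot. for_each_clear_bit() and sync_test_and_set_bit() are the kernel primitives; the standalone sketch below approximates them with GCC atomics:

#include <stdio.h>
#include <limits.h>

#define NSLOTS		32
#define INVALID_INDEX	UINT_MAX

static unsigned long slot_map;	/* bit set => send section in use */

static unsigned int claim_slot(void)
{
	unsigned long old, bit;
	unsigned int i;

	for (i = 0; i < NSLOTS; i++) {
		bit = 1UL << i;
		if (slot_map & bit)	/* optimistic scan, like for_each_clear_bit */
			continue;
		/* atomic test-and-set: if the old bit was clear, we own the slot */
		old = __atomic_fetch_or(&slot_map, bit, __ATOMIC_SEQ_CST);
		if (!(old & bit))
			return i;
	}
	return INVALID_INDEX;
}

int main(void)
{
	printf("claimed %u\n", claim_slot());	/* 0 */
	printf("claimed %u\n", claim_slot());	/* 1 */
	return 0;
}
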
@@ -765,9 +745,11 @@ static inline int netvsc_send_pkt(
        struct sk_buff *skb)
 {
        struct nvsp_message nvmsg;
-       u16 q_idx = packet->q_idx;
-       struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
+       struct netvsc_channel *nvchan
+               = &net_device->chan_table[packet->q_idx];
+       struct vmbus_channel *out_channel = nvchan->channel;
        struct net_device *ndev = hv_get_drvdata(device);
+       struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
        u64 req_id;
        int ret;
        struct hv_page_buffer *pgbuf;
@@ -827,23 +809,14 @@ static inline int netvsc_send_pkt(
        }
 
        if (ret == 0) {
-               atomic_inc(&net_device->num_outstanding_sends);
-               atomic_inc(&net_device->queue_sends[q_idx]);
-
-               if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
-                       netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
+               atomic_inc_return(&nvchan->queue_sends);
 
-                       if (atomic_read(&net_device->
-                               queue_sends[q_idx]) < 1)
-                               netif_tx_wake_queue(netdev_get_tx_queue(
-                                                   ndev, q_idx));
-               }
+               if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
+                       netif_tx_stop_queue(txq);
        } else if (ret == -EAGAIN) {
-               netif_tx_stop_queue(netdev_get_tx_queue(
-                                   ndev, q_idx));
-               if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
-                       netif_tx_wake_queue(netdev_get_tx_queue(
-                                           ndev, q_idx));
+               netif_tx_stop_queue(txq);
+               if (atomic_read(&nvchan->queue_sends) < 1) {
+                       netif_tx_wake_queue(txq);
                        ret = -ENOSPC;
                }
        } else {
@@ -874,8 +847,7 @@ int netvsc_send(struct hv_device *device,
 {
        struct netvsc_device *net_device;
        int ret = 0;
-       struct vmbus_channel *out_channel;
-       u16 q_idx = packet->q_idx;
+       struct netvsc_channel *nvchan;
        u32 pktlen = packet->total_data_buflen, msd_len = 0;
        unsigned int section_index = NETVSC_INVALID_INDEX;
        struct multi_send_data *msdp;
@@ -895,8 +867,7 @@ int netvsc_send(struct hv_device *device,
        if (!net_device->send_section_map)
                return -EAGAIN;
 
-       out_channel = net_device->chn_table[q_idx];
-
+       nvchan = &net_device->chan_table[packet->q_idx];
        packet->send_buf_index = NETVSC_INVALID_INDEX;
        packet->cp_partial = false;
 
@@ -908,9 +879,8 @@ int netvsc_send(struct hv_device *device,
                goto send_now;
        }
 
-       msdp = &net_device->msd[q_idx];
-
        /* batch packets in send buffer if possible */
+       msdp = &nvchan->msd;
        if (msdp->pkt)
                msd_len = msdp->pkt->total_data_buflen;
 
@@ -950,6 +920,11 @@ int netvsc_send(struct hv_device *device,
                        packet->total_data_buflen += msd_len;
                }
 
+               if (msdp->pkt) {
+                       packet->total_packets += msdp->pkt->total_packets;
+                       packet->total_bytes += msdp->pkt->total_bytes;
+               }
+
                if (msdp->skb)
                        dev_consume_skb_any(msdp->skb);
 
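These two new lines are the bookkeeping behind the stats change in netvsc_send_tx_complete() above: because netvsc coalesces small packets into a shared send-buffer section, an aggregated packet must carry the frame and byte counts of everything it absorbed so the completion handler can credit them all in one go. Reduced to the two counters involved (a sketch, not driver code):

#include <stdio.h>

struct pkt {
	unsigned int total_packets;
	unsigned long long total_bytes;
};

/* fold a previously batched packet's counts into the one replacing it */
static void absorb(struct pkt *cur, const struct pkt *batched)
{
	cur->total_packets += batched->total_packets;
	cur->total_bytes += batched->total_bytes;
}

int main(void)
{
	struct pkt batched = { 3, 4500 };	/* three 1500-byte frames */
	struct pkt cur = { 1, 1500 };

	absorb(&cur, &batched);
	/* tx completion later credits all 4 packets / 6000 bytes at once */
	printf("%u packets, %llu bytes\n", cur.total_packets, cur.total_bytes);
	return 0;
}
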
@@ -1011,8 +986,9 @@ static int netvsc_send_recv_completion(struct vmbus_channel *channel,
 static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
                                        u32 *filled, u32 *avail)
 {
-       u32 first = nvdev->mrc[q_idx].first;
-       u32 next = nvdev->mrc[q_idx].next;
+       struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
+       u32 first = mrc->first;
+       u32 next = mrc->next;
 
        *filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next :
                  next - first;
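
count_recv_comp_slot() is plain circular-buffer arithmetic over the per-channel mrc ring. A standalone version of the same math, assuming the usual one-reserved-slot convention (so that first == next always means empty), which matches the avail computation just past this hunk:

#include <stdio.h>

#define RECVSLOT_MAX 8	/* small ring for demonstration */

/* first: oldest filled slot; next: where the next completion is stored */
static void count_slots(unsigned int first, unsigned int next,
			unsigned int *filled, unsigned int *avail)
{
	*filled = (first > next) ? RECVSLOT_MAX - first + next
				 : next - first;
	/* one slot stays unused so a full ring is distinguishable from empty */
	*avail = RECVSLOT_MAX - *filled - 1;
}

int main(void)
{
	unsigned int filled, avail;

	count_slots(6, 2, &filled, &avail);	/* wrapped case */
	printf("filled=%u avail=%u\n", filled, avail);	/* filled=4 avail=3 */
	count_slots(2, 6, &filled, &avail);	/* linear case */
	printf("filled=%u avail=%u\n", filled, avail);	/* filled=4 avail=3 */
	return 0;
}
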
@@ -1024,26 +1000,26 @@ static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
 static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device
                                                         *nvdev, u16 q_idx)
 {
+       struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
        u32 filled, avail;
 
-       if (!nvdev->mrc[q_idx].buf)
+       if (unlikely(!mrc->buf))
                return NULL;
 
        count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
        if (!filled)
                return NULL;
 
-       return nvdev->mrc[q_idx].buf + nvdev->mrc[q_idx].first *
-              sizeof(struct recv_comp_data);
+       return mrc->buf + mrc->first * sizeof(struct recv_comp_data);
 }
 
 /* Put the first filled slot back to available pool */
 static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx)
 {
+       struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
        int num_recv;
 
-       nvdev->mrc[q_idx].first = (nvdev->mrc[q_idx].first + 1) %
-                                 NETVSC_RECVSLOT_MAX;
+       mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX;
 
        num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs);
 
@@ -1078,13 +1054,14 @@ static void netvsc_chk_recv_comp(struct netvsc_device *nvdev,
 static inline struct recv_comp_data *get_recv_comp_slot(
        struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx)
 {
+       struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
        u32 filled, avail, next;
        struct recv_comp_data *rcd;
 
-       if (!nvdev->recv_section)
+       if (unlikely(!nvdev->recv_section))
                return NULL;
 
-       if (!nvdev->mrc[q_idx].buf)
+       if (unlikely(!mrc->buf))
                return NULL;
 
        if (atomic_read(&nvdev->num_outstanding_recvs) >
@@ -1095,60 +1072,44 @@ static inline struct recv_comp_data *get_recv_comp_slot(
        if (!avail)
                return NULL;
 
-       next = nvdev->mrc[q_idx].next;
-       rcd = nvdev->mrc[q_idx].buf + next * sizeof(struct recv_comp_data);
-       nvdev->mrc[q_idx].next = (next + 1) % NETVSC_RECVSLOT_MAX;
+       next = mrc->next;
+       rcd = mrc->buf + next * sizeof(struct recv_comp_data);
+       mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX;
 
        atomic_inc(&nvdev->num_outstanding_recvs);
 
        return rcd;
 }
 
-static void netvsc_receive(struct netvsc_device *net_device,
-                       struct vmbus_channel *channel,
-                       struct hv_device *device,
-                       struct vmpacket_descriptor *packet)
+static void netvsc_receive(struct net_device *ndev,
+                  struct netvsc_device *net_device,
+                  struct net_device_context *net_device_ctx,
+                  struct hv_device *device,
+                  struct vmbus_channel *channel,
+                  struct vmtransfer_page_packet_header *vmxferpage_packet,
+                  struct nvsp_message *nvsp)
 {
-       struct vmtransfer_page_packet_header *vmxferpage_packet;
-       struct nvsp_message *nvsp_packet;
-       struct hv_netvsc_packet nv_pkt;
-       struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
+       char *recv_buf = net_device->recv_buf;
        u32 status = NVSP_STAT_SUCCESS;
        int i;
        int count = 0;
-       struct net_device *ndev = hv_get_drvdata(device);
-       void *data;
        int ret;
        struct recv_comp_data *rcd;
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
 
-       /*
-        * All inbound packets other than send completion should be xfer page
-        * packet
-        */
-       if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
-               netdev_err(ndev, "Unknown packet type received - %d\n",
-                          packet->type);
-               return;
-       }
-
-       nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
-                       (packet->offset8 << 3));
-
        /* Make sure this is a valid nvsp packet */
-       if (nvsp_packet->hdr.msg_type !=
-           NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
-               netdev_err(ndev, "Unknown nvsp packet type received-"
-                       " %d\n", nvsp_packet->hdr.msg_type);
+       if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
+               netif_err(net_device_ctx, rx_err, ndev,
+                         "Unknown nvsp packet type received %u\n",
+                         nvsp->hdr.msg_type);
                return;
        }
 
-       vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
-
-       if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
-               netdev_err(ndev, "Invalid xfer page set id - "
-                          "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
-                          vmxferpage_packet->xfer_pageset_id);
+       if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
+               netif_err(net_device_ctx, rx_err, ndev,
+                         "Invalid xfer page set id - expecting %x got %x\n",
+                         NETVSC_RECEIVE_BUFFER_ID,
+                         vmxferpage_packet->xfer_pageset_id);
                return;
        }
 
@@ -1156,18 +1117,16 @@ static void netvsc_receive(struct netvsc_device *net_device,
 
        /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
        for (i = 0; i < count; i++) {
-               /* Initialize the netvsc packet */
-               data = (void *)((unsigned long)net_device->
-                       recv_buf + vmxferpage_packet->ranges[i].byte_offset);
-               netvsc_packet->total_data_buflen =
-                                       vmxferpage_packet->ranges[i].byte_count;
+               void *data = recv_buf
+                       + vmxferpage_packet->ranges[i].byte_offset;
+               u32 buflen = vmxferpage_packet->ranges[i].byte_count;
 
                /* Pass it to the upper layer */
-               status = rndis_filter_receive(device, netvsc_packet, &data,
-                                             channel);
+               status = rndis_filter_receive(ndev, net_device, device,
+                                             channel, data, buflen);
        }
 
-       if (!net_device->mrc[q_idx].buf) {
+       if (!net_device->chan_table[q_idx].mrc.buf) {
                ret = netvsc_send_recv_completion(channel,
                                                  vmxferpage_packet->d.trans_id,
                                                  status);
@@ -1243,11 +1202,10 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
                                   u64 request_id,
                                   struct vmpacket_descriptor *desc)
 {
-       struct nvsp_message *nvmsg;
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
-
-       nvmsg = (struct nvsp_message *)((unsigned long)
-               desc + (desc->offset8 << 3));
+       struct nvsp_message *nvmsg
+               = (struct nvsp_message *)((unsigned long)desc
+                                         + (desc->offset8 << 3));
 
        switch (desc->type) {
        case VM_PKT_COMP:
@@ -1255,7 +1213,10 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
                break;
 
        case VM_PKT_DATA_USING_XFER_PAGES:
-               netvsc_receive(net_device, channel, device, desc);
+               netvsc_receive(ndev, net_device, net_device_ctx,
+                              device, channel,
+                              (struct vmtransfer_page_packet_header *)desc,
+                              nvmsg);
                break;
 
        case VM_PKT_DATA_INBAND:
@@ -1271,16 +1232,11 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
 
 void netvsc_channel_cb(void *context)
 {
-       int ret;
-       struct vmbus_channel *channel = (struct vmbus_channel *)context;
+       struct vmbus_channel *channel = context;
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
        struct hv_device *device;
        struct netvsc_device *net_device;
-       u32 bytes_recvd;
-       u64 request_id;
        struct vmpacket_descriptor *desc;
-       unsigned char *buffer;
-       int bufferlen = NETVSC_PACKET_SIZE;
        struct net_device *ndev;
        bool need_to_commit = false;
 
@@ -1289,74 +1245,28 @@ void netvsc_channel_cb(void *context)
        else
                device = channel->device_obj;
 
-       net_device = get_inbound_net_device(device);
-       if (!net_device)
-               return;
        ndev = hv_get_drvdata(device);
-       buffer = get_per_channel_state(channel);
+       if (unlikely(!ndev))
+               return;
+
+       net_device = net_device_to_netvsc_device(ndev);
+       if (unlikely(net_device->destroy) &&
+           netvsc_channel_idle(net_device, q_idx))
+               return;
 
        /* commit_rd_index() -> hv_signal_on_read() needs this. */
        init_cached_read_index(channel);
 
-       do {
-               desc = get_next_pkt_raw(channel);
-               if (desc != NULL) {
-                       netvsc_process_raw_pkt(device,
-                                              channel,
-                                              net_device,
-                                              ndev,
-                                              desc->trans_id,
-                                              desc);
-
-                       put_pkt_raw(channel, desc);
-                       need_to_commit = true;
-                       continue;
-               }
-               if (need_to_commit) {
-                       need_to_commit = false;
-                       commit_rd_index(channel);
-               }
-
-               ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
-                                          &bytes_recvd, &request_id);
-               if (ret == 0) {
-                       if (bytes_recvd > 0) {
-                               desc = (struct vmpacket_descriptor *)buffer;
-                               netvsc_process_raw_pkt(device,
-                                                      channel,
-                                                      net_device,
-                                                      ndev,
-                                                      request_id,
-                                                      desc);
-                       } else {
-                               /*
-                                * We are done for this pass.
-                                */
-                               break;
-                       }
-
-               } else if (ret == -ENOBUFS) {
-                       if (bufferlen > NETVSC_PACKET_SIZE)
-                               kfree(buffer);
-                       /* Handle large packet */
-                       buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
-                       if (buffer == NULL) {
-                               /* Try again next time around */
-                               netdev_err(ndev,
-                                          "unable to allocate buffer of size "
-                                          "(%d)!!\n", bytes_recvd);
-                               break;
-                       }
-
-                       bufferlen = bytes_recvd;
-               }
-
-               init_cached_read_index(channel);
+       while ((desc = get_next_pkt_raw(channel)) != NULL) {
+               netvsc_process_raw_pkt(device, channel, net_device,
+                                      ndev, desc->trans_id, desc);
 
-       } while (1);
+               put_pkt_raw(channel, desc);
+               need_to_commit = true;
+       }
 
-       if (bufferlen > NETVSC_PACKET_SIZE)
-               kfree(buffer);
+       if (need_to_commit)
+               commit_rd_index(channel);
 
        netvsc_chk_recv_comp(net_device, channel, q_idx);
 }
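
The callback rewrite above deletes the copy-out path entirely: no more vmbus_recvpacket_raw() into a preallocated cb_buffer, and no GFP_ATOMIC reallocation for oversized packets. Descriptors are processed in place and the ring's read index is published once per batch. The shape of that consumer loop in miniature, with get_next_pkt_raw()/put_pkt_raw()/commit_rd_index() reduced to toy equivalents:

#include <stdio.h>

struct desc { int id; };	/* id 0 acts as the "ring empty" sentinel */

static struct desc ring[4] = { {1}, {2}, {3}, {0} };
static unsigned int rd_cached, rd_committed;

/* peek at the next unread descriptor without consuming it */
static struct desc *get_next_pkt(void)
{
	return ring[rd_cached].id ? &ring[rd_cached] : NULL;
}

static void put_pkt(void)   { rd_cached = (rd_cached + 1) % 4; }
static void commit_rd(void) { rd_committed = rd_cached; /* + signal host */ }

int main(void)
{
	struct desc *d;
	int need_to_commit = 0;

	while ((d = get_next_pkt()) != NULL) {
		printf("processing packet %d in place\n", d->id);
		put_pkt();
		need_to_commit = 1;
	}
	/* publish the new read index once per batch, not per packet */
	if (need_to_commit)
		commit_rd();
	printf("read index committed at %u\n", rd_committed);
	return 0;
}
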
@@ -1365,11 +1275,11 @@ void netvsc_channel_cb(void *context)
  * netvsc_device_add - Callback when the device belonging to this
  * driver is added
  */
-int netvsc_device_add(struct hv_device *device, void *additional_info)
+int netvsc_device_add(struct hv_device *device,
+                     const struct netvsc_device_info *device_info)
 {
        int i, ret = 0;
-       int ring_size =
-       ((struct netvsc_device_info *)additional_info)->ring_size;
+       int ring_size = device_info->ring_size;
        struct netvsc_device *net_device;
        struct net_device *ndev = hv_get_drvdata(device);
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -1380,8 +1290,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
 
        net_device->ring_size = ring_size;
 
-       set_per_channel_state(device->channel, net_device->cb_buffer);
-
        /* Open the channel */
        ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
                         ring_size * PAGE_SIZE, NULL, 0,
@@ -1400,7 +1308,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
         * opened.
         */
        for (i = 0; i < VRSS_CHANNEL_MAX; i++)
-               net_device->chn_table[i] = device->channel;
+               net_device->chan_table[i].channel = device->channel;
 
        /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
         * populated.
drivers/net/hyperv/netvsc_drv.c
index fcab8019dda08ad430d126f9f044f39291a7a154..2d3cdb026a9959bf611425d7a6ece54f8d2e2abf 100644 (file)
 
 #define RING_SIZE_MIN 64
 #define LINKCHANGE_INT (2 * HZ)
-#define NETVSC_HW_FEATURES     (NETIF_F_RXCSUM | \
-                                NETIF_F_SG | \
-                                NETIF_F_TSO | \
-                                NETIF_F_TSO6 | \
-                                NETIF_F_HW_CSUM)
-
-/* Restrict GSO size to account for NVGRE */
-#define NETVSC_GSO_MAX_SIZE    62768
 
 static int ring_size = 128;
 module_param(ring_size, int, S_IRUGO);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
 
-static int max_num_vrss_chns = 8;
-
 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
@@ -145,7 +135,7 @@ static int netvsc_close(struct net_device *net)
        while (true) {
                aread = 0;
                for (i = 0; i < nvdev->num_chn; i++) {
-                       chn = nvdev->chn_table[i];
+                       chn = nvdev->chan_table[i].channel;
                        if (!chn)
                                continue;
 
@@ -201,22 +191,41 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
        return ppi;
 }
 
+/*
+ * Select queue for transmit.
+ *
+ * If a valid queue has already been assigned, then use that.
+ * Otherwise compute tx queue based on hash and the send table.
+ *
+ * This is basically similar to default (__netdev_pick_tx) with the added step
+ * of using the host send_table when no other queue has been assigned.
+ *
+ * TODO support XPS - but get_xps_queue not exported
+ */
 static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
                        void *accel_priv, select_queue_fallback_t fallback)
 {
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
-       u32 hash;
-       u16 q_idx = 0;
+       struct sock *sk = skb->sk;
+       int q_idx = sk_tx_queue_get(sk);
 
-       if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
-               return 0;
+       if (q_idx < 0 || skb->ooo_okay ||
+           q_idx >= ndev->real_num_tx_queues) {
+               u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
+               int new_idx;
+
+               new_idx = nvsc_dev->send_table[hash]
+                       % nvsc_dev->num_chn;
+
+               if (q_idx != new_idx && sk &&
+                   sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
+                       sk_tx_queue_set(sk, new_idx);
 
-       hash = skb_get_hash(skb);
-       q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
-               ndev->real_num_tx_queues;
+               q_idx = new_idx;
+       }
 
-       if (!nvsc_dev->chn_table[q_idx])
+       if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
                q_idx = 0;
 
        return q_idx;
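
The new netvsc_select_queue() hashes into the host-supplied send_table, folds the entry into the channel count, and caches the verdict in the socket so established flows skip the hash on subsequent packets. A reduced model of that flow; the table contents and hash value are invented:

#include <stdio.h>

#define SEND_TAB_SIZE 16

static const unsigned int send_table[SEND_TAB_SIZE] = {
	0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3
};

static unsigned int pick_queue(unsigned int flow_hash, unsigned int num_chn,
			       int *sk_cached_queue)
{
	int q = *sk_cached_queue;

	if (q < 0 || q >= (int)num_chn) {
		q = send_table[flow_hash % SEND_TAB_SIZE] % num_chn;
		*sk_cached_queue = q;	/* remembered, like sk_tx_queue_set() */
	}
	return q;
}

int main(void)
{
	int cached = -1;	/* no queue recorded on the socket yet */

	printf("first packet  -> queue %u\n", pick_queue(0x2a, 4, &cached));
	printf("second packet -> queue %u\n", pick_queue(0x2a, 4, &cached));
	return 0;
}
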
@@ -323,33 +332,25 @@ static int netvsc_get_slots(struct sk_buff *skb)
        return slots + frag_slots;
 }
 
-static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
+static u32 net_checksum_info(struct sk_buff *skb)
 {
-       u32 ret_val = TRANSPORT_INFO_NOT_IP;
-
-       if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
-               (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
-               goto not_ip;
-       }
+       if (skb->protocol == htons(ETH_P_IP)) {
+               struct iphdr *ip = ip_hdr(skb);
 
-       *trans_off = skb_transport_offset(skb);
-
-       if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
-               struct iphdr *iphdr = ip_hdr(skb);
-
-               if (iphdr->protocol == IPPROTO_TCP)
-                       ret_val = TRANSPORT_INFO_IPV4_TCP;
-               else if (iphdr->protocol == IPPROTO_UDP)
-                       ret_val = TRANSPORT_INFO_IPV4_UDP;
+               if (ip->protocol == IPPROTO_TCP)
+                       return TRANSPORT_INFO_IPV4_TCP;
+               else if (ip->protocol == IPPROTO_UDP)
+                       return TRANSPORT_INFO_IPV4_UDP;
        } else {
-               if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
-                       ret_val = TRANSPORT_INFO_IPV6_TCP;
+               struct ipv6hdr *ip6 = ipv6_hdr(skb);
+
+               if (ip6->nexthdr == IPPROTO_TCP)
+                       return TRANSPORT_INFO_IPV6_TCP;
                else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
-                       ret_val = TRANSPORT_INFO_IPV6_UDP;
+                       return TRANSPORT_INFO_IPV6_UDP;
        }
 
-not_ip:
-       return ret_val;
+       return TRANSPORT_INFO_NOT_IP;
 }
 
 static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
@@ -362,11 +363,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        struct rndis_packet *rndis_pkt;
        u32 rndis_msg_size;
        struct rndis_per_packet_info *ppi;
-       struct ndis_tcp_ip_checksum_info *csum_info;
-       int  hdr_offset;
-       u32 net_trans_info;
        u32 hash;
-       u32 skb_length;
        struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
        struct hv_page_buffer *pb = page_buf;
 
@@ -376,7 +373,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
         * more pages we try linearizing it.
         */
 
-       skb_length = skb->len;
        num_data_pgs = netvsc_get_slots(skb) + 2;
 
        if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
@@ -409,6 +405,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        packet->q_idx = skb_get_queue_mapping(skb);
 
        packet->total_data_buflen = skb->len;
+       packet->total_bytes = skb->len;
+       packet->total_packets = 1;
 
        rndis_msg = (struct rndis_message *)skb->head;
 
@@ -445,13 +443,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
                                VLAN_PRIO_SHIFT;
        }
 
-       net_trans_info = get_net_transport_info(skb, &hdr_offset);
-
-       /*
-        * Setup the sendside checksum offload only if this is not a
-        * GSO packet.
-        */
-       if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) {
+       if (skb_is_gso(skb)) {
                struct ndis_tcp_lso_info *lso_info;
 
                rndis_msg_size += NDIS_LSO_PPI_SIZE;
@@ -462,7 +454,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
                                                        ppi->ppi_offset);
 
                lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
-               if (net_trans_info & (INFO_IPV4 << 16)) {
+               if (skb->protocol == htons(ETH_P_IP)) {
                        lso_info->lso_v2_transmit.ip_version =
                                NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
                        ip_hdr(skb)->tot_len = 0;
@@ -478,10 +470,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
                }
-               lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
+               lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
                lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               if (net_trans_info & INFO_TCP) {
+               if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
+                       struct ndis_tcp_ip_checksum_info *csum_info;
+
                        rndis_msg_size += NDIS_CSUM_PPI_SIZE;
                        ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
                                            TCPIP_CHKSUM_PKTINFO);
@@ -489,15 +483,25 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
                        csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
                                                                         ppi->ppi_offset);
 
-                       if (net_trans_info & (INFO_IPV4 << 16))
+                       csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
+
+                       if (skb->protocol == htons(ETH_P_IP)) {
                                csum_info->transmit.is_ipv4 = 1;
-                       else
+
+                               if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+                                       csum_info->transmit.tcp_checksum = 1;
+                               else
+                                       csum_info->transmit.udp_checksum = 1;
+                       } else {
                                csum_info->transmit.is_ipv6 = 1;
 
-                       csum_info->transmit.tcp_checksum = 1;
-                       csum_info->transmit.tcp_header_offset = hdr_offset;
+                               if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+                                       csum_info->transmit.tcp_checksum = 1;
+                               else
+                                       csum_info->transmit.udp_checksum = 1;
+                       }
                } else {
-                       /* UDP checksum (and other) offload is not supported. */
+                       /* Can't do offload of this type of checksum */
                        if (skb_checksum_help(skb))
                                goto drop;
                }
@@ -513,15 +517,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
        skb_tx_timestamp(skb);
        ret = netvsc_send(net_device_ctx->device_ctx, packet,
                          rndis_msg, &pb, skb);
-       if (likely(ret == 0)) {
-               struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
-
-               u64_stats_update_begin(&tx_stats->syncp);
-               tx_stats->packets++;
-               tx_stats->bytes += skb_length;
-               u64_stats_update_end(&tx_stats->syncp);
+       if (likely(ret == 0))
                return NETDEV_TX_OK;
-       }
 
        if (ret == -EAGAIN) {
                ++net_device_ctx->eth_stats.tx_busy;
@@ -541,7 +538,6 @@ no_memory:
        ++net_device_ctx->eth_stats.tx_no_memory;
        goto drop;
 }
-
 /*
  * netvsc_linkstatus_callback - Link up/down notification
  */
@@ -593,13 +589,13 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
 }
 
 static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
-                               struct hv_netvsc_packet *packet,
-                               struct ndis_tcp_ip_checksum_info *csum_info,
-                               void *data, u16 vlan_tci)
+                                            const struct ndis_tcp_ip_checksum_info *csum_info,
+                                            const struct ndis_pkt_8021q_info *vlan,
+                                            void *data, u32 buflen)
 {
        struct sk_buff *skb;
 
-       skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
+       skb = netdev_alloc_skb_ip_align(net, buflen);
        if (!skb)
                return skb;
 
@@ -607,8 +603,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
         * Copy to skb. This copy is needed here since the memory pointed by
         * hv_netvsc_packet cannot be deallocated
         */
-       memcpy(skb_put(skb, packet->total_data_buflen), data,
-              packet->total_data_buflen);
+       memcpy(skb_put(skb, buflen), data, buflen);
 
        skb->protocol = eth_type_trans(skb, net);
 
@@ -625,9 +620,12 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
 
-       if (vlan_tci & VLAN_TAG_PRESENT)
+       if (vlan) {
+               u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);
+
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       vlan_tci);
+       }
 
        return skb;
 }
@@ -636,18 +634,19 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
  * netvsc_recv_callback -  Callback when we receive a packet from the
  * "wire" on the specified device.
  */
-int netvsc_recv_callback(struct hv_device *device_obj,
-                               struct hv_netvsc_packet *packet,
-                               void **data,
-                               struct ndis_tcp_ip_checksum_info *csum_info,
-                               struct vmbus_channel *channel,
-                               u16 vlan_tci)
+int netvsc_recv_callback(struct net_device *net,
+                        struct vmbus_channel *channel,
+                        void  *data, u32 len,
+                        const struct ndis_tcp_ip_checksum_info *csum_info,
+                        const struct ndis_pkt_8021q_info *vlan)
 {
-       struct net_device *net = hv_get_drvdata(device_obj);
        struct net_device_context *net_device_ctx = netdev_priv(net);
+       struct netvsc_device *net_device = net_device_ctx->nvdev;
        struct net_device *vf_netdev;
        struct sk_buff *skb;
        struct netvsc_stats *rx_stats;
+       u16 q_idx = channel->offermsg.offer.sub_channel_index;
+
 
        if (net->reg_state != NETREG_REGISTERED)
                return NVSP_STAT_FAIL;
@@ -665,7 +664,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
                net = vf_netdev;
 
        /* Allocate a skb - TODO direct I/O to pages? */
-       skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
+       skb = netvsc_alloc_recv_skb(net, csum_info, vlan, data, len);
        if (unlikely(!skb)) {
                ++net->stats.rx_dropped;
                rcu_read_unlock();
@@ -673,18 +672,17 @@ int netvsc_recv_callback(struct hv_device *device_obj,
        }
 
        if (net != vf_netdev)
-               skb_record_rx_queue(skb,
-                                   channel->offermsg.offer.sub_channel_index);
+               skb_record_rx_queue(skb, q_idx);
 
        /*
         * Even if injecting the packet, record the statistics
         * on the synthetic device because modifying the VF device
         * statistics will not work correctly.
         */
-       rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
+       rx_stats = &net_device->chan_table[q_idx].rx_stats;
        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->packets++;
-       rx_stats->bytes += packet->total_data_buflen;
+       rx_stats->bytes += len;
 
        if (skb->pkt_type == PACKET_BROADCAST)
                ++rx_stats->broadcast;
@@ -697,7 +695,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
         * is done.
         * TODO - use NAPI?
         */
-       netif_rx(skb);
+       netif_receive_skb(skb);
        rcu_read_unlock();
 
        return 0;
@@ -722,102 +720,76 @@ static void netvsc_get_channels(struct net_device *net,
        }
 }
 
+static int netvsc_set_queues(struct net_device *net, struct hv_device *dev,
+                            u32 num_chn)
+{
+       struct netvsc_device_info device_info;
+       int ret;
+
+       memset(&device_info, 0, sizeof(device_info));
+       device_info.num_chn = num_chn;
+       device_info.ring_size = ring_size;
+       device_info.max_num_vrss_chns = num_chn;
+
+       ret = rndis_filter_device_add(dev, &device_info);
+       if (ret)
+               return ret;
+
+       ret = netif_set_real_num_tx_queues(net, num_chn);
+       if (ret)
+               return ret;
+
+       ret = netif_set_real_num_rx_queues(net, num_chn);
+
+       return ret;
+}
+
 static int netvsc_set_channels(struct net_device *net,
                               struct ethtool_channels *channels)
 {
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *dev = net_device_ctx->device_ctx;
        struct netvsc_device *nvdev = net_device_ctx->nvdev;
-       struct netvsc_device_info device_info;
-       u32 num_chn;
-       u32 max_chn;
-       int ret = 0;
-       bool recovering = false;
+       unsigned int count = channels->combined_count;
+       int ret;
+
+       /* We do not support separate count for rx, tx, or other */
+       if (count == 0 ||
+           channels->rx_count || channels->tx_count || channels->other_count)
+               return -EINVAL;
+
+       if (count > net->num_tx_queues || count > net->num_rx_queues)
+               return -EINVAL;
 
        if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
                return -ENODEV;
 
-       num_chn = nvdev->num_chn;
-       max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());
-
-       if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
-               pr_info("vRSS unsupported before NVSP Version 5\n");
+       if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
                return -EINVAL;
-       }
 
-       /* We do not support rx, tx, or other */
-       if (!channels ||
-           channels->rx_count ||
-           channels->tx_count ||
-           channels->other_count ||
-           (channels->combined_count < 1))
+       if (count > nvdev->max_chn)
                return -EINVAL;
 
-       if (channels->combined_count > max_chn) {
-               pr_info("combined channels too high, using %d\n", max_chn);
-               channels->combined_count = max_chn;
-       }
-
        ret = netvsc_close(net);
        if (ret)
-               goto out;
+               return ret;
 
- do_set:
        net_device_ctx->start_remove = true;
-       rndis_filter_device_remove(dev);
-
-       nvdev->num_chn = channels->combined_count;
-
-       memset(&device_info, 0, sizeof(device_info));
-       device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
-       device_info.ring_size = ring_size;
-       device_info.max_num_vrss_chns = max_num_vrss_chns;
+       rndis_filter_device_remove(dev, nvdev);
 
-       ret = rndis_filter_device_add(dev, &device_info);
-       if (ret) {
-               if (recovering) {
-                       netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
-                       return ret;
-               }
-               goto recover;
-       }
-
-       nvdev = net_device_ctx->nvdev;
-
-       ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
-       if (ret) {
-               if (recovering) {
-                       netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
-                       return ret;
-               }
-               goto recover;
-       }
-
-       ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
-       if (ret) {
-               if (recovering) {
-                       netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
-                       return ret;
-               }
-               goto recover;
-       }
+       ret = netvsc_set_queues(net, dev, count);
+       if (ret == 0)
+               nvdev->num_chn = count;
+       else
+               netvsc_set_queues(net, dev, nvdev->num_chn);
 
- out:
        netvsc_open(net);
        net_device_ctx->start_remove = false;
+
        /* We may have missed link change notifications */
        schedule_delayed_work(&net_device_ctx->dwork, 0);
 
        return ret;
-
- recover:
-       /* If the above failed, we attempt to recover through the same
-        * process but with the original number of channels.
-        */
-       netdev_err(net, "could not set channels, recovering\n");
-       recovering = true;
-       channels->combined_count = num_chn;
-       goto do_set;
 }
 
 static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
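
Note how the error handling in netvsc_set_channels() collapses: instead of the goto-driven "recovering" pass, the new code tries the requested count once and, on failure, re-applies the previous count. The shape of it, with apply_queue_count() standing in for the rndis teardown plus netvsc_set_queues():

#include <stdio.h>

/* stand-in for rndis_filter_device_add() + netif_set_real_num_*_queues() */
static int apply_queue_count(unsigned int n)
{
	return (n >= 1 && n <= 8) ? 0 : -1;
}

static int set_channels(unsigned int *num_chn, unsigned int requested)
{
	int ret = apply_queue_count(requested);

	if (ret == 0)
		*num_chn = requested;		/* commit the new count */
	else
		apply_queue_count(*num_chn);	/* best-effort restore */
	return ret;
}

int main(void)
{
	unsigned int num_chn = 4;

	printf("ret=%d num_chn=%u\n", set_channels(&num_chn, 6), num_chn);
	printf("ret=%d num_chn=%u\n", set_channels(&num_chn, 99), num_chn);
	return 0;
}
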
@@ -878,8 +850,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
        struct netvsc_device *nvdev = ndevctx->nvdev;
        struct hv_device *hdev = ndevctx->device_ctx;
        struct netvsc_device_info device_info;
-       u32 num_chn;
-       int ret = 0;
+       int ret;
 
        if (ndevctx->start_remove || !nvdev || nvdev->destroy)
                return -ENODEV;
@@ -888,17 +859,15 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
        if (ret)
                goto out;
 
-       num_chn = nvdev->num_chn;
-
        ndevctx->start_remove = true;
-       rndis_filter_device_remove(hdev);
+       rndis_filter_device_remove(hdev, nvdev);
 
        ndev->mtu = mtu;
 
        memset(&device_info, 0, sizeof(device_info));
        device_info.ring_size = ring_size;
-       device_info.num_chn = num_chn;
-       device_info.max_num_vrss_chns = max_num_vrss_chns;
+       device_info.num_chn = nvdev->num_chn;
+       device_info.max_num_vrss_chns = nvdev->num_chn;
        rndis_filter_device_add(hdev, &device_info);
 
 out:
@@ -911,47 +880,50 @@ out:
        return ret;
 }
 
-static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
-                                                   struct rtnl_link_stats64 *t)
+static void netvsc_get_stats64(struct net_device *net,
+                              struct rtnl_link_stats64 *t)
 {
        struct net_device_context *ndev_ctx = netdev_priv(net);
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
-                                                           cpu);
-               struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
-                                                           cpu);
-               u64 tx_packets, tx_bytes, rx_packets, rx_bytes, rx_multicast;
+       struct netvsc_device *nvdev = ndev_ctx->nvdev;
+       int i;
+
+       if (!nvdev)
+               return;
+
+       for (i = 0; i < nvdev->num_chn; i++) {
+               const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
+               const struct netvsc_stats *stats;
+               u64 packets, bytes, multicast;
                unsigned int start;
 
+               stats = &nvchan->tx_stats;
                do {
-                       start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
-                       tx_packets = tx_stats->packets;
-                       tx_bytes = tx_stats->bytes;
-               } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+                       start = u64_stats_fetch_begin_irq(&stats->syncp);
+                       packets = stats->packets;
+                       bytes = stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 
+               t->tx_bytes     += bytes;
+               t->tx_packets   += packets;
+
+               stats = &nvchan->rx_stats;
                do {
-                       start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
-                       rx_packets = rx_stats->packets;
-                       rx_bytes = rx_stats->bytes;
-                       rx_multicast = rx_stats->multicast + rx_stats->broadcast;
-               } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
-
-               t->tx_bytes     += tx_bytes;
-               t->tx_packets   += tx_packets;
-               t->rx_bytes     += rx_bytes;
-               t->rx_packets   += rx_packets;
-               t->multicast    += rx_multicast;
+                       start = u64_stats_fetch_begin_irq(&stats->syncp);
+                       packets = stats->packets;
+                       bytes = stats->bytes;
+                       multicast = stats->multicast + stats->broadcast;
+               } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+               t->rx_bytes     += bytes;
+               t->rx_packets   += packets;
+               t->multicast    += multicast;
        }
 
        t->tx_dropped   = net->stats.tx_dropped;
-       t->tx_errors    = net->stats.tx_dropped;
+       t->tx_errors    = net->stats.tx_errors;
 
        t->rx_dropped   = net->stats.rx_dropped;
        t->rx_errors    = net->stats.rx_errors;
-
-       return t;
 }
 
 static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
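
Both netvsc_get_stats64() above and netvsc_get_ethtool_stats() below rely on the u64_stats_sync retry idiom: the reader re-snapshots until the sequence count is even and unchanged, which guarantees untorn 64-bit counters even on 32-bit machines where the writer updates them in two halves. A single-writer model of the pattern, approximating the kernel helpers with GCC atomics:

#include <stdio.h>

struct stats {
	unsigned int seq;	/* odd while a writer is mid-update */
	unsigned long long packets, bytes;
};

static void writer_update(struct stats *s, unsigned long long len)
{
	__atomic_fetch_add(&s->seq, 1, __ATOMIC_RELEASE);	/* begin: seq odd */
	s->packets++;
	s->bytes += len;
	__atomic_fetch_add(&s->seq, 1, __ATOMIC_RELEASE);	/* end: seq even */
}

static void reader_snapshot(struct stats *s,
			    unsigned long long *packets,
			    unsigned long long *bytes)
{
	unsigned int start;

	do {
		start = __atomic_load_n(&s->seq, __ATOMIC_ACQUIRE);
		*packets = s->packets;
		*bytes = s->bytes;
		/* retry if a write was in flight or completed meanwhile */
	} while ((start & 1) ||
		 start != __atomic_load_n(&s->seq, __ATOMIC_ACQUIRE));
}

int main(void)
{
	struct stats s = { 0, 0, 0 };
	unsigned long long p, b;

	writer_update(&s, 1500);
	reader_snapshot(&s, &p, &b);
	printf("packets=%llu bytes=%llu\n", p, b);
	return 0;
}
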
@@ -989,11 +961,19 @@ static const struct {
        { "tx_busy",      offsetof(struct netvsc_ethtool_stats, tx_busy) },
 };
 
+#define NETVSC_GLOBAL_STATS_LEN        ARRAY_SIZE(netvsc_stats)
+
+/* 4 statistics per queue (rx/tx packets/bytes) */
+#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
+
 static int netvsc_get_sset_count(struct net_device *dev, int string_set)
 {
+       struct net_device_context *ndc = netdev_priv(dev);
+       struct netvsc_device *nvdev = ndc->nvdev;
+
        switch (string_set) {
        case ETH_SS_STATS:
-               return ARRAY_SIZE(netvsc_stats);
+               return NETVSC_GLOBAL_STATS_LEN + NETVSC_QUEUE_STATS_LEN(nvdev);
        default:
                return -EINVAL;
        }
@@ -1003,24 +983,107 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
 {
        struct net_device_context *ndc = netdev_priv(dev);
+       struct netvsc_device *nvdev = ndc->nvdev;
        const void *nds = &ndc->eth_stats;
-       int i;
+       const struct netvsc_stats *qstats;
+       unsigned int start;
+       u64 packets, bytes;
+       int i, j;
 
-       for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
+       for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
                data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
+
+       for (j = 0; j < nvdev->num_chn; j++) {
+               qstats = &nvdev->chan_table[j].tx_stats;
+
+               do {
+                       start = u64_stats_fetch_begin_irq(&qstats->syncp);
+                       packets = qstats->packets;
+                       bytes = qstats->bytes;
+               } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
+               data[i++] = packets;
+               data[i++] = bytes;
+
+               qstats = &nvdev->chan_table[j].rx_stats;
+               do {
+                       start = u64_stats_fetch_begin_irq(&qstats->syncp);
+                       packets = qstats->packets;
+                       bytes = qstats->bytes;
+               } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
+               data[i++] = packets;
+               data[i++] = bytes;
+       }
 }
 
 static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
+       struct net_device_context *ndc = netdev_priv(dev);
+       struct netvsc_device *nvdev = ndc->nvdev;
+       u8 *p = data;
        int i;
 
        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
-                       memcpy(data + i * ETH_GSTRING_LEN,
+                       memcpy(p + i * ETH_GSTRING_LEN,
                               netvsc_stats[i].name, ETH_GSTRING_LEN);
+
+               p += i * ETH_GSTRING_LEN;
+               for (i = 0; i < nvdev->num_chn; i++) {
+                       sprintf(p, "tx_queue_%u_packets", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "tx_queue_%u_bytes", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "rx_queue_%u_packets", i);
+                       p += ETH_GSTRING_LEN;
+                       sprintf(p, "rx_queue_%u_bytes", i);
+                       p += ETH_GSTRING_LEN;
+               }
+
+               break;
+       }
+}
+
+static int
+netvsc_get_rss_hash_opts(struct netvsc_device *nvdev,
+                        struct ethtool_rxnfc *info)
+{
+       info->data = RXH_IP_SRC | RXH_IP_DST;
+
+       switch (info->flow_type) {
+       case TCP_V4_FLOW:
+       case TCP_V6_FLOW:
+               info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+               /* fallthrough */
+       case UDP_V4_FLOW:
+       case UDP_V6_FLOW:
+       case IPV4_FLOW:
+       case IPV6_FLOW:
                break;
+       default:
+               info->data = 0;
+               break;
+       }
+
+       return 0;
+}
+
+static int
+netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+                u32 *rules)
+{
+       struct net_device_context *ndc = netdev_priv(dev);
+       struct netvsc_device *nvdev = ndc->nvdev;
+
+       switch (info->cmd) {
+       case ETHTOOL_GRXRINGS:
+               info->data = nvdev->num_chn;
+               return 0;
+
+       case ETHTOOL_GRXFH:
+               return netvsc_get_rss_hash_opts(nvdev, info);
        }
+       return -EOPNOTSUPP;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1032,6 +1095,68 @@ static void netvsc_poll_controller(struct net_device *net)
 }
 #endif
 
+static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
+{
+       return NETVSC_HASH_KEYLEN;
+}
+
+static u32 netvsc_rss_indir_size(struct net_device *dev)
+{
+       return ITAB_NUM;
+}
+
+static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+                          u8 *hfunc)
+{
+       struct net_device_context *ndc = netdev_priv(dev);
+       struct netvsc_device *ndev = ndc->nvdev;
+       struct rndis_device *rndis_dev = ndev->extension;
+       int i;
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;      /* Toeplitz */
+
+       if (indir) {
+               for (i = 0; i < ITAB_NUM; i++)
+                       indir[i] = rndis_dev->ind_table[i];
+       }
+
+       if (key)
+               memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
+
+       return 0;
+}
+
+static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
+                          const u8 *key, const u8 hfunc)
+{
+       struct net_device_context *ndc = netdev_priv(dev);
+       struct netvsc_device *ndev = ndc->nvdev;
+       struct rndis_device *rndis_dev = ndev->extension;
+       int i;
+
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+               return -EOPNOTSUPP;
+
+       if (indir) {
+               for (i = 0; i < ITAB_NUM; i++)
+                       if (indir[i] >= dev->num_rx_queues)
+                               return -EINVAL;
+
+               for (i = 0; i < ITAB_NUM; i++)
+                       rndis_dev->ind_table[i] = indir[i];
+       }
+
+       if (!key) {
+               if (!indir)
+                       return 0;
+
+               key = rndis_dev->rss_key;
+       }
+
+       return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn);
+}
+
 static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo    = netvsc_get_drvinfo,
        .get_link       = ethtool_op_get_link,
@@ -1043,6 +1168,11 @@ static const struct ethtool_ops ethtool_ops = {
        .get_ts_info    = ethtool_op_get_ts_info,
        .get_settings   = netvsc_get_settings,
        .set_settings   = netvsc_set_settings,
+       .get_rxnfc      = netvsc_get_rxnfc,
+       .get_rxfh_key_size = netvsc_get_rxfh_key_size,
+       .get_rxfh_indir_size = netvsc_rss_indir_size,
+       .get_rxfh       = netvsc_get_rxfh,
+       .set_rxfh       = netvsc_set_rxfh,
 };
 
 static const struct net_device_ops device_ops = {
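
The rxfh hooks registered above expose the two halves of RSS configuration: a 40-byte Toeplitz key and an ITAB_NUM-entry indirection table whose entries name receive queues, which is exactly why netvsc_set_rxfh() rejects entries >= dev->num_rx_queues. How such a table is consumed on receive, with an invented hash value:

#include <stdio.h>

#define ITAB_NUM 128

int main(void)
{
	unsigned int ind_table[ITAB_NUM];
	unsigned int i, num_chn = 4, hash = 0xdeadbeef;	/* invented hash */

	/* default spread: round-robin the queues across the table */
	for (i = 0; i < ITAB_NUM; i++)
		ind_table[i] = i % num_chn;

	printf("hash 0x%08x -> rx queue %u\n",
	       hash, ind_table[hash % ITAB_NUM]);
	return 0;
}
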
@@ -1163,15 +1293,6 @@ out_unlock:
        rtnl_unlock();
 }
 
-static void netvsc_free_netdev(struct net_device *netdev)
-{
-       struct net_device_context *net_device_ctx = netdev_priv(netdev);
-
-       free_percpu(net_device_ctx->tx_stats);
-       free_percpu(net_device_ctx->rx_stats);
-       free_netdev(netdev);
-}
-
 static struct net_device *get_netvsc_bymac(const u8 *mac)
 {
        struct net_device *dev;
@@ -1308,7 +1429,6 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
 static int netvsc_unregister_vf(struct net_device *vf_netdev)
 {
        struct net_device *ndev;
-       struct netvsc_device *netvsc_dev;
        struct net_device_context *net_device_ctx;
 
        ndev = get_netvsc_byref(vf_netdev);
@@ -1316,7 +1436,6 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
                return NOTIFY_DONE;
 
        net_device_ctx = netdev_priv(ndev);
-       netvsc_dev = net_device_ctx->nvdev;
 
        netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
 
@@ -1336,7 +1455,7 @@ static int netvsc_probe(struct hv_device *dev,
        int ret;
 
        net = alloc_etherdev_mq(sizeof(struct net_device_context),
-                               num_online_cpus());
+                               VRSS_CHANNEL_MAX);
        if (!net)
                return -ENOMEM;
 
@@ -1351,18 +1470,6 @@ static int netvsc_probe(struct hv_device *dev,
                netdev_dbg(net, "netvsc msg_enable: %d\n",
                           net_device_ctx->msg_enable);
 
-       net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
-       if (!net_device_ctx->tx_stats) {
-               free_netdev(net);
-               return -ENOMEM;
-       }
-       net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
-       if (!net_device_ctx->rx_stats) {
-               free_percpu(net_device_ctx->tx_stats);
-               free_netdev(net);
-               return -ENOMEM;
-       }
-
        hv_set_drvdata(dev, net);
 
        net_device_ctx->start_remove = false;
@@ -1374,10 +1481,6 @@ static int netvsc_probe(struct hv_device *dev,
        INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
 
        net->netdev_ops = &device_ops;
-
-       net->hw_features = NETVSC_HW_FEATURES;
-       net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;
-
        net->ethtool_ops = &ethtool_ops;
        SET_NETDEV_DEV(net, &dev->device);
 
@@ -1387,20 +1490,26 @@ static int netvsc_probe(struct hv_device *dev,
        /* Notify the netvsc driver of the new device */
        memset(&device_info, 0, sizeof(device_info));
        device_info.ring_size = ring_size;
-       device_info.max_num_vrss_chns = max_num_vrss_chns;
+       device_info.max_num_vrss_chns = min_t(u32, VRSS_CHANNEL_DEFAULT,
+                                             num_online_cpus());
        ret = rndis_filter_device_add(dev, &device_info);
        if (ret != 0) {
                netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
-               netvsc_free_netdev(net);
+               free_netdev(net);
                hv_set_drvdata(dev, NULL);
                return ret;
        }
        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
+       /* hw_features computed in rndis_filter_device_add */
+       net->features = net->hw_features |
+               NETIF_F_HIGHDMA | NETIF_F_SG |
+               NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+       net->vlan_features = net->features;
+
        nvdev = net_device_ctx->nvdev;
        netif_set_real_num_tx_queues(net, nvdev->num_chn);
        netif_set_real_num_rx_queues(net, nvdev->num_chn);
-       netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE);
 
        /* MTU range: 68 - 1500 or 65521 */
        net->min_mtu = NETVSC_MTU_MIN;
@@ -1412,8 +1521,8 @@ static int netvsc_probe(struct hv_device *dev,
        ret = register_netdev(net);
        if (ret != 0) {
                pr_err("Unable to register netdev.\n");
-               rndis_filter_device_remove(dev);
-               netvsc_free_netdev(net);
+               rndis_filter_device_remove(dev, nvdev);
+               free_netdev(net);
        }
 
        return ret;
@@ -1423,7 +1532,6 @@ static int netvsc_remove(struct hv_device *dev)
 {
        struct net_device *net;
        struct net_device_context *ndev_ctx;
-       struct netvsc_device *net_device;
 
        net = hv_get_drvdata(dev);
 
@@ -1433,7 +1541,6 @@ static int netvsc_remove(struct hv_device *dev)
        }
 
        ndev_ctx = netdev_priv(net);
-       net_device = ndev_ctx->nvdev;
 
        /* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
         * removing the device.
@@ -1454,11 +1561,11 @@ static int netvsc_remove(struct hv_device *dev)
         * Call to the vsc driver to let it know that the device is being
         * removed
         */
-       rndis_filter_device_remove(dev);
+       rndis_filter_device_remove(dev, ndev_ctx->nvdev);
 
        hv_set_drvdata(dev, NULL);
 
-       netvsc_free_netdev(net);
+       free_netdev(net);
        return 0;
 }
 
@@ -1498,7 +1605,7 @@ static int netvsc_netdev_event(struct notifier_block *this,
                return NOTIFY_DONE;
 
        /* Avoid Vlan dev with same MAC registering as VF */
-       if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+       if (is_vlan_dev(event_dev))
                return NOTIFY_DONE;
 
        /* Avoid Bonding master dev with same MAC registering as VF */
index 8d90904e0e49f4333bac23d7128af1255f8e8a54..19356f56b7b144f40184c5766a8f9be33b4a3079 100644 (file)
@@ -57,6 +57,14 @@ struct rndis_request {
        u8 request_ext[RNDIS_EXT_LEN];
 };
 
+static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
+       0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+       0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+       0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+       0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+       0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
+};
+
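
netvsc_hash_key above is the 40-byte Toeplitz key fed to the host for receive-side scaling. For reference, a minimal user-space sketch of the Toeplitz hash such a key parameterizes (the tuple layout and lengths are assumptions for illustration; the real hashing is done by the host):

#include <stdint.h>
#include <stddef.h>

/* Toeplitz hash: for each set bit of the input, XOR in the 32-bit
 * window of the key that starts at that bit position. A 40-byte key
 * covers inputs up to 36 bytes (e.g. an IPv6 src/dst/ports tuple).
 */
static uint32_t toeplitz_hash(const uint8_t *key, const uint8_t *data,
			      size_t len)
{
	uint32_t hash = 0;
	uint32_t window = (uint32_t)key[0] << 24 | (uint32_t)key[1] << 16 |
			  (uint32_t)key[2] << 8 | (uint32_t)key[3];
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		for (bit = 7; bit >= 0; bit--) {
			if (data[i] & (1u << bit))
				hash ^= window;
			/* slide the window one key bit to the left */
			window = (window << 1) | ((key[i + 4] >> bit) & 1);
		}
	}
	return hash;
}
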
 static struct rndis_device *get_rndis_device(void)
 {
        struct rndis_device *device;
@@ -124,7 +132,7 @@ static void put_rndis_request(struct rndis_device *dev,
 }
 
 static void dump_rndis_message(struct hv_device *hv_dev,
-                       struct rndis_message *rndis_msg)
+                              const struct rndis_message *rndis_msg)
 {
        struct net_device *netdev = hv_get_drvdata(hv_dev);
 
@@ -339,102 +347,78 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
        return NULL;
 }
 
-static int rndis_filter_receive_data(struct rndis_device *dev,
-                                  struct rndis_message *msg,
-                                  struct hv_netvsc_packet *pkt,
-                                  void **data,
-                                  struct vmbus_channel *channel)
+static int rndis_filter_receive_data(struct net_device *ndev,
+                                    struct rndis_device *dev,
+                                    struct rndis_message *msg,
+                                    struct vmbus_channel *channel,
+                                    void *data, u32 data_buflen)
 {
-       struct rndis_packet *rndis_pkt;
+       struct rndis_packet *rndis_pkt = &msg->msg.pkt;
+       const struct ndis_tcp_ip_checksum_info *csum_info;
+       const struct ndis_pkt_8021q_info *vlan;
        u32 data_offset;
-       struct ndis_pkt_8021q_info *vlan;
-       struct ndis_tcp_ip_checksum_info *csum_info;
-       u16 vlan_tci = 0;
-       struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
-
-       rndis_pkt = &msg->msg.pkt;
 
        /* Remove the rndis header and pass it back up the stack */
        data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
 
-       pkt->total_data_buflen -= data_offset;
+       data_buflen -= data_offset;
 
        /*
         * Make sure we got a valid RNDIS message: after stripping the
         * header, data_buflen should be the data packet size plus the
         * trailer padding size.
         */
-       if (pkt->total_data_buflen < rndis_pkt->data_len) {
+       if (unlikely(data_buflen < rndis_pkt->data_len)) {
                netdev_err(dev->ndev, "rndis message buffer "
                           "overflow detected (got %u, min %u)"
                           "...dropping this message!\n",
-                          pkt->total_data_buflen, rndis_pkt->data_len);
+                          data_buflen, rndis_pkt->data_len);
                return NVSP_STAT_FAIL;
        }
 
+       vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
+
        /*
         * Remove the rndis trailer padding from the rndis packet message:
         * rndis_pkt->data_len tells us the real data length, so only the
         * data packet is passed up the stack, without the trailer padding.
         */
-       pkt->total_data_buflen = rndis_pkt->data_len;
-       *data = (void *)((unsigned long)(*data) + data_offset);
-
-       vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
-       if (vlan) {
-               vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid |
-                       (vlan->pri << VLAN_PRIO_SHIFT);
-       }
-
+       data = (void *)((unsigned long)data + data_offset);
        csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
-       return netvsc_recv_callback(net_device_ctx->device_ctx, pkt, data,
-                                   csum_info, channel, vlan_tci);
+       return netvsc_recv_callback(ndev, channel,
+                                   data, rndis_pkt->data_len,
+                                   csum_info, vlan);
 }
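
The reworked receive path above passes the raw buffer and its length instead of mutating a shared packet struct, which makes the overflow check explicit. A simplified stand-alone sketch of the same validation (the struct is a stand-in, not the kernel's rndis definitions):

#include <stddef.h>
#include <stdint.h>

struct rndis_pkt_stub {		/* simplified stand-in */
	uint32_t data_offset;	/* payload offset after the RNDIS header */
	uint32_t data_len;	/* payload length, excluding trailer pad */
};

/* Return the payload start, or NULL when the advertised data length
 * does not fit in what was actually received (dropped as
 * NVSP_STAT_FAIL in the driver).
 */
static const void *rndis_payload(const struct rndis_pkt_stub *pkt,
				 const uint8_t *buf, size_t buflen,
				 size_t rndis_header_size)
{
	size_t off = rndis_header_size + pkt->data_offset;

	if (buflen < off || buflen - off < pkt->data_len)
		return NULL;

	return buf + off;
}
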
 
-int rndis_filter_receive(struct hv_device *dev,
-                               struct hv_netvsc_packet *pkt,
-                               void **data,
-                               struct vmbus_channel *channel)
+int rndis_filter_receive(struct net_device *ndev,
+                        struct netvsc_device *net_dev,
+                        struct hv_device *dev,
+                        struct vmbus_channel *channel,
+                        void *data, u32 buflen)
 {
-       struct net_device *ndev = hv_get_drvdata(dev);
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
-       struct netvsc_device *net_dev = net_device_ctx->nvdev;
-       struct rndis_device *rndis_dev;
-       struct rndis_message *rndis_msg;
-       int ret = 0;
-
-       if (!net_dev) {
-               ret = NVSP_STAT_FAIL;
-               goto exit;
-       }
+       struct rndis_device *rndis_dev = net_dev->extension;
+       struct rndis_message *rndis_msg = data;
 
        /* Make sure the rndis device state is initialized */
-       if (!net_dev->extension) {
-               netdev_err(ndev, "got rndis message but no rndis device - "
-                         "dropping this message!\n");
-               ret = NVSP_STAT_FAIL;
-               goto exit;
+       if (unlikely(!rndis_dev)) {
+               netif_err(net_device_ctx, rx_err, ndev,
+                         "got rndis message but no rndis device!\n");
+               return NVSP_STAT_FAIL;
        }
 
-       rndis_dev = (struct rndis_device *)net_dev->extension;
-       if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) {
-               netdev_err(ndev, "got rndis message but rndis device "
-                          "uninitialized...dropping this message!\n");
-               ret = NVSP_STAT_FAIL;
-               goto exit;
+       if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) {
+               netif_err(net_device_ctx, rx_err, ndev,
+                         "got rndis message before device was initialized\n");
+               return NVSP_STAT_FAIL;
        }
 
-       rndis_msg = *data;
-
-       if (netif_msg_rx_err(net_device_ctx))
+       if (netif_msg_rx_status(net_device_ctx))
                dump_rndis_message(dev, rndis_msg);
 
        switch (rndis_msg->ndis_msg_type) {
        case RNDIS_MSG_PACKET:
-               /* data msg */
-               ret = rndis_filter_receive_data(rndis_dev, rndis_msg, pkt,
-                                               data, channel);
-               break;
-
+               return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg,
+                                                channel, data, buflen);
        case RNDIS_MSG_INIT_C:
        case RNDIS_MSG_QUERY_C:
        case RNDIS_MSG_SET_C:
@@ -454,8 +438,7 @@ int rndis_filter_receive(struct hv_device *dev,
                break;
        }
 
-exit:
-       return ret;
+       return 0;
 }
 
 static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
@@ -485,7 +468,35 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
        query->info_buflen = 0;
        query->dev_vc_handle = 0;
 
-       if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
+       if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
+               struct net_device_context *ndevctx = netdev_priv(dev->ndev);
+               struct netvsc_device *nvdev = ndevctx->nvdev;
+               struct ndis_offload *hwcaps;
+               u32 nvsp_version = nvdev->nvsp_version;
+               u8 ndis_rev;
+               size_t size;
+
+               if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
+                       ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
+                       size = NDIS_OFFLOAD_SIZE;
+               } else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
+                       ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
+                       size = NDIS_OFFLOAD_SIZE_6_1;
+               } else {
+                       ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
+                       size = NDIS_OFFLOAD_SIZE_6_0;
+               }
+
+               request->request_msg.msg_len += size;
+               query->info_buflen = size;
+               hwcaps = (struct ndis_offload *)
+                       ((unsigned long)query + query->info_buf_offset);
+
+               hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
+               hwcaps->header.revision = ndis_rev;
+               hwcaps->header.size = size;
+
+       } else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
                struct ndis_recv_scale_cap *cap;
 
                request->request_msg.msg_len +=
@@ -526,6 +537,44 @@ cleanup:
        return ret;
 }
 
+/* Get the hardware offload capabilities */
+static int
+rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps)
+{
+       u32 caps_len = sizeof(*caps);
+       int ret;
+
+       memset(caps, 0, sizeof(*caps));
+
+       ret = rndis_filter_query_device(dev,
+                                       OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
+                                       caps, &caps_len);
+       if (ret)
+               return ret;
+
+       if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
+               netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
+                           caps->header.type);
+               return -EINVAL;
+       }
+
+       if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
+               netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
+                           caps->header.revision);
+               return -EINVAL;
+       }
+
+       if (caps->header.size > caps_len ||
+           caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
+               netdev_warn(dev->ndev,
+                           "invalid NDIS objsize %u, data size %u\n",
+                           caps->header.size, caps_len);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int rndis_filter_query_device_mac(struct rndis_device *dev)
 {
        u32 size = ETH_ALEN;
@@ -663,23 +712,15 @@ cleanup:
        return ret;
 }
 
-static const u8 netvsc_hash_key[] = {
-       0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
-       0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
-       0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
-       0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
-       0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
-};
-#define HASH_KEYLEN ARRAY_SIZE(netvsc_hash_key)
-
-static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
+int rndis_filter_set_rss_param(struct rndis_device *rdev,
+                              const u8 *rss_key, int num_queue)
 {
        struct net_device *ndev = rdev->ndev;
        struct rndis_request *request;
        struct rndis_set_request *set;
        struct rndis_set_complete *set_complete;
        u32 extlen = sizeof(struct ndis_recv_scale_param) +
-                    4*ITAB_NUM + HASH_KEYLEN;
+                    4 * ITAB_NUM + NETVSC_HASH_KEYLEN;
        struct ndis_recv_scale_param *rssp;
        u32 *itab;
        u8 *keyp;
@@ -707,19 +748,18 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
                         NDIS_HASH_TCP_IPV6;
        rssp->indirect_tabsize = 4*ITAB_NUM;
        rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
-       rssp->hashkey_size = HASH_KEYLEN;
+       rssp->hashkey_size = NETVSC_HASH_KEYLEN;
        rssp->kashkey_offset = rssp->indirect_taboffset +
                               rssp->indirect_tabsize;
 
        /* Set indirection table entries */
        itab = (u32 *)(rssp + 1);
        for (i = 0; i < ITAB_NUM; i++)
-               itab[i] = i % num_queue;
+               itab[i] = rdev->ind_table[i];
 
        /* Set hash key values */
        keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
-       for (i = 0; i < HASH_KEYLEN; i++)
-               keyp[i] = netvsc_hash_key[i];
+       memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
 
        ret = rndis_filter_send_request(rdev, request);
        if (ret != 0)
@@ -727,7 +767,9 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
 
        wait_for_completion(&request->wait_event);
        set_complete = &request->response_msg.msg.set_complete;
-       if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+       if (set_complete->status == RNDIS_STATUS_SUCCESS)
+               memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
+       else {
                netdev_err(ndev, "Failed to set RSS parameters: 0x%x\n",
                           set_complete->status);
                ret = -EINVAL;
@@ -778,7 +820,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
        struct rndis_request *request;
        struct rndis_set_request *set;
        struct rndis_set_complete *set_complete;
-       u32 status;
        int ret;
 
        request = get_rndis_request(dev, RNDIS_MSG_SET,
@@ -805,8 +846,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
        wait_for_completion(&request->wait_event);
 
        set_complete = &request->response_msg.msg.set_complete;
-       status = set_complete->status;
-
 cleanup:
        if (request)
                put_rndis_request(dev, request);
@@ -864,6 +903,23 @@ cleanup:
        return ret;
 }
 
+static bool netvsc_device_idle(const struct netvsc_device *nvdev)
+{
+       int i;
+
+       if (atomic_read(&nvdev->num_outstanding_recvs) > 0)
+               return false;
+
+       for (i = 0; i < nvdev->num_chn; i++) {
+               const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
+
+               if (atomic_read(&nvchan->queue_sends) > 0)
+                       return false;
+       }
+
+       return true;
+}
+
 static void rndis_filter_halt_device(struct rndis_device *dev)
 {
        struct rndis_request *request;
@@ -894,9 +950,7 @@ cleanup:
        spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
 
        /* Wait for all send completions */
-       wait_event(nvdev->wait_drain,
-                  atomic_read(&nvdev->num_outstanding_sends) == 0 &&
-                  atomic_read(&nvdev->num_outstanding_recvs) == 0);
+       wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
 
        if (request)
                put_rndis_request(dev, request);
@@ -948,18 +1002,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
        if (chn_index >= nvscdev->num_chn)
                return;
 
-       set_per_channel_state(new_sc, nvscdev->sub_cb_buf + (chn_index - 1) *
-                             NETVSC_PACKET_SIZE);
-
-       nvscdev->mrc[chn_index].buf = vzalloc(NETVSC_RECVSLOT_MAX *
-                                             sizeof(struct recv_comp_data));
+       nvscdev->chan_table[chn_index].mrc.buf
+               = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
 
        ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
                         nvscdev->ring_size * PAGE_SIZE, NULL, 0,
                         netvsc_channel_cb, new_sc);
 
        if (ret == 0)
-               nvscdev->chn_table[chn_index] = new_sc;
+               nvscdev->chan_table[chn_index].channel = new_sc;
 
        spin_lock_irqsave(&nvscdev->sc_lock, flags);
        nvscdev->num_sc_offered--;
@@ -969,24 +1020,25 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
 }
 
 int rndis_filter_device_add(struct hv_device *dev,
-                           void *additional_info)
+                           struct netvsc_device_info *device_info)
 {
-       int ret;
        struct net_device *net = hv_get_drvdata(dev);
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_device *net_device;
        struct rndis_device *rndis_device;
-       struct netvsc_device_info *device_info = additional_info;
+       struct ndis_offload hwcaps;
        struct ndis_offload_params offloads;
        struct nvsp_message *init_packet;
        struct ndis_recv_scale_cap rsscap;
        u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
+       unsigned int gso_max_size = GSO_MAX_SIZE;
        u32 mtu, size;
        u32 num_rss_qs;
        u32 sc_delta;
        const struct cpumask *node_cpu_mask;
        u32 num_possible_rss_qs;
        unsigned long flags;
+       int i, ret;
 
        rndis_device = get_rndis_device();
        if (!rndis_device)
@@ -997,7 +1049,7 @@ int rndis_filter_device_add(struct hv_device *dev,
         * NOTE! Once the channel is created, we may get a receive callback
         * (RndisFilterOnReceive()) before this call is completed
         */
-       ret = netvsc_device_add(dev, additional_info);
+       ret = netvsc_device_add(dev, device_info);
        if (ret != 0) {
                kfree(rndis_device);
                return ret;
@@ -1016,7 +1068,7 @@ int rndis_filter_device_add(struct hv_device *dev,
        /* Send the rndis initialization message */
        ret = rndis_filter_init_device(rndis_device);
        if (ret != 0) {
-               rndis_filter_device_remove(dev);
+               rndis_filter_device_remove(dev, net_device);
                return ret;
        }
 
@@ -1031,25 +1083,71 @@ int rndis_filter_device_add(struct hv_device *dev,
        /* Get the mac address */
        ret = rndis_filter_query_device_mac(rndis_device);
        if (ret != 0) {
-               rndis_filter_device_remove(dev);
+               rndis_filter_device_remove(dev, net_device);
                return ret;
        }
 
        memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
 
-       /* Turn on the offloads; the host supports all of the relevant
-        * offloads.
-        */
+       /* Find HW offload capabilities */
+       ret = rndis_query_hwcaps(rndis_device, &hwcaps);
+       if (ret != 0) {
+               rndis_filter_device_remove(dev, net_device);
+               return ret;
+       }
+
+       /* A value of zero means "no change"; now turn on what we want. */
        memset(&offloads, 0, sizeof(struct ndis_offload_params));
-       /* A value of zero means "no change"; now turn on what we
-        * want.
-        */
-       offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
-       offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
-       offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
-       offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
-       offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
-       offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
+
+       /* Linux always computes the IP header checksum itself, so don't offload it */
+       offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
+
+       /* Compute tx offload settings based on hw capabilities */
+       net->hw_features = NETIF_F_RXCSUM;
+
+       if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
+               /* Can checksum TCP */
+               net->hw_features |= NETIF_F_IP_CSUM;
+               net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;
+
+               offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+
+               if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
+                       offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
+                       net->hw_features |= NETIF_F_TSO;
+
+                       if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
+                               gso_max_size = hwcaps.lsov2.ip4_maxsz;
+               }
+
+               if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
+                       offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+                       net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
+               }
+       }
+
+       if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
+               net->hw_features |= NETIF_F_IPV6_CSUM;
+
+               offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+               net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;
+
+               if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
+                   (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
+                       offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
+                       net->hw_features |= NETIF_F_TSO6;
+
+                       if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
+                               gso_max_size = hwcaps.lsov2.ip6_maxsz;
+               }
+
+               if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
+                       offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+                       net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
+               }
+       }
+
+       netif_set_gso_max_size(net, gso_max_size);
 
        ret = rndis_filter_set_offload_params(net, &offloads);
        if (ret)
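
One detail worth noting in the hunk above: NDIS_TXCSUM_ALL_TCP4/6 are multi-bit masks, so the code compares the ANDed result against the mask itself, while the single-bit UDP capabilities use a plain AND. A small sketch of the idiom (flag values invented for illustration):

#include <stdbool.h>
#include <stdint.h>

#define CAP_TCP_CSUM	0x01u
#define CAP_TCP_OPTS	0x02u
#define CAP_ALL_TCP	(CAP_TCP_CSUM | CAP_TCP_OPTS)
#define CAP_UDP_CSUM	0x04u

static bool can_offload_tcp(uint32_t caps)
{
	/* every required bit must be present */
	return (caps & CAP_ALL_TCP) == CAP_ALL_TCP;
}

static bool can_offload_udp(uint32_t caps)
{
	/* a single optional bit is a plain test */
	return (caps & CAP_UDP_CSUM) != 0;
}
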
@@ -1094,19 +1192,16 @@ int rndis_filter_device_add(struct hv_device *dev,
                net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
 
        num_rss_qs = net_device->num_chn - 1;
+
+       for (i = 0; i < ITAB_NUM; i++)
+               rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
+                                                       net_device->num_chn);
+
        net_device->num_sc_offered = num_rss_qs;
 
        if (net_device->num_chn == 1)
                goto out;
 
-       net_device->sub_cb_buf = vzalloc((net_device->num_chn - 1) *
-                                        NETVSC_PACKET_SIZE);
-       if (!net_device->sub_cb_buf) {
-               net_device->num_chn = 1;
-               dev_info(&dev->device, "No memory for subchannels.\n");
-               goto out;
-       }
-
        vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
 
        init_packet = &net_device->channel_init_pkt;
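
The default RSS indirection table is now filled through ethtool_rxfh_indir_default() and cached in rdev->ind_table, so a later ethtool -X override survives into rndis_filter_set_rss_param(). Upstream, that helper is simply a round-robin spread; a sketch (table size is an assumption mirroring the driver):

#include <stdint.h>

#define ITAB_NUM 128	/* assumed indirection-table size */

/* ethtool_rxfh_indir_default(i, n) is i % n upstream, i.e. table
 * slots are spread round-robin over the active receive queues.
 */
static void fill_default_indir(uint32_t *itab, uint32_t num_chn)
{
	uint32_t i;

	for (i = 0; i < ITAB_NUM; i++)
		itab[i] = i % num_chn;
}
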
@@ -1132,7 +1227,8 @@ int rndis_filter_device_add(struct hv_device *dev,
        net_device->num_chn = 1 +
                init_packet->msg.v5_msg.subchn_comp.num_subchannels;
 
-       ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
+       ret = rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
+                                        net_device->num_chn);
 
        /*
         * Set the number of sub-channels to be received.
@@ -1152,13 +1248,13 @@ out:
        return 0; /* return 0 because primary channel can be used alone */
 
 err_dev_remv:
-       rndis_filter_device_remove(dev);
+       rndis_filter_device_remove(dev, net_device);
        return ret;
 }
 
-void rndis_filter_device_remove(struct hv_device *dev)
+void rndis_filter_device_remove(struct hv_device *dev,
+                               struct netvsc_device *net_dev)
 {
-       struct netvsc_device *net_dev = hv_device_to_netvsc_device(dev);
        struct rndis_device *rndis_dev = net_dev->extension;
 
        /* If not all subchannel offers are complete, wait for them until
index 66c0eeafcb5d7c66d7f7d88614a2e5393cf48f7b..312fce7302d3252903282599223063e7d97bb863 100644 (file)
@@ -78,10 +78,8 @@ static void ifb_ri_tasklet(unsigned long _txp)
        }
 
        while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
-               u32 from = G_TC_FROM(skb->tc_verd);
-
-               skb->tc_verd = 0;
-               skb->tc_verd = SET_TC_NCLS(skb->tc_verd);
+               skb->tc_redirected = 0;
+               skb->tc_skip_classify = 1;
 
                u64_stats_update_begin(&txp->tsync);
                txp->tx_packets++;
@@ -101,13 +99,12 @@ static void ifb_ri_tasklet(unsigned long _txp)
                rcu_read_unlock();
                skb->skb_iif = txp->dev->ifindex;
 
-               if (from & AT_EGRESS) {
+               if (!skb->tc_from_ingress) {
                        dev_queue_xmit(skb);
-               } else if (from & AT_INGRESS) {
+               } else {
                        skb_pull(skb, skb->mac_len);
                        netif_receive_skb(skb);
-               } else
-                       BUG();
+               }
        }
 
        if (__netif_tx_trylock(txq)) {
@@ -129,8 +126,8 @@ resched:
 
 }
 
-static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
-                                            struct rtnl_link_stats64 *stats)
+static void ifb_stats64(struct net_device *dev,
+                       struct rtnl_link_stats64 *stats)
 {
        struct ifb_dev_private *dp = netdev_priv(dev);
        struct ifb_q_private *txp = dp->tx_private;
@@ -157,8 +154,6 @@ static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
        }
        stats->rx_dropped = dev->stats.rx_dropped;
        stats->tx_dropped = dev->stats.tx_dropped;
-
-       return stats;
 }
 
 static int ifb_dev_init(struct net_device *dev)
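
The ifb_stats64 change above (and the matching ipvlan, loopback, macsec and macvlan hunks later in this diff) all track the same tree-wide conversion: ndo_get_stats64 no longer returns the stats pointer, drivers just fill in the caller's structure. A stand-alone sketch of the new shape (types are stand-ins):

#include <stdint.h>

struct stats_stub {
	uint64_t rx_packets;
	uint64_t tx_packets;
};

/* Old shape returned its argument, tempting callers to rely on the
 * return value. New shape is void: the caller owns and reads the struct.
 */
static void get_stats64_new(struct stats_stub *stats)
{
	stats->rx_packets = 0;	/* accumulate per-cpu counters here */
	stats->tx_packets = 0;
}
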
@@ -241,7 +236,6 @@ static void ifb_setup(struct net_device *dev)
 static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ifb_dev_private *dp = netdev_priv(dev);
-       u32 from = G_TC_FROM(skb->tc_verd);
        struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);
 
        u64_stats_update_begin(&txp->rsync);
@@ -249,7 +243,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
        txp->rx_bytes += skb->len;
        u64_stats_update_end(&txp->rsync);
 
-       if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
+       if (!skb->tc_redirected || !skb->skb_iif) {
                dev_kfree_skb(skb);
                dev->stats.rx_dropped++;
                return NETDEV_TX_OK;
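
The ifb changes replace the packed tc_verd bitfield with dedicated skb bits: tc_redirected marks a packet that arrived via an act_mirred redirect, tc_from_ingress records which hook it came from, and tc_skip_classify stops it being reclassified on re-injection. A compact stand-alone sketch of the resulting decision (the struct is a stand-in for the sk_buff bits):

#include <stdbool.h>

struct skb_stub {
	bool tc_redirected;	/* arrived via a mirred redirect */
	bool tc_from_ingress;	/* the redirect happened on ingress */
};

enum reinject { REINJECT_TX, REINJECT_RX, REINJECT_DROP };

static enum reinject ifb_decide(const struct skb_stub *skb)
{
	if (!skb->tc_redirected)
		return REINJECT_DROP;	/* only redirected skbs are valid */
	return skb->tc_from_ingress ? REINJECT_RX : REINJECT_TX;
}
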
index df79910192d6c92b489c6c71f2f5fbdc795a7782..8a2c64dc964105d378b56be5a99477599e011b57 100644 (file)
@@ -3,5 +3,6 @@
 #
 
 obj-$(CONFIG_IPVLAN) += ipvlan.o
+obj-$(CONFIG_IPVTAP) += ipvtap.o
 
 ipvlan-objs := ipvlan_core.o ipvlan_main.o
index dbfbb33ac66c2caf05e22d7e4b0bd54629bec635..800a46c8d26c25f74244ee30872097982ebc775e 100644 (file)
@@ -94,9 +94,11 @@ struct ipvl_port {
        struct hlist_head       hlhead[IPVLAN_HASH_SIZE];
        struct list_head        ipvlans;
        u16                     mode;
+       u16                     dev_id_start;
        struct work_struct      wq;
        struct sk_buff_head     backlog;
        int                     count;
+       struct ida              ida;
 };
 
 struct ipvl_skb_cb {
@@ -133,4 +135,11 @@ struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
                              u16 proto);
 unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb,
                             const struct nf_hook_state *state);
+void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
+                    unsigned int len, bool success, bool mcast);
+int ipvlan_link_new(struct net *src_net, struct net_device *dev,
+                   struct nlattr *tb[], struct nlattr *data[]);
+void ipvlan_link_delete(struct net_device *dev, struct list_head *head);
+void ipvlan_link_setup(struct net_device *dev);
+int ipvlan_link_register(struct rtnl_link_ops *ops);
 #endif /* __IPVLAN_H */
index 83ce74acf82d5e08676cb0d796aac1fe1b524071..1f3295e274d0f5fbb36211b71023320327a86f12 100644 (file)
@@ -16,12 +16,9 @@ void ipvlan_init_secret(void)
        net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
 }
 
-static void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
+void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
                            unsigned int len, bool success, bool mcast)
 {
-       if (!ipvlan)
-               return;
-
        if (likely(success)) {
                struct ipvl_pcpu_stats *pcptr;
 
@@ -36,6 +33,7 @@ static void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
                this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
        }
 }
+EXPORT_SYMBOL_GPL(ipvlan_count_rx);
 
 static u8 ipvlan_get_v6_hash(const void *iaddr)
 {
index 8b0f99300cbc97d8c8b93c3dfa99cd841914c086..aa8575ccbce392426ab1ceb65ecadee5175dcb1b 100644 (file)
@@ -102,8 +102,8 @@ static int ipvlan_port_create(struct net_device *dev)
                return -EINVAL;
        }
 
-       if (netif_is_macvlan_port(dev)) {
-               netdev_err(dev, "Master is a macvlan port.\n");
+       if (netdev_is_rx_handler_busy(dev)) {
+               netdev_err(dev, "Device is already in use.\n");
                return -EBUSY;
        }
 
@@ -119,6 +119,8 @@ static int ipvlan_port_create(struct net_device *dev)
 
        skb_queue_head_init(&port->backlog);
        INIT_WORK(&port->wq, ipvlan_process_multicast);
+       ida_init(&port->ida);
+       port->dev_id_start = 1;
 
        err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port);
        if (err)
@@ -150,6 +152,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
                        dev_put(skb->dev);
                kfree_skb(skb);
        }
+       ida_destroy(&port->ida);
        kfree(port);
 }
 
@@ -301,8 +304,8 @@ static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
        dev_mc_sync(ipvlan->phy_dev, dev);
 }
 
-static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev,
-                                                   struct rtnl_link_stats64 *s)
+static void ipvlan_get_stats64(struct net_device *dev,
+                              struct rtnl_link_stats64 *s)
 {
        struct ipvl_dev *ipvlan = netdev_priv(dev);
 
@@ -339,7 +342,6 @@ static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev,
                s->rx_dropped = rx_errs;
                s->tx_dropped = tx_drps;
        }
-       return s;
 }
 
 static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
@@ -494,8 +496,8 @@ err:
        return ret;
 }
 
-static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
-                          struct nlattr *tb[], struct nlattr *data[])
+int ipvlan_link_new(struct net *src_net, struct net_device *dev,
+                   struct nlattr *tb[], struct nlattr *data[])
 {
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_port *port;
@@ -533,6 +535,29 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
        ipvlan_adjust_mtu(ipvlan, phy_dev);
        INIT_LIST_HEAD(&ipvlan->addrs);
 
+       /* If the port-id base has reached the MAX value, wrap around and
+        * begin from 0x1 again. This can happen on a busy system where
+        * lots of slaves are created and deleted.
+        */
+       if (port->dev_id_start == 0xFFFE)
+               port->dev_id_start = 0x1;
+
+       /* Since the L2 address is shared among all IPvlan slaves including
+        * the master, use unique 16-bit dev-ids to differentiate among them.
+        * Assign IDs between 0x1 and 0xFFFE (0xFFFE is used by the master)
+        * to each slave link [see addrconf_ifid_eui48()].
+        */
+       err = ida_simple_get(&port->ida, port->dev_id_start, 0xFFFE,
+                            GFP_KERNEL);
+       if (err < 0)
+               err = ida_simple_get(&port->ida, 0x1, port->dev_id_start,
+                                    GFP_KERNEL);
+       if (err < 0)
+               goto destroy_ipvlan_port;
+       dev->dev_id = err;
+       /* Advance the id base to the next slot for future assignments */
+       port->dev_id_start = err + 1;
+
        /* TODO Probably put random address here to be presented to the
         * world but keep using the physical-dev address for the outgoing
         * packets.
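
The reason the dev-ids stop short of 0xFFFE: addrconf_ifid_eui48() splices dev->dev_id into the IPv6 interface identifier in place of the 0xFF:0xFE filler bytes that a plain MAC-derived EUI-64 would carry, so a slave id of 0xFFFE would collide with the master's identifier. A simplified sketch of that derivation (adapted from the kernel helper; error handling elided):

#include <stdint.h>
#include <string.h>

static void eui64_from_mac(uint8_t eui[8], const uint8_t mac[6],
			   uint16_t dev_id)
{
	memcpy(eui, mac, 3);
	memcpy(eui + 5, mac + 3, 3);

	if (dev_id) {
		/* per-slave id replaces the 0xFF:0xFE filler */
		eui[3] = dev_id >> 8;
		eui[4] = dev_id & 0xFF;
	} else {
		eui[3] = 0xFF;	/* standard EUI-64 expansion */
		eui[4] = 0xFE;
		eui[0] ^= 2;	/* flip the universal/local bit */
	}
}
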
@@ -543,7 +568,7 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
 
        err = register_netdevice(dev);
        if (err < 0)
-               goto destroy_ipvlan_port;
+               goto remove_ida;
 
        err = netdev_upper_dev_link(phy_dev, dev);
        if (err) {
@@ -562,13 +587,16 @@ unlink_netdev:
        netdev_upper_dev_unlink(phy_dev, dev);
 unregister_netdev:
        unregister_netdevice(dev);
+remove_ida:
+       ida_simple_remove(&port->ida, dev->dev_id);
 destroy_ipvlan_port:
        if (create)
                ipvlan_port_destroy(phy_dev);
        return err;
 }
+EXPORT_SYMBOL_GPL(ipvlan_link_new);
 
-static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
+void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
 {
        struct ipvl_dev *ipvlan = netdev_priv(dev);
        struct ipvl_addr *addr, *next;
@@ -579,12 +607,14 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
                kfree_rcu(addr, rcu);
        }
 
+       ida_simple_remove(&ipvlan->port->ida, dev->dev_id);
        list_del_rcu(&ipvlan->pnode);
        unregister_netdevice_queue(dev, head);
        netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
 }
+EXPORT_SYMBOL_GPL(ipvlan_link_delete);
 
-static void ipvlan_link_setup(struct net_device *dev)
+void ipvlan_link_setup(struct net_device *dev)
 {
        ether_setup(dev);
 
@@ -595,6 +625,7 @@ static void ipvlan_link_setup(struct net_device *dev)
        dev->header_ops = &ipvlan_header_ops;
        dev->ethtool_ops = &ipvlan_ethtool_ops;
 }
+EXPORT_SYMBOL_GPL(ipvlan_link_setup);
 
 static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
 {
@@ -605,22 +636,22 @@ static struct rtnl_link_ops ipvlan_link_ops = {
        .kind           = "ipvlan",
        .priv_size      = sizeof(struct ipvl_dev),
 
-       .get_size       = ipvlan_nl_getsize,
-       .policy         = ipvlan_nl_policy,
-       .validate       = ipvlan_nl_validate,
-       .fill_info      = ipvlan_nl_fillinfo,
-       .changelink     = ipvlan_nl_changelink,
-       .maxtype        = IFLA_IPVLAN_MAX,
-
        .setup          = ipvlan_link_setup,
        .newlink        = ipvlan_link_new,
        .dellink        = ipvlan_link_delete,
 };
 
-static int ipvlan_link_register(struct rtnl_link_ops *ops)
+int ipvlan_link_register(struct rtnl_link_ops *ops)
 {
+       ops->get_size   = ipvlan_nl_getsize;
+       ops->policy     = ipvlan_nl_policy;
+       ops->validate   = ipvlan_nl_validate;
+       ops->fill_info  = ipvlan_nl_fillinfo;
+       ops->changelink = ipvlan_nl_changelink;
+       ops->maxtype    = IFLA_IPVLAN_MAX;
        return rtnl_link_register(ops);
 }
+EXPORT_SYMBOL_GPL(ipvlan_link_register);
 
 static int ipvlan_device_event(struct notifier_block *unused,
                               unsigned long event, void *ptr)
@@ -674,23 +705,22 @@ static int ipvlan_device_event(struct notifier_block *unused,
        return NOTIFY_DONE;
 }
 
-static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
+static int ipvlan_add_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
 {
        struct ipvl_addr *addr;
 
-       if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) {
-               netif_err(ipvlan, ifup, ipvlan->dev,
-                         "Failed to add IPv6=%pI6c addr for %s intf\n",
-                         ip6_addr, ipvlan->dev->name);
-               return -EINVAL;
-       }
        addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC);
        if (!addr)
                return -ENOMEM;
 
        addr->master = ipvlan;
-       memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
-       addr->atype = IPVL_IPV6;
+       if (is_v6) {
+               memcpy(&addr->ip6addr, iaddr, sizeof(struct in6_addr));
+               addr->atype = IPVL_IPV6;
+       } else {
+               memcpy(&addr->ip4addr, iaddr, sizeof(struct in_addr));
+               addr->atype = IPVL_IPV4;
+       }
        list_add_tail(&addr->anode, &ipvlan->addrs);
 
        /* If the interface is not up, the address will be added to the hash
@@ -702,11 +732,11 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
        return 0;
 }
 
-static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
+static void ipvlan_del_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
 {
        struct ipvl_addr *addr;
 
-       addr = ipvlan_find_addr(ipvlan, ip6_addr, true);
+       addr = ipvlan_find_addr(ipvlan, iaddr, is_v6);
        if (!addr)
                return;
 
@@ -717,6 +747,23 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
        return;
 }
 
+static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
+{
+       if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) {
+               netif_err(ipvlan, ifup, ipvlan->dev,
+                         "Failed to add IPv6=%pI6c addr for %s intf\n",
+                         ip6_addr, ipvlan->dev->name);
+               return -EINVAL;
+       }
+
+       return ipvlan_add_addr(ipvlan, ip6_addr, true);
+}
+
+static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
+{
+       return ipvlan_del_addr(ipvlan, ip6_addr, true);
+}
+
 static int ipvlan_addr6_event(struct notifier_block *unused,
                              unsigned long event, void *ptr)
 {
@@ -750,45 +797,19 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
 
 static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
 {
-       struct ipvl_addr *addr;
-
        if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) {
                netif_err(ipvlan, ifup, ipvlan->dev,
                          "Failed to add IPv4=%pI4 on %s intf.\n",
                          ip4_addr, ipvlan->dev->name);
                return -EINVAL;
        }
-       addr = kzalloc(sizeof(struct ipvl_addr), GFP_KERNEL);
-       if (!addr)
-               return -ENOMEM;
-
-       addr->master = ipvlan;
-       memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
-       addr->atype = IPVL_IPV4;
-       list_add_tail(&addr->anode, &ipvlan->addrs);
 
-       /* If the interface is not up, the address will be added to the hash
-        * list by ipvlan_open.
-        */
-       if (netif_running(ipvlan->dev))
-               ipvlan_ht_addr_add(ipvlan, addr);
-
-       return 0;
+       return ipvlan_add_addr(ipvlan, ip4_addr, false);
 }
 
 static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
 {
-       struct ipvl_addr *addr;
-
-       addr = ipvlan_find_addr(ipvlan, ip4_addr, false);
-       if (!addr)
-               return;
-
-       ipvlan_ht_addr_del(addr);
-       list_del(&addr->anode);
-       kfree_rcu(addr, rcu);
-
-       return;
+       return ipvlan_del_addr(ipvlan, ip4_addr, false);
 }
 
 static int ipvlan_addr4_event(struct notifier_block *unused,
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
new file mode 100644 (file)
index 0000000..2b713b6
--- /dev/null
@@ -0,0 +1,241 @@
+#include <linux/etherdevice.h>
+#include "ipvlan.h"
+#include <linux/if_vlan.h>
+#include <linux/if_tap.h>
+#include <linux/interrupt.h>
+#include <linux/nsproxy.h>
+#include <linux/compat.h>
+#include <linux/if_tun.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/cache.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include <linux/idr.h>
+#include <linux/fs.h>
+#include <linux/uio.h>
+
+#include <net/net_namespace.h>
+#include <net/rtnetlink.h>
+#include <net/sock.h>
+#include <linux/virtio_net.h>
+
+#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
+                     NETIF_F_TSO6 | NETIF_F_UFO)
+
+static dev_t ipvtap_major;
+static struct cdev ipvtap_cdev;
+
+static const void *ipvtap_net_namespace(struct device *d)
+{
+       struct net_device *dev = to_net_dev(d->parent);
+       return dev_net(dev);
+}
+
+static struct class ipvtap_class = {
+        .name = "ipvtap",
+        .owner = THIS_MODULE,
+        .ns_type = &net_ns_type_operations,
+        .namespace = ipvtap_net_namespace,
+};
+
+struct ipvtap_dev {
+       struct ipvl_dev vlan;
+       struct tap_dev    tap;
+};
+
+static void ipvtap_count_tx_dropped(struct tap_dev *tap)
+{
+       struct ipvtap_dev *vlantap = container_of(tap, struct ipvtap_dev, tap);
+       struct ipvl_dev *vlan = &vlantap->vlan;
+
+       this_cpu_inc(vlan->pcpu_stats->tx_drps);
+}
+
+static void ipvtap_count_rx_dropped(struct tap_dev *tap)
+{
+       struct ipvtap_dev *vlantap = container_of(tap, struct ipvtap_dev, tap);
+       struct ipvl_dev *vlan = &vlantap->vlan;
+
+       ipvlan_count_rx(vlan, 0, 0, 0);
+}
+
+static void ipvtap_update_features(struct tap_dev *tap,
+                                  netdev_features_t features)
+{
+       struct ipvtap_dev *vlantap = container_of(tap, struct ipvtap_dev, tap);
+       struct ipvl_dev *vlan = &vlantap->vlan;
+
+       vlan->sfeatures = features;
+       netdev_update_features(vlan->dev);
+}
+
+static int ipvtap_newlink(struct net *src_net,
+                         struct net_device *dev,
+                         struct nlattr *tb[],
+                         struct nlattr *data[])
+{
+       struct ipvtap_dev *vlantap = netdev_priv(dev);
+       int err;
+
+       INIT_LIST_HEAD(&vlantap->tap.queue_list);
+
+       /* Since ipvlan supports all offloads by default, make
+        * tap support all offloads also.
+        */
+       vlantap->tap.tap_features = TUN_OFFLOADS;
+       vlantap->tap.count_tx_dropped = ipvtap_count_tx_dropped;
+       vlantap->tap.update_features =  ipvtap_update_features;
+       vlantap->tap.count_rx_dropped = ipvtap_count_rx_dropped;
+
+       err = netdev_rx_handler_register(dev, tap_handle_frame, &vlantap->tap);
+       if (err)
+               return err;
+
+       /* Don't put anything that may fail after ipvlan_link_new
+        * because we can't undo what it does.
+        */
+       err =  ipvlan_link_new(src_net, dev, tb, data);
+       if (err) {
+               netdev_rx_handler_unregister(dev);
+               return err;
+       }
+
+       vlantap->tap.dev = vlantap->vlan.dev;
+
+       return err;
+}
+
+static void ipvtap_dellink(struct net_device *dev,
+                          struct list_head *head)
+{
+       struct ipvtap_dev *vlan = netdev_priv(dev);
+
+       netdev_rx_handler_unregister(dev);
+       tap_del_queues(&vlan->tap);
+       ipvlan_link_delete(dev, head);
+}
+
+static void ipvtap_setup(struct net_device *dev)
+{
+       ipvlan_link_setup(dev);
+       dev->tx_queue_len = TUN_READQ_SIZE;
+       dev->priv_flags &= ~IFF_NO_QUEUE;
+}
+
+static struct rtnl_link_ops ipvtap_link_ops __read_mostly = {
+       .kind           = "ipvtap",
+       .setup          = ipvtap_setup,
+       .newlink        = ipvtap_newlink,
+       .dellink        = ipvtap_dellink,
+       .priv_size      = sizeof(struct ipvtap_dev),
+};
+
+static int ipvtap_device_event(struct notifier_block *unused,
+                              unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct ipvtap_dev *vlantap;
+       struct device *classdev;
+       dev_t devt;
+       int err;
+       char tap_name[IFNAMSIZ];
+
+       if (dev->rtnl_link_ops != &ipvtap_link_ops)
+               return NOTIFY_DONE;
+
+       snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex);
+       vlantap = netdev_priv(dev);
+
+       switch (event) {
+       case NETDEV_REGISTER:
+               /* Create the device node here after the network device has
+                * been registered but before register_netdevice has
+                * finished running.
+                */
+               err = tap_get_minor(ipvtap_major, &vlantap->tap);
+               if (err)
+                       return notifier_from_errno(err);
+
+               devt = MKDEV(MAJOR(ipvtap_major), vlantap->tap.minor);
+               classdev = device_create(&ipvtap_class, &dev->dev, devt,
+                                        dev, tap_name);
+               if (IS_ERR(classdev)) {
+                       tap_free_minor(ipvtap_major, &vlantap->tap);
+                       return notifier_from_errno(PTR_ERR(classdev));
+               }
+               err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj,
+                                       tap_name);
+               if (err)
+                       return notifier_from_errno(err);
+               break;
+       case NETDEV_UNREGISTER:
+               /* vlantap->tap.minor == 0 if NETDEV_REGISTER above failed */
+               if (vlantap->tap.minor == 0)
+                       break;
+               sysfs_remove_link(&dev->dev.kobj, tap_name);
+               devt = MKDEV(MAJOR(ipvtap_major), vlantap->tap.minor);
+               device_destroy(&ipvtap_class, devt);
+               tap_free_minor(ipvtap_major, &vlantap->tap);
+               break;
+       case NETDEV_CHANGE_TX_QUEUE_LEN:
+               if (tap_queue_resize(&vlantap->tap))
+                       return NOTIFY_BAD;
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block ipvtap_notifier_block __read_mostly = {
+       .notifier_call  = ipvtap_device_event,
+};
+
+static int ipvtap_init(void)
+{
+       int err;
+
+       err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap");
+
+       if (err)
+               goto out1;
+
+       err = class_register(&ipvtap_class);
+       if (err)
+               goto out2;
+
+       err = register_netdevice_notifier(&ipvtap_notifier_block);
+       if (err)
+               goto out3;
+
+       err = ipvlan_link_register(&ipvtap_link_ops);
+       if (err)
+               goto out4;
+
+       return 0;
+
+out4:
+       unregister_netdevice_notifier(&ipvtap_notifier_block);
+out3:
+       class_unregister(&ipvtap_class);
+out2:
+       tap_destroy_cdev(ipvtap_major, &ipvtap_cdev);
+out1:
+       return err;
+}
+module_init(ipvtap_init);
+
+static void ipvtap_exit(void)
+{
+       rtnl_link_unregister(&ipvtap_link_ops);
+       unregister_netdevice_notifier(&ipvtap_notifier_block);
+       class_unregister(&ipvtap_class);
+       tap_destroy_cdev(ipvtap_major, &ipvtap_cdev);
+}
+module_exit(ipvtap_exit);
+MODULE_ALIAS_RTNL_LINK("ipvtap");
+MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
+MODULE_LICENSE("GPL");
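
In use this presumably mirrors macvtap: creating a link of type "ipvtap" on top of a suitable master triggers the NETDEV_REGISTER notifier above, which allocates a tap minor and creates a /dev/tapN character node (N taken from the new interface's ifindex) that userspace such as QEMU can open to send and receive frames, with the ipvlan mode semantics of the underlying link applying unchanged.
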
index 44e4f386a5dc5e99757e329c8b105b7a0a3d8976..be4ea6aa57a9b4695d2fd8dd344c3fce2277c5c9 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <linux/ioport.h>
 
 #include <net/irda/irda.h>
 #include <net/irda/irmod.h>
@@ -169,8 +168,6 @@ struct au1k_private {
        u32 speed;
        u32 newspeed;
 
-       struct timer_list timer;
-
        struct resource *ioarea;
        struct au1k_irda_platform_data *platdata;
        struct clk *irda_clk;
@@ -178,8 +175,6 @@ struct au1k_private {
 
 static int qos_mtt_bits = 0x07;  /* 1 ms or more */
 
-#define RUN_AT(x) (jiffies + (x))
-
 static void au1k_irda_plat_set_phy_mode(struct au1k_private *p, int mode)
 {
        if (p->platdata && p->platdata->set_phy_mode)
@@ -620,8 +615,6 @@ static int au1k_irda_start(struct net_device *dev)
        /* power up */
        au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);
 
-       aup->timer.expires = RUN_AT((3 * HZ));
-       aup->timer.data = (unsigned long)dev;
        return 0;
 }
 
@@ -642,7 +635,6 @@ static int au1k_irda_stop(struct net_device *dev)
        }
 
        netif_stop_queue(dev);
-       del_timer(&aup->timer);
 
        /* disable the interrupt */
        free_irq(aup->irq_tx, dev);
index be5bb0b7f29ca5f90d622523ff97baaad9921b53..3151b580dbd64093fec8c9007b8ff1b8dc30ffcf 100644 (file)
@@ -22,7 +22,7 @@ static int max_rate = 57600;
 static int max_rate = 115200;
 #endif
 
-static void turnaround_delay(unsigned long last_jif, int mtt)
+static void turnaround_delay(int mtt)
 {
        long ticks;
 
@@ -209,7 +209,6 @@ static void bfin_sir_rx_chars(struct net_device *dev)
        UART_CLEAR_LSR(port);
        ch = UART_GET_CHAR(port);
        async_unwrap_char(dev, &self->stats, &self->rx_buff, ch);
-       dev->last_rx = jiffies;
 }
 
 static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id)
@@ -510,7 +509,7 @@ static void bfin_sir_send_work(struct work_struct *work)
        int tx_cnt = 10;
 
        while (bfin_sir_is_receiving(dev) && --tx_cnt)
-               turnaround_delay(dev->last_rx, self->mtt);
+               turnaround_delay(self->mtt);
 
        bfin_sir_stop_rx(port);
 
index e3fe9a286136a92a09708727295f42ef4b1254aa..fede6864c737bd2710a842845bccc306cc6d2b7b 100644 (file)
@@ -547,7 +547,6 @@ static void sh_sir_rx(struct sh_sir_self *self)
 
                async_unwrap_char(self->ndev, &self->ndev->stats,
                                  &self->rx_buff, (u8)data);
-               self->ndev->last_rx = jiffies;
 
                if (EOFD & sh_sir_read(self, IRIF_SIR_FRM))
                        continue;
index 0844f849641346b092e83923c1d196709b355e4c..b23b71981fd55689daa817d41191063300bdaaf5 100644 (file)
@@ -97,8 +97,8 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
        return NETDEV_TX_OK;
 }
 
-static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
-                                                     struct rtnl_link_stats64 *stats)
+static void loopback_get_stats64(struct net_device *dev,
+                                struct rtnl_link_stats64 *stats)
 {
        u64 bytes = 0;
        u64 packets = 0;
@@ -122,7 +122,6 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
        stats->tx_packets = packets;
        stats->rx_bytes   = bytes;
        stats->tx_bytes   = bytes;
-       return stats;
 }
 
 static u32 always_on(struct net_device *dev)
index f83cf6696820cca338b02c4bfa49479a1108fa68..778a77303c495597bb1a776fd75f53b90d804d05 100644 (file)
@@ -2888,13 +2888,13 @@ static int macsec_change_mtu(struct net_device *dev, int new_mtu)
        return 0;
 }
 
-static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev,
-                                                   struct rtnl_link_stats64 *s)
+static void macsec_get_stats64(struct net_device *dev,
+                              struct rtnl_link_stats64 *s)
 {
        int cpu;
 
        if (!dev->tstats)
-               return s;
+               return;
 
        for_each_possible_cpu(cpu) {
                struct pcpu_sw_netstats *stats;
@@ -2918,8 +2918,6 @@ static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev,
 
        s->rx_dropped = dev->stats.rx_dropped;
        s->tx_dropped = dev->stats.tx_dropped;
-
-       return s;
 }
 
 static int macsec_get_iflink(const struct net_device *dev)
index 20b3fdf282c50063e56577dcb67e85fa8b59fb91..9261722960a719a8e6d46f4ea5b39f500a0817e8 100644 (file)
@@ -855,8 +855,8 @@ static void macvlan_uninit(struct net_device *dev)
                macvlan_port_destroy(port->dev);
 }
 
-static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
-                                                        struct rtnl_link_stats64 *stats)
+static void macvlan_dev_get_stats64(struct net_device *dev,
+                                   struct rtnl_link_stats64 *stats)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
 
@@ -893,7 +893,6 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
                stats->rx_dropped       = rx_errors;
                stats->tx_dropped       = tx_dropped;
        }
-       return stats;
 }
 
 static int macvlan_vlan_rx_add_vid(struct net_device *dev,
@@ -1111,7 +1110,7 @@ static int macvlan_port_create(struct net_device *dev)
        if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
                return -EINVAL;
 
-       if (netif_is_ipvlan_port(dev))
+       if (netdev_is_rx_handler_busy(dev))
                return -EBUSY;
 
        port = kzalloc(sizeof(*port), GFP_KERNEL);
@@ -1526,7 +1525,6 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
 int macvlan_link_register(struct rtnl_link_ops *ops)
 {
        /* common fields */
-       ops->priv_size          = sizeof(struct macvlan_dev);
        ops->validate           = macvlan_validate;
        ops->maxtype            = IFLA_MACVLAN_MAX;
        ops->policy             = macvlan_policy;
@@ -1549,6 +1547,7 @@ static struct rtnl_link_ops macvlan_link_ops = {
        .newlink        = macvlan_newlink,
        .dellink        = macvlan_dellink,
        .get_link_net   = macvlan_get_link_net,
+       .priv_size      = sizeof(struct macvlan_dev),
 };
 
 static int macvlan_device_event(struct notifier_block *unused,
index c27011bbe30c52d2eb892ab0d86f8cf3d6f4deb9..a4bfc10b61ddd7872dfa89634f20478be1e2f29a 100644 (file)
@@ -1,5 +1,6 @@
 #include <linux/etherdevice.h>
 #include <linux/if_macvlan.h>
+#include <linux/if_tap.h>
 #include <linux/if_vlan.h>
 #include <linux/interrupt.h>
 #include <linux/nsproxy.h>
 #include <linux/virtio_net.h>
 #include <linux/skb_array.h>
 
-/*
- * A macvtap queue is the central object of this driver, it connects
- * an open character device to a macvlan interface. There can be
- * multiple queues on one interface, which map back to queues
- * implemented in hardware on the underlying device.
- *
- * macvtap_proto is used to allocate queues through the sock allocation
- * mechanism.
- *
- */
-struct macvtap_queue {
-       struct sock sk;
-       struct socket sock;
-       struct socket_wq wq;
-       int vnet_hdr_sz;
-       struct macvlan_dev __rcu *vlan;
-       struct file *file;
-       unsigned int flags;
-       u16 queue_index;
-       bool enabled;
-       struct list_head next;
-       struct skb_array skb_array;
-};
-
-#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
-
-#define MACVTAP_VNET_LE 0x80000000
-#define MACVTAP_VNET_BE 0x40000000
-
-#ifdef CONFIG_TUN_VNET_CROSS_LE
-static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
-{
-       return q->flags & MACVTAP_VNET_BE ? false :
-               virtio_legacy_is_little_endian();
-}
-
-static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *sp)
-{
-       int s = !!(q->flags & MACVTAP_VNET_BE);
-
-       if (put_user(s, sp))
-               return -EFAULT;
-
-       return 0;
-}
-
-static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *sp)
-{
-       int s;
-
-       if (get_user(s, sp))
-               return -EFAULT;
-
-       if (s)
-               q->flags |= MACVTAP_VNET_BE;
-       else
-               q->flags &= ~MACVTAP_VNET_BE;
-
-       return 0;
-}
-#else
-static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
-{
-       return virtio_legacy_is_little_endian();
-}
-
-static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *argp)
-{
-       return -EINVAL;
-}
-
-static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *argp)
-{
-       return -EINVAL;
-}
-#endif /* CONFIG_TUN_VNET_CROSS_LE */
-
-static inline bool macvtap_is_little_endian(struct macvtap_queue *q)
-{
-       return q->flags & MACVTAP_VNET_LE ||
-               macvtap_legacy_is_little_endian(q);
-}
-
-static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
-{
-       return __virtio16_to_cpu(macvtap_is_little_endian(q), val);
-}
-
-static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
-{
-       return __cpu_to_virtio16(macvtap_is_little_endian(q), val);
-}
-
-static struct proto macvtap_proto = {
-       .name = "macvtap",
-       .owner = THIS_MODULE,
-       .obj_size = sizeof (struct macvtap_queue),
+struct macvtap_dev {
+       struct macvlan_dev vlan;
+       struct tap_dev    tap;
 };
 
 /*
  * Variables for dealing with macvtap device numbers.
  */
 static dev_t macvtap_major;
-#define MACVTAP_NUM_DEVS (1U << MINORBITS)
-static DEFINE_MUTEX(minor_lock);
-static DEFINE_IDR(minor_idr);
 
-#define GOODCOPY_LEN 128
 static const void *macvtap_net_namespace(struct device *d)
 {
        struct net_device *dev = to_net_dev(d->parent);
@@ -145,328 +48,33 @@ static struct class macvtap_class = {
 };
 static struct cdev macvtap_cdev;
 
-static const struct proto_ops macvtap_socket_ops;
-
 #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
                      NETIF_F_TSO6 | NETIF_F_UFO)
-#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
-#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
-
-static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
-{
-       return rcu_dereference(dev->rx_handler_data);
-}
-
-/*
- * RCU usage:
- * The macvtap_queue and the macvlan_dev are loosely coupled, the
- * pointers from one to the other can only be read while rcu_read_lock
- * or rtnl is held.
- *
- * Both the file and the macvlan_dev hold a reference on the macvtap_queue
- * through sock_hold(&q->sk). When the macvlan_dev goes away first,
- * q->vlan becomes inaccessible. When the file gets closed,
- * macvtap_get_queue() fails.
- *
- * There may still be references to the struct sock inside of the
- * queue from outbound SKBs, but these never reference back to the
- * file or the dev. The data structure is freed through __sk_free
- * when both our references and any pending SKBs are gone.
- */
-
-static int macvtap_enable_queue(struct net_device *dev, struct file *file,
-                               struct macvtap_queue *q)
-{
-       struct macvlan_dev *vlan = netdev_priv(dev);
-       int err = -EINVAL;
-
-       ASSERT_RTNL();
-
-       if (q->enabled)
-               goto out;
-
-       err = 0;
-       rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
-       q->queue_index = vlan->numvtaps;
-       q->enabled = true;
-
-       vlan->numvtaps++;
-out:
-       return err;
-}
-
-/* Requires RTNL */
-static int macvtap_set_queue(struct net_device *dev, struct file *file,
-                            struct macvtap_queue *q)
-{
-       struct macvlan_dev *vlan = netdev_priv(dev);
-
-       if (vlan->numqueues == MAX_MACVTAP_QUEUES)
-               return -EBUSY;
-
-       rcu_assign_pointer(q->vlan, vlan);
-       rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
-       sock_hold(&q->sk);
-
-       q->file = file;
-       q->queue_index = vlan->numvtaps;
-       q->enabled = true;
-       file->private_data = q;
-       list_add_tail(&q->next, &vlan->queue_list);
-
-       vlan->numvtaps++;
-       vlan->numqueues++;
-
-       return 0;
-}
-
-static int macvtap_disable_queue(struct macvtap_queue *q)
-{
-       struct macvlan_dev *vlan;
-       struct macvtap_queue *nq;
-
-       ASSERT_RTNL();
-       if (!q->enabled)
-               return -EINVAL;
-
-       vlan = rtnl_dereference(q->vlan);
-
-       if (vlan) {
-               int index = q->queue_index;
-               BUG_ON(index >= vlan->numvtaps);
-               nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
-               nq->queue_index = index;
-
-               rcu_assign_pointer(vlan->taps[index], nq);
-               RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
-               q->enabled = false;
-
-               vlan->numvtaps--;
-       }
-
-       return 0;
-}
-
-/*
- * The file owning the queue got closed; give up both
- * the reference that the file holds and the one from
- * the macvlan_dev if that still exists.
- *
- * Holding the RTNL lock makes sure that we don't get
- * to the queue again after destroying it.
- */
-static void macvtap_put_queue(struct macvtap_queue *q)
-{
-       struct macvlan_dev *vlan;
-
-       rtnl_lock();
-       vlan = rtnl_dereference(q->vlan);
-
-       if (vlan) {
-               if (q->enabled)
-                       BUG_ON(macvtap_disable_queue(q));
-
-               vlan->numqueues--;
-               RCU_INIT_POINTER(q->vlan, NULL);
-               sock_put(&q->sk);
-               list_del_init(&q->next);
-       }
-
-       rtnl_unlock();
-
-       synchronize_rcu();
-       sock_put(&q->sk);
-}
-
-/*
- * Select a queue based on the packet's flow hash; if the skb has no
- * hash, fall back to the rx queue recorded on it. If all fails, use
- * the first available queue.
- * Cache vlan->numvtaps since it can become zero during the execution
- * of this function.
- */
-static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
-                                              struct sk_buff *skb)
-{
-       struct macvlan_dev *vlan = netdev_priv(dev);
-       struct macvtap_queue *tap = NULL;
-       /* Access to taps array is protected by rcu, but access to numvtaps
-        * isn't. Below we use it to look up a queue, but treat it as a hint
-        * and validate that the result isn't NULL - in case we are
-        * racing against queue removal.
-        */
-       int numvtaps = ACCESS_ONCE(vlan->numvtaps);
-       __u32 rxq;
-
-       if (!numvtaps)
-               goto out;
-
-       if (numvtaps == 1)
-               goto single;
-
-       /* Check if we can use flow to select a queue */
-       rxq = skb_get_hash(skb);
-       if (rxq) {
-               tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
-               goto out;
-       }
-
-       if (likely(skb_rx_queue_recorded(skb))) {
-               rxq = skb_get_rx_queue(skb);
-
-               while (unlikely(rxq >= numvtaps))
-                       rxq -= numvtaps;
 
-               tap = rcu_dereference(vlan->taps[rxq]);
-               goto out;
-       }
-
-single:
-       tap = rcu_dereference(vlan->taps[0]);
-out:
-       return tap;
-}
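
Condensed, the fallback order implemented above is: flow hash first, then
the rx queue recorded on the skb, then queue 0. A minimal restatement of
that policy (a hypothetical helper, ignoring the RCU array lookup itself):

    /* illustrative restatement of the queue-selection policy above */
    static int pick_queue(unsigned int hash, int have_rxq, unsigned int rxq,
                          unsigned int numvtaps)
    {
            if (!numvtaps)
                    return -1;                 /* no queues attached */
            if (numvtaps > 1 && hash)
                    return hash % numvtaps;    /* spread flows by hash */
            if (numvtaps > 1 && have_rxq)
                    return rxq % numvtaps;     /* follow the hw rx queue */
            return 0;                          /* single queue / fallback */
    }
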
-
-/*
- * The net_device is going away, give up the reference
- * that it holds on all queues and safely set the pointer
- * from the queues to NULL.
- */
-static void macvtap_del_queues(struct net_device *dev)
+static void macvtap_count_tx_dropped(struct tap_dev *tap)
 {
-       struct macvlan_dev *vlan = netdev_priv(dev);
-       struct macvtap_queue *q, *tmp;
+       struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap);
+       struct macvlan_dev *vlan = &vlantap->vlan;
 
-       ASSERT_RTNL();
-       list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
-               list_del_init(&q->next);
-               RCU_INIT_POINTER(q->vlan, NULL);
-               if (q->enabled)
-                       vlan->numvtaps--;
-               vlan->numqueues--;
-               sock_put(&q->sk);
-       }
-       BUG_ON(vlan->numvtaps);
-       BUG_ON(vlan->numqueues);
-       /* guarantee that any future macvtap_set_queue will fail */
-       vlan->numvtaps = MAX_MACVTAP_QUEUES;
+       this_cpu_inc(vlan->pcpu_stats->tx_dropped);
 }
 
-static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
+static void macvtap_count_rx_dropped(struct tap_dev *tap)
 {
-       struct sk_buff *skb = *pskb;
-       struct net_device *dev = skb->dev;
-       struct macvlan_dev *vlan;
-       struct macvtap_queue *q;
-       netdev_features_t features = TAP_FEATURES;
-
-       vlan = macvtap_get_vlan_rcu(dev);
-       if (!vlan)
-               return RX_HANDLER_PASS;
-
-       q = macvtap_get_queue(dev, skb);
-       if (!q)
-               return RX_HANDLER_PASS;
-
-       if (__skb_array_full(&q->skb_array))
-               goto drop;
-
-       skb_push(skb, ETH_HLEN);
-
-       /* Apply the forward feature mask so that we perform segmentation
-        * according to the user's wishes.  This only works if VNET_HDR is
-        * enabled.
-        */
-       if (q->flags & IFF_VNET_HDR)
-               features |= vlan->tap_features;
-       if (netif_needs_gso(skb, features)) {
-               struct sk_buff *segs = __skb_gso_segment(skb, features, false);
-
-               if (IS_ERR(segs))
-                       goto drop;
+       struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap);
+       struct macvlan_dev *vlan = &vlantap->vlan;
 
-               if (!segs) {
-                       if (skb_array_produce(&q->skb_array, skb))
-                               goto drop;
-                       goto wake_up;
-               }
-
-               consume_skb(skb);
-               while (segs) {
-                       struct sk_buff *nskb = segs->next;
-
-                       segs->next = NULL;
-                       if (skb_array_produce(&q->skb_array, segs)) {
-                               kfree_skb(segs);
-                               kfree_skb_list(nskb);
-                               break;
-                       }
-                       segs = nskb;
-               }
-       } else {
-               /* If we receive a partial checksum and the tap side
-                * doesn't support checksum offload, compute the checksum.
-        * Note: it doesn't matter which checksum feature we
-        * check; we either support them all or none.
-                */
-               if (skb->ip_summed == CHECKSUM_PARTIAL &&
-                   !(features & NETIF_F_CSUM_MASK) &&
-                   skb_checksum_help(skb))
-                       goto drop;
-               if (skb_array_produce(&q->skb_array, skb))
-                       goto drop;
-       }
-
-wake_up:
-       wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
-       return RX_HANDLER_CONSUMED;
-
-drop:
-       /* Count errors/drops only here, so we don't care about the args. */
        macvlan_count_rx(vlan, 0, 0, 0);
-       kfree_skb(skb);
-       return RX_HANDLER_CONSUMED;
-}
-
-static int macvtap_get_minor(struct macvlan_dev *vlan)
-{
-       int retval = -ENOMEM;
-
-       mutex_lock(&minor_lock);
-       retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
-       if (retval >= 0) {
-               vlan->minor = retval;
-       } else if (retval == -ENOSPC) {
-               netdev_err(vlan->dev, "Too many macvtap devices\n");
-               retval = -EINVAL;
-       }
-       mutex_unlock(&minor_lock);
-       return retval < 0 ? retval : 0;
-}
-
-static void macvtap_free_minor(struct macvlan_dev *vlan)
-{
-       mutex_lock(&minor_lock);
-       if (vlan->minor) {
-               idr_remove(&minor_idr, vlan->minor);
-               vlan->minor = 0;
-       }
-       mutex_unlock(&minor_lock);
 }
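
Both accounting callbacks recover the outer macvtap_dev from the embedded
tap_dev with container_of(), which is plain pointer arithmetic over the
member offset. A self-contained userspace sketch of the same idiom (the
struct layout mirrors the driver; the program around it is hypothetical):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct tap_dev { int minor; };
    struct macvtap_dev {
            int vlan_id;            /* stands in for struct macvlan_dev */
            struct tap_dev tap;     /* embedded by value, as in the driver */
    };

    int main(void)
    {
            struct macvtap_dev dev = { .vlan_id = 7, .tap = { .minor = 3 } };
            struct tap_dev *tap = &dev.tap;
            struct macvtap_dev *outer;

            /* recover the outer structure from the embedded member */
            outer = container_of(tap, struct macvtap_dev, tap);
            printf("vlan_id=%d minor=%d\n", outer->vlan_id, outer->tap.minor);
            return 0;
    }
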
 
-static struct net_device *dev_get_by_macvtap_minor(int minor)
+static void macvtap_update_features(struct tap_dev *tap,
+                                   netdev_features_t features)
 {
-       struct net_device *dev = NULL;
-       struct macvlan_dev *vlan;
+       struct macvtap_dev *vlantap = container_of(tap, struct macvtap_dev, tap);
+       struct macvlan_dev *vlan = &vlantap->vlan;
 
-       mutex_lock(&minor_lock);
-       vlan = idr_find(&minor_idr, minor);
-       if (vlan) {
-               dev = vlan->dev;
-               dev_hold(dev);
-       }
-       mutex_unlock(&minor_lock);
-       return dev;
+       vlan->set_features = features;
+       netdev_update_features(vlan->dev);
 }
 
 static int macvtap_newlink(struct net *src_net,
@@ -474,17 +82,24 @@ static int macvtap_newlink(struct net *src_net,
                           struct nlattr *tb[],
                           struct nlattr *data[])
 {
-       struct macvlan_dev *vlan = netdev_priv(dev);
+       struct macvtap_dev *vlantap = netdev_priv(dev);
        int err;
 
-       INIT_LIST_HEAD(&vlan->queue_list);
+       INIT_LIST_HEAD(&vlantap->tap.queue_list);
 
        /* Since macvlan supports all offloads by default, make
         * tap support all offloads also.
         */
-       vlan->tap_features = TUN_OFFLOADS;
+       vlantap->tap.tap_features = TUN_OFFLOADS;
+
+       /* Register callbacks for rx/tx drop accounting and for
+        * updating net_device features.
+        */
+       vlantap->tap.count_tx_dropped = macvtap_count_tx_dropped;
+       vlantap->tap.count_rx_dropped = macvtap_count_rx_dropped;
+       vlantap->tap.update_features  = macvtap_update_features;
 
-       err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
+       err = netdev_rx_handler_register(dev, tap_handle_frame, &vlantap->tap);
        if (err)
                return err;
 
@@ -497,14 +112,18 @@ static int macvtap_newlink(struct net *src_net,
                return err;
        }
 
+       vlantap->tap.dev = vlantap->vlan.dev;
+
        return 0;
 }
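
macvtap_newlink() wires these three hooks into the shared tap core; since
other tap users may leave them unset, the call sites in tap.c are expected
to be NULL-guarded. A sketch of such a call site (illustrative, not the
exact tap.c code):

    /* inside the shared tap code, on an rx-side drop (illustrative) */
    static inline void tap_note_rx_dropped(struct tap_dev *tap)
    {
            if (tap->count_rx_dropped)      /* hook is optional */
                    tap->count_rx_dropped(tap);
    }
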
 
 static void macvtap_dellink(struct net_device *dev,
                            struct list_head *head)
 {
+       struct macvtap_dev *vlantap = netdev_priv(dev);
+
        netdev_rx_handler_unregister(dev);
-       macvtap_del_queues(dev);
+       tap_del_queues(&vlantap->tap);
        macvlan_dellink(dev, head);
 }
 
@@ -519,749 +138,14 @@ static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
        .setup          = macvtap_setup,
        .newlink        = macvtap_newlink,
        .dellink        = macvtap_dellink,
+       .priv_size      = sizeof(struct macvtap_dev),
 };
 
-
-static void macvtap_sock_write_space(struct sock *sk)
-{
-       wait_queue_head_t *wqueue;
-
-       if (!sock_writeable(sk) ||
-           !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
-               return;
-
-       wqueue = sk_sleep(sk);
-       if (wqueue && waitqueue_active(wqueue))
-               wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
-}
-
-static void macvtap_sock_destruct(struct sock *sk)
-{
-       struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk);
-
-       skb_array_cleanup(&q->skb_array);
-}
-
-static int macvtap_open(struct inode *inode, struct file *file)
-{
-       struct net *net = current->nsproxy->net_ns;
-       struct net_device *dev;
-       struct macvtap_queue *q;
-       int err = -ENODEV;
-
-       rtnl_lock();
-       dev = dev_get_by_macvtap_minor(iminor(inode));
-       if (!dev)
-               goto err;
-
-       err = -ENOMEM;
-       q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
-                                            &macvtap_proto, 0);
-       if (!q)
-               goto err;
-
-       RCU_INIT_POINTER(q->sock.wq, &q->wq);
-       init_waitqueue_head(&q->wq.wait);
-       q->sock.type = SOCK_RAW;
-       q->sock.state = SS_CONNECTED;
-       q->sock.file = file;
-       q->sock.ops = &macvtap_socket_ops;
-       sock_init_data(&q->sock, &q->sk);
-       q->sk.sk_write_space = macvtap_sock_write_space;
-       q->sk.sk_destruct = macvtap_sock_destruct;
-       q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
-       q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
-
-       /*
-        * So far only KVM's virtio_net uses macvtap; enable zero copy
-        * between guest kernel and host kernel when the lower device
-        * supports zerocopy.
-        *
-        * Macvlan supports zerocopy iff the lower device supports zero
-        * copy, so we don't have to look at the lower device directly.
-        */
-       if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
-               sock_set_flag(&q->sk, SOCK_ZEROCOPY);
-
-       err = -ENOMEM;
-       if (skb_array_init(&q->skb_array, dev->tx_queue_len, GFP_KERNEL))
-               goto err_array;
-
-       err = macvtap_set_queue(dev, file, q);
-       if (err)
-               goto err_queue;
-
-       dev_put(dev);
-
-       rtnl_unlock();
-       return err;
-
-err_queue:
-       skb_array_cleanup(&q->skb_array);
-err_array:
-       sock_put(&q->sk);
-err:
-       if (dev)
-               dev_put(dev);
-
-       rtnl_unlock();
-       return err;
-}
-
-static int macvtap_release(struct inode *inode, struct file *file)
-{
-       struct macvtap_queue *q = file->private_data;
-       macvtap_put_queue(q);
-       return 0;
-}
-
-static unsigned int macvtap_poll(struct file *file, poll_table * wait)
-{
-       struct macvtap_queue *q = file->private_data;
-       unsigned int mask = POLLERR;
-
-       if (!q)
-               goto out;
-
-       mask = 0;
-       poll_wait(file, &q->wq.wait, wait);
-
-       if (!skb_array_empty(&q->skb_array))
-               mask |= POLLIN | POLLRDNORM;
-
-       if (sock_writeable(&q->sk) ||
-           (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
-            sock_writeable(&q->sk)))
-               mask |= POLLOUT | POLLWRNORM;
-
-out:
-       return mask;
-}
-
-static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
-                                               size_t len, size_t linear,
-                                               int noblock, int *err)
-{
-       struct sk_buff *skb;
-
-       /* Under a page?  Don't bother with paged skb. */
-       if (prepad + len < PAGE_SIZE || !linear)
-               linear = len;
-
-       skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
-                                  err, 0);
-       if (!skb)
-               return NULL;
-
-       skb_reserve(skb, prepad);
-       skb_put(skb, linear);
-       skb->data_len = len - linear;
-       skb->len += len - linear;
-
-       return skb;
-}
-
-/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
-#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
-
-/* Get packet from user space buffer */
-static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
-                               struct iov_iter *from, int noblock)
-{
-       int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
-       struct sk_buff *skb;
-       struct macvlan_dev *vlan;
-       unsigned long total_len = iov_iter_count(from);
-       unsigned long len = total_len;
-       int err;
-       struct virtio_net_hdr vnet_hdr = { 0 };
-       int vnet_hdr_len = 0;
-       int copylen = 0;
-       int depth;
-       bool zerocopy = false;
-       size_t linear;
-
-       if (q->flags & IFF_VNET_HDR) {
-               vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
-
-               err = -EINVAL;
-               if (len < vnet_hdr_len)
-                       goto err;
-               len -= vnet_hdr_len;
-
-               err = -EFAULT;
-               if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
-                       goto err;
-               iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
-               if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
-                    macvtap16_to_cpu(q, vnet_hdr.csum_start) +
-                    macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
-                            macvtap16_to_cpu(q, vnet_hdr.hdr_len))
-                       vnet_hdr.hdr_len = cpu_to_macvtap16(q,
-                                macvtap16_to_cpu(q, vnet_hdr.csum_start) +
-                                macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
-               err = -EINVAL;
-               if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len)
-                       goto err;
-       }
-
-       err = -EINVAL;
-       if (unlikely(len < ETH_HLEN))
-               goto err;
-
-       if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
-               struct iov_iter i;
-
-               copylen = vnet_hdr.hdr_len ?
-                       macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
-               if (copylen > good_linear)
-                       copylen = good_linear;
-               else if (copylen < ETH_HLEN)
-                       copylen = ETH_HLEN;
-               linear = copylen;
-               i = *from;
-               iov_iter_advance(&i, copylen);
-               if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
-                       zerocopy = true;
-       }
-
-       if (!zerocopy) {
-               copylen = len;
-               linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
-               if (linear > good_linear)
-                       linear = good_linear;
-               else if (linear < ETH_HLEN)
-                       linear = ETH_HLEN;
-       }
-
-       skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
-                               linear, noblock, &err);
-       if (!skb)
-               goto err;
-
-       if (zerocopy)
-               err = zerocopy_sg_from_iter(skb, from);
-       else
-               err = skb_copy_datagram_from_iter(skb, 0, from, len);
-
-       if (err)
-               goto err_kfree;
-
-       skb_set_network_header(skb, ETH_HLEN);
-       skb_reset_mac_header(skb);
-       skb->protocol = eth_hdr(skb)->h_proto;
-
-       if (vnet_hdr_len) {
-               err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
-                                           macvtap_is_little_endian(q));
-               if (err)
-                       goto err_kfree;
-       }
-
-       skb_probe_transport_header(skb, ETH_HLEN);
-
-       /* Move network header to the right position for VLAN tagged packets */
-       if ((skb->protocol == htons(ETH_P_8021Q) ||
-            skb->protocol == htons(ETH_P_8021AD)) &&
-           __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
-               skb_set_network_header(skb, depth);
-
-       rcu_read_lock();
-       vlan = rcu_dereference(q->vlan);
-       /* copy skb_ubuf_info for callback when skb has no error */
-       if (zerocopy) {
-               skb_shinfo(skb)->destructor_arg = m->msg_control;
-               skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
-               skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
-       } else if (m && m->msg_control) {
-               struct ubuf_info *uarg = m->msg_control;
-               uarg->callback(uarg, false);
-       }
-
-       if (vlan) {
-               skb->dev = vlan->dev;
-               dev_queue_xmit(skb);
-       } else {
-               kfree_skb(skb);
-       }
-       rcu_read_unlock();
-
-       return total_len;
-
-err_kfree:
-       kfree_skb(skb);
-
-err:
-       rcu_read_lock();
-       vlan = rcu_dereference(q->vlan);
-       if (vlan)
-               this_cpu_inc(vlan->pcpu_stats->tx_dropped);
-       rcu_read_unlock();
-
-       return err;
-}
-
-static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
-       struct file *file = iocb->ki_filp;
-       struct macvtap_queue *q = file->private_data;
-
-       return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
-}
-
-/* Put packet to the user space buffer */
-static ssize_t macvtap_put_user(struct macvtap_queue *q,
-                               const struct sk_buff *skb,
-                               struct iov_iter *iter)
-{
-       int ret;
-       int vnet_hdr_len = 0;
-       int vlan_offset = 0;
-       int total;
-
-       if (q->flags & IFF_VNET_HDR) {
-               struct virtio_net_hdr vnet_hdr;
-               vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
-               if (iov_iter_count(iter) < vnet_hdr_len)
-                       return -EINVAL;
-
-               if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
-                                           macvtap_is_little_endian(q), true))
-                       BUG();
-
-               if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
-                   sizeof(vnet_hdr))
-                       return -EFAULT;
-
-               iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
-       }
-       total = vnet_hdr_len;
-       total += skb->len;
-
-       if (skb_vlan_tag_present(skb)) {
-               struct {
-                       __be16 h_vlan_proto;
-                       __be16 h_vlan_TCI;
-               } veth;
-               veth.h_vlan_proto = skb->vlan_proto;
-               veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
-
-               vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
-               total += VLAN_HLEN;
-
-               ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
-               if (ret || !iov_iter_count(iter))
-                       goto done;
-
-               ret = copy_to_iter(&veth, sizeof(veth), iter);
-               if (ret != sizeof(veth) || !iov_iter_count(iter))
-                       goto done;
-       }
-
-       ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
-                                    skb->len - vlan_offset);
-
-done:
-       return ret ? ret : total;
-}
-
-static ssize_t macvtap_do_read(struct macvtap_queue *q,
-                              struct iov_iter *to,
-                              int noblock)
-{
-       DEFINE_WAIT(wait);
-       struct sk_buff *skb;
-       ssize_t ret = 0;
-
-       if (!iov_iter_count(to))
-               return 0;
-
-       while (1) {
-               if (!noblock)
-                       prepare_to_wait(sk_sleep(&q->sk), &wait,
-                                       TASK_INTERRUPTIBLE);
-
-               /* Read frames from the queue */
-               skb = skb_array_consume(&q->skb_array);
-               if (skb)
-                       break;
-               if (noblock) {
-                       ret = -EAGAIN;
-                       break;
-               }
-               if (signal_pending(current)) {
-                       ret = -ERESTARTSYS;
-                       break;
-               }
-               /* Nothing to read, let's sleep */
-               schedule();
-       }
-       if (!noblock)
-               finish_wait(sk_sleep(&q->sk), &wait);
-
-       if (skb) {
-               ret = macvtap_put_user(q, skb, to);
-               if (unlikely(ret < 0))
-                       kfree_skb(skb);
-               else
-                       consume_skb(skb);
-       }
-       return ret;
-}
-
-static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to)
-{
-       struct file *file = iocb->ki_filp;
-       struct macvtap_queue *q = file->private_data;
-       ssize_t len = iov_iter_count(to), ret;
-
-       ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK);
-       ret = min_t(ssize_t, ret, len);
-       if (ret > 0)
-               iocb->ki_pos = ret;
-       return ret;
-}
-
-static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
-{
-       struct macvlan_dev *vlan;
-
-       ASSERT_RTNL();
-       vlan = rtnl_dereference(q->vlan);
-       if (vlan)
-               dev_hold(vlan->dev);
-
-       return vlan;
-}
-
-static void macvtap_put_vlan(struct macvlan_dev *vlan)
-{
-       dev_put(vlan->dev);
-}
-
-static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
-{
-       struct macvtap_queue *q = file->private_data;
-       struct macvlan_dev *vlan;
-       int ret;
-
-       vlan = macvtap_get_vlan(q);
-       if (!vlan)
-               return -EINVAL;
-
-       if (flags & IFF_ATTACH_QUEUE)
-               ret = macvtap_enable_queue(vlan->dev, file, q);
-       else if (flags & IFF_DETACH_QUEUE)
-               ret = macvtap_disable_queue(q);
-       else
-               ret = -EINVAL;
-
-       macvtap_put_vlan(vlan);
-       return ret;
-}
-
-static int set_offload(struct macvtap_queue *q, unsigned long arg)
-{
-       struct macvlan_dev *vlan;
-       netdev_features_t features;
-       netdev_features_t feature_mask = 0;
-
-       vlan = rtnl_dereference(q->vlan);
-       if (!vlan)
-               return -ENOLINK;
-
-       features = vlan->dev->features;
-
-       if (arg & TUN_F_CSUM) {
-               feature_mask = NETIF_F_HW_CSUM;
-
-               if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
-                       if (arg & TUN_F_TSO_ECN)
-                               feature_mask |= NETIF_F_TSO_ECN;
-                       if (arg & TUN_F_TSO4)
-                               feature_mask |= NETIF_F_TSO;
-                       if (arg & TUN_F_TSO6)
-                               feature_mask |= NETIF_F_TSO6;
-               }
-
-               if (arg & TUN_F_UFO)
-                       feature_mask |= NETIF_F_UFO;
-       }
-
-       /* The tun/tap driver inverts the usual sense of the TSO offload
-        * flags: setting a TSO bit means that userspace is willing to
-        * accept TSO frames, and clearing it means that userspace does
-        * not support TSO.
-        * Macvtap has to apply the same inverted convention: when
-        * userspace turns off TSO, we turn off GSO/LRO so that
-        * userspace will not receive TSO frames.
-        */
-       if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
-               features |= RX_OFFLOADS;
-       else
-               features &= ~RX_OFFLOADS;
-
-       /* tap_features are the same as features on tun/tap and
-        * reflect user expectations.
-        */
-       vlan->tap_features = feature_mask;
-       vlan->set_features = features;
-       netdev_update_features(vlan->dev);
-
-       return 0;
-}
-
-/*
- * provide compatibility with generic tun/tap interface
- */
-static long macvtap_ioctl(struct file *file, unsigned int cmd,
-                         unsigned long arg)
-{
-       struct macvtap_queue *q = file->private_data;
-       struct macvlan_dev *vlan;
-       void __user *argp = (void __user *)arg;
-       struct ifreq __user *ifr = argp;
-       unsigned int __user *up = argp;
-       unsigned short u;
-       int __user *sp = argp;
-       struct sockaddr sa;
-       int s;
-       int ret;
-
-       switch (cmd) {
-       case TUNSETIFF:
-               /* ignore the name, just look at flags */
-               if (get_user(u, &ifr->ifr_flags))
-                       return -EFAULT;
-
-               ret = 0;
-               if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP))
-                       ret = -EINVAL;
-               else
-                       q->flags = (q->flags & ~MACVTAP_FEATURES) | u;
-
-               return ret;
-
-       case TUNGETIFF:
-               rtnl_lock();
-               vlan = macvtap_get_vlan(q);
-               if (!vlan) {
-                       rtnl_unlock();
-                       return -ENOLINK;
-               }
-
-               ret = 0;
-               u = q->flags;
-               if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
-                   put_user(u, &ifr->ifr_flags))
-                       ret = -EFAULT;
-               macvtap_put_vlan(vlan);
-               rtnl_unlock();
-               return ret;
-
-       case TUNSETQUEUE:
-               if (get_user(u, &ifr->ifr_flags))
-                       return -EFAULT;
-               rtnl_lock();
-               ret = macvtap_ioctl_set_queue(file, u);
-               rtnl_unlock();
-               return ret;
-
-       case TUNGETFEATURES:
-               if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up))
-                       return -EFAULT;
-               return 0;
-
-       case TUNSETSNDBUF:
-               if (get_user(s, sp))
-                       return -EFAULT;
-
-               q->sk.sk_sndbuf = s;
-               return 0;
-
-       case TUNGETVNETHDRSZ:
-               s = q->vnet_hdr_sz;
-               if (put_user(s, sp))
-                       return -EFAULT;
-               return 0;
-
-       case TUNSETVNETHDRSZ:
-               if (get_user(s, sp))
-                       return -EFAULT;
-               if (s < (int)sizeof(struct virtio_net_hdr))
-                       return -EINVAL;
-
-               q->vnet_hdr_sz = s;
-               return 0;
-
-       case TUNGETVNETLE:
-               s = !!(q->flags & MACVTAP_VNET_LE);
-               if (put_user(s, sp))
-                       return -EFAULT;
-               return 0;
-
-       case TUNSETVNETLE:
-               if (get_user(s, sp))
-                       return -EFAULT;
-               if (s)
-                       q->flags |= MACVTAP_VNET_LE;
-               else
-                       q->flags &= ~MACVTAP_VNET_LE;
-               return 0;
-
-       case TUNGETVNETBE:
-               return macvtap_get_vnet_be(q, sp);
-
-       case TUNSETVNETBE:
-               return macvtap_set_vnet_be(q, sp);
-
-       case TUNSETOFFLOAD:
-               /* let the user check for future flags */
-               if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
-                           TUN_F_TSO_ECN | TUN_F_UFO))
-                       return -EINVAL;
-
-               rtnl_lock();
-               ret = set_offload(q, arg);
-               rtnl_unlock();
-               return ret;
-
-       case SIOCGIFHWADDR:
-               rtnl_lock();
-               vlan = macvtap_get_vlan(q);
-               if (!vlan) {
-                       rtnl_unlock();
-                       return -ENOLINK;
-               }
-               ret = 0;
-               u = vlan->dev->type;
-               if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
-                   copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) ||
-                   put_user(u, &ifr->ifr_hwaddr.sa_family))
-                       ret = -EFAULT;
-               macvtap_put_vlan(vlan);
-               rtnl_unlock();
-               return ret;
-
-       case SIOCSIFHWADDR:
-               if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
-                       return -EFAULT;
-               rtnl_lock();
-               vlan = macvtap_get_vlan(q);
-               if (!vlan) {
-                       rtnl_unlock();
-                       return -ENOLINK;
-               }
-               ret = dev_set_mac_address(vlan->dev, &sa);
-               macvtap_put_vlan(vlan);
-               rtnl_unlock();
-               return ret;
-
-       default:
-               return -EINVAL;
-       }
-}
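
The removed handler above is what gave macvtap its tun/tap-compatible
control interface; after the split the same ioctls remain valid against
the tap%d character device, now serviced by the shared tap code. A
minimal userspace sketch (the /dev/tap4 path is an assumption for an
already-created macvtap link whose ifindex is 4):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/if.h>
    #include <linux/if_tun.h>

    int main(void)
    {
            struct ifreq ifr;
            int hdr_sz;
            int fd = open("/dev/tap4", O_RDWR);  /* tap%d named after ifindex */

            if (fd < 0) { perror("open"); return 1; }

            memset(&ifr, 0, sizeof(ifr));
            if (ioctl(fd, TUNGETIFF, &ifr) == 0)  /* name and IFF_* flags */
                    printf("attached to %s, flags 0x%x\n",
                           ifr.ifr_name, (unsigned)ifr.ifr_flags);

            if (ioctl(fd, TUNGETVNETHDRSZ, &hdr_sz) == 0)
                    printf("vnet hdr size %d\n", hdr_sz);  /* default is 10 */

            close(fd);
            return 0;
    }
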
-
-#ifdef CONFIG_COMPAT
-static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
-                                unsigned long arg)
-{
-       return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
-static const struct file_operations macvtap_fops = {
-       .owner          = THIS_MODULE,
-       .open           = macvtap_open,
-       .release        = macvtap_release,
-       .read_iter      = macvtap_read_iter,
-       .write_iter     = macvtap_write_iter,
-       .poll           = macvtap_poll,
-       .llseek         = no_llseek,
-       .unlocked_ioctl = macvtap_ioctl,
-#ifdef CONFIG_COMPAT
-       .compat_ioctl   = macvtap_compat_ioctl,
-#endif
-};
-
-static int macvtap_sendmsg(struct socket *sock, struct msghdr *m,
-                          size_t total_len)
-{
-       struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
-       return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
-}
-
-static int macvtap_recvmsg(struct socket *sock, struct msghdr *m,
-                          size_t total_len, int flags)
-{
-       struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
-       int ret;
-       if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
-               return -EINVAL;
-       ret = macvtap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
-       if (ret > total_len) {
-               m->msg_flags |= MSG_TRUNC;
-               ret = flags & MSG_TRUNC ? ret : total_len;
-       }
-       return ret;
-}
-
-static int macvtap_peek_len(struct socket *sock)
-{
-       struct macvtap_queue *q = container_of(sock, struct macvtap_queue,
-                                              sock);
-       return skb_array_peek_len(&q->skb_array);
-}
-
-/* Ops structure to mimic raw sockets with tun */
-static const struct proto_ops macvtap_socket_ops = {
-       .sendmsg = macvtap_sendmsg,
-       .recvmsg = macvtap_recvmsg,
-       .peek_len = macvtap_peek_len,
-};
-
-/* Get the underlying socket object from a macvtap file.  Returns an error
- * unless the file is attached to a device.  The returned object works like a
- * packet socket; it can be used for sock_sendmsg/sock_recvmsg.  The caller is
- * responsible for holding a reference to the file for as long as the socket
- * is in use. */
-struct socket *macvtap_get_socket(struct file *file)
-{
-       struct macvtap_queue *q;
-       if (file->f_op != &macvtap_fops)
-               return ERR_PTR(-EINVAL);
-       q = file->private_data;
-       if (!q)
-               return ERR_PTR(-EBADFD);
-       return &q->sock;
-}
-EXPORT_SYMBOL_GPL(macvtap_get_socket);
-
-static int macvtap_queue_resize(struct macvlan_dev *vlan)
-{
-       struct net_device *dev = vlan->dev;
-       struct macvtap_queue *q;
-       struct skb_array **arrays;
-       int n = vlan->numqueues;
-       int ret, i = 0;
-
-       arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL);
-       if (!arrays)
-               return -ENOMEM;
-
-       list_for_each_entry(q, &vlan->queue_list, next)
-               arrays[i++] = &q->skb_array;
-
-       ret = skb_array_resize_multiple(arrays, n,
-                                       dev->tx_queue_len, GFP_KERNEL);
-
-       kfree(arrays);
-       return ret;
-}
-
 static int macvtap_device_event(struct notifier_block *unused,
                                unsigned long event, void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       struct macvlan_dev *vlan;
+       struct macvtap_dev *vlantap;
        struct device *classdev;
        dev_t devt;
        int err;
@@ -1271,7 +155,7 @@ static int macvtap_device_event(struct notifier_block *unused,
                return NOTIFY_DONE;
 
        snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex);
-       vlan = netdev_priv(dev);
+       vlantap = netdev_priv(dev);
 
        switch (event) {
        case NETDEV_REGISTER:
@@ -1279,15 +163,15 @@ static int macvtap_device_event(struct notifier_block *unused,
                 * been registered but before register_netdevice has
                 * finished running.
                 */
-               err = macvtap_get_minor(vlan);
+               err = tap_get_minor(macvtap_major, &vlantap->tap);
                if (err)
                        return notifier_from_errno(err);
 
-               devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
+               devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor);
                classdev = device_create(&macvtap_class, &dev->dev, devt,
                                         dev, tap_name);
                if (IS_ERR(classdev)) {
-                       macvtap_free_minor(vlan);
+                       tap_free_minor(macvtap_major, &vlantap->tap);
                        return notifier_from_errno(PTR_ERR(classdev));
                }
                err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj,
@@ -1297,15 +181,15 @@ static int macvtap_device_event(struct notifier_block *unused,
                break;
        case NETDEV_UNREGISTER:
                /* vlantap->tap.minor == 0 if NETDEV_REGISTER above failed */
-               if (vlan->minor == 0)
+               if (vlantap->tap.minor == 0)
                        break;
                sysfs_remove_link(&dev->dev.kobj, tap_name);
-               devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
+               devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor);
                device_destroy(&macvtap_class, devt);
-               macvtap_free_minor(vlan);
+               tap_free_minor(macvtap_major, &vlantap->tap);
                break;
        case NETDEV_CHANGE_TX_QUEUE_LEN:
-               if (macvtap_queue_resize(vlan))
+               if (tap_queue_resize(&vlantap->tap))
                        return NOTIFY_BAD;
                break;
        }
@@ -1321,38 +205,31 @@ static int macvtap_init(void)
 {
        int err;
 
-       err = alloc_chrdev_region(&macvtap_major, 0,
-                               MACVTAP_NUM_DEVS, "macvtap");
-       if (err)
-               goto out1;
+       err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap");
 
-       cdev_init(&macvtap_cdev, &macvtap_fops);
-       err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
        if (err)
-               goto out2;
+               goto out1;
 
        err = class_register(&macvtap_class);
        if (err)
-               goto out3;
+               goto out2;
 
        err = register_netdevice_notifier(&macvtap_notifier_block);
        if (err)
-               goto out4;
+               goto out3;
 
        err = macvlan_link_register(&macvtap_link_ops);
        if (err)
-               goto out5;
+               goto out4;
 
        return 0;
 
-out5:
-       unregister_netdevice_notifier(&macvtap_notifier_block);
 out4:
-       class_unregister(&macvtap_class);
+       unregister_netdevice_notifier(&macvtap_notifier_block);
 out3:
-       cdev_del(&macvtap_cdev);
+       class_unregister(&macvtap_class);
 out2:
-       unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
+       tap_destroy_cdev(macvtap_major, &macvtap_cdev);
 out1:
        return err;
 }
@@ -1363,9 +240,7 @@ static void macvtap_exit(void)
        rtnl_link_unregister(&macvtap_link_ops);
        unregister_netdevice_notifier(&macvtap_notifier_block);
        class_unregister(&macvtap_class);
-       cdev_del(&macvtap_cdev);
-       unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
-       idr_destroy(&minor_idr);
+       tap_destroy_cdev(macvtap_major, &macvtap_cdev);
 }
 module_exit(macvtap_exit);
 
index 3e027ed0b3bbccaf399e2800bcdd0983979b7c4a..077364cbf439109d8d1f877d4581607eba80dc55 100644 (file)
@@ -341,6 +341,184 @@ void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
 }
 EXPORT_SYMBOL(mdio45_ethtool_gset_npage);
 
+/**
+ * mdio45_ethtool_ksettings_get_npage - get settings for ETHTOOL_GLINKSETTINGS
+ * @mdio: MDIO interface
+ * @cmd: Ethtool request structure
+ * @npage_adv: Modes currently advertised on next pages
+ * @npage_lpa: Modes advertised by link partner on next pages
+ *
+ * The @cmd parameter is expected to have been cleared before calling
+ * mdio45_ethtool_ksettings_get_npage().
+ *
+ * Since the CSRs for auto-negotiation using next pages are not fully
+ * standardised, this function does not attempt to decode them.  The
+ * caller must pass them in.
+ */
+void mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio,
+                                       struct ethtool_link_ksettings *cmd,
+                                       u32 npage_adv, u32 npage_lpa)
+{
+       int reg;
+       u32 speed, supported = 0, advertising = 0, lp_advertising = 0;
+
+       BUILD_BUG_ON(MDIO_SUPPORTS_C22 != ETH_MDIO_SUPPORTS_C22);
+       BUILD_BUG_ON(MDIO_SUPPORTS_C45 != ETH_MDIO_SUPPORTS_C45);
+
+       cmd->base.phy_address = mdio->prtad;
+       cmd->base.mdio_support =
+               mdio->mode_support & (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22);
+
+       reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+                             MDIO_CTRL2);
+       switch (reg & MDIO_PMA_CTRL2_TYPE) {
+       case MDIO_PMA_CTRL2_10GBT:
+       case MDIO_PMA_CTRL2_1000BT:
+       case MDIO_PMA_CTRL2_100BTX:
+       case MDIO_PMA_CTRL2_10BT:
+               cmd->base.port = PORT_TP;
+               supported = SUPPORTED_TP;
+               reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+                                     MDIO_SPEED);
+               if (reg & MDIO_SPEED_10G)
+                       supported |= SUPPORTED_10000baseT_Full;
+               if (reg & MDIO_PMA_SPEED_1000)
+                       supported |= (SUPPORTED_1000baseT_Full |
+                                           SUPPORTED_1000baseT_Half);
+               if (reg & MDIO_PMA_SPEED_100)
+                       supported |= (SUPPORTED_100baseT_Full |
+                                           SUPPORTED_100baseT_Half);
+               if (reg & MDIO_PMA_SPEED_10)
+                       supported |= (SUPPORTED_10baseT_Full |
+                                           SUPPORTED_10baseT_Half);
+               advertising = ADVERTISED_TP;
+               break;
+
+       case MDIO_PMA_CTRL2_10GBCX4:
+               cmd->base.port = PORT_OTHER;
+               supported = 0;
+               advertising = 0;
+               break;
+
+       case MDIO_PMA_CTRL2_10GBKX4:
+       case MDIO_PMA_CTRL2_10GBKR:
+       case MDIO_PMA_CTRL2_1000BKX:
+               cmd->base.port = PORT_OTHER;
+               supported = SUPPORTED_Backplane;
+               reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+                                     MDIO_PMA_EXTABLE);
+               if (reg & MDIO_PMA_EXTABLE_10GBKX4)
+                       supported |= SUPPORTED_10000baseKX4_Full;
+               if (reg & MDIO_PMA_EXTABLE_10GBKR)
+                       supported |= SUPPORTED_10000baseKR_Full;
+               if (reg & MDIO_PMA_EXTABLE_1000BKX)
+                       supported |= SUPPORTED_1000baseKX_Full;
+               reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+                                     MDIO_PMA_10GBR_FECABLE);
+               if (reg & MDIO_PMA_10GBR_FECABLE_ABLE)
+                       supported |= SUPPORTED_10000baseR_FEC;
+               advertising = ADVERTISED_Backplane;
+               break;
+
+       /* All the other defined modes are flavours of optical */
+       default:
+               cmd->base.port = PORT_FIBRE;
+               supported = SUPPORTED_FIBRE;
+               advertising = ADVERTISED_FIBRE;
+               break;
+       }
+
+       if (mdio->mmds & MDIO_DEVS_AN) {
+               supported |= SUPPORTED_Autoneg;
+               reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN,
+                                     MDIO_CTRL1);
+               if (reg & MDIO_AN_CTRL1_ENABLE) {
+                       cmd->base.autoneg = AUTONEG_ENABLE;
+                       advertising |=
+                               ADVERTISED_Autoneg |
+                               mdio45_get_an(mdio, MDIO_AN_ADVERTISE) |
+                               npage_adv;
+               } else {
+                       cmd->base.autoneg = AUTONEG_DISABLE;
+               }
+       } else {
+               cmd->base.autoneg = AUTONEG_DISABLE;
+       }
+
+       if (cmd->base.autoneg) {
+               u32 modes = 0;
+               int an_stat = mdio->mdio_read(mdio->dev, mdio->prtad,
+                                             MDIO_MMD_AN, MDIO_STAT1);
+
+               /* If AN is complete and successful, report best common
+                * mode, otherwise report best advertised mode.
+                */
+               if (an_stat & MDIO_AN_STAT1_COMPLETE) {
+                       lp_advertising =
+                               mdio45_get_an(mdio, MDIO_AN_LPA) | npage_lpa;
+                       if (an_stat & MDIO_AN_STAT1_LPABLE)
+                               lp_advertising |= ADVERTISED_Autoneg;
+                       modes = advertising & lp_advertising;
+               }
+               if ((modes & ~ADVERTISED_Autoneg) == 0)
+                       modes = advertising;
+
+               if (modes & (ADVERTISED_10000baseT_Full |
+                            ADVERTISED_10000baseKX4_Full |
+                            ADVERTISED_10000baseKR_Full)) {
+                       speed = SPEED_10000;
+                       cmd->base.duplex = DUPLEX_FULL;
+               } else if (modes & (ADVERTISED_1000baseT_Full |
+                                   ADVERTISED_1000baseT_Half |
+                                   ADVERTISED_1000baseKX_Full)) {
+                       speed = SPEED_1000;
+                       cmd->base.duplex = !(modes & ADVERTISED_1000baseT_Half);
+               } else if (modes & (ADVERTISED_100baseT_Full |
+                                   ADVERTISED_100baseT_Half)) {
+                       speed = SPEED_100;
+                       cmd->base.duplex = !!(modes & ADVERTISED_100baseT_Full);
+               } else {
+                       speed = SPEED_10;
+                       cmd->base.duplex = !!(modes & ADVERTISED_10baseT_Full);
+               }
+       } else {
+               /* Report forced settings */
+               reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+                                     MDIO_CTRL1);
+               speed = (((reg & MDIO_PMA_CTRL1_SPEED1000) ? 100 : 1)
+                        * ((reg & MDIO_PMA_CTRL1_SPEED100) ? 100 : 10));
+               cmd->base.duplex = (reg & MDIO_CTRL1_FULLDPLX ||
+                                   speed == SPEED_10000);
+       }
+
+       cmd->base.speed = speed;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+                                               advertising);
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+                                               lp_advertising);
+
+       /* 10GBASE-T MDI/MDI-X */
+       if (cmd->base.port == PORT_TP && (cmd->base.speed == SPEED_10000)) {
+               switch (mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
+                                       MDIO_PMA_10GBT_SWAPPOL)) {
+               case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX:
+                       cmd->base.eth_tp_mdix = ETH_TP_MDI;
+                       break;
+               case 0:
+                       cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
+                       break;
+               default:
+                       /* It's complicated... */
+                       cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+                       break;
+               }
+       }
+}
+EXPORT_SYMBOL(mdio45_ethtool_ksettings_get_npage);
+
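
The forced-speed decode near the end of the function packs four speeds
into the two PMA control bits by multiplying two factors:

    SPEED1000  SPEED100    (bit ? 100 : 1) * (bit ? 100 : 10)  ->  speed
        0          0               1       *        10         ->     10
        0          1               1       *       100         ->    100
        1          0             100       *        10         ->   1000
        1          1             100       *       100         ->  10000
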
 /**
  * mdio_mii_ioctl - MII ioctl interface for MDIO (clause 22 or 45) PHYs
  * @mdio: MDIO interface
index 2de7faee9b19eb7ee0a009f133314e64c36772f1..b91603835d2680aa08911da7870ea176c5dd0792 100644 (file)
@@ -58,7 +58,7 @@ static int nlmon_close(struct net_device *dev)
        return netlink_remove_tap(&nlmon->nt);
 }
 
-static struct rtnl_link_stats64 *
+static void
 nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        int i;
@@ -86,8 +86,6 @@ nlmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 
        stats->rx_bytes = bytes;
        stats->tx_bytes = 0;
-
-       return stats;
 }
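
nlmon is adapted here to the ndo_get_stats64 signature change: the
callback now returns void and simply fills the caller-supplied structure.
The new callback shape, sketched for a hypothetical driver with a
foo_priv counter struct:

    /* new-style ndo_get_stats64: fill *stats, return nothing */
    static void foo_get_stats64(struct net_device *dev,
                                struct rtnl_link_stats64 *stats)
    {
            struct foo_priv *priv = netdev_priv(dev);

            stats->rx_packets = priv->rx_packets;
            stats->rx_bytes   = priv->rx_bytes;
    }
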
 
 static u32 always_on(struct net_device *dev)
index 356859ac7c18be8e41da7b40aad9a71832a8a1e9..407b0b601ea8264b0ac8bf32a609271d43be995a 100644 (file)
@@ -1,6 +1,7 @@
 # Makefile for Linux PHY drivers and MDIO bus drivers
 
-libphy-y                       := phy.o phy_device.o mdio_bus.o mdio_device.o
+libphy-y                       := phy.o phy_device.o mdio_bus.o mdio_device.o \
+                                  mdio-boardinfo.o
 libphy-$(CONFIG_SWPHY)         += swphy.o
 libphy-$(CONFIG_LED_TRIGGER_PHY)       += phy_led_triggers.o
 
index 264b085d796b4f048551fa033b7c28d302675e24..d1c2614dad3a60860f33c0ff1851f18e0d1a4253 100644 (file)
@@ -167,6 +167,31 @@ static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev)
        return 0;
 }
 
+static int bcm7xxx_28nm_a0_patch_afe_config_init(struct phy_device *phydev)
+{
+       /* +1 RC_CAL codes for RL centering for both LT and HT conditions */
+       bcm_phy_write_misc(phydev, AFE_RXCONFIG_2, 0xd003);
+
+       /* Cut master bias current by 2% to compensate for RC_CAL offset */
+       bcm_phy_write_misc(phydev, DSP_TAP10, 0x791b);
+
+       /* Improve hybrid leakage */
+       bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x10e3);
+
+       /* Change rx_on_tune 8 to 0xf */
+       bcm_phy_write_misc(phydev, 0x21, 0x2, 0x87f6);
+
+       /* Change 100Tx EEE bandwidth */
+       bcm_phy_write_misc(phydev, 0x22, 0x2, 0x017d);
+
+       /* Enable ffe zero detection for Vitesse interoperability */
+       bcm_phy_write_misc(phydev, 0x26, 0x2, 0x0015);
+
+       r_rc_cal_reset(phydev);
+
+       return 0;
+}
+
 static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
 {
        u8 rev = PHY_BRCM_7XXX_REV(phydev->dev_flags);
@@ -174,6 +199,12 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
        u8 count;
        int ret = 0;
 
+       /* Newer devices have moved the revision information back into a
+        * standard location in MII_PHYS_ID[23]
+        */
+       if (rev == 0)
+               rev = phydev->phy_id & ~phydev->drv->phy_id_mask;
+
        pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n",
                     phydev_name(phydev), phydev->drv->name, rev, patch);
 
@@ -197,6 +228,9 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
        case 0x10:
                ret = bcm7xxx_28nm_e0_plus_afe_config_init(phydev);
                break;
+       case 0x01:
+               ret = bcm7xxx_28nm_a0_patch_afe_config_init(phydev);
+               break;
        default:
                break;
        }
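
With a phy_id_mask of 0xfffffff0 (as in the bcm7xxx_tbl entries below),
the fallback keeps only the low revision nibble of the PHY ID. A worked
example with a hypothetical ID value:

    /* e.g. phydev->phy_id = 0xae025201, phy_id_mask = 0xfffffff0:
     * rev = 0xae025201 & ~0xfffffff0 = 0x1  ->  takes "case 0x01" above
     */
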
@@ -416,8 +450,10 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
 
 static struct phy_driver bcm7xxx_driver[] = {
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
+       BCM7XXX_28NM_GPHY(PHY_ID_BCM7278, "Broadcom BCM7278"),
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"),
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"),
+       BCM7XXX_28NM_GPHY(PHY_ID_BCM74371, "Broadcom BCM74371"),
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
        BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
@@ -430,12 +466,14 @@ static struct phy_driver bcm7xxx_driver[] = {
 
 static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
        { PHY_ID_BCM7250, 0xfffffff0, },
+       { PHY_ID_BCM7278, 0xfffffff0, },
        { PHY_ID_BCM7364, 0xfffffff0, },
        { PHY_ID_BCM7366, 0xfffffff0, },
        { PHY_ID_BCM7346, 0xfffffff0, },
        { PHY_ID_BCM7362, 0xfffffff0, },
        { PHY_ID_BCM7425, 0xfffffff0, },
        { PHY_ID_BCM7429, 0xfffffff0, },
+       { PHY_ID_BCM74371, 0xfffffff0, },
        { PHY_ID_BCM7439, 0xfffffff0, },
        { PHY_ID_BCM7435, 0xfffffff0, },
        { PHY_ID_BCM7445, 0xfffffff0, },
index 4223e35490b0c897ef7a8767189898ef917094bb..9cd8b27d12923c78b6f68bb1bf7e2afe993b30a6 100644 (file)
@@ -30,6 +30,50 @@ MODULE_DESCRIPTION("Broadcom PHY driver");
 MODULE_AUTHOR("Maciej W. Rozycki");
 MODULE_LICENSE("GPL");
 
+static int bcm54210e_config_init(struct phy_device *phydev)
+{
+       int val;
+
+       val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+       val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
+       val |= MII_BCM54XX_AUXCTL_MISC_WREN;
+       bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, val);
+
+       val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL);
+       val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
+       bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
+
+       return 0;
+}
+
+static int bcm54612e_config_init(struct phy_device *phydev)
+{
+       /* Clear TX internal delay unless requested. */
+       if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) &&
+           (phydev->interface != PHY_INTERFACE_MODE_RGMII_TXID)) {
+               /* Disable TXD to GTXCLK clock delay (default set) */
+               /* Bit 9 is the only field in shadow register 00011 */
+               bcm_phy_write_shadow(phydev, 0x03, 0);
+       }
+
+       /* Clear RX internal delay unless requested. */
+       if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) &&
+           (phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) {
+               u16 reg;
+
+               reg = bcm54xx_auxctl_read(phydev,
+                                         MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+               /* Disable RXD to RXC delay (default set) */
+               reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
+               /* Clear shadow selector field */
+               reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MASK;
+               bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
+                                    MII_BCM54XX_AUXCTL_MISC_WREN | reg);
+       }
+
+       return 0;
+}
+
 static int bcm54810_config(struct phy_device *phydev)
 {
        int rc, val;
@@ -230,7 +274,15 @@ static int bcm54xx_config_init(struct phy_device *phydev)
            (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
                bcm54xx_adjust_rxrefclk(phydev);
 
-       if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) {
+       if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E) {
+               err = bcm54210e_config_init(phydev);
+               if (err)
+                       return err;
+       } else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54612E) {
+               err = bcm54612e_config_init(phydev);
+               if (err)
+                       return err;
+       } else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) {
                err = bcm54810_config(phydev);
                if (err)
                        return err;
@@ -375,41 +427,6 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
        return ret;
 }
 
-static int bcm54612e_config_aneg(struct phy_device *phydev)
-{
-       int ret;
-
-       /* First, auto-negotiate. */
-       ret = genphy_config_aneg(phydev);
-
-       /* Clear TX internal delay unless requested. */
-       if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) &&
-           (phydev->interface != PHY_INTERFACE_MODE_RGMII_TXID)) {
-               /* Disable TXD to GTXCLK clock delay (default set) */
-               /* Bit 9 is the only field in shadow register 00011 */
-               bcm_phy_write_shadow(phydev, 0x03, 0);
-       }
-
-       /* Clear RX internal delay unless requested. */
-       if ((phydev->interface != PHY_INTERFACE_MODE_RGMII_ID) &&
-           (phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) {
-               u16 reg;
-
-               /* Errata: reads require filling in the write selector field */
-               bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
-                                    MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC);
-               reg = phy_read(phydev, MII_BCM54XX_AUX_CTL);
-               /* Disable RXD to RXC delay (default set) */
-               reg &= ~MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW;
-               /* Clear shadow selector field */
-               reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MASK;
-               bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
-                                    MII_BCM54XX_AUXCTL_MISC_WREN | reg);
-       }
-
-       return ret;
-}
-
 static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set)
 {
        int val;
@@ -543,6 +560,17 @@ static struct phy_driver broadcom_drivers[] = {
        .read_status    = genphy_read_status,
        .ack_interrupt  = bcm_phy_ack_intr,
        .config_intr    = bcm_phy_config_intr,
+}, {
+       .phy_id         = PHY_ID_BCM54210E,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Broadcom BCM54210E",
+       .features       = PHY_GBIT_FEATURES,
+       .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+       .config_init    = bcm54xx_config_init,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
+       .ack_interrupt  = bcm_phy_ack_intr,
+       .config_intr    = bcm_phy_config_intr,
 }, {
        .phy_id         = PHY_ID_BCM5461,
        .phy_id_mask    = 0xfffffff0,
@@ -561,7 +589,7 @@ static struct phy_driver broadcom_drivers[] = {
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
        .config_init    = bcm54xx_config_init,
-       .config_aneg    = bcm54612e_config_aneg,
+       .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
        .ack_interrupt  = bcm_phy_ack_intr,
        .config_intr    = bcm_phy_config_intr,
@@ -682,6 +710,7 @@ module_phy_driver(broadcom_drivers);
 static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
        { PHY_ID_BCM5411, 0xfffffff0 },
        { PHY_ID_BCM5421, 0xfffffff0 },
+       { PHY_ID_BCM54210E, 0xfffffff0 },
        { PHY_ID_BCM5461, 0xfffffff0 },
        { PHY_ID_BCM54612E, 0xfffffff0 },
        { PHY_ID_BCM54616S, 0xfffffff0 },
index ca1b462bf7b2782412014b63a0578aa328d9d4e1..19865530e0b13c24e5d74def6cbd55e77b0ad378 100644 (file)
@@ -32,7 +32,9 @@
 #define DP83867_CFG3           0x1e
 
 /* Extended Registers */
+#define DP83867_CFG4            0x0031
 #define DP83867_RGMIICTL       0x0032
+#define DP83867_STRAP_STS1     0x006E
 #define DP83867_RGMIIDCTL      0x0086
 #define DP83867_IO_MUX_CFG     0x0170
 
 #define DP83867_RGMII_TX_CLK_DELAY_EN          BIT(1)
 #define DP83867_RGMII_RX_CLK_DELAY_EN          BIT(0)
 
+/* STRAP_STS1 bits */
+#define DP83867_STRAP_STS1_RESERVED            BIT(11)
+
 /* PHY CTRL bits */
 #define DP83867_PHYCR_FIFO_DEPTH_SHIFT         14
 #define DP83867_PHYCR_FIFO_DEPTH_MASK          (3 << 14)
+#define DP83867_PHYCR_RESERVED_MASK            BIT(11)
 
 /* RGMIIDCTL bits */
 #define DP83867_RGMII_TX_CLK_DELAY_SHIFT       4
 #define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX    0x0
 #define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN    0x1f
 
+/* CFG4 bits */
+#define DP83867_CFG4_PORT_MIRROR_EN              BIT(0)
+
+enum {
+       DP83867_PORT_MIRRORING_KEEP,
+       DP83867_PORT_MIRRORING_EN,
+       DP83867_PORT_MIRRORING_DIS,
+};
+
 struct dp83867_private {
        int rx_id_delay;
        int tx_id_delay;
        int fifo_depth;
        int io_impedance;
+       int port_mirroring;
 };
 
 static int dp83867_ack_interrupt(struct phy_device *phydev)
@@ -111,6 +127,24 @@ static int dp83867_config_intr(struct phy_device *phydev)
        return phy_write(phydev, MII_DP83867_MICR, micr_status);
 }
 
+static int dp83867_config_port_mirroring(struct phy_device *phydev)
+{
+       struct dp83867_private *dp83867 =
+               (struct dp83867_private *)phydev->priv;
+       u16 val;
+
+       val = phy_read_mmd_indirect(phydev, DP83867_CFG4, DP83867_DEVADDR);
+
+       if (dp83867->port_mirroring == DP83867_PORT_MIRRORING_EN)
+               val |= DP83867_CFG4_PORT_MIRROR_EN;
+       else
+               val &= ~DP83867_CFG4_PORT_MIRROR_EN;
+
+       phy_write_mmd_indirect(phydev, DP83867_CFG4, DP83867_DEVADDR, val);
+
+       return 0;
+}
+
 #ifdef CONFIG_OF_MDIO
 static int dp83867_of_init(struct phy_device *phydev)
 {
@@ -144,6 +178,12 @@ static int dp83867_of_init(struct phy_device *phydev)
             phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID))
                return ret;
 
+       if (of_property_read_bool(of_node, "enet-phy-lane-swap"))
+               dp83867->port_mirroring = DP83867_PORT_MIRRORING_EN;
+
+       if (of_property_read_bool(of_node, "enet-phy-lane-no-swap"))
+               dp83867->port_mirroring = DP83867_PORT_MIRRORING_DIS;
+
        return of_property_read_u32(of_node, "ti,fifo-depth",
                                   &dp83867->fifo_depth);
 }
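 /* Illustrative device-tree fragment (assumed, not part of this change)
  * showing how a board opts in to the properties parsed above:
  *
  *	ethernet-phy@0 {
  *		reg = <0>;
  *		ti,fifo-depth = <1>;
  *		enet-phy-lane-swap;
  *	};
  *
  * "enet-phy-lane-no-swap" would force port mirroring off instead.
  */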
@@ -157,7 +197,7 @@ static int dp83867_of_init(struct phy_device *phydev)
 static int dp83867_config_init(struct phy_device *phydev)
 {
        struct dp83867_private *dp83867;
-       int ret, val;
+       int ret, val, bs;
        u16 delay;
 
        if (!phydev->priv) {
@@ -180,6 +220,22 @@ static int dp83867_config_init(struct phy_device *phydev)
                        return val;
                val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
                val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
+
+               /* The code below checks whether the "port mirroring" strap
+                * (N/A MODE4) was enabled during power-on bootstrap.
+                *
+                * If that strap is set by mistake, the PHY can end up in an
+                * internal test mode in which RGMII transmission is
+                * disabled.
+                *
+                * To detect this case, check bit 11 (marked RESERVED) of
+                * the STRAP_STS1 register.
+                */
+
+               bs = phy_read_mmd_indirect(phydev, DP83867_STRAP_STS1,
+                                          DP83867_DEVADDR);
+               if (bs & DP83867_STRAP_STS1_RESERVED)
+                       val &= ~DP83867_PHYCR_RESERVED_MASK;
+
                ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
                if (ret)
                        return ret;
@@ -228,6 +284,9 @@ static int dp83867_config_init(struct phy_device *phydev)
                phy_write(phydev, DP83867_CFG3, val);
        }
 
+       if (dp83867->port_mirroring != DP83867_PORT_MIRRORING_KEEP)
+               dp83867_config_port_mirroring(phydev);
+
        return 0;
 }
 
index ed0d235cf850ea11564bb446f8c8fc53879a9e73..f9d0fa315a47624409cb054e762a6c8b6537a7b6 100644 (file)
  */
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <linux/ctype.h>
 #include <linux/errno.h>
 #include <linux/unistd.h>
+#include <linux/hwmon.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #define MII_88E1121_PHY_MSCR_TX_DELAY  BIT(4)
 #define MII_88E1121_PHY_MSCR_DELAY_MASK        (~(0x3 << 4))
 
+#define MII_88E1121_MISC_TEST                          0x1a
+#define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK      0x1f00
+#define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT     8
+#define MII_88E1510_MISC_TEST_TEMP_IRQ_EN              BIT(7)
+#define MII_88E1510_MISC_TEST_TEMP_IRQ                 BIT(6)
+#define MII_88E1121_MISC_TEST_TEMP_SENSOR_EN           BIT(5)
+#define MII_88E1121_MISC_TEST_TEMP_MASK                        0x1f
+
+#define MII_88E1510_TEMP_SENSOR                0x1b
+#define MII_88E1510_TEMP_SENSOR_MASK   0xff
+
 #define MII_88E1318S_PHY_MSCR1_REG     16
 #define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6)
 
@@ -172,6 +185,8 @@ static struct marvell_hw_stat marvell_hw_stats[] = {
 
 struct marvell_priv {
        u64 stats[ARRAY_SIZE(marvell_hw_stats)];
+       char *hwmon_name;
+       struct device *hwmon_dev;
 };
 
 static int marvell_ack_interrupt(struct phy_device *phydev)
@@ -1468,6 +1483,371 @@ static void marvell_get_stats(struct phy_device *phydev,
                data[i] = marvell_get_stat(phydev, i);
 }
 
+#ifdef CONFIG_HWMON
+static int m88e1121_get_temp(struct phy_device *phydev, long *temp)
+{
+       int ret;
+       int val;
+
+       *temp = 0;
+
+       mutex_lock(&phydev->lock);
+
+       ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
+       if (ret < 0)
+               goto error;
+
+       /* Enable temperature sensor */
+       ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+       if (ret < 0)
+               goto error;
+
+       ret = phy_write(phydev, MII_88E1121_MISC_TEST,
+                       ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
+       if (ret < 0)
+               goto error;
+
+       /* Wait for temperature to stabilize */
+       usleep_range(10000, 12000);
+
+       val = phy_read(phydev, MII_88E1121_MISC_TEST);
+       if (val < 0) {
+               ret = val;
+               goto error;
+       }
+
+       /* Disable temperature sensor */
+       ret = phy_write(phydev, MII_88E1121_MISC_TEST,
+                       val & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
+       if (ret < 0)
+               goto error;
+
+       *temp = ((val & MII_88E1121_MISC_TEST_TEMP_MASK) - 5) * 5000;
+
+error:
+       phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+       mutex_unlock(&phydev->lock);
+
+       return ret;
+}
+
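+/* Worked example (raw value assumed for illustration): the 5-bit sensor
+ * field above counts in 5 degree Celsius steps, with raw 5 meaning 0 C;
+ * raw 0x13 (19) therefore yields (19 - 5) * 5000 = 70000 mC, i.e. 70 C,
+ * and raw 0 reads back as -25 C.
+ */
+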
+static int m88e1121_hwmon_read(struct device *dev,
+                              enum hwmon_sensor_types type,
+                              u32 attr, int channel, long *temp)
+{
+       struct phy_device *phydev = dev_get_drvdata(dev);
+       int err;
+
+       switch (attr) {
+       case hwmon_temp_input:
+               err = m88e1121_get_temp(phydev, temp);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return err;
+}
+
+static umode_t m88e1121_hwmon_is_visible(const void *data,
+                                        enum hwmon_sensor_types type,
+                                        u32 attr, int channel)
+{
+       if (type != hwmon_temp)
+               return 0;
+
+       switch (attr) {
+       case hwmon_temp_input:
+               return 0444;
+       default:
+               return 0;
+       }
+}
+
+static u32 m88e1121_hwmon_chip_config[] = {
+       HWMON_C_REGISTER_TZ,
+       0
+};
+
+static const struct hwmon_channel_info m88e1121_hwmon_chip = {
+       .type = hwmon_chip,
+       .config = m88e1121_hwmon_chip_config,
+};
+
+static u32 m88e1121_hwmon_temp_config[] = {
+       HWMON_T_INPUT,
+       0
+};
+
+static const struct hwmon_channel_info m88e1121_hwmon_temp = {
+       .type = hwmon_temp,
+       .config = m88e1121_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *m88e1121_hwmon_info[] = {
+       &m88e1121_hwmon_chip,
+       &m88e1121_hwmon_temp,
+       NULL
+};
+
+static const struct hwmon_ops m88e1121_hwmon_hwmon_ops = {
+       .is_visible = m88e1121_hwmon_is_visible,
+       .read = m88e1121_hwmon_read,
+};
+
+static const struct hwmon_chip_info m88e1121_hwmon_chip_info = {
+       .ops = &m88e1121_hwmon_hwmon_ops,
+       .info = m88e1121_hwmon_info,
+};
+
+static int m88e1510_get_temp(struct phy_device *phydev, long *temp)
+{
+       int ret;
+
+       *temp = 0;
+
+       mutex_lock(&phydev->lock);
+
+       ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
+       if (ret < 0)
+               goto error;
+
+       ret = phy_read(phydev, MII_88E1510_TEMP_SENSOR);
+       if (ret < 0)
+               goto error;
+
+       *temp = ((ret & MII_88E1510_TEMP_SENSOR_MASK) - 25) * 1000;
+
+error:
+       phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+       mutex_unlock(&phydev->lock);
+
+       return ret;
+}
+
+static int m88e1510_get_temp_critical(struct phy_device *phydev, long *temp)
+{
+       int ret;
+
+       *temp = 0;
+
+       mutex_lock(&phydev->lock);
+
+       ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
+       if (ret < 0)
+               goto error;
+
+       ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+       if (ret < 0)
+               goto error;
+
+       *temp = (((ret & MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) >>
+                 MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT) * 5) - 25;
+       /* convert to mC */
+       *temp *= 1000;
+
+error:
+       phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+       mutex_unlock(&phydev->lock);
+
+       return ret;
+}
+
+static int m88e1510_set_temp_critical(struct phy_device *phydev, long temp)
+{
+       int ret;
+
+       mutex_lock(&phydev->lock);
+
+       ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
+       if (ret < 0)
+               goto error;
+
+       ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+       if (ret < 0)
+               goto error;
+
+       temp = temp / 1000;
+       temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
+       ret = phy_write(phydev, MII_88E1121_MISC_TEST,
+                       (ret & ~MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) |
+                       (temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT));
+
+error:
+       phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+       mutex_unlock(&phydev->lock);
+
+       return ret;
+}
+
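+/* Worked example (threshold assumed for illustration): the helpers
+ * above encode the critical threshold as reg = T/5 + 5 and decode it as
+ * T = reg * 5 - 25, in degrees Celsius.  Writing 75000 mC stores
+ * DIV_ROUND_CLOSEST(75, 5) + 5 = 20 (0x14), which reads back as
+ * 20 * 5 - 25 = 75 C.
+ */
+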
+static int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm)
+{
+       int ret;
+
+       *alarm = false;
+
+       mutex_lock(&phydev->lock);
+
+       ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
+       if (ret < 0)
+               goto error;
+
+       ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+       if (ret < 0)
+               goto error;
+       *alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ);
+
+error:
+       phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+       mutex_unlock(&phydev->lock);
+
+       return ret;
+}
+
+static int m88e1510_hwmon_read(struct device *dev,
+                              enum hwmon_sensor_types type,
+                              u32 attr, int channel, long *temp)
+{
+       struct phy_device *phydev = dev_get_drvdata(dev);
+       int err;
+
+       switch (attr) {
+       case hwmon_temp_input:
+               err = m88e1510_get_temp(phydev, temp);
+               break;
+       case hwmon_temp_crit:
+               err = m88e1510_get_temp_critical(phydev, temp);
+               break;
+       case hwmon_temp_max_alarm:
+               err = m88e1510_get_temp_alarm(phydev, temp);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return err;
+}
+
+static int m88e1510_hwmon_write(struct device *dev,
+                               enum hwmon_sensor_types type,
+                               u32 attr, int channel, long temp)
+{
+       struct phy_device *phydev = dev_get_drvdata(dev);
+       int err;
+
+       switch (attr) {
+       case hwmon_temp_crit:
+               err = m88e1510_set_temp_critical(phydev, temp);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+       return err;
+}
+
+static umode_t m88e1510_hwmon_is_visible(const void *data,
+                                        enum hwmon_sensor_types type,
+                                        u32 attr, int channel)
+{
+       if (type != hwmon_temp)
+               return 0;
+
+       switch (attr) {
+       case hwmon_temp_input:
+       case hwmon_temp_max_alarm:
+               return 0444;
+       case hwmon_temp_crit:
+               return 0644;
+       default:
+               return 0;
+       }
+}
+
+static u32 m88e1510_hwmon_temp_config[] = {
+       HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM,
+       0
+};
+
+static const struct hwmon_channel_info m88e1510_hwmon_temp = {
+       .type = hwmon_temp,
+       .config = m88e1510_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *m88e1510_hwmon_info[] = {
+       &m88e1121_hwmon_chip,
+       &m88e1510_hwmon_temp,
+       NULL
+};
+
+static const struct hwmon_ops m88e1510_hwmon_hwmon_ops = {
+       .is_visible = m88e1510_hwmon_is_visible,
+       .read = m88e1510_hwmon_read,
+       .write = m88e1510_hwmon_write,
+};
+
+static const struct hwmon_chip_info m88e1510_hwmon_chip_info = {
+       .ops = &m88e1510_hwmon_hwmon_ops,
+       .info = m88e1510_hwmon_info,
+};
+
+static int marvell_hwmon_name(struct phy_device *phydev)
+{
+       struct marvell_priv *priv = phydev->priv;
+       struct device *dev = &phydev->mdio.dev;
+       const char *devname = dev_name(dev);
+       size_t len = strlen(devname);
+       int i, j;
+
+       priv->hwmon_name = devm_kzalloc(dev, len + 1, GFP_KERNEL);
+       if (!priv->hwmon_name)
+               return -ENOMEM;
+
+       for (i = j = 0; i < len && devname[i]; i++) {
+               if (isalnum(devname[i]))
+                       priv->hwmon_name[j++] = devname[i];
+       }
+
+       return 0;
+}
+
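+/* Example (device name assumed for illustration): the hwmon core is
+ * strict about which characters a device name may contain, so only
+ * alphanumeric characters are kept, e.g.
+ *
+ *	dev_name() == "mdio_bus:01"  ->  hwmon_name == "mdiobus01"
+ */
+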
+static int marvell_hwmon_probe(struct phy_device *phydev,
+                              const struct hwmon_chip_info *chip)
+{
+       struct marvell_priv *priv = phydev->priv;
+       struct device *dev = &phydev->mdio.dev;
+       int err;
+
+       err = marvell_hwmon_name(phydev);
+       if (err)
+               return err;
+
+       priv->hwmon_dev = devm_hwmon_device_register_with_info(
+               dev, priv->hwmon_name, phydev, chip, NULL);
+
+       return PTR_ERR_OR_ZERO(priv->hwmon_dev);
+}
+
+static int m88e1121_hwmon_probe(struct phy_device *phydev)
+{
+       return marvell_hwmon_probe(phydev, &m88e1121_hwmon_chip_info);
+}
+
+static int m88e1510_hwmon_probe(struct phy_device *phydev)
+{
+       return marvell_hwmon_probe(phydev, &m88e1510_hwmon_chip_info);
+}
+#else
+static int m88e1121_hwmon_probe(struct phy_device *phydev)
+{
+       return 0;
+}
+
+static int m88e1510_hwmon_probe(struct phy_device *phydev)
+{
+       return 0;
+}
+#endif
+
 static int marvell_probe(struct phy_device *phydev)
 {
        struct marvell_priv *priv;
@@ -1481,14 +1861,47 @@ static int marvell_probe(struct phy_device *phydev)
        return 0;
 }
 
+static int m88e1121_probe(struct phy_device *phydev)
+{
+       int err;
+
+       err = marvell_probe(phydev);
+       if (err)
+               return err;
+
+       return m88e1121_hwmon_probe(phydev);
+}
+
+static int m88e1510_probe(struct phy_device *phydev)
+{
+       int err;
+
+       err = marvell_probe(phydev);
+       if (err)
+               return err;
+
+       return m88e1510_hwmon_probe(phydev);
+}
+
+static void marvell_remove(struct phy_device *phydev)
+{
+       /* Nothing to unregister by hand: the hwmon device is registered
+        * through devm_hwmon_device_register_with_info(), so it is
+        * released automatically together with the PHY device.
+        */
+}
+
 static struct phy_driver marvell_drivers[] = {
        {
                .phy_id = MARVELL_PHY_ID_88E1101,
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E1101",
                .features = PHY_GBIT_FEATURES,
-               .probe = marvell_probe,
                .flags = PHY_HAS_INTERRUPT,
+               .probe = marvell_probe,
                .config_init = &marvell_config_init,
                .config_aneg = &marvell_config_aneg,
                .read_status = &genphy_read_status,
@@ -1560,7 +1973,8 @@ static struct phy_driver marvell_drivers[] = {
                .name = "Marvell 88E1121R",
                .features = PHY_GBIT_FEATURES,
                .flags = PHY_HAS_INTERRUPT,
-               .probe = marvell_probe,
+               .probe = &m88e1121_probe,
+               .remove = &marvell_remove,
                .config_init = &m88e1121_config_init,
                .config_aneg = &m88e1121_config_aneg,
                .read_status = &marvell_read_status,
@@ -1672,7 +2086,8 @@ static struct phy_driver marvell_drivers[] = {
                .name = "Marvell 88E1510",
                .features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE,
                .flags = PHY_HAS_INTERRUPT,
-               .probe = marvell_probe,
+               .probe = &m88e1510_probe,
+               .remove = &marvell_remove,
                .config_init = &m88e1510_config_init,
                .config_aneg = &m88e1510_config_aneg,
                .read_status = &marvell_read_status,
@@ -1693,7 +2108,28 @@ static struct phy_driver marvell_drivers[] = {
                .name = "Marvell 88E1540",
                .features = PHY_GBIT_FEATURES,
                .flags = PHY_HAS_INTERRUPT,
-               .probe = marvell_probe,
+               .probe = m88e1510_probe,
+               .remove = &marvell_remove,
+               .config_init = &marvell_config_init,
+               .config_aneg = &m88e1510_config_aneg,
+               .read_status = &marvell_read_status,
+               .ack_interrupt = &marvell_ack_interrupt,
+               .config_intr = &marvell_config_intr,
+               .did_interrupt = &m88e1121_did_interrupt,
+               .resume = &genphy_resume,
+               .suspend = &genphy_suspend,
+               .get_sset_count = marvell_get_sset_count,
+               .get_strings = marvell_get_strings,
+               .get_stats = marvell_get_stats,
+       },
+       {
+               .phy_id = MARVELL_PHY_ID_88E1545,
+               .phy_id_mask = MARVELL_PHY_ID_MASK,
+               .name = "Marvell 88E1545",
+               .probe = m88e1510_probe,
+               .remove = &marvell_remove,
+               .features = PHY_GBIT_FEATURES,
+               .flags = PHY_HAS_INTERRUPT,
                .config_init = &marvell_config_init,
                .config_aneg = &m88e1510_config_aneg,
                .read_status = &marvell_read_status,
@@ -1726,6 +2162,25 @@ static struct phy_driver marvell_drivers[] = {
                .get_strings = marvell_get_strings,
                .get_stats = marvell_get_stats,
        },
+       {
+               .phy_id = MARVELL_PHY_ID_88E6390,
+               .phy_id_mask = MARVELL_PHY_ID_MASK,
+               .name = "Marvell 88E6390",
+               .features = PHY_GBIT_FEATURES,
+               .flags = PHY_HAS_INTERRUPT,
+               .probe = m88e1510_probe,
+               .config_init = &marvell_config_init,
+               .config_aneg = &m88e1510_config_aneg,
+               .read_status = &marvell_read_status,
+               .ack_interrupt = &marvell_ack_interrupt,
+               .config_intr = &marvell_config_intr,
+               .did_interrupt = &m88e1121_did_interrupt,
+               .resume = &genphy_resume,
+               .suspend = &genphy_suspend,
+               .get_sset_count = marvell_get_sset_count,
+               .get_strings = marvell_get_strings,
+               .get_stats = marvell_get_stats,
+       },
 };
 
 module_phy_driver(marvell_drivers);
@@ -1743,7 +2198,9 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
        { MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK },
        { MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK },
        { MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
        { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK },
        { }
 };
 
diff --git a/drivers/net/phy/mdio-boardinfo.c b/drivers/net/phy/mdio-boardinfo.c
new file mode 100644 (file)
index 0000000..6b988f7
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+ * mdio-boardinfo - Collect pre-declarations for MDIO devices
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+
+#include "mdio-boardinfo.h"
+
+static LIST_HEAD(mdio_board_list);
+static DEFINE_MUTEX(mdio_board_lock);
+
+/**
+ * mdiobus_setup_mdiodev_from_board_info - create and setup MDIO devices
+ * from pre-collected board-specific MDIO information
+ * @bus: MII bus pointer
+ * Context: can sleep
+ */
+void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus)
+{
+       struct mdio_board_entry *be;
+       struct mdio_device *mdiodev;
+       struct mdio_board_info *bi;
+       int ret;
+
+       mutex_lock(&mdio_board_lock);
+       list_for_each_entry(be, &mdio_board_list, list) {
+               bi = &be->board_info;
+
+               if (strcmp(bus->id, bi->bus_id))
+                       continue;
+
+               mdiodev = mdio_device_create(bus, bi->mdio_addr);
+               if (IS_ERR(mdiodev))
+                       continue;
+
+               strncpy(mdiodev->modalias, bi->modalias,
+                       sizeof(mdiodev->modalias));
+               mdiodev->bus_match = mdio_device_bus_match;
+               mdiodev->dev.platform_data = (void *)bi->platform_data;
+
+               ret = mdio_device_register(mdiodev);
+               if (ret) {
+                       mdio_device_free(mdiodev);
+                       continue;
+               }
+       }
+       mutex_unlock(&mdio_board_lock);
+}
+
+/**
+ * mdiobus_register_board_info - register MDIO devices for a given board
+ * @info: array of devices descriptors
+ * @n: number of descriptors provided
+ * Context: can sleep
+ *
+ * The board info passed can be marked with __initdata, but any embedded
+ * pointers (platform_data etc.) are copied as-is and must remain valid
+ */
+int mdiobus_register_board_info(const struct mdio_board_info *info,
+                               unsigned int n)
+{
+       struct mdio_board_entry *be;
+       unsigned int i;
+
+       be = kcalloc(n, sizeof(*be), GFP_KERNEL);
+       if (!be)
+               return -ENOMEM;
+
+       for (i = 0; i < n; i++, be++, info++) {
+               memcpy(&be->board_info, info, sizeof(*info));
+               mutex_lock(&mdio_board_lock);
+               list_add_tail(&be->list, &mdio_board_list);
+               mutex_unlock(&mdio_board_lock);
+       }
+
+       return 0;
+}
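+
+/* Usage sketch (board code assumed for illustration; the names below
+ * are hypothetical, only the struct fields appear in this file):
+ *
+ *	static const struct mdio_board_info my_switch_info = {
+ *		.bus_id        = "fixed-0",
+ *		.modalias      = "my-switch",
+ *		.mdio_addr     = 30,
+ *		.platform_data = &my_switch_pdata,
+ *	};
+ *
+ *	mdiobus_register_board_info(&my_switch_info, 1);
+ *
+ * The device is then created once the bus named "fixed-0" registers,
+ * via mdiobus_setup_mdiodev_from_board_info() above.
+ */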
diff --git a/drivers/net/phy/mdio-boardinfo.h b/drivers/net/phy/mdio-boardinfo.h
new file mode 100644 (file)
index 0000000..00f9816
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * mdio-boardinfo.h - board info interface internal to the mdio_bus
+ * component
+ */
+
+#ifndef __MDIO_BOARD_INFO_H
+#define __MDIO_BOARD_INFO_H
+
+#include <linux/phy.h>
+#include <linux/mutex.h>
+
+struct mdio_board_entry {
+       struct list_head        list;
+       struct mdio_board_info  board_info;
+};
+
+void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus);
+
+#endif /* __MDIO_BOARD_INFO_H */
index 27ab63064f95884d426720a6f529d93e236fd746..7faa79b254ef761c4842910e656993fb808ca68b 100644 (file)
@@ -32,8 +32,7 @@
 
 struct mdio_gpio_info {
        struct mdiobb_ctrl ctrl;
-       int mdc, mdio, mdo;
-       int mdc_active_low, mdio_active_low, mdo_active_low;
+       struct gpio_desc *mdc, *mdio, *mdo;
 };
 
 static void *mdio_gpio_of_get_data(struct platform_device *pdev)
@@ -80,16 +79,14 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
                 * assume the pin serves as pull-up. If direction is
                 * output, the default value is high.
                 */
-               gpio_set_value_cansleep(bitbang->mdo,
-                                       1 ^ bitbang->mdo_active_low);
+               gpiod_set_value(bitbang->mdo, 1);
                return;
        }
 
        if (dir)
-               gpio_direction_output(bitbang->mdio,
-                                     1 ^ bitbang->mdio_active_low);
+               gpiod_direction_output(bitbang->mdio, 1);
        else
-               gpio_direction_input(bitbang->mdio);
+               gpiod_direction_input(bitbang->mdio);
 }
 
 static int mdio_get(struct mdiobb_ctrl *ctrl)
@@ -97,8 +94,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
        struct mdio_gpio_info *bitbang =
                container_of(ctrl, struct mdio_gpio_info, ctrl);
 
-       return gpio_get_value_cansleep(bitbang->mdio) ^
-               bitbang->mdio_active_low;
+       return gpiod_get_value(bitbang->mdio);
 }
 
 static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -107,11 +103,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
                container_of(ctrl, struct mdio_gpio_info, ctrl);
 
        if (bitbang->mdo)
-               gpio_set_value_cansleep(bitbang->mdo,
-                                       what ^ bitbang->mdo_active_low);
+               gpiod_set_value(bitbang->mdo, what);
        else
-               gpio_set_value_cansleep(bitbang->mdio,
-                                       what ^ bitbang->mdio_active_low);
+               gpiod_set_value(bitbang->mdio, what);
 }
 
 static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -119,7 +113,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
        struct mdio_gpio_info *bitbang =
                container_of(ctrl, struct mdio_gpio_info, ctrl);
 
-       gpio_set_value_cansleep(bitbang->mdc, what ^ bitbang->mdc_active_low);
+       gpiod_set_value(bitbang->mdc, what);
 }
 
 static struct mdiobb_ops mdio_gpio_ops = {
@@ -137,6 +131,10 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
        struct mii_bus *new_bus;
        struct mdio_gpio_info *bitbang;
        int i;
+       int mdc, mdio, mdo;
+       unsigned long mdc_flags = GPIOF_OUT_INIT_LOW;
+       unsigned long mdio_flags = GPIOF_DIR_IN;
+       unsigned long mdo_flags = GPIOF_OUT_INIT_HIGH;
 
        bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL);
        if (!bitbang)
@@ -144,12 +142,20 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
 
        bitbang->ctrl.ops = &mdio_gpio_ops;
        bitbang->ctrl.reset = pdata->reset;
-       bitbang->mdc = pdata->mdc;
-       bitbang->mdc_active_low = pdata->mdc_active_low;
-       bitbang->mdio = pdata->mdio;
-       bitbang->mdio_active_low = pdata->mdio_active_low;
-       bitbang->mdo = pdata->mdo;
-       bitbang->mdo_active_low = pdata->mdo_active_low;
+       mdc = pdata->mdc;
+       bitbang->mdc = gpio_to_desc(mdc);
+       if (pdata->mdc_active_low)
+               mdc_flags = GPIOF_OUT_INIT_HIGH | GPIOF_ACTIVE_LOW;
+       mdio = pdata->mdio;
+       bitbang->mdio = gpio_to_desc(mdio);
+       if (pdata->mdio_active_low)
+               mdio_flags |= GPIOF_ACTIVE_LOW;
+       mdo = pdata->mdo;
+       if (mdo) {
+               bitbang->mdo = gpio_to_desc(mdo);
+               if (pdata->mdo_active_low)
+                       mdo_flags = GPIOF_OUT_INIT_LOW | GPIOF_ACTIVE_LOW;
+       }
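+
+       /* In the legacy GPIO API the GPIOF_OUT_INIT_* level requested
+        * below is a raw, physical level, while gpiod_set_value() above
+        * takes logical values.  An active-low MDC is therefore requested
+        * as GPIOF_OUT_INIT_HIGH | GPIOF_ACTIVE_LOW: physically high,
+        * logically 0, matching the old "0 ^ mdc_active_low", with the
+        * manual "what ^ *_active_low" inversions no longer needed.
+        */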
 
        new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
        if (!new_bus)
@@ -174,20 +180,14 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
        else
                strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE);
 
-       if (devm_gpio_request(dev, bitbang->mdc, "mdc"))
+       if (devm_gpio_request_one(dev, mdc, mdc_flags, "mdc"))
                goto out_free_bus;
 
-       if (devm_gpio_request(dev, bitbang->mdio, "mdio"))
+       if (devm_gpio_request_one(dev, mdio, mdio_flags, "mdio"))
                goto out_free_bus;
 
-       if (bitbang->mdo) {
-               if (devm_gpio_request(dev, bitbang->mdo, "mdo"))
-                       goto out_free_bus;
-               gpio_direction_output(bitbang->mdo, 1);
-               gpio_direction_input(bitbang->mdio);
-       }
-
-       gpio_direction_output(bitbang->mdc, 0);
+       if (mdo && devm_gpio_request_one(dev, mdo, mdo_flags, "mdo"))
+               goto out_free_bus;
 
        dev_set_drvdata(dev, new_bus);
 
index 92af182951bec5cc0c6374242ec2a59570c6e40b..f095051beb549133db52cf3acd41ad9e3dbed29c 100644 (file)
@@ -311,6 +311,30 @@ static acpi_status acpi_register_phy(acpi_handle handle, u32 lvl,
 }
 #endif
 
+static const struct of_device_id xgene_mdio_of_match[] = {
+       {
+               .compatible = "apm,xgene-mdio-rgmii",
+               .data = (void *)XGENE_MDIO_RGMII
+       },
+       {
+               .compatible = "apm,xgene-mdio-xfi",
+               .data = (void *)XGENE_MDIO_XFI
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, xgene_mdio_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id xgene_mdio_acpi_match[] = {
+       { "APMC0D65", XGENE_MDIO_RGMII },
+       { "APMC0D66", XGENE_MDIO_XFI },
+       { }
+};
+
+MODULE_DEVICE_TABLE(acpi, xgene_mdio_acpi_match);
+#endif
+
 static int xgene_mdio_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -430,32 +454,6 @@ static int xgene_mdio_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_OF
-static const struct of_device_id xgene_mdio_of_match[] = {
-       {
-               .compatible = "apm,xgene-mdio-rgmii",
-               .data = (void *)XGENE_MDIO_RGMII
-       },
-       {
-               .compatible = "apm,xgene-mdio-xfi",
-               .data = (void *)XGENE_MDIO_XFI
-       },
-       {},
-};
-
-MODULE_DEVICE_TABLE(of, xgene_mdio_of_match);
-#endif
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_mdio_acpi_match[] = {
-       { "APMC0D65", XGENE_MDIO_RGMII },
-       { "APMC0D66", XGENE_MDIO_XFI },
-       { }
-};
-
-MODULE_DEVICE_TABLE(acpi, xgene_mdio_acpi_match);
-#endif
-
 static struct platform_driver xgene_mdio_driver = {
        .driver = {
                .name = "xgene-mdio",
index 354241b53c1d00fa6d6b8698744be32770dc210f..594a11d42401d2462a2e0746b96bc88a4503440f 100644 (file)
@@ -132,10 +132,6 @@ static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
 #define GET_BIT(field, src) \
                xgene_enet_get_field_value(field ## _POS, 1, src)
 
-static const struct of_device_id xgene_mdio_of_match[];
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgene_mdio_acpi_match[];
-#endif
 int xgene_mdio_rgmii_read(struct mii_bus *bus, int phy_id, int reg);
 int xgene_mdio_rgmii_write(struct mii_bus *bus, int phy_id, int reg, u16 data);
 struct phy_device *xgene_enet_phy_register(struct mii_bus *bus, int phy_addr);
index 653d076eafe5068672f177d565eec07798a782e3..fa7d51f14869efa8b94ce161e5bd4cb96b4951d3 100644 (file)
@@ -41,6 +41,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/mdio.h>
 
+#include "mdio-boardinfo.h"
+
 int mdiobus_register_device(struct mdio_device *mdiodev)
 {
        if (mdiodev->bus->mdio_map[mdiodev->addr])
@@ -343,6 +345,8 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
                }
        }
 
+       mdiobus_setup_mdiodev_from_board_info(bus);
+
        bus->state = MDIOBUS_REGISTERED;
        pr_info("%s: probed\n", bus->name);
        return 0;
index 43c8fd46504bc2e563597ef7ca24fff3d28e89b1..e24f28924af8953d288601763849cd7eb71cd827 100644 (file)
@@ -34,6 +34,17 @@ static void mdio_device_release(struct device *dev)
        kfree(to_mdio_device(dev));
 }
 
+int mdio_device_bus_match(struct device *dev, struct device_driver *drv)
+{
+       struct mdio_device *mdiodev = to_mdio_device(dev);
+       struct mdio_driver *mdiodrv = to_mdio_driver(drv);
+
+       if (mdiodrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY)
+               return 0;
+
+       return strcmp(mdiodev->modalias, drv->name) == 0;
+}
+
 struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr)
 {
        struct mdio_device *mdiodev;
@@ -67,7 +78,7 @@ int mdio_device_register(struct mdio_device *mdiodev)
 {
        int err;
 
-       dev_info(&mdiodev->dev, "mdio_device_register\n");
+       dev_dbg(&mdiodev->dev, "mdio_device_register\n");
 
        err = mdiobus_register_device(mdiodev);
        if (err)
index e03ead81fffb563339d96771e4f14c849b400ed6..650c2667d523d26ee0e9c122b9d8910e5ba1c0d8 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/phy.h>
 #include <linux/of.h>
 #include <linux/netdevice.h>
+#include <dt-bindings/net/mscc-phy-vsc8531.h>
 
 enum rgmii_rx_clock_delay {
        RGMII_RX_CLK_DELAY_0_2_NS = 0,
@@ -52,6 +53,11 @@ enum rgmii_rx_clock_delay {
 #define MSCC_PHY_DEV_AUX_CNTL            28
 #define HP_AUTO_MDIX_X_OVER_IND_MASK     0x2000
 
+#define MSCC_PHY_LED_MODE_SEL            29
+#define LED_1_MODE_SEL_MASK              0x00F0
+#define LED_0_MODE_SEL_MASK              0x000F
+#define LED_1_MODE_SEL_POS               4
+
 #define MSCC_EXT_PAGE_ACCESS             31
 #define MSCC_PHY_PAGE_STANDARD           0x0000 /* Standard registers */
 #define MSCC_PHY_PAGE_EXTENDED           0x0001 /* Extended registers */
@@ -99,6 +105,8 @@ enum rgmii_rx_clock_delay {
 
 struct vsc8531_private {
        int rate_magic;
+       u8 led_0_mode;
+       u8 led_1_mode;
 };
 
 #ifdef CONFIG_OF_MDIO
@@ -123,6 +131,29 @@ static int vsc85xx_phy_page_set(struct phy_device *phydev, u8 page)
        return rc;
 }
 
+static int vsc85xx_led_cntl_set(struct phy_device *phydev,
+                               u8 led_num,
+                               u8 mode)
+{
+       int rc;
+       u16 reg_val;
+
+       mutex_lock(&phydev->lock);
+       reg_val = phy_read(phydev, MSCC_PHY_LED_MODE_SEL);
+       if (led_num) {
+               reg_val &= ~LED_1_MODE_SEL_MASK;
+               reg_val |= (((u16)mode << LED_1_MODE_SEL_POS) &
+                           LED_1_MODE_SEL_MASK);
+       } else {
+               reg_val &= ~LED_0_MODE_SEL_MASK;
+               reg_val |= ((u16)mode & LED_0_MODE_SEL_MASK);
+       }
+       rc = phy_write(phydev, MSCC_PHY_LED_MODE_SEL, reg_val);
+       mutex_unlock(&phydev->lock);
+
+       return rc;
+}
+
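+/* Worked example (mode values assumed for illustration): the register
+ * packs one 4-bit mode per LED, LED1 in bits 7:4 and LED0 in bits 3:0.
+ * So led_num = 1, mode = 0x4 becomes
+ * reg_val = (reg_val & ~0x00f0) | 0x0040, while led_num = 0, mode = 0x1
+ * becomes reg_val = (reg_val & ~0x000f) | 0x0001.
+ */
+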
 static int vsc85xx_mdix_get(struct phy_device *phydev, u8 *mdix)
 {
        u16 reg_val;
@@ -370,11 +401,41 @@ static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
 
        return -EINVAL;
 }
+
+static int vsc85xx_dt_led_mode_get(struct phy_device *phydev,
+                                  char *led,
+                                  u8 default_mode)
+{
+       struct device *dev = &phydev->mdio.dev;
+       struct device_node *of_node = dev->of_node;
+       u8 led_mode;
+       int err;
+
+       if (!of_node)
+               return -ENODEV;
+
+       led_mode = default_mode;
+       err = of_property_read_u8(of_node, led, &led_mode);
+       if (!err && (led_mode > 15 || led_mode == 7 || led_mode == 11)) {
+               phydev_err(phydev, "DT %s invalid\n", led);
+               return -EINVAL;
+       }
+
+       return led_mode;
+}
+
 #else
 static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev)
 {
        return 0;
 }
+
+static int vsc85xx_dt_led_mode_get(struct phy_device *phydev,
+                                  char *led,
+                                  u8 default_mode)
+{
+       return default_mode;
+}
 #endif /* CONFIG_OF_MDIO */
 
 static int vsc85xx_edge_rate_cntl_set(struct phy_device *phydev, u8 edge_rate)
@@ -499,6 +560,14 @@ static int vsc85xx_config_init(struct phy_device *phydev)
        if (rc)
                return rc;
 
+       rc = vsc85xx_led_cntl_set(phydev, 1, vsc8531->led_1_mode);
+       if (rc)
+               return rc;
+
+       rc = vsc85xx_led_cntl_set(phydev, 0, vsc8531->led_0_mode);
+       if (rc)
+               return rc;
+
        rc = genphy_config_init(phydev);
 
        return rc;
@@ -555,8 +624,9 @@ static int vsc85xx_read_status(struct phy_device *phydev)
 
 static int vsc85xx_probe(struct phy_device *phydev)
 {
-       int rate_magic;
        struct vsc8531_private *vsc8531;
+       int rate_magic;
+       int led_mode;
 
        rate_magic = vsc85xx_edge_rate_magic_get(phydev);
        if (rate_magic < 0)
@@ -570,6 +640,19 @@ static int vsc85xx_probe(struct phy_device *phydev)
 
        vsc8531->rate_magic = rate_magic;
 
+       /* LED[0] and LED[1] mode */
+       led_mode = vsc85xx_dt_led_mode_get(phydev, "vsc8531,led-0-mode",
+                                          VSC8531_LINK_1000_ACTIVITY);
+       if (led_mode < 0)
+               return led_mode;
+       vsc8531->led_0_mode = led_mode;
+
+       led_mode = vsc85xx_dt_led_mode_get(phydev, "vsc8531,led-1-mode",
+                                          VSC8531_LINK_100_ACTIVITY);
+       if (led_mode < 0)
+               return led_mode;
+       vsc8531->led_1_mode = led_mode;
+
        return 0;
 }
 
index 3d3b1f4339eff6be6713f968da0aa55eaa9931fb..a411b43a69eb436126cf9dcc85bb6ebb04c9af2a 100644 (file)
@@ -1297,7 +1297,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        return err;
 }
 
-static struct rtnl_link_stats64*
+static void
 ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
 {
        struct ppp *ppp = netdev_priv(dev);
@@ -1317,8 +1317,6 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
        stats64->rx_dropped       = dev->stats.rx_dropped;
        stats64->tx_dropped       = dev->stats.tx_dropped;
        stats64->rx_length_errors = dev->stats.rx_length_errors;
-
-       return stats64;
 }
 
 static int ppp_dev_init(struct net_device *dev)
index 9841f3dc068227ae93777eaca1ad30869d8670b1..08db4d687533c73e2b45b56cb5c9307e58f9a58e 100644 (file)
@@ -566,7 +566,7 @@ static int sl_change_mtu(struct net_device *dev, int new_mtu)
 
 /* Netdevice get statistics request */
 
-static struct rtnl_link_stats64 *
+static void
 sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct net_device_stats *devstats = &dev->stats;
@@ -597,7 +597,6 @@ sl_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
                stats->collisions     += comp->sls_o_misses;
        }
 #endif
-       return stats;
 }
 
 /* Netdevice register callback */
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
new file mode 100644 (file)
index 0000000..35b55a2
--- /dev/null
@@ -0,0 +1,1285 @@
+#include <linux/etherdevice.h>
+#include <linux/if_tap.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/nsproxy.h>
+#include <linux/compat.h>
+#include <linux/if_tun.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/cache.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include <linux/idr.h>
+#include <linux/fs.h>
+#include <linux/uio.h>
+
+#include <net/net_namespace.h>
+#include <net/rtnetlink.h>
+#include <net/sock.h>
+#include <linux/virtio_net.h>
+#include <linux/skb_array.h>
+
+#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)
+
+#define TAP_VNET_LE 0x80000000
+#define TAP_VNET_BE 0x40000000
+
+#ifdef CONFIG_TUN_VNET_CROSS_LE
+static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
+{
+       return q->flags & TAP_VNET_BE ? false :
+               virtio_legacy_is_little_endian();
+}
+
+static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
+{
+       int s = !!(q->flags & TAP_VNET_BE);
+
+       if (put_user(s, sp))
+               return -EFAULT;
+
+       return 0;
+}
+
+static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
+{
+       int s;
+
+       if (get_user(s, sp))
+               return -EFAULT;
+
+       if (s)
+               q->flags |= TAP_VNET_BE;
+       else
+               q->flags &= ~TAP_VNET_BE;
+
+       return 0;
+}
+#else
+static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
+{
+       return virtio_legacy_is_little_endian();
+}
+
+static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
+{
+       return -EINVAL;
+}
+
+static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_TUN_VNET_CROSS_LE */
+
+static inline bool tap_is_little_endian(struct tap_queue *q)
+{
+       return q->flags & TAP_VNET_LE ||
+               tap_legacy_is_little_endian(q);
+}
+
+static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
+{
+       return __virtio16_to_cpu(tap_is_little_endian(q), val);
+}
+
+static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
+{
+       return __cpu_to_virtio16(tap_is_little_endian(q), val);
+}
+
+static struct proto tap_proto = {
+       .name = "tap",
+       .owner = THIS_MODULE,
+       .obj_size = sizeof(struct tap_queue),
+};
+
+#define TAP_NUM_DEVS (1U << MINORBITS)
+
+static LIST_HEAD(major_list);
+
+struct major_info {
+       struct rcu_head rcu;
+       dev_t major;
+       struct idr minor_idr;
+       struct mutex minor_lock;
+       const char *device_name;
+       struct list_head next;
+};
+
+#define GOODCOPY_LEN 128
+
+static const struct proto_ops tap_socket_ops;
+
+#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
+#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
+
+static struct tap_dev *tap_dev_get_rcu(const struct net_device *dev)
+{
+       return rcu_dereference(dev->rx_handler_data);
+}
+
+/*
+ * RCU usage:
+ * The tap_queue and the macvlan_dev are loosely coupled, the
+ * pointers from one to the other can only be read while rcu_read_lock
+ * or rtnl is held.
+ *
+ * Both the file and the macvlan_dev hold a reference on the tap_queue
+ * through sock_hold(&q->sk). When the macvlan_dev goes away first,
+ * q->tap becomes inaccessible. When the file gets closed,
+ * tap_get_queue() fails.
+ *
+ * There may still be references to the struct sock inside of the
+ * queue from outbound SKBs, but these never reference back to the
+ * file or the dev. The data structure is freed through __sk_free
+ * when both our references and any pending SKBs are gone.
+ */
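+/* Reader-side sketch of the rule above (pointer names from this file,
+ * body illustrative):
+ *
+ *	rcu_read_lock();
+ *	tap = rcu_dereference(q->tap);
+ *	if (tap)
+ *		... tap is safe to use until rcu_read_unlock() ...
+ *	rcu_read_unlock();
+ */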
+
+static int tap_enable_queue(struct tap_dev *tap, struct file *file,
+                           struct tap_queue *q)
+{
+       int err = -EINVAL;
+
+       ASSERT_RTNL();
+
+       if (q->enabled)
+               goto out;
+
+       err = 0;
+       rcu_assign_pointer(tap->taps[tap->numvtaps], q);
+       q->queue_index = tap->numvtaps;
+       q->enabled = true;
+
+       tap->numvtaps++;
+out:
+       return err;
+}
+
+/* Requires RTNL */
+static int tap_set_queue(struct tap_dev *tap, struct file *file,
+                        struct tap_queue *q)
+{
+       if (tap->numqueues == MAX_TAP_QUEUES)
+               return -EBUSY;
+
+       rcu_assign_pointer(q->tap, tap);
+       rcu_assign_pointer(tap->taps[tap->numvtaps], q);
+       sock_hold(&q->sk);
+
+       q->file = file;
+       q->queue_index = tap->numvtaps;
+       q->enabled = true;
+       file->private_data = q;
+       list_add_tail(&q->next, &tap->queue_list);
+
+       tap->numvtaps++;
+       tap->numqueues++;
+
+       return 0;
+}
+
+static int tap_disable_queue(struct tap_queue *q)
+{
+       struct tap_dev *tap;
+       struct tap_queue *nq;
+
+       ASSERT_RTNL();
+       if (!q->enabled)
+               return -EINVAL;
+
+       tap = rtnl_dereference(q->tap);
+
+       if (tap) {
+               int index = q->queue_index;
+               BUG_ON(index >= tap->numvtaps);
+               nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);
+               nq->queue_index = index;
+
+               rcu_assign_pointer(tap->taps[index], nq);
+               RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL);
+               q->enabled = false;
+
+               tap->numvtaps--;
+       }
+
+       return 0;
+}
+
+/*
+ * The file owning the queue got closed, give up both
+ * the reference that the files holds as well as the
+ * one from the macvlan_dev if that still exists.
+ *
+ * Using the spinlock makes sure that we don't get
+ * to the queue again after destroying it.
+ */
+static void tap_put_queue(struct tap_queue *q)
+{
+       struct tap_dev *tap;
+
+       rtnl_lock();
+       tap = rtnl_dereference(q->tap);
+
+       if (tap) {
+               if (q->enabled)
+                       BUG_ON(tap_disable_queue(q));
+
+               tap->numqueues--;
+               RCU_INIT_POINTER(q->tap, NULL);
+               sock_put(&q->sk);
+               list_del_init(&q->next);
+       }
+
+       rtnl_unlock();
+
+       synchronize_rcu();
+       sock_put(&q->sk);
+}
+
+/*
+ * Select a queue based on the rxq of the device on which this packet
+ * arrived. If the incoming device is not mq, calculate a flow hash
+ * to select a queue. If all fails, find the first available queue.
+ * Cache tap->numvtaps since it can become zero during the execution
+ * of this function.
+ */
+static struct tap_queue *tap_get_queue(struct tap_dev *tap,
+                                      struct sk_buff *skb)
+{
+       struct tap_queue *queue = NULL;
+       /* Access to taps array is protected by rcu, but access to numvtaps
+        * isn't. Below we use it to lookup a queue, but treat it as a hint
+        * and validate that the result isn't NULL - in case we are
+        * racing against queue removal.
+        */
+       int numvtaps = ACCESS_ONCE(tap->numvtaps);
+       __u32 rxq;
+
+       if (!numvtaps)
+               goto out;
+
+       if (numvtaps == 1)
+               goto single;
+
+       /* Check if we can use flow to select a queue */
+       rxq = skb_get_hash(skb);
+       if (rxq) {
+               queue = rcu_dereference(tap->taps[rxq % numvtaps]);
+               goto out;
+       }
+
+       if (likely(skb_rx_queue_recorded(skb))) {
+               rxq = skb_get_rx_queue(skb);
+
+               while (unlikely(rxq >= numvtaps))
+                       rxq -= numvtaps;
+
+               queue = rcu_dereference(tap->taps[rxq]);
+               goto out;
+       }
+
+single:
+       queue = rcu_dereference(tap->taps[0]);
+out:
+       return queue;
+}
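+
+/* Worked example (numbers assumed for illustration): with numvtaps = 4
+ * and skb_get_hash() returning 0x1234567, the packet is steered to
+ * taps[0x1234567 % 4] = taps[3]; a zero hash falls back to the recorded
+ * rx queue, and a single queue skips hashing entirely.
+ */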
+
+/*
+ * The net_device is going away, give up the reference
+ * that it holds on all queues and safely set the pointer
+ * from the queues to NULL.
+ */
+void tap_del_queues(struct tap_dev *tap)
+{
+       struct tap_queue *q, *tmp;
+
+       ASSERT_RTNL();
+       list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
+               list_del_init(&q->next);
+               RCU_INIT_POINTER(q->tap, NULL);
+               if (q->enabled)
+                       tap->numvtaps--;
+               tap->numqueues--;
+               sock_put(&q->sk);
+       }
+       BUG_ON(tap->numvtaps);
+       BUG_ON(tap->numqueues);
+       /* guarantee that any future tap_set_queue will fail */
+       tap->numvtaps = MAX_TAP_QUEUES;
+}
+EXPORT_SYMBOL_GPL(tap_del_queues);
+
+rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
+{
+       struct sk_buff *skb = *pskb;
+       struct net_device *dev = skb->dev;
+       struct tap_dev *tap;
+       struct tap_queue *q;
+       netdev_features_t features = TAP_FEATURES;
+
+       tap = tap_dev_get_rcu(dev);
+       if (!tap)
+               return RX_HANDLER_PASS;
+
+       q = tap_get_queue(tap, skb);
+       if (!q)
+               return RX_HANDLER_PASS;
+
+       if (__skb_array_full(&q->skb_array))
+               goto drop;
+
+       skb_push(skb, ETH_HLEN);
+
+       /* Apply the forward feature mask so that we perform segmentation
+        * according to the user's wishes.  This only works if VNET_HDR is
+        * enabled.
+        */
+       if (q->flags & IFF_VNET_HDR)
+               features |= tap->tap_features;
+       if (netif_needs_gso(skb, features)) {
+               struct sk_buff *segs = __skb_gso_segment(skb, features, false);
+
+               if (IS_ERR(segs))
+                       goto drop;
+
+               if (!segs) {
+                       if (skb_array_produce(&q->skb_array, skb))
+                               goto drop;
+                       goto wake_up;
+               }
+
+               consume_skb(skb);
+               while (segs) {
+                       struct sk_buff *nskb = segs->next;
+
+                       segs->next = NULL;
+                       if (skb_array_produce(&q->skb_array, segs)) {
+                               kfree_skb(segs);
+                               kfree_skb_list(nskb);
+                               break;
+                       }
+                       segs = nskb;
+               }
+       } else {
+               /* If we receive a partial checksum and the tap side
+                * doesn't support checksum offload, compute the checksum.
+                * Note: it doesn't matter which checksum feature to
+                *        check, we either support them all or none.
+                */
+               if (skb->ip_summed == CHECKSUM_PARTIAL &&
+                   !(features & NETIF_F_CSUM_MASK) &&
+                   skb_checksum_help(skb))
+                       goto drop;
+               if (skb_array_produce(&q->skb_array, skb))
+                       goto drop;
+       }
+
+wake_up:
+       wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
+       return RX_HANDLER_CONSUMED;
+
+drop:
+       /* Count errors/drops only here, thus don't care about args. */
+       if (tap->count_rx_dropped)
+               tap->count_rx_dropped(tap);
+       kfree_skb(skb);
+       return RX_HANDLER_CONSUMED;
+}
+EXPORT_SYMBOL_GPL(tap_handle_frame);
+
+static struct major_info *tap_get_major(int major)
+{
+       struct major_info *tap_major;
+
+       list_for_each_entry_rcu(tap_major, &major_list, next) {
+               if (tap_major->major == major)
+                       return tap_major;
+       }
+
+       return NULL;
+}
+
+int tap_get_minor(dev_t major, struct tap_dev *tap)
+{
+       int retval = -ENOMEM;
+       struct major_info *tap_major;
+
+       rcu_read_lock();
+       tap_major = tap_get_major(MAJOR(major));
+       if (!tap_major) {
+               retval = -EINVAL;
+               goto unlock;
+       }
+
+       mutex_lock(&tap_major->minor_lock);
+       retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_KERNEL);
+       if (retval >= 0) {
+               tap->minor = retval;
+       } else if (retval == -ENOSPC) {
+               netdev_err(tap->dev, "Too many tap devices\n");
+               retval = -EINVAL;
+       }
+       mutex_unlock(&tap_major->minor_lock);
+
+unlock:
+       rcu_read_unlock();
+       return retval < 0 ? retval : 0;
+}
+EXPORT_SYMBOL_GPL(tap_get_minor);
+
+void tap_free_minor(dev_t major, struct tap_dev *tap)
+{
+       struct major_info *tap_major;
+
+       rcu_read_lock();
+       tap_major = tap_get_major(MAJOR(major));
+       if (!tap_major) {
+               goto unlock;
+       }
+
+       mutex_lock(&tap_major->minor_lock);
+       if (tap->minor) {
+               idr_remove(&tap_major->minor_idr, tap->minor);
+               tap->minor = 0;
+       }
+       mutex_unlock(&tap_major->minor_lock);
+
+unlock:
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(tap_free_minor);
+
+static struct tap_dev *dev_get_by_tap_file(int major, int minor)
+{
+       struct net_device *dev = NULL;
+       struct tap_dev *tap;
+       struct major_info *tap_major;
+
+       rcu_read_lock();
+       tap_major = tap_get_major(major);
+       if (!tap_major) {
+               tap = NULL;
+               goto unlock;
+       }
+
+       mutex_lock(&tap_major->minor_lock);
+       tap = idr_find(&tap_major->minor_idr, minor);
+       if (tap) {
+               dev = tap->dev;
+               dev_hold(dev);
+       }
+       mutex_unlock(&tap_major->minor_lock);
+
+unlock:
+       rcu_read_unlock();
+       return tap;
+}
+
+static void tap_sock_write_space(struct sock *sk)
+{
+       wait_queue_head_t *wqueue;
+
+       if (!sock_writeable(sk) ||
+           !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
+               return;
+
+       wqueue = sk_sleep(sk);
+       if (wqueue && waitqueue_active(wqueue))
+               wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
+}
+
+static void tap_sock_destruct(struct sock *sk)
+{
+       struct tap_queue *q = container_of(sk, struct tap_queue, sk);
+
+       skb_array_cleanup(&q->skb_array);
+}
+
+static int tap_open(struct inode *inode, struct file *file)
+{
+       struct net *net = current->nsproxy->net_ns;
+       struct tap_dev *tap;
+       struct tap_queue *q;
+       int err = -ENODEV;
+
+       rtnl_lock();
+       tap = dev_get_by_tap_file(imajor(inode), iminor(inode));
+       if (!tap)
+               goto err;
+
+       err = -ENOMEM;
+       q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
+                                            &tap_proto, 0);
+       if (!q)
+               goto err;
+
+       RCU_INIT_POINTER(q->sock.wq, &q->wq);
+       init_waitqueue_head(&q->wq.wait);
+       q->sock.type = SOCK_RAW;
+       q->sock.state = SS_CONNECTED;
+       q->sock.file = file;
+       q->sock.ops = &tap_socket_ops;
+       sock_init_data(&q->sock, &q->sk);
+       q->sk.sk_write_space = tap_sock_write_space;
+       q->sk.sk_destruct = tap_sock_destruct;
+       q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
+       q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
+
+       /*
+        * So far only KVM's virtio_net uses tap; enable zero copy between
+        * guest kernel and host kernel when the lower device supports
+        * zerocopy.
+        *
+        * Macvlan supports zerocopy iff the lower device supports zero
+        * copy, so we don't have to look at the lower device directly.
+        */
+       if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
+               sock_set_flag(&q->sk, SOCK_ZEROCOPY);
+
+       err = -ENOMEM;
+       if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL))
+               goto err_array;
+
+       err = tap_set_queue(tap, file, q);
+       if (err)
+               goto err_queue;
+
+       dev_put(tap->dev);
+
+       rtnl_unlock();
+       return err;
+
+err_queue:
+       skb_array_cleanup(&q->skb_array);
+err_array:
+       sock_put(&q->sk);
+err:
+       if (tap)
+               dev_put(tap->dev);
+
+       rtnl_unlock();
+       return err;
+}
+
+static int tap_release(struct inode *inode, struct file *file)
+{
+       struct tap_queue *q = file->private_data;
+       tap_put_queue(q);
+       return 0;
+}
+
+static unsigned int tap_poll(struct file *file, poll_table *wait)
+{
+       struct tap_queue *q = file->private_data;
+       unsigned int mask = POLLERR;
+
+       if (!q)
+               goto out;
+
+       mask = 0;
+       poll_wait(file, &q->wq.wait, wait);
+
+       if (!skb_array_empty(&q->skb_array))
+               mask |= POLLIN | POLLRDNORM;
+
+       if (sock_writeable(&q->sk) ||
+           (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
+            sock_writeable(&q->sk)))
+               mask |= POLLOUT | POLLWRNORM;
+
+out:
+       return mask;
+}
+
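tap_poll() reports POLLIN from the queue's skb_array and POLLOUT from sock_writeable(), re-testing writeability after arming SOCKWQ_ASYNC_NOSPACE so a wakeup racing with the flag set is not lost. From user space the queue fd is an ordinary pollable descriptor; a minimal sketch, assuming tapfd is an open queue:

#include <poll.h>
#include <unistd.h>

/* Userspace sketch: block until a frame is queued, then read it. */
static ssize_t example_read_frame(int tapfd, void *buf, size_t len)
{
        struct pollfd pfd = { .fd = tapfd, .events = POLLIN };

        if (poll(&pfd, 1, -1) <= 0)
                return -1;
        return read(tapfd, buf, len);
}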
+static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
+                                           size_t len, size_t linear,
+                                           int noblock, int *err)
+{
+       struct sk_buff *skb;
+
+       /* Under a page?  Don't bother with paged skb. */
+       if (prepad + len < PAGE_SIZE || !linear)
+               linear = len;
+
+       skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
+                                  err, 0);
+       if (!skb)
+               return NULL;
+
+       skb_reserve(skb, prepad);
+       skb_put(skb, linear);
+       skb->data_len = len - linear;
+       skb->len += len - linear;
+
+       return skb;
+}
+
+/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
+#define TAP_RESERVE HH_DATA_OFF(ETH_HLEN)
+
+/* Get packet from user space buffer */
+static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m,
+                           struct iov_iter *from, int noblock)
+{
+       int good_linear = SKB_MAX_HEAD(TAP_RESERVE);
+       struct sk_buff *skb;
+       struct tap_dev *tap;
+       unsigned long total_len = iov_iter_count(from);
+       unsigned long len = total_len;
+       int err;
+       struct virtio_net_hdr vnet_hdr = { 0 };
+       int vnet_hdr_len = 0;
+       int copylen = 0;
+       int depth;
+       bool zerocopy = false;
+       size_t linear;
+
+       if (q->flags & IFF_VNET_HDR) {
+               vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+
+               err = -EINVAL;
+               if (len < vnet_hdr_len)
+                       goto err;
+               len -= vnet_hdr_len;
+
+               err = -EFAULT;
+               if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
+                       goto err;
+               iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
+               if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
+                    tap16_to_cpu(q, vnet_hdr.csum_start) +
+                    tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
+                            tap16_to_cpu(q, vnet_hdr.hdr_len))
+                       vnet_hdr.hdr_len = cpu_to_tap16(q,
+                                tap16_to_cpu(q, vnet_hdr.csum_start) +
+                                tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
+               err = -EINVAL;
+               if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
+                       goto err;
+       }
+
+       err = -EINVAL;
+       if (unlikely(len < ETH_HLEN))
+               goto err;
+
+       if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
+               struct iov_iter i;
+
+               copylen = vnet_hdr.hdr_len ?
+                       tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
+               if (copylen > good_linear)
+                       copylen = good_linear;
+               else if (copylen < ETH_HLEN)
+                       copylen = ETH_HLEN;
+               linear = copylen;
+               i = *from;
+               iov_iter_advance(&i, copylen);
+               if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
+                       zerocopy = true;
+       }
+
+       if (!zerocopy) {
+               copylen = len;
+               linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
+               if (linear > good_linear)
+                       linear = good_linear;
+               else if (linear < ETH_HLEN)
+                       linear = ETH_HLEN;
+       }
+
+       skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
+                           linear, noblock, &err);
+       if (!skb)
+               goto err;
+
+       if (zerocopy)
+               err = zerocopy_sg_from_iter(skb, from);
+       else
+               err = skb_copy_datagram_from_iter(skb, 0, from, len);
+
+       if (err)
+               goto err_kfree;
+
+       skb_set_network_header(skb, ETH_HLEN);
+       skb_reset_mac_header(skb);
+       skb->protocol = eth_hdr(skb)->h_proto;
+
+       if (vnet_hdr_len) {
+               err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
+                                           tap_is_little_endian(q));
+               if (err)
+                       goto err_kfree;
+       }
+
+       skb_probe_transport_header(skb, ETH_HLEN);
+
+       /* Move network header to the right position for VLAN tagged packets */
+       if ((skb->protocol == htons(ETH_P_8021Q) ||
+            skb->protocol == htons(ETH_P_8021AD)) &&
+           __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
+               skb_set_network_header(skb, depth);
+
+       rcu_read_lock();
+       tap = rcu_dereference(q->tap);
+       /* copy skb_ubuf_info for callback when skb has no error */
+       if (zerocopy) {
+               skb_shinfo(skb)->destructor_arg = m->msg_control;
+               skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+               skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
+       } else if (m && m->msg_control) {
+               struct ubuf_info *uarg = m->msg_control;
+               uarg->callback(uarg, false);
+       }
+
+       if (tap) {
+               skb->dev = tap->dev;
+               dev_queue_xmit(skb);
+       } else {
+               kfree_skb(skb);
+       }
+       rcu_read_unlock();
+
+       return total_len;
+
+err_kfree:
+       kfree_skb(skb);
+
+err:
+       rcu_read_lock();
+       tap = rcu_dereference(q->tap);
+       if (tap && tap->count_tx_dropped)
+               tap->count_tx_dropped(tap);
+       rcu_read_unlock();
+
+       return err;
+}
+
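Because tap_open() sets IFF_VNET_HDR by default, tap_get_user() expects every write to be prefixed by q->vnet_hdr_sz bytes of struct virtio_net_hdr. A userspace sketch of injecting one frame (a zeroed header requests no checksum or GSO handling); tapfd, frame, and frame_len are assumptions of the example:

#include <sys/uio.h>
#include <linux/virtio_net.h>

/* Userspace sketch: one frame per writev(), header first. */
static ssize_t example_write_frame(int tapfd, const void *frame,
                                   size_t frame_len)
{
        struct virtio_net_hdr vh = { 0 };
        struct iovec iov[2] = {
                { .iov_base = &vh,           .iov_len = sizeof(vh) },
                { .iov_base = (void *)frame, .iov_len = frame_len },
        };

        return writev(tapfd, iov, 2);
}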
+static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+       struct file *file = iocb->ki_filp;
+       struct tap_queue *q = file->private_data;
+
+       return tap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
+}
+
+/* Put packet to the user space buffer */
+static ssize_t tap_put_user(struct tap_queue *q,
+                           const struct sk_buff *skb,
+                           struct iov_iter *iter)
+{
+       int ret;
+       int vnet_hdr_len = 0;
+       int vlan_offset = 0;
+       int total;
+
+       if (q->flags & IFF_VNET_HDR) {
+               struct virtio_net_hdr vnet_hdr;
+               vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
+               if (iov_iter_count(iter) < vnet_hdr_len)
+                       return -EINVAL;
+
+               if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
+                                           tap_is_little_endian(q), true))
+                       BUG();
+
+               if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
+                   sizeof(vnet_hdr))
+                       return -EFAULT;
+
+               iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
+       }
+       total = vnet_hdr_len;
+       total += skb->len;
+
+       if (skb_vlan_tag_present(skb)) {
+               struct {
+                       __be16 h_vlan_proto;
+                       __be16 h_vlan_TCI;
+               } veth;
+               veth.h_vlan_proto = skb->vlan_proto;
+               veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
+
+               vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
+               total += VLAN_HLEN;
+
+               ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
+               if (ret || !iov_iter_count(iter))
+                       goto done;
+
+               ret = copy_to_iter(&veth, sizeof(veth), iter);
+               if (ret != sizeof(veth) || !iov_iter_count(iter))
+                       goto done;
+       }
+
+       ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
+                                    skb->len - vlan_offset);
+
+done:
+       return ret ? ret : total;
+}
+
+static ssize_t tap_do_read(struct tap_queue *q,
+                          struct iov_iter *to,
+                          int noblock)
+{
+       DEFINE_WAIT(wait);
+       struct sk_buff *skb;
+       ssize_t ret = 0;
+
+       if (!iov_iter_count(to))
+               return 0;
+
+       while (1) {
+               if (!noblock)
+                       prepare_to_wait(sk_sleep(&q->sk), &wait,
+                                       TASK_INTERRUPTIBLE);
+
+               /* Read frames from the queue */
+               skb = skb_array_consume(&q->skb_array);
+               if (skb)
+                       break;
+               if (noblock) {
+                       ret = -EAGAIN;
+                       break;
+               }
+               if (signal_pending(current)) {
+                       ret = -ERESTARTSYS;
+                       break;
+               }
+               /* Nothing to read, let's sleep */
+               schedule();
+       }
+       if (!noblock)
+               finish_wait(sk_sleep(&q->sk), &wait);
+
+       if (skb) {
+               ret = tap_put_user(q, skb, to);
+               if (unlikely(ret < 0))
+                       kfree_skb(skb);
+               else
+                       consume_skb(skb);
+       }
+       return ret;
+}
+
+static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+       struct file *file = iocb->ki_filp;
+       struct tap_queue *q = file->private_data;
+       ssize_t len = iov_iter_count(to), ret;
+
+       ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK);
+       ret = min_t(ssize_t, ret, len);
+       if (ret > 0)
+               iocb->ki_pos = ret;
+       return ret;
+}
+
+static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
+{
+       struct tap_dev *tap;
+
+       ASSERT_RTNL();
+       tap = rtnl_dereference(q->tap);
+       if (tap)
+               dev_hold(tap->dev);
+
+       return tap;
+}
+
+static void tap_put_tap_dev(struct tap_dev *tap)
+{
+       dev_put(tap->dev);
+}
+
+static int tap_ioctl_set_queue(struct file *file, unsigned int flags)
+{
+       struct tap_queue *q = file->private_data;
+       struct tap_dev *tap;
+       int ret;
+
+       tap = tap_get_tap_dev(q);
+       if (!tap)
+               return -EINVAL;
+
+       if (flags & IFF_ATTACH_QUEUE)
+               ret = tap_enable_queue(tap, file, q);
+       else if (flags & IFF_DETACH_QUEUE)
+               ret = tap_disable_queue(q);
+       else
+               ret = -EINVAL;
+
+       tap_put_tap_dev(tap);
+       return ret;
+}
+
+static int set_offload(struct tap_queue *q, unsigned long arg)
+{
+       struct tap_dev *tap;
+       netdev_features_t features;
+       netdev_features_t feature_mask = 0;
+
+       tap = rtnl_dereference(q->tap);
+       if (!tap)
+               return -ENOLINK;
+
+       features = tap->dev->features;
+
+       if (arg & TUN_F_CSUM) {
+               feature_mask = NETIF_F_HW_CSUM;
+
+               if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
+                       if (arg & TUN_F_TSO_ECN)
+                               feature_mask |= NETIF_F_TSO_ECN;
+                       if (arg & TUN_F_TSO4)
+                               feature_mask |= NETIF_F_TSO;
+                       if (arg & TUN_F_TSO6)
+                               feature_mask |= NETIF_F_TSO6;
+               }
+
+               if (arg & TUN_F_UFO)
+                       feature_mask |= NETIF_F_UFO;
+       }
+
+       /* The tun/tap driver inverts the meaning of the TSO feature bits:
+        * setting a TSO bit means that user space wants to accept TSO
+        * frames, and clearing it means that user space cannot handle
+        * them. tap keeps the same convention, so when user space turns
+        * TSO off we turn off GSO/LRO on the lower device so that user
+        * space will not receive TSO frames.
+        */
+       if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
+               features |= RX_OFFLOADS;
+       else
+               features &= ~RX_OFFLOADS;
+
+       /* tap_features are the same as features on tun/tap and
+        * reflect user expectations.
+        */
+       tap->tap_features = feature_mask;
+       if (tap->update_features)
+               tap->update_features(tap, features);
+
+       return 0;
+}
+
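set_offload() is reached through the TUNSETOFFLOAD ioctl handled further down. A userspace sketch of declaring that this consumer can accept checksum-offloaded and TSO frames, which tap translates into GSO/LRO settings on the lower device:

#include <sys/ioctl.h>
#include <linux/if_tun.h>

/* Userspace sketch: accept csum/TSO frames on this queue fd. */
static int example_enable_offloads(int tapfd)
{
        return ioctl(tapfd, TUNSETOFFLOAD,
                     TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6);
}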
+/*
+ * Provide compatibility with the generic tun/tap ioctl interface.
+ */
+static long tap_ioctl(struct file *file, unsigned int cmd,
+                     unsigned long arg)
+{
+       struct tap_queue *q = file->private_data;
+       struct tap_dev *tap;
+       void __user *argp = (void __user *)arg;
+       struct ifreq __user *ifr = argp;
+       unsigned int __user *up = argp;
+       unsigned short u;
+       int __user *sp = argp;
+       struct sockaddr sa;
+       int s;
+       int ret;
+
+       switch (cmd) {
+       case TUNSETIFF:
+               /* ignore the name, just look at flags */
+               if (get_user(u, &ifr->ifr_flags))
+                       return -EFAULT;
+
+               ret = 0;
+               if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP))
+                       ret = -EINVAL;
+               else
+                       q->flags = (q->flags & ~TAP_IFFEATURES) | u;
+
+               return ret;
+
+       case TUNGETIFF:
+               rtnl_lock();
+               tap = tap_get_tap_dev(q);
+               if (!tap) {
+                       rtnl_unlock();
+                       return -ENOLINK;
+               }
+
+               ret = 0;
+               u = q->flags;
+               if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
+                   put_user(u, &ifr->ifr_flags))
+                       ret = -EFAULT;
+               tap_put_tap_dev(tap);
+               rtnl_unlock();
+               return ret;
+
+       case TUNSETQUEUE:
+               if (get_user(u, &ifr->ifr_flags))
+                       return -EFAULT;
+               rtnl_lock();
+               ret = tap_ioctl_set_queue(file, u);
+               rtnl_unlock();
+               return ret;
+
+       case TUNGETFEATURES:
+               if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up))
+                       return -EFAULT;
+               return 0;
+
+       case TUNSETSNDBUF:
+               if (get_user(s, sp))
+                       return -EFAULT;
+
+               q->sk.sk_sndbuf = s;
+               return 0;
+
+       case TUNGETVNETHDRSZ:
+               s = q->vnet_hdr_sz;
+               if (put_user(s, sp))
+                       return -EFAULT;
+               return 0;
+
+       case TUNSETVNETHDRSZ:
+               if (get_user(s, sp))
+                       return -EFAULT;
+               if (s < (int)sizeof(struct virtio_net_hdr))
+                       return -EINVAL;
+
+               q->vnet_hdr_sz = s;
+               return 0;
+
+       case TUNGETVNETLE:
+               s = !!(q->flags & TAP_VNET_LE);
+               if (put_user(s, sp))
+                       return -EFAULT;
+               return 0;
+
+       case TUNSETVNETLE:
+               if (get_user(s, sp))
+                       return -EFAULT;
+               if (s)
+                       q->flags |= TAP_VNET_LE;
+               else
+                       q->flags &= ~TAP_VNET_LE;
+               return 0;
+
+       case TUNGETVNETBE:
+               return tap_get_vnet_be(q, sp);
+
+       case TUNSETVNETBE:
+               return tap_set_vnet_be(q, sp);
+
+       case TUNSETOFFLOAD:
+               /* let the user check for future flags */
+               if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
+                           TUN_F_TSO_ECN | TUN_F_UFO))
+                       return -EINVAL;
+
+               rtnl_lock();
+               ret = set_offload(q, arg);
+               rtnl_unlock();
+               return ret;
+
+       case SIOCGIFHWADDR:
+               rtnl_lock();
+               tap = tap_get_tap_dev(q);
+               if (!tap) {
+                       rtnl_unlock();
+                       return -ENOLINK;
+               }
+               ret = 0;
+               u = tap->dev->type;
+               if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
+                   copy_to_user(&ifr->ifr_hwaddr.sa_data, tap->dev->dev_addr, ETH_ALEN) ||
+                   put_user(u, &ifr->ifr_hwaddr.sa_family))
+                       ret = -EFAULT;
+               tap_put_tap_dev(tap);
+               rtnl_unlock();
+               return ret;
+
+       case SIOCSIFHWADDR:
+               if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
+                       return -EFAULT;
+               rtnl_lock();
+               tap = tap_get_tap_dev(q);
+               if (!tap) {
+                       rtnl_unlock();
+                       return -ENOLINK;
+               }
+               ret = dev_set_mac_address(tap->dev, &sa);
+               tap_put_tap_dev(tap);
+               rtnl_unlock();
+               return ret;
+
+       default:
+               return -EINVAL;
+       }
+}
+
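These handlers deliberately mirror the classic tun/tap ioctls, so existing tooling keeps working. For instance, querying the device a queue fd is attached to:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>

/* Userspace sketch: print the attached device name and flags. */
static void example_print_iff(int tapfd)
{
        struct ifreq ifr;

        memset(&ifr, 0, sizeof(ifr));
        if (ioctl(tapfd, TUNGETIFF, &ifr) == 0)
                printf("attached to %s, flags 0x%x\n",
                       ifr.ifr_name, ifr.ifr_flags);
}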
+#ifdef CONFIG_COMPAT
+static long tap_compat_ioctl(struct file *file, unsigned int cmd,
+                            unsigned long arg)
+{
+       return tap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+const struct file_operations tap_fops = {
+       .owner          = THIS_MODULE,
+       .open           = tap_open,
+       .release        = tap_release,
+       .read_iter      = tap_read_iter,
+       .write_iter     = tap_write_iter,
+       .poll           = tap_poll,
+       .llseek         = no_llseek,
+       .unlocked_ioctl = tap_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = tap_compat_ioctl,
+#endif
+};
+
+static int tap_sendmsg(struct socket *sock, struct msghdr *m,
+                      size_t total_len)
+{
+       struct tap_queue *q = container_of(sock, struct tap_queue, sock);
+
+       return tap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
+}
+
+static int tap_recvmsg(struct socket *sock, struct msghdr *m,
+                      size_t total_len, int flags)
+{
+       struct tap_queue *q = container_of(sock, struct tap_queue, sock);
+       int ret;
+
+       if (flags & ~(MSG_DONTWAIT | MSG_TRUNC))
+               return -EINVAL;
+       ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
+       if (ret > total_len) {
+               m->msg_flags |= MSG_TRUNC;
+               ret = flags & MSG_TRUNC ? ret : total_len;
+       }
+       return ret;
+}
+
+static int tap_peek_len(struct socket *sock)
+{
+       struct tap_queue *q = container_of(sock, struct tap_queue, sock);
+       return skb_array_peek_len(&q->skb_array);
+}
+
+/* Ops structure to mimic raw sockets, as tun does */
+static const struct proto_ops tap_socket_ops = {
+       .sendmsg = tap_sendmsg,
+       .recvmsg = tap_recvmsg,
+       .peek_len = tap_peek_len,
+};
+
+/* Get an underlying socket object from a tap file.  Returns an error unless
+ * the file is attached to a device.  The returned object works like a packet
+ * socket; it can be used for sock_sendmsg()/sock_recvmsg().  The caller is
+ * responsible for holding a reference to the file for as long as the socket
+ * is in use. */
+struct socket *tap_get_socket(struct file *file)
+{
+       struct tap_queue *q;
+       if (file->f_op != &tap_fops)
+               return ERR_PTR(-EINVAL);
+       q = file->private_data;
+       if (!q)
+               return ERR_PTR(-EBADFD);
+       return &q->sock;
+}
+EXPORT_SYMBOL_GPL(tap_get_socket);
+
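tap_get_socket() exists for in-kernel consumers such as vhost-net, which resolve a user-supplied queue fd to its socket and then drive it with sock_sendmsg()/sock_recvmsg(). A caller sketch modeled on the vhost-net pattern (name hypothetical):

/* In-kernel caller sketch: the file reference taken here pins the
 * socket for as long as the consumer uses it.
 */
static struct socket *example_get_tap_socket(int fd)
{
        struct file *file = fget(fd);
        struct socket *sock;

        if (!file)
                return ERR_PTR(-EBADF);
        sock = tap_get_socket(file);
        if (IS_ERR(sock))
                fput(file);
        return sock;
}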
+int tap_queue_resize(struct tap_dev *tap)
+{
+       struct net_device *dev = tap->dev;
+       struct tap_queue *q;
+       struct skb_array **arrays;
+       int n = tap->numqueues;
+       int ret, i = 0;
+
+       arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL);
+       if (!arrays)
+               return -ENOMEM;
+
+       list_for_each_entry(q, &tap->queue_list, next)
+               arrays[i++] = &q->skb_array;
+
+       ret = skb_array_resize_multiple(arrays, n,
+                                       dev->tx_queue_len, GFP_KERNEL);
+
+       kfree(arrays);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(tap_queue_resize);
+
+static int tap_list_add(dev_t major, const char *device_name)
+{
+       struct major_info *tap_major;
+
+       tap_major = kzalloc(sizeof(*tap_major), GFP_ATOMIC);
+       if (!tap_major)
+               return -ENOMEM;
+
+       tap_major->major = MAJOR(major);
+
+       idr_init(&tap_major->minor_idr);
+       mutex_init(&tap_major->minor_lock);
+
+       tap_major->device_name = device_name;
+
+       list_add_tail_rcu(&tap_major->next, &major_list);
+       return 0;
+}
+
+int tap_create_cdev(struct cdev *tap_cdev,
+                   dev_t *tap_major, const char *device_name)
+{
+       int err;
+
+       err = alloc_chrdev_region(tap_major, 0, TAP_NUM_DEVS, device_name);
+       if (err)
+               goto out1;
+
+       cdev_init(tap_cdev, &tap_fops);
+       err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
+       if (err)
+               goto out2;
+
+       err = tap_list_add(*tap_major, device_name);
+       if (err)
+               goto out3;
+
+       return 0;
+
+out3:
+       cdev_del(tap_cdev);
+out2:
+       unregister_chrdev_region(*tap_major, TAP_NUM_DEVS);
+out1:
+       return err;
+}
+EXPORT_SYMBOL_GPL(tap_create_cdev);
+
+void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev)
+{
+       struct major_info *tap_major, *tmp;
+
+       cdev_del(tap_cdev);
+       unregister_chrdev_region(major, TAP_NUM_DEVS);
+       list_for_each_entry_safe(tap_major, tmp, &major_list, next) {
+               if (tap_major->major == MAJOR(major)) {
+                       idr_destroy(&tap_major->minor_idr);
+                       list_del_rcu(&tap_major->next);
+                       kfree_rcu(tap_major, rcu);
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(tap_destroy_cdev);
+
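tap_create_cdev()/tap_destroy_cdev() are the module-facing entry points; a consumer pairs them in its init/exit paths (identifiers hypothetical):

/* Module sketch: register and tear down a tap-style character device. */
static struct cdev example_cdev;
static dev_t example_major;

static int __init example_init(void)
{
        return tap_create_cdev(&example_cdev, &example_major, "example_tap");
}

static void __exit example_exit(void)
{
        tap_destroy_cdev(example_major, &example_cdev);
}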
+MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
+MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
+MODULE_LICENSE("GPL");
index bdc58567d10e7b370b6966c35251b81619f314ce..4a24b5d15f5a5dfe770d184533f70f7140d9e145 100644 (file)
@@ -1798,7 +1798,7 @@ unwind:
        return err;
 }
 
-static struct rtnl_link_stats64 *
+static void
 team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct team *team = netdev_priv(dev);
@@ -1835,7 +1835,6 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
        stats->rx_dropped       = rx_dropped;
        stats->tx_dropped       = tx_dropped;
        stats->rx_nohandler     = rx_nohandler;
-       return stats;
 }
 
 static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
@@ -2002,8 +2001,6 @@ static const struct net_device_ops team_netdev_ops = {
        .ndo_add_slave          = team_add_slave,
        .ndo_del_slave          = team_del_slave,
        .ndo_fix_features       = team_fix_features,
-       .ndo_neigh_construct    = netdev_default_l2upper_neigh_construct,
-       .ndo_neigh_destroy      = netdev_default_l2upper_neigh_destroy,
        .ndo_change_carrier     = team_change_carrier,
        .ndo_bridge_setlink     = switchdev_port_bridge_setlink,
        .ndo_bridge_getlink     = switchdev_port_bridge_getlink,
index bfabe180053e414dee777e0e56b24eceef05c918..30863e378925b3555dea6eadb99a02678779cfd5 100644 (file)
@@ -218,6 +218,7 @@ struct tun_struct {
        struct list_head disabled;
        void *security;
        u32 flow_count;
+       u32 rx_batched;
        struct tun_pcpu_stats __percpu *pcpu_stats;
 };
 
@@ -522,6 +523,7 @@ static void tun_queue_purge(struct tun_file *tfile)
        while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
                kfree_skb(skb);
 
+       skb_queue_purge(&tfile->sk.sk_write_queue);
        skb_queue_purge(&tfile->sk.sk_error_queue);
 }
 
@@ -953,7 +955,7 @@ static void tun_set_headroom(struct net_device *dev, int new_hr)
        tun->align = new_hr;
 }
 
-static struct rtnl_link_stats64 *
+static void
 tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
@@ -987,7 +989,6 @@ tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
        stats->rx_dropped  = rx_dropped;
        stats->rx_frame_errors = rx_frame_errors;
        stats->tx_dropped = tx_dropped;
-       return stats;
 }
 
 static const struct net_device_ops tun_netdev_ops = {
@@ -1140,10 +1141,46 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
        return skb;
 }
 
+static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
+                          struct sk_buff *skb, int more)
+{
+       struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
+       struct sk_buff_head process_queue;
+       u32 rx_batched = tun->rx_batched;
+       bool rcv = false;
+
+       if (!rx_batched || (!more && skb_queue_empty(queue))) {
+               local_bh_disable();
+               netif_receive_skb(skb);
+               local_bh_enable();
+               return;
+       }
+
+       spin_lock(&queue->lock);
+       if (!more || skb_queue_len(queue) == rx_batched) {
+               __skb_queue_head_init(&process_queue);
+               skb_queue_splice_tail_init(queue, &process_queue);
+               rcv = true;
+       } else {
+               __skb_queue_tail(queue, skb);
+       }
+       spin_unlock(&queue->lock);
+
+       if (rcv) {
+               struct sk_buff *nskb;
+
+               local_bh_disable();
+               while ((nskb = __skb_dequeue(&process_queue)))
+                       netif_receive_skb(nskb);
+               netif_receive_skb(skb);
+               local_bh_enable();
+       }
+}
+
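tun_rx_batched() parks packets on sk_write_queue while the submitter hints that more are coming, then flushes the whole batch through netif_receive_skb() in one local_bh_disable() section once the hint stops or rx_batched packets have accumulated. The hint is MSG_MORE on the in-kernel sendmsg path (wired up in tun_sendmsg() below); a speculative sketch of a producer in the vhost-net style, assuming sock was obtained via tun_get_socket():

/* In-kernel producer sketch: keep MSG_MORE set while more packets are
 * pending so tun can coalesce them into one receive pass.
 */
static void example_submit(struct socket *sock, struct msghdr *msgs, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                msgs[i].msg_flags = MSG_DONTWAIT;
                if (i + 1 < n)
                        msgs[i].msg_flags |= MSG_MORE;
                sock_sendmsg(sock, &msgs[i]);
        }
}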
 /* Get packet from user space buffer */
 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                            void *msg_control, struct iov_iter *from,
-                           int noblock)
+                           int noblock, bool more)
 {
        struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
        struct sk_buff *skb;
@@ -1286,9 +1323,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
        rxhash = skb_get_hash(skb);
 #ifndef CONFIG_4KSTACKS
-       local_bh_disable();
-       netif_receive_skb(skb);
-       local_bh_enable();
+       tun_rx_batched(tun, tfile, skb, more);
 #else
        netif_rx_ni(skb);
 #endif
@@ -1314,7 +1349,8 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (!tun)
                return -EBADFD;
 
-       result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK);
+       result = tun_get_user(tun, tfile, NULL, from,
+                             file->f_flags & O_NONBLOCK, false);
 
        tun_put(tun);
        return result;
@@ -1572,7 +1608,8 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
                return -EBADFD;
 
        ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
-                          m->msg_flags & MSG_DONTWAIT);
+                          m->msg_flags & MSG_DONTWAIT,
+                          m->msg_flags & MSG_MORE);
        tun_put(tun);
        return ret;
 }
@@ -1773,6 +1810,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                tun->align = NET_SKB_PAD;
                tun->filter_attached = false;
                tun->sndbuf = tfile->socket.sk->sk_sndbuf;
+               tun->rx_batched = 0;
 
                tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
                if (!tun->pcpu_stats) {
@@ -2441,6 +2479,29 @@ static void tun_set_msglevel(struct net_device *dev, u32 value)
 #endif
 }
 
+static int tun_get_coalesce(struct net_device *dev,
+                           struct ethtool_coalesce *ec)
+{
+       struct tun_struct *tun = netdev_priv(dev);
+
+       ec->rx_max_coalesced_frames = tun->rx_batched;
+
+       return 0;
+}
+
+static int tun_set_coalesce(struct net_device *dev,
+                           struct ethtool_coalesce *ec)
+{
+       struct tun_struct *tun = netdev_priv(dev);
+
+       if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
+               tun->rx_batched = NAPI_POLL_WEIGHT;
+       else
+               tun->rx_batched = ec->rx_max_coalesced_frames;
+
+       return 0;
+}
+
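The batch size rides on the standard coalescing knob rx_max_coalesced_frames (ethtool -C <dev> rx-frames N), clamped to NAPI_POLL_WEIGHT (64) by tun_set_coalesce() above. A userspace sketch of setting it through SIOCETHTOOL, assuming the device is named tun0:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Userspace sketch: allow batching of up to 64 frames on tun0. */
static int example_set_rx_batching(void)
{
        struct ethtool_coalesce ec = { .cmd = ETHTOOL_SCOALESCE };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0), ret;

        ec.rx_max_coalesced_frames = 64;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "tun0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&ec;
        ret = ioctl(fd, SIOCETHTOOL, &ifr);
        close(fd);
        return ret;
}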
 static const struct ethtool_ops tun_ethtool_ops = {
        .get_settings   = tun_get_settings,
        .get_drvinfo    = tun_get_drvinfo,
@@ -2448,6 +2509,8 @@ static const struct ethtool_ops tun_ethtool_ops = {
        .set_msglevel   = tun_set_msglevel,
        .get_link       = ethtool_op_get_link,
        .get_ts_info    = ethtool_op_get_ts_info,
+       .get_coalesce   = tun_get_coalesce,
+       .set_coalesce   = tun_set_coalesce,
 };
 
 static int tun_queue_resize(struct tun_struct *tun)
index 86144f9a80ee84a5d4914dace38fdd8ef2932672..f5552aaaa77a59bf558da6c22218a919bf99ec94 100644 (file)
@@ -466,7 +466,7 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
  * connected. This causes the link state to be incorrect. Work around this by
  * always setting the state to off, then on.
  */
-void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb)
+static void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb)
 {
        struct usb_cdc_notification *event;
 
index 08f8703e4d542aba124c34a77967702f753114f1..9889a70ff4f6fece5bfabbfb45a3470f721a5a32 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/irq.h>
 #include <linux/irqchip/chained_irq.h>
 #include <linux/microchipphy.h>
+#include <linux/phy.h>
 #include "lan78xx.h"
 
 #define DRIVER_AUTHOR  "WOOJUNG HUH <woojung.huh@microchip.com>"
index ad42295356dd32b70009852247151b4856deea86..986243c932ccd6fe19c592805c1c63274f5e5555 100644 (file)
@@ -3590,7 +3590,7 @@ static bool delay_autosuspend(struct r8152 *tp)
                return false;
 }
 
-static int rtl8152_rumtime_suspend(struct r8152 *tp)
+static int rtl8152_runtime_suspend(struct r8152 *tp)
 {
        struct net_device *netdev = tp->netdev;
        int ret = 0;
@@ -3672,7 +3672,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
        mutex_lock(&tp->control);
 
        if (PMSG_IS_AUTO(message))
-               ret = rtl8152_rumtime_suspend(tp);
+               ret = rtl8152_runtime_suspend(tp);
        else
                ret = rtl8152_system_suspend(tp);
 
index 0520952aa096efd4131526ae53cde4bf94786d42..8c39d6d690e5e7f8ea6e522b8eb01a2db550c61a 100644 (file)
@@ -158,8 +158,8 @@ static u64 veth_stats_one(struct pcpu_vstats *result, struct net_device *dev)
        return atomic64_read(&priv->dropped);
 }
 
-static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
-                                                 struct rtnl_link_stats64 *tot)
+static void veth_get_stats64(struct net_device *dev,
+                            struct rtnl_link_stats64 *tot)
 {
        struct veth_priv *priv = netdev_priv(dev);
        struct net_device *peer;
@@ -177,8 +177,6 @@ static struct rtnl_link_stats64 *veth_get_stats64(struct net_device *dev,
                tot->rx_packets = one.packets;
        }
        rcu_read_unlock();
-
-       return tot;
 }
 
 /* fake multicast ability */
index 765c2d6358daf38203cdb1a50a31cc04f65c1968..11e28530c83c9fa6162725419f4c219ea82b7136 100644 (file)
 #include <linux/virtio.h>
 #include <linux/virtio_net.h>
 #include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 #include <linux/scatterlist.h>
 #include <linux/if_vlan.h>
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/average.h>
-#include <net/busy_poll.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -41,6 +41,9 @@ module_param(gso, bool, 0444);
 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
 #define GOOD_COPY_LEN  128
 
+/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
+#define VIRTIO_XDP_HEADROOM 256
+
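This reserved headroom is what lets virtio_net start honoring bpf_xdp_adjust_head(); the explicit rejection of such programs is deleted in virtnet_xdp_set() below. A BPF-side sketch of a program that consumes a little of it (assumes the usual SEC()/helper declarations from the BPF headers):

/* BPF sketch: grow the packet 4 bytes at the front; with 256 bytes of
 * headroom reserved this succeeds, otherwise drop.
 */
SEC("xdp")
int xdp_push_hdr(struct xdp_md *ctx)
{
        if (bpf_xdp_adjust_head(ctx, -4))
                return XDP_DROP;
        return XDP_PASS;
}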
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
  * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -338,17 +341,21 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
        return skb;
 }
 
-static void virtnet_xdp_xmit(struct virtnet_info *vi,
+static bool virtnet_xdp_xmit(struct virtnet_info *vi,
                             struct receive_queue *rq,
-                            struct send_queue *sq,
                             struct xdp_buff *xdp,
                             void *data)
 {
        struct virtio_net_hdr_mrg_rxbuf *hdr;
        unsigned int num_sg, len;
+       struct send_queue *sq;
+       unsigned int qp;
        void *xdp_sent;
        int err;
 
+       qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
+       sq = &vi->sq[qp];
+
        /* Free up any pending old buffers before queueing new ones. */
        while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
                if (vi->mergeable_rx_bufs) {
@@ -363,6 +370,7 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi,
        }
 
        if (vi->mergeable_rx_bufs) {
+               xdp->data -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
                /* Zero header and leave csum up to XDP layers */
                hdr = xdp->data;
                memset(hdr, 0, vi->hdr_len);
@@ -379,7 +387,9 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi,
                num_sg = 2;
                sg_init_table(sq->sg, 2);
                sg_set_buf(sq->sg, hdr, vi->hdr_len);
-               skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
+               skb_to_sgvec(skb, sq->sg + 1,
+                            xdp->data - xdp->data_hard_start,
+                            xdp->data_end - xdp->data);
        }
        err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
                                   data, GFP_ATOMIC);
@@ -390,53 +400,12 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi,
                        put_page(page);
                } else /* small buffer */
                        kfree_skb(data);
-               return; // On error abort to avoid unnecessary kick
+               /* On error abort to avoid unnecessary kick */
+               return false;
        }
 
        virtqueue_kick(sq->vq);
-}
-
-static u32 do_xdp_prog(struct virtnet_info *vi,
-                      struct receive_queue *rq,
-                      struct bpf_prog *xdp_prog,
-                      void *data, int len)
-{
-       int hdr_padded_len;
-       struct xdp_buff xdp;
-       void *buf;
-       unsigned int qp;
-       u32 act;
-
-       if (vi->mergeable_rx_bufs) {
-               hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
-               xdp.data = data + hdr_padded_len;
-               xdp.data_end = xdp.data + (len - vi->hdr_len);
-               buf = data;
-       } else { /* small buffers */
-               struct sk_buff *skb = data;
-
-               xdp.data = skb->data;
-               xdp.data_end = xdp.data + len;
-               buf = skb->data;
-       }
-
-       act = bpf_prog_run_xdp(xdp_prog, &xdp);
-       switch (act) {
-       case XDP_PASS:
-               return XDP_PASS;
-       case XDP_TX:
-               qp = vi->curr_queue_pairs -
-                       vi->xdp_queue_pairs +
-                       smp_processor_id();
-               xdp.data = buf;
-               virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data);
-               return XDP_TX;
-       default:
-               bpf_warn_invalid_xdp_action(act);
-       case XDP_ABORTED:
-       case XDP_DROP:
-               return XDP_DROP;
-       }
+       return true;
 }
 
 static struct sk_buff *receive_small(struct net_device *dev,
@@ -448,30 +417,44 @@ static struct sk_buff *receive_small(struct net_device *dev,
        struct bpf_prog *xdp_prog;
 
        len -= vi->hdr_len;
-       skb_trim(skb, len);
 
        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (xdp_prog) {
                struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
+               struct xdp_buff xdp;
                u32 act;
 
                if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
                        goto err_xdp;
-               act = do_xdp_prog(vi, rq, xdp_prog, skb, len);
+
+               xdp.data_hard_start = skb->data;
+               xdp.data = skb->data + VIRTIO_XDP_HEADROOM;
+               xdp.data_end = xdp.data + len;
+               act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
                switch (act) {
                case XDP_PASS:
+                       /* Recalculate length in case bpf program changed it */
+                       __skb_pull(skb, xdp.data - xdp.data_hard_start);
+                       len = xdp.data_end - xdp.data;
                        break;
                case XDP_TX:
+                       if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp, skb)))
+                               trace_xdp_exception(vi->dev, xdp_prog, act);
                        rcu_read_unlock();
                        goto xdp_xmit;
-               case XDP_DROP:
                default:
+                       bpf_warn_invalid_xdp_action(act);
+               case XDP_ABORTED:
+                       trace_xdp_exception(vi->dev, xdp_prog, act);
+               case XDP_DROP:
                        goto err_xdp;
                }
        }
        rcu_read_unlock();
 
+       skb_trim(skb, len);
        return skb;
 
 err_xdp:
@@ -520,7 +503,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
                                       unsigned int *len)
 {
        struct page *page = alloc_page(GFP_ATOMIC);
-       unsigned int page_off = 0;
+       unsigned int page_off = VIRTIO_XDP_HEADROOM;
 
        if (!page)
                return NULL;
@@ -556,7 +539,8 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
                put_page(p);
        }
 
-       *len = page_off;
+       /* Headroom does not contribute to packet length */
+       *len = page_off - VIRTIO_XDP_HEADROOM;
        return page;
 err_buf:
        __free_pages(page, 0);
@@ -584,6 +568,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (xdp_prog) {
                struct page *xdp_page;
+               struct xdp_buff xdp;
+               void *data;
                u32 act;
 
                /* This happens when rx buffer size is underestimated */
@@ -593,7 +579,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                                      page, offset, &len);
                        if (!xdp_page)
                                goto err_xdp;
-                       offset = 0;
+                       offset = VIRTIO_XDP_HEADROOM;
                } else {
                        xdp_page = page;
                }
@@ -606,28 +592,47 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                if (unlikely(hdr->hdr.gso_type))
                        goto err_xdp;
 
-               act = do_xdp_prog(vi, rq, xdp_prog,
-                                 page_address(xdp_page) + offset, len);
+               /* Allow consuming headroom but reserve enough space to push
+                * the virtio-net header back on if we get an XDP_TX return
+                * code.
+                */
+               data = page_address(xdp_page) + offset;
+               xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
+               xdp.data = data + vi->hdr_len;
+               xdp.data_end = xdp.data + (len - vi->hdr_len);
+               act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
                switch (act) {
                case XDP_PASS:
+                       /* Recalculate offset to account for any header
+                        * adjustments made by the BPF program. Note the
+                        * other cases do not build an skb and do not use
+                        * offset.
+                        */
+                       offset = xdp.data -
+                                       page_address(xdp_page) - vi->hdr_len;
+
                        /* We can only create skb based on xdp_page. */
                        if (unlikely(xdp_page != page)) {
                                rcu_read_unlock();
                                put_page(page);
                                head_skb = page_to_skb(vi, rq, xdp_page,
-                                                      0, len, PAGE_SIZE);
+                                                      offset, len, PAGE_SIZE);
                                ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
                                return head_skb;
                        }
                        break;
                case XDP_TX:
+                       if (unlikely(!virtnet_xdp_xmit(vi, rq, &xdp, data)))
+                               trace_xdp_exception(vi->dev, xdp_prog, act);
                        ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
                        if (unlikely(xdp_page != page))
                                goto err_xdp;
                        rcu_read_unlock();
                        goto xdp_xmit;
-               case XDP_DROP:
                default:
+                       bpf_warn_invalid_xdp_action(act);
+               case XDP_ABORTED:
+                       trace_xdp_exception(vi->dev, xdp_prog, act);
+               case XDP_DROP:
                        if (unlikely(xdp_page != page))
                                __free_pages(xdp_page, 0);
                        ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
@@ -777,23 +782,30 @@ frame_err:
        dev_kfree_skb(skb);
 }
 
+static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
+{
+       return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
+}
+
 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
                             gfp_t gfp)
 {
+       int headroom = GOOD_PACKET_LEN + virtnet_get_headroom(vi);
+       unsigned int xdp_headroom = virtnet_get_headroom(vi);
        struct sk_buff *skb;
        struct virtio_net_hdr_mrg_rxbuf *hdr;
        int err;
 
-       skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
+       skb = __netdev_alloc_skb_ip_align(vi->dev, headroom, gfp);
        if (unlikely(!skb))
                return -ENOMEM;
 
-       skb_put(skb, GOOD_PACKET_LEN);
+       skb_put(skb, headroom);
 
        hdr = skb_vnet_hdr(skb);
        sg_init_table(rq->sg, 2);
        sg_set_buf(rq->sg, hdr, vi->hdr_len);
-       skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
+       skb_to_sgvec(skb, rq->sg + 1, xdp_headroom, skb->len - xdp_headroom);
 
        err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
        if (err < 0)
@@ -861,24 +873,27 @@ static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
        return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
 }
 
-static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
+static int add_recvbuf_mergeable(struct virtnet_info *vi,
+                                struct receive_queue *rq, gfp_t gfp)
 {
        struct page_frag *alloc_frag = &rq->alloc_frag;
+       unsigned int headroom = virtnet_get_headroom(vi);
        char *buf;
        unsigned long ctx;
        int err;
        unsigned int len, hole;
 
        len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
-       if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
+       if (unlikely(!skb_page_frag_refill(len + headroom, alloc_frag, gfp)))
                return -ENOMEM;
 
        buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
+       buf += headroom; /* advance address leaving hole at front of pkt */
        ctx = mergeable_buf_to_ctx(buf, len);
        get_page(alloc_frag->page);
-       alloc_frag->offset += len;
+       alloc_frag->offset += len + headroom;
        hole = alloc_frag->size - alloc_frag->offset;
-       if (hole < len) {
+       if (hole < len + headroom) {
                /* To avoid internal fragmentation, if there is very likely not
                 * enough space for another buffer, add the remaining space to
                 * the current buffer. This extra space is not included in
@@ -912,7 +927,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
        gfp |= __GFP_COLD;
        do {
                if (vi->mergeable_rx_bufs)
-                       err = add_recvbuf_mergeable(rq, gfp);
+                       err = add_recvbuf_mergeable(vi, rq, gfp);
                else if (vi->big_packets)
                        err = add_recvbuf_big(vi, rq, gfp);
                else
@@ -1007,53 +1022,17 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
        /* Out of packets? */
        if (received < budget) {
                r = virtqueue_enable_cb_prepare(rq->vq);
-               napi_complete_done(napi, received);
-               if (unlikely(virtqueue_poll(rq->vq, r)) &&
-                   napi_schedule_prep(napi)) {
-                       virtqueue_disable_cb(rq->vq);
-                       __napi_schedule(napi);
-               }
-       }
-
-       return received;
-}
-
-#ifdef CONFIG_NET_RX_BUSY_POLL
-/* must be called with local_bh_disable()d */
-static int virtnet_busy_poll(struct napi_struct *napi)
-{
-       struct receive_queue *rq =
-               container_of(napi, struct receive_queue, napi);
-       struct virtnet_info *vi = rq->vq->vdev->priv;
-       int r, received = 0, budget = 4;
-
-       if (!(vi->status & VIRTIO_NET_S_LINK_UP))
-               return LL_FLUSH_FAILED;
-
-       if (!napi_schedule_prep(napi))
-               return LL_FLUSH_BUSY;
-
-       virtqueue_disable_cb(rq->vq);
-
-again:
-       received += virtnet_receive(rq, budget);
-
-       r = virtqueue_enable_cb_prepare(rq->vq);
-       clear_bit(NAPI_STATE_SCHED, &napi->state);
-       if (unlikely(virtqueue_poll(rq->vq, r)) &&
-           napi_schedule_prep(napi)) {
-               virtqueue_disable_cb(rq->vq);
-               if (received < budget) {
-                       budget -= received;
-                       goto again;
-               } else {
-                       __napi_schedule(napi);
+               if (napi_complete_done(napi, received)) {
+                       if (unlikely(virtqueue_poll(rq->vq, r)) &&
+                           napi_schedule_prep(napi)) {
+                               virtqueue_disable_cb(rq->vq);
+                               __napi_schedule(napi);
+                       }
                }
        }
 
        return received;
 }
-#endif /* CONFIG_NET_RX_BUSY_POLL */
 
 static int virtnet_open(struct net_device *dev)
 {
@@ -1244,10 +1223,9 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
        struct sockaddr *addr;
        struct scatterlist sg;
 
-       addr = kmalloc(sizeof(*addr), GFP_KERNEL);
+       addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
        if (!addr)
                return -ENOMEM;
-       memcpy(addr, p, sizeof(*addr));
 
        ret = eth_prepare_mac_addr_change(dev, addr);
        if (ret)
@@ -1281,8 +1259,8 @@ out:
        return ret;
 }
 
-static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
-                                              struct rtnl_link_stats64 *tot)
+static void virtnet_stats(struct net_device *dev,
+                         struct rtnl_link_stats64 *tot)
 {
        struct virtnet_info *vi = netdev_priv(dev);
        int cpu;
@@ -1315,8 +1293,6 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
        tot->rx_dropped = dev->stats.rx_dropped;
        tot->rx_length_errors = dev->stats.rx_length_errors;
        tot->rx_frame_errors = dev->stats.rx_frame_errors;
-
-       return tot;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1339,7 +1315,7 @@ static void virtnet_ack_link_announce(struct virtnet_info *vi)
        rtnl_unlock();
 }
 
-static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
 {
        struct scatterlist sg;
        struct net_device *dev = vi->dev;
@@ -1365,6 +1341,16 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
        return 0;
 }
 
+static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+{
+       int err;
+
+       rtnl_lock();
+       err = _virtnet_set_queues(vi, queue_pairs);
+       rtnl_unlock();
+       return err;
+}
+
 static int virtnet_close(struct net_device *dev)
 {
        struct virtnet_info *vi = netdev_priv(dev);
@@ -1617,7 +1603,7 @@ static int virtnet_set_channels(struct net_device *dev,
                return -EINVAL;
 
        get_online_cpus();
-       err = virtnet_set_queues(vi, queue_pairs);
+       err = _virtnet_set_queues(vi, queue_pairs);
        if (!err) {
                netif_set_real_num_tx_queues(dev, queue_pairs);
                netif_set_real_num_rx_queues(dev, queue_pairs);
@@ -1707,19 +1693,91 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
        .set_settings = virtnet_set_settings,
 };
 
+static void virtnet_freeze_down(struct virtio_device *vdev)
+{
+       struct virtnet_info *vi = vdev->priv;
+       int i;
+
+       /* Make sure no work handler is accessing the device */
+       flush_work(&vi->config_work);
+
+       netif_device_detach(vi->dev);
+       cancel_delayed_work_sync(&vi->refill);
+
+       if (netif_running(vi->dev)) {
+               for (i = 0; i < vi->max_queue_pairs; i++)
+                       napi_disable(&vi->rq[i].napi);
+       }
+}
+
+static int init_vqs(struct virtnet_info *vi);
+static void _remove_vq_common(struct virtnet_info *vi);
+
+static int virtnet_restore_up(struct virtio_device *vdev)
+{
+       struct virtnet_info *vi = vdev->priv;
+       int err, i;
+
+       err = init_vqs(vi);
+       if (err)
+               return err;
+
+       virtio_device_ready(vdev);
+
+       if (netif_running(vi->dev)) {
+               for (i = 0; i < vi->curr_queue_pairs; i++)
+                       if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
+                               schedule_delayed_work(&vi->refill, 0);
+
+               for (i = 0; i < vi->max_queue_pairs; i++)
+                       virtnet_napi_enable(&vi->rq[i]);
+       }
+
+       netif_device_attach(vi->dev);
+       return err;
+}
+
+static int virtnet_reset(struct virtnet_info *vi)
+{
+       struct virtio_device *dev = vi->vdev;
+       int ret;
+
+       virtio_config_disable(dev);
+       dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED;
+       virtnet_freeze_down(dev);
+       _remove_vq_common(vi);
+
+       dev->config->reset(dev);
+       virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+       virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);
+
+       ret = virtio_finalize_features(dev);
+       if (ret)
+               goto err;
+
+       ret = virtnet_restore_up(dev);
+       if (ret)
+               goto err;
+       ret = _virtnet_set_queues(vi, vi->curr_queue_pairs);
+       if (ret)
+               goto err;
+
+       virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
+       virtio_config_enable(dev);
+       return 0;
+err:
+       virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
+       return ret;
+}
+
 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
 {
        unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
        struct virtnet_info *vi = netdev_priv(dev);
        struct bpf_prog *old_prog;
-       u16 xdp_qp = 0, curr_qp;
+       u16 oxdp_qp, xdp_qp = 0, curr_qp;
        int i, err;
 
-       if (prog && prog->xdp_adjust_head) {
-               netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n");
-               return -EOPNOTSUPP;
-       }
-
        if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
            virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
            virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
@@ -1749,21 +1807,32 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
                return -ENOMEM;
        }
 
-       err = virtnet_set_queues(vi, curr_qp + xdp_qp);
+       if (prog) {
+               prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
+               if (IS_ERR(prog))
+                       return PTR_ERR(prog);
+       }
+
+       err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
        if (err) {
                dev_warn(&dev->dev, "XDP Device queue allocation failure.\n");
-               return err;
+               goto virtio_queue_err;
        }
 
-       if (prog) {
-               prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
-               if (IS_ERR(prog)) {
-                       virtnet_set_queues(vi, curr_qp);
-                       return PTR_ERR(prog);
-               }
+       oxdp_qp = vi->xdp_queue_pairs;
+
+       /* Changing the headroom in buffers is a disruptive operation because
+        * existing buffers must be flushed and reallocated. This happens when
+        * an XDP program is first added, or when XDP is disabled by removing
+        * the program, since either one changes the number of XDP queues.
+        */
+       if (vi->xdp_queue_pairs != xdp_qp) {
+               vi->xdp_queue_pairs = xdp_qp;
+               err = virtnet_reset(vi);
+               if (err)
+                       goto virtio_reset_err;
        }
 
-       vi->xdp_queue_pairs = xdp_qp;
        netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
 
        for (i = 0; i < vi->max_queue_pairs; i++) {
@@ -1774,6 +1843,21 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
        }
 
        return 0;
+
+virtio_reset_err:
+       /* On reset error, do our best to unwind the in-flight XDP changes and
+        * return the error up to user space for resolution. The underlying
+        * reset hung on us, so there is not much more we can do here.
+        */
+       dev_warn(&dev->dev, "XDP reset failure and queues unstable\n");
+       vi->xdp_queue_pairs = oxdp_qp;
+virtio_queue_err:
+       /* On queue set error we can unwind the bpf ref count; user space can
+        * retry, as this is most likely an allocation failure.
+        */
+       if (prog)
+               bpf_prog_sub(prog, vi->max_queue_pairs - 1);
+       return err;
 }
 
 static bool virtnet_xdp_query(struct net_device *dev)
@@ -1813,9 +1897,6 @@ static const struct net_device_ops virtnet_netdev = {
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = virtnet_netpoll,
-#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       .ndo_busy_poll          = virtnet_busy_poll,
 #endif
        .ndo_xdp                = virtnet_xdp,
 };
@@ -1877,12 +1958,11 @@ static void virtnet_free_queues(struct virtnet_info *vi)
        kfree(vi->sq);
 }
 
-static void free_receive_bufs(struct virtnet_info *vi)
+static void _free_receive_bufs(struct virtnet_info *vi)
 {
        struct bpf_prog *old_prog;
        int i;
 
-       rtnl_lock();
        for (i = 0; i < vi->max_queue_pairs; i++) {
                while (vi->rq[i].pages)
                        __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
@@ -1892,6 +1972,12 @@ static void free_receive_bufs(struct virtnet_info *vi)
                if (old_prog)
                        bpf_prog_put(old_prog);
        }
+}
+
+static void free_receive_bufs(struct virtnet_info *vi)
+{
+       rtnl_lock();
+       _free_receive_bufs(vi);
        rtnl_unlock();
 }
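
The split above follows the usual kernel convention: the underscore-prefixed
helper assumes the caller already holds the lock (here the RTNL), while the
unprefixed wrapper is self-locking. A generic sketch of the pattern, with
hypothetical names and a mutex standing in for the RTNL:

	#include <linux/mutex.h>

	struct foo;			/* hypothetical */

	static DEFINE_MUTEX(foo_lock);

	static void _foo_teardown(struct foo *f)
	{
		/* caller must hold foo_lock */
		/* ... release resources ... */
	}

	static void foo_teardown(struct foo *f)
	{
		mutex_lock(&foo_lock);
		_foo_teardown(f);
		mutex_unlock(&foo_lock);
	}
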
 
@@ -2330,9 +2416,7 @@ static int virtnet_probe(struct virtio_device *vdev)
                goto free_unregister_netdev;
        }
 
-       rtnl_lock();
        virtnet_set_queues(vi, vi->curr_queue_pairs);
-       rtnl_unlock();
 
        /* Assume link up if device can't report link status,
           otherwise get link status from config. */
@@ -2364,6 +2448,15 @@ free:
        return err;
 }
 
+static void _remove_vq_common(struct virtnet_info *vi)
+{
+       vi->vdev->config->reset(vi->vdev);
+       free_unused_bufs(vi);
+       _free_receive_bufs(vi);
+       free_receive_page_frags(vi);
+       virtnet_del_vqs(vi);
+}
+
 static void remove_vq_common(struct virtnet_info *vi)
 {
        vi->vdev->config->reset(vi->vdev);
@@ -2399,21 +2492,9 @@ static void virtnet_remove(struct virtio_device *vdev)
 static int virtnet_freeze(struct virtio_device *vdev)
 {
        struct virtnet_info *vi = vdev->priv;
-       int i;
 
        virtnet_cpu_notif_remove(vi);
-
-       /* Make sure no work handler is accessing the device */
-       flush_work(&vi->config_work);
-
-       netif_device_detach(vi->dev);
-       cancel_delayed_work_sync(&vi->refill);
-
-       if (netif_running(vi->dev)) {
-               for (i = 0; i < vi->max_queue_pairs; i++)
-                       napi_disable(&vi->rq[i].napi);
-       }
-
+       virtnet_freeze_down(vdev);
        remove_vq_common(vi);
 
        return 0;
@@ -2422,28 +2503,12 @@ static int virtnet_freeze(struct virtio_device *vdev)
 static int virtnet_restore(struct virtio_device *vdev)
 {
        struct virtnet_info *vi = vdev->priv;
-       int err, i;
+       int err;
 
-       err = init_vqs(vi);
+       err = virtnet_restore_up(vdev);
        if (err)
                return err;
-
-       virtio_device_ready(vdev);
-
-       if (netif_running(vi->dev)) {
-               for (i = 0; i < vi->curr_queue_pairs; i++)
-                       if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
-                               schedule_delayed_work(&vi->refill, 0);
-
-               for (i = 0; i < vi->max_queue_pairs; i++)
-                       virtnet_napi_enable(&vi->rq[i]);
-       }
-
-       netif_device_attach(vi->dev);
-
-       rtnl_lock();
        virtnet_set_queues(vi, vi->curr_queue_pairs);
-       rtnl_unlock();
 
        err = virtnet_cpu_notif_add(vi);
        if (err)
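
Folding the open-coded suspend/resume bodies into virtnet_freeze_down() and
virtnet_restore_up() lets the new XDP reset path reuse them: a reset is
roughly the composition of the two halves. A sketch of that composition, with
hypothetical foo_* stand-ins:

	static int foo_reset(struct virtio_device *vdev)
	{
		foo_freeze_down(vdev);		/* detach netif, quiesce NAPI */
		foo_remove_vqs(vdev);		/* reset device, free virtqueues */
		return foo_restore_up(vdev);	/* re-init vqs, re-attach netif */
	}
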
index e34b1297c96af96e6a9d6a6841c4bbc24e74254e..25bc764ae7dc4c4dc9a5e6cb4f17f89f62464f4c 100644 (file)
@@ -1851,7 +1851,7 @@ vmxnet3_poll(struct napi_struct *napi, int budget)
        rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
 
        if (rxd_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rxd_done);
                vmxnet3_enable_all_intrs(rx_queue->adapter);
        }
        return rxd_done;
@@ -1882,7 +1882,7 @@ vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
        rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
 
        if (rxd_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, rxd_done);
                vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
        }
        return rxd_done;
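
The napi_complete() to napi_complete_done() conversions here (and in the HDLC
drivers further down) report how much work the poll actually did, which the
NAPI core can use for busy-polling decisions. The generic poll shape after
the conversion, as a sketch with hypothetical foo_* helpers:

	static int foo_process_rx(struct foo_queue *q, int budget); /* hypothetical */
	static void foo_enable_irq(struct foo_queue *q);	     /* hypothetical */

	struct foo_queue {
		struct napi_struct napi;
		/* ... */
	};

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_queue *q = container_of(napi, struct foo_queue, napi);
		int done = foo_process_rx(q, budget);

		if (done < budget) {
			/* report actual work, not just "complete" */
			napi_complete_done(napi, done);
			foo_enable_irq(q);
		}
		return done;
	}
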
index aabc6ef366b466f019637b82464dea1ee4aa8c50..f88ffafebfbfd40192fd5919ce970f1bf15b73fc 100644 (file)
@@ -113,7 +113,7 @@ vmxnet3_global_stats[] = {
 };
 
 
-struct rtnl_link_stats64 *
+void
 vmxnet3_get_stats64(struct net_device *netdev,
                   struct rtnl_link_stats64 *stats)
 {
@@ -160,8 +160,6 @@ vmxnet3_get_stats64(struct net_device *netdev,
                stats->rx_dropped += drvRxStats->drop_total;
                stats->multicast +=  devRxStats->mcastPktsRxOK;
        }
-
-       return stats;
 }
 
 static int
index 59e077be88290e5ffbd470850f98d1bb762c3f04..ba1c9f93592b809cddc64b5e2dc68d1ecadf4190 100644 (file)
@@ -465,8 +465,8 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
 
 void vmxnet3_set_ethtool_ops(struct net_device *netdev);
 
-struct rtnl_link_stats64 *
-vmxnet3_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
+void vmxnet3_get_stats64(struct net_device *dev,
+                        struct rtnl_link_stats64 *stats);
 
 extern char vmxnet3_driver_name[];
 #endif
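
These hunks (and the vrf hunks below) are part of a tree-wide change:
.ndo_get_stats64 now returns void, since the core always supplies the storage
and never used the returned pointer. A sketch of the new shape, assuming
dev->tstats points at per-cpu struct pcpu_sw_netstats:

	static void foo_get_stats64(struct net_device *dev,
				    struct rtnl_link_stats64 *stats)
	{
		int cpu;

		for_each_possible_cpu(cpu) {
			const struct pcpu_sw_netstats *p =
				per_cpu_ptr(dev->tstats, cpu);
			u64 rx_packets, rx_bytes;
			unsigned int start;

			do {
				start = u64_stats_fetch_begin_irq(&p->syncp);
				rx_packets = p->rx_packets;
				rx_bytes = p->rx_bytes;
			} while (u64_stats_fetch_retry_irq(&p->syncp, start));

			stats->rx_packets += rx_packets;
			stats->rx_bytes += rx_bytes;
		}
		/* nothing to return any more */
	}
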
index 454f907d419a7f87cc0ae1813f40c054726be7e8..22379da63400776ff70994097de6d472232ca908 100644 (file)
@@ -77,8 +77,8 @@ static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
        kfree_skb(skb);
 }
 
-static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
-                                                struct rtnl_link_stats64 *stats)
+static void vrf_get_stats64(struct net_device *dev,
+                           struct rtnl_link_stats64 *stats)
 {
        int i;
 
@@ -102,7 +102,6 @@ static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
                stats->rx_bytes += rbytes;
                stats->rx_packets += rpkts;
        }
-       return stats;
 }
 
 /* Local traffic destined to local address. Reinsert the packet to rx
@@ -379,7 +378,8 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
        if (unlikely(!neigh))
                neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
        if (!IS_ERR(neigh)) {
-               ret = dst_neigh_output(dst, neigh, skb);
+               sock_confirm_neigh(skb, neigh);
+               ret = neigh_output(neigh, skb);
                rcu_read_unlock_bh();
                return ret;
        }
@@ -575,8 +575,10 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
        neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
        if (unlikely(!neigh))
                neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
-       if (!IS_ERR(neigh))
-               ret = dst_neigh_output(dst, neigh, skb);
+       if (!IS_ERR(neigh)) {
+               sock_confirm_neigh(skb, neigh);
+               ret = neigh_output(neigh, skb);
+       }
 
        rcu_read_unlock_bh();
 err:
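
Here dst_neigh_output() is replaced by an explicit sock_confirm_neigh() +
neigh_output() pair: neighbour confirmation is now driven by the transmitting
socket (e.g. a MSG_CONFIRM sendmsg) rather than implicitly by the dst on
every packet. The resulting output-path shape, with a hypothetical lookup
helper:

	static struct neighbour *foo_lookup_neigh(struct dst_entry *dst,
						  struct sk_buff *skb); /* hypothetical */

	static int foo_finish_output(struct sk_buff *skb, struct dst_entry *dst)
	{
		struct neighbour *neigh;
		int ret = -EINVAL;

		rcu_read_lock_bh();
		neigh = foo_lookup_neigh(dst, skb);
		if (!IS_ERR(neigh)) {
			sock_confirm_neigh(skb, neigh);	/* confirm via skb->sk */
			ret = neigh_output(neigh, skb);
		}
		rcu_read_unlock_bh();
		return ret;
	}
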
index 30b04cf2bb1e08f89ac93c086b17bf8d59df6b37..c5db8f8563c15ab16670dab6faa8df896ac1eb6e 100644 (file)
@@ -75,6 +75,7 @@ struct vxlan_fdb {
        struct list_head  remotes;
        u8                eth_addr[ETH_ALEN];
        u16               state;        /* see ndm_state */
+       __be32            vni;
        u8                flags;        /* see ndm_flags */
 };
 
@@ -302,6 +303,10 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
        if (rdst->remote_vni != vxlan->default_dst.remote_vni &&
            nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni)))
                goto nla_put_failure;
+       if ((vxlan->flags & VXLAN_F_COLLECT_METADATA) && fdb->vni &&
+           nla_put_u32(skb, NDA_SRC_VNI,
+                       be32_to_cpu(fdb->vni)))
+               goto nla_put_failure;
        if (rdst->remote_ifindex &&
            nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
                goto nla_put_failure;
@@ -400,34 +405,51 @@ static u32 eth_hash(const unsigned char *addr)
        return hash_64(value, FDB_HASH_BITS);
 }
 
+static u32 eth_vni_hash(const unsigned char *addr, __be32 vni)
+{
+       /* use 1 byte of OUI and 3 bytes of NIC */
+       u32 key = get_unaligned((u32 *)(addr + 2));
+
+       return jhash_2words(key, vni, vxlan_salt) & (FDB_HASH_SIZE - 1);
+}
+
 /* Hash chain to use given mac address */
 static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
-                                               const u8 *mac)
+                                               const u8 *mac, __be32 vni)
 {
-       return &vxlan->fdb_head[eth_hash(mac)];
+       if (vxlan->flags & VXLAN_F_COLLECT_METADATA)
+               return &vxlan->fdb_head[eth_vni_hash(mac, vni)];
+       else
+               return &vxlan->fdb_head[eth_hash(mac)];
 }
 
 /* Look up Ethernet address in forwarding table */
 static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
-                                       const u8 *mac)
+                                         const u8 *mac, __be32 vni)
 {
-       struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
+       struct hlist_head *head = vxlan_fdb_head(vxlan, mac, vni);
        struct vxlan_fdb *f;
 
        hlist_for_each_entry_rcu(f, head, hlist) {
-               if (ether_addr_equal(mac, f->eth_addr))
-                       return f;
+               if (ether_addr_equal(mac, f->eth_addr)) {
+                       if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
+                               if (vni == f->vni)
+                                       return f;
+                       } else {
+                               return f;
+                       }
+               }
        }
 
        return NULL;
 }
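
With VXLAN_F_COLLECT_METADATA, one netdevice carries traffic for many VNIs,
so the FDB key becomes (MAC, VNI) instead of MAC alone, and the lookup
helpers thread the VNI through accordingly. Bucket selection in one place, as
a sketch (eth_hash(), vxlan_salt and FDB_HASH_SIZE are the driver's own):

	static u32 fdb_bucket(const u8 *mac, __be32 vni, bool per_vni)
	{
		u32 key;

		if (!per_vni)
			return eth_hash(mac);	/* legacy MAC-only bucket */

		key = get_unaligned((u32 *)(mac + 2)); /* 1 OUI byte + 3 NIC bytes */
		return jhash_2words(key, (__force u32)vni, vxlan_salt) &
		       (FDB_HASH_SIZE - 1);
	}
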
 
 static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
-                                       const u8 *mac)
+                                       const u8 *mac, __be32 vni)
 {
        struct vxlan_fdb *f;
 
-       f = __vxlan_find_mac(vxlan, mac);
+       f = __vxlan_find_mac(vxlan, mac, vni);
        if (f)
                f->used = jiffies;
 
@@ -605,15 +627,15 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
 static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                            const u8 *mac, union vxlan_addr *ip,
                            __u16 state, __u16 flags,
-                           __be16 port, __be32 vni, __u32 ifindex,
-                           __u8 ndm_flags)
+                           __be16 port, __be32 src_vni, __be32 vni,
+                           __u32 ifindex, __u8 ndm_flags)
 {
        struct vxlan_rdst *rd = NULL;
        struct vxlan_fdb *f;
        int notify = 0;
        int rc;
 
-       f = __vxlan_find_mac(vxlan, mac);
+       f = __vxlan_find_mac(vxlan, mac, src_vni);
        if (f) {
                if (flags & NLM_F_EXCL) {
                        netdev_dbg(vxlan->dev,
@@ -670,6 +692,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
                f->state = state;
                f->flags = ndm_flags;
                f->updated = f->used = jiffies;
+               f->vni = src_vni;
                INIT_LIST_HEAD(&f->remotes);
                memcpy(f->eth_addr, mac, ETH_ALEN);
 
@@ -681,7 +704,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
 
                ++vxlan->addrcnt;
                hlist_add_head_rcu(&f->hlist,
-                                  vxlan_fdb_head(vxlan, mac));
+                                  vxlan_fdb_head(vxlan, mac, src_vni));
        }
 
        if (notify) {
@@ -718,8 +741,8 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
 }
 
 static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
-                          union vxlan_addr *ip, __be16 *port, __be32 *vni,
-                          u32 *ifindex)
+                          union vxlan_addr *ip, __be16 *port, __be32 *src_vni,
+                          __be32 *vni, u32 *ifindex)
 {
        struct net *net = dev_net(vxlan->dev);
        int err;
@@ -757,6 +780,14 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
                *vni = vxlan->default_dst.remote_vni;
        }
 
+       if (tb[NDA_SRC_VNI]) {
+               if (nla_len(tb[NDA_SRC_VNI]) != sizeof(u32))
+                       return -EINVAL;
+               *src_vni = cpu_to_be32(nla_get_u32(tb[NDA_SRC_VNI]));
+       } else {
+               *src_vni = vxlan->default_dst.remote_vni;
+       }
+
        if (tb[NDA_IFINDEX]) {
                struct net_device *tdev;
 
@@ -782,7 +813,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        /* struct net *net = dev_net(vxlan->dev); */
        union vxlan_addr ip;
        __be16 port;
-       __be32 vni;
+       __be32 src_vni, vni;
        u32 ifindex;
        int err;
 
@@ -795,7 +826,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
        if (tb[NDA_DST] == NULL)
                return -EINVAL;
 
-       err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
+       err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
        if (err)
                return err;
 
@@ -804,36 +835,24 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 
        spin_lock_bh(&vxlan->hash_lock);
        err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
-                              port, vni, ifindex, ndm->ndm_flags);
+                              port, src_vni, vni, ifindex, ndm->ndm_flags);
        spin_unlock_bh(&vxlan->hash_lock);
 
        return err;
 }
 
-/* Delete entry (via netlink) */
-static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
-                           struct net_device *dev,
-                           const unsigned char *addr, u16 vid)
+static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
+                             const unsigned char *addr, union vxlan_addr ip,
+                             __be16 port, __be32 src_vni, u32 vni, u32 ifindex,
+                             u16 vid)
 {
-       struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_fdb *f;
        struct vxlan_rdst *rd = NULL;
-       union vxlan_addr ip;
-       __be16 port;
-       __be32 vni;
-       u32 ifindex;
-       int err;
+       int err = -ENOENT;
 
-       err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex);
-       if (err)
-               return err;
-
-       err = -ENOENT;
-
-       spin_lock_bh(&vxlan->hash_lock);
-       f = vxlan_find_mac(vxlan, addr);
+       f = vxlan_find_mac(vxlan, addr, src_vni);
        if (!f)
-               goto out;
+               return err;
 
        if (!vxlan_addr_any(&ip)) {
                rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
@@ -841,8 +860,6 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
                        goto out;
        }
 
-       err = 0;
-
        /* remove a destination if it's not the only one on the list,
         * otherwise destroy the fdb entry
         */
@@ -856,6 +873,28 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
        vxlan_fdb_destroy(vxlan, f);
 
 out:
+       return 0;
+}
+
+/* Delete entry (via netlink) */
+static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
+                           struct net_device *dev,
+                           const unsigned char *addr, u16 vid)
+{
+       struct vxlan_dev *vxlan = netdev_priv(dev);
+       union vxlan_addr ip;
+       __be32 src_vni, vni;
+       __be16 port;
+       u32 ifindex;
+       int err;
+
+       err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &src_vni, &vni, &ifindex);
+       if (err)
+               return err;
+
+       spin_lock_bh(&vxlan->hash_lock);
+       err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex,
+                                vid);
        spin_unlock_bh(&vxlan->hash_lock);
 
        return err;
@@ -901,12 +940,13 @@ out:
  * Return true if packet is bogus and should be dropped.
  */
 static bool vxlan_snoop(struct net_device *dev,
-                       union vxlan_addr *src_ip, const u8 *src_mac)
+                       union vxlan_addr *src_ip, const u8 *src_mac,
+                       __be32 vni)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_fdb *f;
 
-       f = vxlan_find_mac(vxlan, src_mac);
+       f = vxlan_find_mac(vxlan, src_mac, vni);
        if (likely(f)) {
                struct vxlan_rdst *rdst = first_remote_rcu(f);
 
@@ -935,6 +975,7 @@ static bool vxlan_snoop(struct net_device *dev,
                                         NUD_REACHABLE,
                                         NLM_F_EXCL|NLM_F_CREATE,
                                         vxlan->cfg.dst_port,
+                                        vni,
                                         vxlan->default_dst.remote_vni,
                                         0, NTF_SELF);
                spin_unlock(&vxlan->hash_lock);
@@ -1202,7 +1243,7 @@ static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
 
 static bool vxlan_set_mac(struct vxlan_dev *vxlan,
                          struct vxlan_sock *vs,
-                         struct sk_buff *skb)
+                         struct sk_buff *skb, __be32 vni)
 {
        union vxlan_addr saddr;
 
@@ -1226,7 +1267,7 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
        }
 
        if ((vxlan->flags & VXLAN_F_LEARN) &&
-           vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
+           vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source, vni))
                return false;
 
        return true;
@@ -1268,6 +1309,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
        __be16 protocol = htons(ETH_P_TEB);
        bool raw_proto = false;
        void *oiph;
+       __be32 vni = 0;
 
        /* Need UDP and VXLAN header to be present */
        if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1289,7 +1331,9 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
        if (!vs)
                goto drop;
 
-       vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni));
+       vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
+
+       vxlan = vxlan_vs_find_vni(vs, vni);
        if (!vxlan)
                goto drop;
 
@@ -1307,7 +1351,6 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
                        goto drop;
 
        if (vxlan_collect_metadata(vs)) {
-               __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
                struct metadata_dst *tun_dst;
 
                tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
@@ -1345,7 +1388,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
        }
 
        if (!raw_proto) {
-               if (!vxlan_set_mac(vxlan, vs, skb))
+               if (!vxlan_set_mac(vxlan, vs, skb, vni))
                        goto drop;
        } else {
                skb_reset_mac_header(skb);
@@ -1377,7 +1420,7 @@ drop:
        return 0;
 }
 
-static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
+static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct arphdr *parp;
@@ -1424,7 +1467,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
                        goto out;
                }
 
-               f = vxlan_find_mac(vxlan, n->ha);
+               f = vxlan_find_mac(vxlan, n->ha, vni);
                if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
                        /* bridge-local neighbor */
                        neigh_release(n);
@@ -1548,7 +1591,7 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request,
        return reply;
 }
 
-static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
+static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct nd_msg *msg;
@@ -1585,7 +1628,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
                        goto out;
                }
 
-               f = vxlan_find_mac(vxlan, n->ha);
+               f = vxlan_find_mac(vxlan, n->ha, vni);
                if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
                        /* bridge-local neighbor */
                        neigh_release(n);
@@ -1906,7 +1949,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
 
 /* Bypass encapsulation if the destination is local */
 static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
-                              struct vxlan_dev *dst_vxlan)
+                              struct vxlan_dev *dst_vxlan, __be32 vni)
 {
        struct pcpu_sw_netstats *tx_stats, *rx_stats;
        union vxlan_addr loopback;
@@ -1932,7 +1975,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
        }
 
        if (dst_vxlan->flags & VXLAN_F_LEARN)
-               vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);
+               vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, vni);
 
        u64_stats_update_begin(&tx_stats->syncp);
        tx_stats->tx_packets++;
@@ -1951,7 +1994,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
 
 static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
                                 struct vxlan_dev *vxlan, union vxlan_addr *daddr,
-                                __be32 dst_port, __be32 vni, struct dst_entry *dst,
+                                __be16 dst_port, __be32 vni, struct dst_entry *dst,
                                 u32 rt_flags)
 {
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1976,7 +2019,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
 
                        return -ENOENT;
                }
-               vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+               vxlan_encap_bypass(skb, vxlan, dst_vxlan, vni);
                return 1;
        }
 
@@ -1984,7 +2027,8 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
 }
 
 static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
-                          struct vxlan_rdst *rdst, bool did_rsc)
+                          __be32 default_vni, struct vxlan_rdst *rdst,
+                          bool did_rsc)
 {
        struct dst_cache *dst_cache;
        struct ip_tunnel_info *info;
@@ -2011,14 +2055,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                if (vxlan_addr_any(dst)) {
                        if (did_rsc) {
                                /* short-circuited back to local bridge */
-                               vxlan_encap_bypass(skb, vxlan, vxlan);
+                               vxlan_encap_bypass(skb, vxlan, vxlan, default_vni);
                                return;
                        }
                        goto drop;
                }
 
                dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port;
-               vni = rdst->remote_vni;
+               vni = (rdst->remote_vni) ? : default_vni;
                src = &vxlan->cfg.saddr;
                dst_cache = &rdst->dst_cache;
                md->gbp = skb->mark;
@@ -2173,23 +2217,29 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
        bool did_rsc = false;
        struct vxlan_rdst *rdst, *fdst = NULL;
        struct vxlan_fdb *f;
+       __be32 vni = 0;
 
        info = skb_tunnel_info(skb);
 
        skb_reset_mac_header(skb);
 
        if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
-               if (info && info->mode & IP_TUNNEL_INFO_TX)
-                       vxlan_xmit_one(skb, dev, NULL, false);
-               else
-                       kfree_skb(skb);
-               return NETDEV_TX_OK;
+               if (info && info->mode & IP_TUNNEL_INFO_BRIDGE &&
+                   info->mode & IP_TUNNEL_INFO_TX) {
+                       vni = tunnel_id_to_key32(info->key.tun_id);
+               } else {
+                       if (info && info->mode & IP_TUNNEL_INFO_TX)
+                               vxlan_xmit_one(skb, dev, vni, NULL, false);
+                       else
+                               kfree_skb(skb);
+                       return NETDEV_TX_OK;
+               }
        }
 
        if (vxlan->flags & VXLAN_F_PROXY) {
                eth = eth_hdr(skb);
                if (ntohs(eth->h_proto) == ETH_P_ARP)
-                       return arp_reduce(dev, skb);
+                       return arp_reduce(dev, skb, vni);
 #if IS_ENABLED(CONFIG_IPV6)
                else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
                         pskb_may_pull(skb, sizeof(struct ipv6hdr)
@@ -2200,13 +2250,13 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
                                msg = (struct nd_msg *)skb_transport_header(skb);
                                if (msg->icmph.icmp6_code == 0 &&
                                    msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
-                                       return neigh_reduce(dev, skb);
+                                       return neigh_reduce(dev, skb, vni);
                }
 #endif
        }
 
        eth = eth_hdr(skb);
-       f = vxlan_find_mac(vxlan, eth->h_dest);
+       f = vxlan_find_mac(vxlan, eth->h_dest, vni);
        did_rsc = false;
 
        if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
@@ -2214,11 +2264,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
             ntohs(eth->h_proto) == ETH_P_IPV6)) {
                did_rsc = route_shortcircuit(dev, skb);
                if (did_rsc)
-                       f = vxlan_find_mac(vxlan, eth->h_dest);
+                       f = vxlan_find_mac(vxlan, eth->h_dest, vni);
        }
 
        if (f == NULL) {
-               f = vxlan_find_mac(vxlan, all_zeros_mac);
+               f = vxlan_find_mac(vxlan, all_zeros_mac, vni);
                if (f == NULL) {
                        if ((vxlan->flags & VXLAN_F_L2MISS) &&
                            !is_multicast_ether_addr(eth->h_dest))
@@ -2239,11 +2289,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
                }
                skb1 = skb_clone(skb, GFP_ATOMIC);
                if (skb1)
-                       vxlan_xmit_one(skb1, dev, rdst, did_rsc);
+                       vxlan_xmit_one(skb1, dev, vni, rdst, did_rsc);
        }
 
        if (fdst)
-               vxlan_xmit_one(skb, dev, fdst, did_rsc);
+               vxlan_xmit_one(skb, dev, vni, fdst, did_rsc);
        else
                kfree_skb(skb);
        return NETDEV_TX_OK;
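
On transmit, a bridge-supplied tunnel key becomes the default VNI, and an FDB
destination whose own VNI is 0 inherits it; (x) ? : y is the GCC conditional
with omitted middle operand, i.e. x ? x : y. As a one-liner sketch:

	static __be32 pick_tx_vni(__be32 rdst_vni, __be32 default_vni)
	{
		return rdst_vni ? : default_vni;
	}
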
@@ -2307,12 +2357,12 @@ static int vxlan_init(struct net_device *dev)
        return 0;
 }
 
-static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
+static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
 {
        struct vxlan_fdb *f;
 
        spin_lock_bh(&vxlan->hash_lock);
-       f = __vxlan_find_mac(vxlan, all_zeros_mac);
+       f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
        if (f)
                vxlan_fdb_destroy(vxlan, f);
        spin_unlock_bh(&vxlan->hash_lock);
@@ -2322,7 +2372,7 @@ static void vxlan_uninit(struct net_device *dev)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
 
-       vxlan_fdb_delete_default(vxlan);
+       vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
 
        free_percpu(dev->tstats);
 }
@@ -2925,6 +2975,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
                                       NLM_F_EXCL|NLM_F_CREATE,
                                       vxlan->cfg.dst_port,
                                       vxlan->default_dst.remote_vni,
+                                      vxlan->default_dst.remote_vni,
                                       vxlan->default_dst.remote_ifindex,
                                       NTF_SELF);
                if (err)
@@ -2933,7 +2984,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
 
        err = register_netdevice(dev);
        if (err) {
-               vxlan_fdb_delete_default(vxlan);
+               vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
                return err;
        }
 
index e38ce4da3efbf8af30c89556e8a896e1c5314bea..a5045b5279d70a92c827424be3ff7869c6193dc8 100644 (file)
@@ -573,7 +573,7 @@ static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
        howmany += hdlc_rx_done(priv, budget - howmany);
 
        if (howmany < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, howmany);
                qe_setbits32(priv->uccf->p_uccm,
                             (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
        }
@@ -1175,3 +1175,4 @@ static struct platform_driver ucc_hdlc_driver = {
 };
 
 module_platform_driver(ucc_hdlc_driver);
+MODULE_LICENSE("GPL");
index 7ef49dab68556b9309293383844a530a18db3681..cff0cfadd650cbc7472a03d9baba6ca55013f62b 100644 (file)
@@ -341,7 +341,7 @@ static int sca_poll(struct napi_struct *napi, int budget)
                received = sca_rx_done(port, budget);
 
        if (received < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, received);
                enable_intr(port);
        }
 
index 9d9b4e0def2a531344c8fd3d78258e1690340350..1f6bc8791d510612e4fd9b243bbfc1aab541d8e8 100644 (file)
@@ -241,7 +241,6 @@ static struct spi_driver slic_ds26522_driver = {
        .driver = {
                   .name = "ds26522",
                   .bus = &spi_bus_type,
-                  .owner = THIS_MODULE,
                   .of_match_table = slic_ds26522_match,
                   },
        .probe = slic_ds26522_probe,
@@ -249,15 +248,4 @@ static struct spi_driver slic_ds26522_driver = {
        .id_table = slic_ds26522_id,
 };
 
-static int __init slic_ds26522_init(void)
-{
-       return spi_register_driver(&slic_ds26522_driver);
-}
-
-static void __exit slic_ds26522_exit(void)
-{
-       spi_unregister_driver(&slic_ds26522_driver);
-}
-
-module_init(slic_ds26522_init);
-module_exit(slic_ds26522_exit);
+module_spi_driver(slic_ds26522_driver);
index 70ecd82d674dcee50aef2cbf64b321959f09544a..098c814e22c8b6d7478bbc81035bb7fd9d6f4f5a 100644 (file)
@@ -413,6 +413,13 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
                                                       skb_tail_pointer(newskb),
                                                       RX_PKT_SIZE,
                                                       PCI_DMA_FROMDEVICE);
+                               if (pci_dma_mapping_error(priv->pdev,
+                                          priv->rx_buffers[entry].mapping)) {
+                                       priv->rx_buffers[entry].skb = NULL;
+                                       dev_kfree_skb(newskb);
+                                       skb = NULL;
+                                       /* TODO: update rx dropped stats */
+                               }
                        } else {
                                skb = NULL;
                                /* TODO: update rx dropped stats */
@@ -1450,6 +1457,12 @@ static int adm8211_init_rings(struct ieee80211_hw *dev)
                                                  skb_tail_pointer(rx_info->skb),
                                                  RX_PKT_SIZE,
                                                  PCI_DMA_FROMDEVICE);
+               if (pci_dma_mapping_error(priv->pdev, rx_info->mapping)) {
+                       dev_kfree_skb(rx_info->skb);
+                       rx_info->skb = NULL;
+                       break;
+               }
+
                desc->buffer1 = cpu_to_le32(rx_info->mapping);
                desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL);
        }
@@ -1613,7 +1626,7 @@ static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int
 }
 
 /* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */
-static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
+static int adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
                           u16 plcp_signal,
                           size_t hdrlen)
 {
@@ -1625,6 +1638,8 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
 
        mapping = pci_map_single(priv->pdev, skb->data, skb->len,
                                 PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(priv->pdev, mapping))
+               return -ENOMEM;
 
        spin_lock_irqsave(&priv->lock, flags);
 
@@ -1657,6 +1672,8 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
 
        /* Trigger transmit poll */
        ADM8211_CSR_WRITE(TDR, 0);
+
+       return 0;
 }
 
 /* Put adm8211_tx_hdr on skb and transmit */
@@ -1710,7 +1727,10 @@ static void adm8211_tx(struct ieee80211_hw *dev,
 
        txhdr->retry_limit = info->control.rates[0].count;
 
-       adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
+       if (adm8211_tx_raw(dev, skb, plcp_signal, hdrlen)) {
+               /* Drop packet */
+               ieee80211_free_txskb(dev, skb);
+       }
 }
 
 static int adm8211_alloc_rings(struct ieee80211_hw *dev)
@@ -1843,7 +1863,8 @@ static int adm8211_probe(struct pci_dev *pdev,
        priv->rx_ring_size = rx_ring_size;
        priv->tx_ring_size = tx_ring_size;
 
-       if (adm8211_alloc_rings(dev)) {
+       err = adm8211_alloc_rings(dev);
+       if (err) {
                printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n",
                       pci_name(pdev));
                goto err_iounmap;
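
The adm8211 hunks all add the same missing check: a streaming DMA mapping can
fail, and the cookie must be validated before it is written into a descriptor
or handed to the device. The pattern, as a sketch with hypothetical driver
types:

	static int foo_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				  dma_addr_t *mapping)
	{
		*mapping = pci_map_single(pdev, skb->data, skb->len,
					  PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, *mapping))
			return -ENOMEM;	/* caller drops the skb, never touches HW */
		return 0;
	}
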
index db1ca629cbd6d9c57d2fdd472c45c62fe1c5638f..b4241cf9b7ed41cad7094af9c1b56c772e66b63e 100644 (file)
@@ -3,6 +3,7 @@ config ATH10K
         depends on MAC80211 && HAS_DMA
        select ATH_COMMON
        select CRC32
+       select WANT_DEV_COREDUMP
         ---help---
           This module adds support for wireless adapters based on
           Atheros IEEE 802.11ac family of chipsets.
index 766c63bf05c4a969b774269d2ba4fbe80f629418..45226dbee5ce23c773db06c0d08ece006bcba2c0 100644 (file)
@@ -33,6 +33,9 @@ static const struct of_device_id ath10k_ahb_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match);
 
+#define QCA4019_SRAM_ADDR      0x000C0000
+#define QCA4019_SRAM_LEN       0x00040000 /* 256 KiB */
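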
+
 static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar)
 {
        return &((struct ath10k_pci *)ar->drv_priv)->ahb[0];
@@ -699,6 +702,25 @@ out:
        return ret;
 }
 
+static u32 ath10k_ahb_qca4019_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+       u32 val = 0, region = addr & 0xfffff;
+
+       val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+
+       if (region >= QCA4019_SRAM_ADDR && region <=
+           (QCA4019_SRAM_ADDR + QCA4019_SRAM_LEN)) {
+               /* SRAM contents for QCA4019 can be directly accessed and
+                * no conversions are required
+                */
+               val |= region;
+       } else {
+               val |= 0x100000 | region;
+       }
+
+       return val;
+}
+
 static const struct ath10k_hif_ops ath10k_ahb_hif_ops = {
        .tx_sg                  = ath10k_pci_hif_tx_sg,
        .diag_read              = ath10k_pci_hif_diag_read,
@@ -766,6 +788,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
        ar_pci->mem_len = ar_ahb->mem_len;
        ar_pci->ar = ar;
        ar_pci->bus_ops = &ath10k_ahb_bus_ops;
+       ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr;
 
        ret = ath10k_pci_setup_resource(ar);
        if (ret) {
index 0b4d7965988445d0e9660cafeb1a5fd6e3eb64ad..4045657e0a6ec75ee5e27e51b06fe95916f8ad77 100644 (file)
@@ -958,10 +958,10 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
         * coherent DMA are unsupported
         */
        dest_ring->base_addr_owner_space_unaligned =
-               dma_alloc_coherent(ar->dev,
-                                  (nentries * sizeof(struct ce_desc) +
-                                   CE_DESC_RING_ALIGN),
-                                  &base_addr, GFP_KERNEL);
+               dma_zalloc_coherent(ar->dev,
+                                   (nentries * sizeof(struct ce_desc) +
+                                    CE_DESC_RING_ALIGN),
+                                   &base_addr, GFP_KERNEL);
        if (!dest_ring->base_addr_owner_space_unaligned) {
                kfree(dest_ring);
                return ERR_PTR(-ENOMEM);
@@ -969,13 +969,6 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
 
        dest_ring->base_addr_ce_space_unaligned = base_addr;
 
-       /*
-        * Correctly initialize memory to 0 to prevent garbage
-        * data crashing system when download firmware
-        */
-       memset(dest_ring->base_addr_owner_space_unaligned, 0,
-              nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);
-
        dest_ring->base_addr_owner_space = PTR_ALIGN(
                        dest_ring->base_addr_owner_space_unaligned,
                        CE_DESC_RING_ALIGN);
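
dma_zalloc_coherent() is dma_alloc_coherent() plus zeroing, so the explicit
memset() that the removed comment justified becomes redundant. Sketch:

	static void *foo_alloc_ring(struct device *dev, size_t ring_bytes,
				    dma_addr_t *paddr)
	{
		/* returned buffer is already zero-filled; no memset() needed */
		return dma_zalloc_coherent(dev, ring_bytes, paddr, GFP_KERNEL);
	}
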
@@ -1130,3 +1123,42 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
        ce_state->src_ring = NULL;
        ce_state->dest_ring = NULL;
 }
+
+void ath10k_ce_dump_registers(struct ath10k *ar,
+                             struct ath10k_fw_crash_data *crash_data)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+       struct ath10k_ce_crash_data ce;
+       u32 addr, id;
+
+       lockdep_assert_held(&ar->data_lock);
+
+       ath10k_err(ar, "Copy Engine register dump:\n");
+
+       spin_lock_bh(&ar_pci->ce_lock);
+       for (id = 0; id < CE_COUNT; id++) {
+               addr = ath10k_ce_base_address(ar, id);
+               ce.base_addr = cpu_to_le32(addr);
+
+               ce.src_wr_idx =
+                       cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
+               ce.src_r_idx =
+                       cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
+               ce.dst_wr_idx =
+                       cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
+               ce.dst_r_idx =
+                       cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));
+
+               if (crash_data)
+                       crash_data->ce_crash_data[id] = ce;
+
+               ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
+                          le32_to_cpu(ce.base_addr),
+                          le32_to_cpu(ce.src_wr_idx),
+                          le32_to_cpu(ce.src_r_idx),
+                          le32_to_cpu(ce.dst_wr_idx),
+                          le32_to_cpu(ce.dst_r_idx));
+       }
+
+       spin_unlock_bh(&ar_pci->ce_lock);
+}
index dfc098606bee16be8e6e724b2aeae9a70ffd7a90..e76a98242b98fbb6bd072f766a81b7b046725201 100644 (file)
@@ -20,8 +20,6 @@
 
 #include "hif.h"
 
-/* Maximum number of Copy Engine's supported */
-#define CE_COUNT_MAX 12
 #define CE_HTT_H2T_MSG_SRC_NENTRIES 8192
 
 /* Descriptor rings must be aligned to this boundary */
@@ -228,6 +226,8 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar);
 void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
 int ath10k_ce_disable_interrupts(struct ath10k *ar);
 void ath10k_ce_enable_interrupts(struct ath10k *ar);
+void ath10k_ce_dump_registers(struct ath10k *ar,
+                             struct ath10k_fw_crash_data *crash_data);
 
 /* ce_attr.flags values */
 /* Use NonSnooping PCIe accesses? */
index 749e381edd380e945bcd7d26c4cab35eb51b3272..59729aa8cd821a5e3b6e148f4f5b02c4d493824e 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/module.h>
 #include <linux/firmware.h>
 #include <linux/of.h>
+#include <linux/dmi.h>
+#include <linux/ctype.h>
 #include <asm/byteorder.h>
 
 #include "core.h"
@@ -349,7 +351,7 @@ void ath10k_core_get_fw_features_str(struct ath10k *ar,
                                     char *buf,
                                     size_t buf_len)
 {
-       unsigned int len = 0;
+       size_t len = 0;
        int i;
 
        for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
@@ -454,7 +456,10 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
                dir = ".";
 
        snprintf(filename, sizeof(filename), "%s/%s", dir, file);
-       ret = request_firmware(&fw, filename, ar->dev);
+       ret = request_firmware_direct(&fw, filename, ar->dev);
+       ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n",
+                  filename, ret);
+
        if (ret)
                return ERR_PTR(ret);
 
@@ -694,8 +699,12 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
                   "boot get otp board id result 0x%08x board_id %d chip_id %d\n",
                   result, board_id, chip_id);
 
-       if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0)
+       if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 ||
+           (board_id == 0)) {
+               ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                          "board id does not exist in otp, ignore it\n");
                return -EOPNOTSUPP;
+       }
 
        ar->id.bmi_ids_valid = true;
        ar->id.bmi_board_id = board_id;
@@ -704,6 +713,72 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
        return 0;
 }
 
+static void ath10k_core_check_bdfext(const struct dmi_header *hdr, void *data)
+{
+       struct ath10k *ar = data;
+       const char *bdf_ext;
+       const char *magic = ATH10K_SMBIOS_BDF_EXT_MAGIC;
+       u8 bdf_enabled;
+       int i;
+
+       if (hdr->type != ATH10K_SMBIOS_BDF_EXT_TYPE)
+               return;
+
+       if (hdr->length != ATH10K_SMBIOS_BDF_EXT_LENGTH) {
+               ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                          "wrong smbios bdf ext type length (%d).\n",
+                          hdr->length);
+               return;
+       }
+
+       bdf_enabled = *((u8 *)hdr + ATH10K_SMBIOS_BDF_EXT_OFFSET);
+       if (!bdf_enabled) {
+               ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not found.\n");
+               return;
+       }
+
+       /* Only one string exists (per spec) */
+       bdf_ext = (char *)hdr + hdr->length;
+
+       if (memcmp(bdf_ext, magic, strlen(magic)) != 0) {
+               ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                          "bdf variant magic does not match.\n");
+               return;
+       }
+
+       for (i = 0; i < strlen(bdf_ext); i++) {
+               if (!isascii(bdf_ext[i]) || !isprint(bdf_ext[i])) {
+                       ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                                  "bdf variant name contains non ascii chars.\n");
+                       return;
+               }
+       }
+
+       /* Copy extension name without magic suffix */
+       if (strscpy(ar->id.bdf_ext, bdf_ext + strlen(magic),
+                   sizeof(ar->id.bdf_ext)) < 0) {
+               ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                          "bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
+                           bdf_ext);
+               return;
+       }
+
+       ath10k_dbg(ar, ATH10K_DBG_BOOT,
+                  "found and validated bdf variant smbios_type 0x%x bdf %s\n",
+                  ATH10K_SMBIOS_BDF_EXT_TYPE, bdf_ext);
+}
+
+static int ath10k_core_check_smbios(struct ath10k *ar)
+{
+       ar->id.bdf_ext[0] = '\0';
+       dmi_walk(ath10k_core_check_bdfext, ar);
+
+       if (ar->id.bdf_ext[0] == '\0')
+               return -ENODATA;
+
+       return 0;
+}
+
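
dmi_walk() invokes the callback once for every SMBIOS structure; the code
above filters on the vendor-specific type and then reads the string-set that
follows the formatted area. The skeleton of that pattern, with a hypothetical
type value:

	#include <linux/dmi.h>

	static void foo_dmi_decode(const struct dmi_header *hdr, void *priv)
	{
		const char *strings;

		if (hdr->type != 0xF8)	/* hypothetical vendor type */
			return;

		/* first string sits right after the formatted area */
		strings = (const char *)hdr + hdr->length;
		/* ... validate and copy ... */
	}

	static int foo_scan_smbios(void *priv)
	{
		return dmi_walk(foo_dmi_decode, priv); /* < 0 if DMI unavailable */
	}
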
 static int ath10k_download_and_run_otp(struct ath10k *ar)
 {
        u32 result, address = ar->hw_params.patch_load_addr;
@@ -1050,6 +1125,9 @@ err:
 static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
                                         size_t name_len)
 {
+       /* strlen(',variant=') + strlen(ar->id.bdf_ext) */
+       char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 };
+
        if (ar->id.bmi_ids_valid) {
                scnprintf(name, name_len,
                          "bus=%s,bmi-chip-id=%d,bmi-board-id=%d",
@@ -1059,12 +1137,15 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
                goto out;
        }
 
+       if (ar->id.bdf_ext[0] != '\0')
+               scnprintf(variant, sizeof(variant), ",variant=%s",
+                         ar->id.bdf_ext);
+
        scnprintf(name, name_len,
-                 "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x",
+                 "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
                  ath10k_bus_str(ar->hif.bus),
                  ar->id.vendor, ar->id.device,
-                 ar->id.subsystem_vendor, ar->id.subsystem_device);
-
+                 ar->id.subsystem_vendor, ar->id.subsystem_device, variant);
 out:
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name);
 
@@ -1091,7 +1172,8 @@ static int ath10k_core_fetch_board_file(struct ath10k *ar)
        ar->bd_api = 1;
        ret = ath10k_core_fetch_board_data_api_1(ar);
        if (ret) {
-               ath10k_err(ar, "failed to fetch board data\n");
+               ath10k_err(ar, "failed to fetch board-2.bin or board.bin from %s\n",
+                          ar->hw_params.fw.dir);
                return ret;
        }
 
@@ -1112,12 +1194,8 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
        /* first fetch the firmware file (firmware-*.bin) */
        fw_file->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
                                                 name);
-       if (IS_ERR(fw_file->firmware)) {
-               ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n",
-                          ar->hw_params.fw.dir, name,
-                          PTR_ERR(fw_file->firmware));
+       if (IS_ERR(fw_file->firmware))
                return PTR_ERR(fw_file->firmware);
-       }
 
        data = fw_file->firmware->data;
        len = fw_file->firmware->size;
@@ -1281,44 +1359,39 @@ err:
        return ret;
 }
 
+static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name,
+                                   size_t fw_name_len, int fw_api)
+{
+       scnprintf(fw_name, fw_name_len, "%s-%d.bin", ATH10K_FW_FILE_BASE, fw_api);
+}
+
 static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
 {
-       int ret;
+       int ret, i;
+       char fw_name[100];
 
        /* calibration file is optional, don't check for any errors */
        ath10k_fetch_cal_file(ar);
 
-       ar->fw_api = 5;
-       ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+       for (i = ATH10K_FW_API_MAX; i >= ATH10K_FW_API_MIN; i--) {
+               ar->fw_api = i;
+               ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n",
+                          ar->fw_api);
 
-       ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE,
-                                              &ar->normal_mode_fw.fw_file);
-       if (ret == 0)
-               goto success;
-
-       ar->fw_api = 4;
-       ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
-
-       ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE,
-                                              &ar->normal_mode_fw.fw_file);
-       if (ret == 0)
-               goto success;
-
-       ar->fw_api = 3;
-       ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+               ath10k_core_get_fw_name(ar, fw_name, sizeof(fw_name), ar->fw_api);
+               ret = ath10k_core_fetch_firmware_api_n(ar, fw_name,
+                                                      &ar->normal_mode_fw.fw_file);
+               if (!ret)
+                       goto success;
+       }
 
-       ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE,
-                                              &ar->normal_mode_fw.fw_file);
-       if (ret == 0)
-               goto success;
+       /* we end up here if we couldn't fetch any firmware */
 
-       ar->fw_api = 2;
-       ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+       ath10k_err(ar, "Failed to find firmware-N.bin (N between %d and %d) from %s: %d",
+                  ATH10K_FW_API_MIN, ATH10K_FW_API_MAX, ar->hw_params.fw.dir,
+                  ret);
 
-       ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE,
-                                              &ar->normal_mode_fw.fw_file);
-       if (ret)
-               return ret;
+       return ret;
 
 success:
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
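
The unrolled per-API fetch attempts collapse into a single descending loop:
try firmware-N.bin from the newest supported API down to the oldest and keep
the first one that loads. A self-contained sketch (FOO_API_MAX/MIN are
hypothetical bounds):

	#include <linux/firmware.h>

	static int foo_fetch_firmware(struct device *dev,
				      const struct firmware **fw)
	{
		char name[32];
		int i, ret = -ENOENT;

		for (i = FOO_API_MAX; i >= FOO_API_MIN; i--) {
			snprintf(name, sizeof(name), "firmware-%d.bin", i);
			ret = request_firmware_direct(fw, name, dev);
			if (!ret)
				break;	/* highest available API wins */
		}
		return ret;
	}
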
@@ -1510,6 +1583,7 @@ static int ath10k_init_hw_params(struct ath10k *ar)
 static void ath10k_core_restart(struct work_struct *work)
 {
        struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+       int ret;
 
        set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
 
@@ -1561,6 +1635,11 @@ static void ath10k_core_restart(struct work_struct *work)
        }
 
        mutex_unlock(&ar->conf_mutex);
+
+       ret = ath10k_debug_fw_devcoredump(ar);
+       if (ret)
+               ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d",
+                           ret);
 }
 
 static void ath10k_core_set_coverage_class_work(struct work_struct *work)
@@ -1913,7 +1992,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
                   ar->hw->wiphy->fw_version);
 
-       if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) {
+       if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map) &&
+           mode == ATH10K_FIRMWARE_MODE_NORMAL) {
                val = 0;
                if (ath10k_peer_stats_enabled(ar))
                        val = WMI_10_4_PEER_STATS;
@@ -1966,10 +2046,13 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
         * possible to implicitly make it correct by creating a dummy vdev and
         * then deleting it.
         */
-       status = ath10k_core_reset_rx_filter(ar);
-       if (status) {
-               ath10k_err(ar, "failed to reset rx filter: %d\n", status);
-               goto err_hif_stop;
+       if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+               status = ath10k_core_reset_rx_filter(ar);
+               if (status) {
+                       ath10k_err(ar,
+                                  "failed to reset rx filter: %d\n", status);
+                       goto err_hif_stop;
+               }
        }
 
        /* If firmware indicates Full Rx Reorder support it must be used in a
@@ -2119,6 +2202,10 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
                goto err_free_firmware_files;
        }
 
+       ret = ath10k_core_check_smbios(ar);
+       if (ret)
+               ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not set.\n");
+
        ret = ath10k_core_fetch_board_file(ar);
        if (ret) {
                ath10k_err(ar, "failed to fetch board file: %d\n", ret);
index 09ff8b8a644116da3532b756e29b376f64545267..88d14be7fcceb44539f80c7217e4d6b0ea27dd79 100644 (file)
@@ -46,7 +46,7 @@
 #define WMI_READY_TIMEOUT (5 * HZ)
 #define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ)
 #define ATH10K_CONNECTION_LOSS_HZ (3 * HZ)
-#define ATH10K_NUM_CHANS 39
+#define ATH10K_NUM_CHANS 40
 
 /* Antenna noise floor */
 #define ATH10K_DEFAULT_NOISE_FLOOR -95
 #define ATH10K_NAPI_BUDGET      64
 #define ATH10K_NAPI_QUOTA_LIMIT 60
 
+/* SMBIOS type containing Board Data File Name Extension */
+#define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8
+
+/* SMBIOS type structure length (excluding strings-set) */
+#define ATH10K_SMBIOS_BDF_EXT_LENGTH 0x9
+
+/* Offset pointing to Board Data File Name Extension */
+#define ATH10K_SMBIOS_BDF_EXT_OFFSET 0x8
+
+/* Board Data File Name Extension string length.
+ * String format: BDF_<Customer ID>_<Extension>\0
+ */
+#define ATH10K_SMBIOS_BDF_EXT_STR_LENGTH 0x20
+
+/* The magic used by QCA spec */
+#define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_"
+
 struct ath10k;
 
 enum ath10k_bus {
@@ -314,6 +331,7 @@ struct ath10k_peer {
        struct ieee80211_vif *vif;
        struct ieee80211_sta *sta;
 
+       bool removed;
        int vdev_id;
        u8 addr[ETH_ALEN];
        DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
@@ -419,6 +437,21 @@ struct ath10k_vif_iter {
        struct ath10k_vif *arvif;
 };
 
+/* Copy Engine register dump, protected by ce-lock */
+struct ath10k_ce_crash_data {
+       __le32 base_addr;
+       __le32 src_wr_idx;
+       __le32 src_r_idx;
+       __le32 dst_wr_idx;
+       __le32 dst_r_idx;
+};
+
+struct ath10k_ce_crash_hdr {
+       __le32 ce_count;
+       __le32 reserved[3]; /* for future use */
+       struct ath10k_ce_crash_data entries[];
+};
+
 /* used for crash-dump storage, protected by data-lock */
 struct ath10k_fw_crash_data {
        bool crashed_since_read;
@@ -426,6 +459,7 @@ struct ath10k_fw_crash_data {
        uuid_le uuid;
        struct timespec timestamp;
        __le32 registers[REG_DUMP_COUNT_QCA988X];
+       struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX];
 };
 
 struct ath10k_debug {
@@ -781,6 +815,8 @@ struct ath10k {
                bool bmi_ids_valid;
                u8 bmi_board_id;
                u8 bmi_chip_id;
+
+               char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH];
        } id;
 
        int fw_api;
index 82a4c67f3672ba8e7f05951ee3dd6916ac1ca356..fb0ade3adb07de51d0f952ae63235efbeb4b4d44 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/utsname.h>
 #include <linux/crc32.h>
 #include <linux/firmware.h>
+#include <linux/devcoredump.h>
 
 #include "core.h"
 #include "debug.h"
@@ -40,6 +41,7 @@
  */
 enum ath10k_fw_crash_dump_type {
        ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
+       ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
 
        ATH10K_FW_CRASH_DUMP_MAX,
 };
@@ -235,7 +237,7 @@ static ssize_t ath10k_read_wmi_services(struct file *file,
 {
        struct ath10k *ar = file->private_data;
        char *buf;
-       unsigned int len = 0, buf_len = 4096;
+       size_t len = 0, buf_len = 4096;
        const char *name;
        ssize_t ret_cnt;
        bool enabled;
@@ -399,6 +401,7 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
                         * prevent firmware from DoS-ing the host.
                         */
                        ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+                       ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd);
                        ath10k_warn(ar, "dropping fw peer stats\n");
                        goto free;
                }
@@ -409,10 +412,12 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
                        goto free;
                }
 
+               if (!list_empty(&stats.peers))
+                       list_splice_tail_init(&stats.peers_extd,
+                                             &ar->debug.fw_stats.peers_extd);
+
                list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
                list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs);
-               list_splice_tail_init(&stats.peers_extd,
-                                     &ar->debug.fw_stats.peers_extd);
        }
 
        complete(&ar->debug.fw_stats_complete);
@@ -524,7 +529,7 @@ static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf,
                                    size_t count, loff_t *ppos)
 {
        const char *buf = file->private_data;
-       unsigned int len = strlen(buf);
+       size_t len = strlen(buf);
 
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
@@ -542,17 +547,16 @@ static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file,
                                                size_t count, loff_t *ppos)
 {
        struct ath10k *ar = file->private_data;
-       int ret, len, buf_len;
+       int ret;
+       size_t len = 0, buf_len = 500;
        char *buf;
 
-       buf_len = 500;
        buf = kmalloc(buf_len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
 
        spin_lock_bh(&ar->data_lock);
 
-       len = 0;
        len += scnprintf(buf + len, buf_len - len,
                         "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter);
        len += scnprintf(buf + len, buf_len - len,
@@ -691,7 +695,7 @@ static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
                                   size_t count, loff_t *ppos)
 {
        struct ath10k *ar = file->private_data;
-       unsigned int len;
+       size_t len;
        char buf[50];
 
        len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
@@ -721,17 +725,21 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
 }
 EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data);
 
-static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar)
+static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar,
+                                                           bool mark_read)
 {
        struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
+       struct ath10k_ce_crash_hdr *ce_hdr;
        struct ath10k_dump_file_data *dump_data;
        struct ath10k_tlv_dump_data *dump_tlv;
-       int hdr_len = sizeof(*dump_data);
-       unsigned int len, sofar = 0;
+       size_t hdr_len = sizeof(*dump_data);
+       size_t len, sofar = 0;
        unsigned char *buf;
 
        len = hdr_len;
        len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+       len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+               CE_COUNT * sizeof(ce_hdr->entries[0]);
 
        sofar += hdr_len;
 
@@ -790,19 +798,66 @@ static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar)
               sizeof(crash_data->registers));
        sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
 
-       ar->debug.fw_crash_data->crashed_since_read = false;
+       dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+       dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
+       dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
+                                       CE_COUNT * sizeof(ce_hdr->entries[0]));
+       ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
+       ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
+       memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
+       memcpy(ce_hdr->entries, crash_data->ce_crash_data,
+              CE_COUNT * sizeof(ce_hdr->entries[0]));
+       sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+                CE_COUNT * sizeof(ce_hdr->entries[0]);
+
+       ar->debug.fw_crash_data->crashed_since_read = !mark_read;
 
        spin_unlock_bh(&ar->data_lock);
 
        return dump_data;
 }
 
+int ath10k_debug_fw_devcoredump(struct ath10k *ar)
+{
+       struct ath10k_dump_file_data *dump;
+       void *dump_ptr;
+       u32 dump_len;
+
+       /* To keep the dump file available to debugfs as well, don't mark
+        * the file as read here; only debugfs should do that.
+        */
+       dump = ath10k_build_dump_file(ar, false);
+       if (!dump) {
+               ath10k_warn(ar, "no crash dump data found for devcoredump");
+               return -ENODATA;
+       }
+
+       /* Make a copy of the dump file for dev_coredumpv(), as during the
+        * transition period we still need to own the original file. Once
+        * the fw_crash_dump debugfs file is removed, the copy will no
+        * longer be needed.
+        */
+       dump_len = le32_to_cpu(dump->len);
+       dump_ptr = vzalloc(dump_len);
+
+       if (!dump_ptr)
+               return -ENOMEM;
+
+       memcpy(dump_ptr, dump, dump_len);
+
+       dev_coredumpv(ar->dev, dump_ptr, dump_len, GFP_KERNEL);
+
+       return 0;
+}
+
 static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file)
 {
        struct ath10k *ar = inode->i_private;
        struct ath10k_dump_file_data *dump;
 
-       dump = ath10k_build_dump_file(ar);
+       ath10k_warn(ar, "fw_crash_dump debugfs file is deprecated, please use /sys/class/devcoredump instead.");
+
+       dump = ath10k_build_dump_file(ar, true);
        if (!dump)
                return -ENODATA;
 
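The copy made in ath10k_debug_fw_devcoredump() above is not incidental: dev_coredumpv() takes ownership of the buffer it is handed and releases it with vfree() once userspace reads the dump or the timeout expires, so the driver must not pass memory it still needs. A rough sketch of that handoff, with a hypothetical helper name:

    #include <linux/devcoredump.h>
    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/vmalloc.h>

    /* Hypothetical: hand a private blob to devcoredump without giving
     * up the original.
     */
    static int submit_coredump(struct device *dev, const void *blob,
                               size_t len)
    {
            void *copy = vzalloc(len);

            if (!copy)
                    return -ENOMEM;

            memcpy(copy, blob, len);

            /* devcoredump now owns "copy" and will vfree() it itself */
            dev_coredumpv(dev, copy, len, GFP_KERNEL);

            return 0;
    }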
@@ -844,7 +899,7 @@ static ssize_t ath10k_reg_addr_read(struct file *file,
 {
        struct ath10k *ar = file->private_data;
        u8 buf[32];
-       unsigned int len = 0;
+       size_t len = 0;
        u32 reg_addr;
 
        mutex_lock(&ar->conf_mutex);
@@ -892,7 +947,7 @@ static ssize_t ath10k_reg_value_read(struct file *file,
 {
        struct ath10k *ar = file->private_data;
        u8 buf[48];
-       unsigned int len;
+       size_t len;
        u32 reg_addr, reg_val;
        int ret;
 
@@ -1115,7 +1170,7 @@ static ssize_t ath10k_read_htt_stats_mask(struct file *file,
 {
        struct ath10k *ar = file->private_data;
        char buf[32];
-       unsigned int len;
+       size_t len;
 
        len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
 
@@ -1169,7 +1224,7 @@ static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file,
        struct ath10k *ar = file->private_data;
        char buf[64];
        u8 amsdu, ampdu;
-       unsigned int len;
+       size_t len;
 
        mutex_lock(&ar->conf_mutex);
 
@@ -1229,7 +1284,7 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file,
                                     size_t count, loff_t *ppos)
 {
        struct ath10k *ar = file->private_data;
-       unsigned int len;
+       size_t len;
        char buf[96];
 
        len = scnprintf(buf, sizeof(buf), "0x%16llx %u\n",
@@ -1555,11 +1610,10 @@ static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf,
                                      size_t count, loff_t *ppos)
 {
        struct ath10k *ar = file->private_data;
-       int len = 0;
+       size_t len;
        char buf[32];
 
-       len = scnprintf(buf, sizeof(buf) - len, "%d\n",
-                       ar->ani_enabled);
+       len = scnprintf(buf, sizeof(buf), "%d\n", ar->ani_enabled);
 
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
@@ -1584,11 +1638,10 @@ static ssize_t ath10k_read_nf_cal_period(struct file *file,
                                         size_t count, loff_t *ppos)
 {
        struct ath10k *ar = file->private_data;
-       unsigned int len;
+       size_t len;
        char buf[32];
 
-       len = scnprintf(buf, sizeof(buf), "%d\n",
-                       ar->debug.nf_cal_period);
+       len = scnprintf(buf, sizeof(buf), "%d\n", ar->debug.nf_cal_period);
 
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
@@ -1684,9 +1737,10 @@ void ath10k_debug_tpc_stats_process(struct ath10k *ar,
 }
 
 static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats,
-                                  unsigned int j, char *buf, unsigned int *len)
+                                  unsigned int j, char *buf, size_t *len)
 {
-       unsigned int i, buf_len;
+       int i;
+       size_t buf_len;
        static const char table_str[][5] = { "CDD",
                                             "STBC",
                                             "TXBF" };
@@ -1726,7 +1780,8 @@ static void ath10k_tpc_stats_fill(struct ath10k *ar,
                                  struct ath10k_tpc_stats *tpc_stats,
                                  char *buf)
 {
-       unsigned int len, j, buf_len;
+       int j;
+       size_t len, buf_len;
 
        len = 0;
        buf_len = ATH10K_TPC_CONFIG_BUF_SIZE;
@@ -1860,7 +1915,7 @@ static ssize_t ath10k_tpc_stats_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
 {
        const char *buf = file->private_data;
-       unsigned int len = strlen(buf);
+       size_t len = strlen(buf);
 
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
@@ -2284,7 +2339,7 @@ static ssize_t ath10k_debug_fw_checksums_read(struct file *file,
                                              size_t count, loff_t *ppos)
 {
        struct ath10k *ar = file->private_data;
-       unsigned int len = 0, buf_len = 4096;
+       size_t len = 0, buf_len = 4096;
        ssize_t ret_cnt;
        char *buf;
 
@@ -2500,7 +2555,7 @@ void ath10k_dbg_dump(struct ath10k *ar,
                     const void *buf, size_t len)
 {
        char linebuf[256];
-       unsigned int linebuflen;
+       size_t linebuflen;
        const void *ptr;
 
        if (ath10k_debug_mask & mask) {
index 335512b11ca2907ddc22dc7061a7730dd35c32af..2368f47314ae81994d337f948a685c55477c2b85 100644 (file)
@@ -84,6 +84,9 @@ struct ath10k_fw_crash_data *
 ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
 
 void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
+
+int ath10k_debug_fw_devcoredump(struct ath10k *ar);
+
 #define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
 
 void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
@@ -166,6 +169,11 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
        return 0;
 }
 
+static inline int ath10k_debug_fw_devcoredump(struct ath10k *ar)
+{
+       return 0;
+}
+
 #define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
 
 #define ath10k_debug_get_et_strings NULL
index fce6f8137d3330f56b0f5a79128cb552b94c55fa..7353e7ea88f13f3e804213f1aa2ab8f34e41557a 100644 (file)
@@ -306,6 +306,69 @@ static const struct file_operations fops_delba = {
        .llseek = default_llseek,
 };
 
+static ssize_t ath10k_dbg_sta_read_peer_debug_trigger(struct file *file,
+                                                     char __user *user_buf,
+                                                     size_t count,
+                                                     loff_t *ppos)
+{
+       struct ieee80211_sta *sta = file->private_data;
+       struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+       struct ath10k *ar = arsta->arvif->ar;
+       char buf[64];
+       int len;
+
+       mutex_lock(&ar->conf_mutex);
+       len = scnprintf(buf, sizeof(buf),
+                       "Write 1 to trigger the debug logs once\n");
+       mutex_unlock(&ar->conf_mutex);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+ath10k_dbg_sta_write_peer_debug_trigger(struct file *file,
+                                       const char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct ieee80211_sta *sta = file->private_data;
+       struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+       struct ath10k *ar = arsta->arvif->ar;
+       u8 peer_debug_trigger;
+       int ret;
+
+       if (kstrtou8_from_user(user_buf, count, 0, &peer_debug_trigger))
+               return -EINVAL;
+
+       if (peer_debug_trigger != 1)
+               return -EINVAL;
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (ar->state != ATH10K_STATE_ON) {
+               ret = -ENETDOWN;
+               goto out;
+       }
+
+       ret = ath10k_wmi_peer_set_param(ar, arsta->arvif->vdev_id, sta->addr,
+                                       WMI_PEER_DEBUG, peer_debug_trigger);
+       if (ret) {
+               ath10k_warn(ar, "failed to set param to trigger peer tid logs for station ret: %d\n",
+                           ret);
+               goto out;
+       }
+out:
+       mutex_unlock(&ar->conf_mutex);
+       return count;
+}
+
+static const struct file_operations fops_peer_debug_trigger = {
+       .open = simple_open,
+       .read = ath10k_dbg_sta_read_peer_debug_trigger,
+       .write = ath10k_dbg_sta_write_peer_debug_trigger,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
 void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                            struct ieee80211_sta *sta, struct dentry *dir)
 {
@@ -314,4 +377,6 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
        debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
        debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
+       debugfs_create_file("peer_debug_trigger", 0600, dir, sta,
+                           &fops_peer_debug_trigger);
 }
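The new write handler follows a standard debugfs idiom: parse the user buffer with kstrtou8_from_user(), reject anything but the single accepted value, and only then take conf_mutex and check the device state before issuing the WMI command. The parsing half in isolation, with hypothetical names:

    #include <linux/fs.h>
    #include <linux/kernel.h>

    /* Hypothetical write handler accepting only the value 1, like the
     * trigger above.
     */
    static ssize_t example_write(struct file *file,
                                 const char __user *user_buf,
                                 size_t count, loff_t *ppos)
    {
            u8 val;

            if (kstrtou8_from_user(user_buf, count, 0, &val))
                    return -EINVAL;

            if (val != 1)
                    return -EINVAL;

            /* ... take locks, verify device state, send the command ... */

            return count;
    }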
index 175aae38c3757a9d0118528e1c277ad1a4c98d7f..9f6a915f91bfe842a1672b81a8b6046c7f9d4f1b 100644 (file)
@@ -474,33 +474,16 @@ static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
        }
 }
 
-static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc)
-{
-       struct ath10k_htc_svc_tx_credits *entry;
-
-       entry = &htc->service_tx_alloc[0];
-
-       /*
-        * for PCIE allocate all credists/HTC buffers to WMI.
-        * no buffers are used/required for data. data always
-        * remains on host.
-        */
-       entry++;
-       entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
-       entry->credit_allocation = htc->total_transmit_credits;
-}
-
 static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
                                           u16 service_id)
 {
        u8 allocation = 0;
-       int i;
 
-       for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
-               if (htc->service_tx_alloc[i].service_id == service_id)
-                       allocation =
-                           htc->service_tx_alloc[i].credit_allocation;
-       }
+       /* The WMI control service is the only service with flow control.
+        * Let it have all transmit credits.
+        */
+       if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL)
+               allocation = htc->total_transmit_credits;
 
        return allocation;
 }
@@ -574,8 +557,6 @@ int ath10k_htc_wait_target(struct ath10k_htc *htc)
                return -ECOMM;
        }
 
-       ath10k_htc_setup_target_buffer_assignments(htc);
-
        /* setup our pseudo HTC control endpoint connection */
        memset(&conn_req, 0, sizeof(conn_req));
        memset(&conn_resp, 0, sizeof(conn_resp));
@@ -726,12 +707,6 @@ setup:
        ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
        ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
        ep->tx_credits = tx_alloc;
-       ep->tx_credit_size = htc->target_credit_size;
-       ep->tx_credits_per_max_message = ep->max_ep_message_len /
-                                        htc->target_credit_size;
-
-       if (ep->max_ep_message_len % htc->target_credit_size)
-               ep->tx_credits_per_max_message++;
 
        /* copy all the callbacks */
        ep->ep_ops = conn_req->ep_ops;
index 0c55cd92a951722d72f9cfc79c96da4e62cdc676..6ababa345e2b94c169817f72cde649e235f1efc5 100644 (file)
@@ -314,8 +314,6 @@ struct ath10k_htc_ep {
 
        u8 seq_no; /* for debugging */
        int tx_credits;
-       int tx_credit_size;
-       int tx_credits_per_max_message;
        bool tx_credit_flow_enabled;
 };
 
@@ -339,7 +337,6 @@ struct ath10k_htc {
        struct completion ctl_resp;
 
        int total_transmit_credits;
-       struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
        int target_credit_size;
 };
 
index 44b25cf00553eb8380e0aa885c522fb370d5f079..90c2f72666b831b5fb88bee6440b517a50e3bb45 100644 (file)
@@ -1636,7 +1636,7 @@ struct ath10k_htt {
                int size;
 
                /* size - 1 */
-               unsigned size_mask;
+               unsigned int size_mask;
 
                /* how many rx buffers to keep in the ring */
                int fill_level;
@@ -1657,7 +1657,7 @@ struct ath10k_htt {
 
                /* where HTT SW has processed bufs filled by rx MAC DMA */
                struct {
-                       unsigned msdu_payld;
+                       unsigned int msdu_payld;
                } sw_rd_idx;
 
                /*
@@ -1820,7 +1820,7 @@ int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
 
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
-int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
 int ath10k_htt_tx(struct ath10k_htt *htt,
                  enum ath10k_hw_txrx_mode txmode,
                  struct sk_buff *msdu);
index 86d082cf4eef06f607dc8e32f790721f63bf3b77..02a3fc81fbe3bc8204aec31c99890559fe15b708 100644 (file)
@@ -702,6 +702,10 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
                /* 80MHZ */
                case 2:
                        status->vht_flag |= RX_VHT_FLAG_80MHZ;
+                       break;
+               case 3:
+                       status->vht_flag |= RX_VHT_FLAG_160MHZ;
+                       break;
                }
 
                status->flag |= RX_FLAG_VHT;
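Note the break added to the 80 MHz arm: before this hunk it was the last case and could fall out of the switch harmlessly, but once the 160 MHz arm exists, a missing break would let 80 MHz frames pick up the 160 MHz flag as well. The corrected shape, with placeholder flag values standing in for RX_VHT_FLAG_80MHZ/RX_VHT_FLAG_160MHZ:

    enum { FLAG_80MHZ = 0x1, FLAG_160MHZ = 0x2 };   /* placeholders */

    static int bw_code_to_flag(int bw_code)
    {
            int flag = 0;

            switch (bw_code) {
            case 2:
                    flag |= FLAG_80MHZ;
                    break;  /* without this, case 3 would also run */
            case 3:
                    flag |= FLAG_160MHZ;
                    break;
            }

            return flag;
    }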
@@ -926,7 +930,7 @@ static void ath10k_process_rx(struct ath10k *ar,
        *status = *rx_status;
 
        ath10k_dbg(ar, ATH10K_DBG_DATA,
-                  "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
+                  "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
                   skb,
                   skb->len,
                   ieee80211_get_SA(hdr),
@@ -940,6 +944,7 @@ static void ath10k_process_rx(struct ath10k *ar,
                   status->flag & RX_FLAG_VHT ? "vht" : "",
                   status->flag & RX_FLAG_40MHZ ? "40" : "",
                   status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
+                  status->vht_flag & RX_VHT_FLAG_160MHZ ? "160" : "",
                   status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
                   status->rate_idx,
                   status->vht_nss,
@@ -2231,6 +2236,8 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
                return;
        }
 
+       memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+
        if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
            txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
                rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
@@ -2245,7 +2252,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
                rate *= 10;
                if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
                        rate = rate - 5;
-               arsta->txrate.legacy = rate * 10;
+               arsta->txrate.legacy = rate;
        } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
                arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
                arsta->txrate.mcs = txrate.mcs;
@@ -2451,8 +2458,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
                u32 freq = __le32_to_cpu(resp->chan_change.freq);
 
-               ar->tgt_oper_chan =
-                       __ieee80211_get_channel(ar->hw->wiphy, freq);
+               ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
                ath10k_dbg(ar, ATH10K_DBG_HTT,
                           "htt chan change freq %u phymode %s\n",
                           freq, ath10k_wmi_phymode_str(phymode));
@@ -2486,7 +2492,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
                                skb->data, skb->len);
                break;
-       };
+       }
        return true;
 }
 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
index 27e49db4287a526430588449c89ae5ee4c1048f5..86b427f5e2bcbc5cd8eb83ae5ddc12773bdff59b 100644 (file)
@@ -239,6 +239,7 @@ static void ath10k_htt_tx_free_cont_txbuf(struct ath10k_htt *htt)
 
        size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
        dma_free_coherent(ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr);
+       htt->txbuf.vaddr = NULL;
 }
 
 static int ath10k_htt_tx_alloc_cont_txbuf(struct ath10k_htt *htt)
@@ -268,6 +269,7 @@ static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
                          size,
                          htt->frag_desc.vaddr,
                          htt->frag_desc.paddr);
+       htt->frag_desc.vaddr = NULL;
 }
 
 static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
index 883547f3347c2564e480c531e2534abd603d5ba0..f0fda0f2b3b487147dc6eb147eb55b8b4287b15a 100644 (file)
@@ -128,6 +128,10 @@ enum qca9377_chip_id_rev {
 #define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin"
 #define QCA4019_HW_1_0_PATCH_LOAD_ADDR  0x1234
 
+#define ATH10K_FW_FILE_BASE            "firmware"
+#define ATH10K_FW_API_MAX              5
+#define ATH10K_FW_API_MIN              2
+
 #define ATH10K_FW_API2_FILE            "firmware-2.bin"
 #define ATH10K_FW_API3_FILE            "firmware-3.bin"
 
@@ -512,7 +516,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
 /* Target specific defines for WMI-TLV firmware */
 #define TARGET_TLV_NUM_VDEVS                   4
 #define TARGET_TLV_NUM_STATIONS                        32
-#define TARGET_TLV_NUM_PEERS                   35
+#define TARGET_TLV_NUM_PEERS                   33
 #define TARGET_TLV_NUM_TDLS_VDEVS              1
 #define TARGET_TLV_NUM_TIDS                    ((TARGET_TLV_NUM_PEERS) * 2)
 #define TARGET_TLV_NUM_MSDU_DESC               (1024 + 32)
@@ -578,6 +582,9 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
 #define TARGET_10_4_IPHDR_PAD_CONFIG           1
 #define TARGET_10_4_QWRAP_CONFIG               0
 
+/* Maximum number of Copy Engines supported */
+#define CE_COUNT_MAX 12
+
 /* Number of Copy Engines supported */
 #define CE_COUNT ar->hw_values->ce_count
 
index aa545a1dbdc71931fdd588a05acd01fa8ee9d1d1..3029f257a19a5988e13368a3922e9e55fc961258 100644 (file)
@@ -569,10 +569,14 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
                case NL80211_CHAN_WIDTH_80:
                        phymode = MODE_11AC_VHT80;
                        break;
+               case NL80211_CHAN_WIDTH_160:
+                       phymode = MODE_11AC_VHT160;
+                       break;
+               case NL80211_CHAN_WIDTH_80P80:
+                       phymode = MODE_11AC_VHT80_80;
+                       break;
                case NL80211_CHAN_WIDTH_5:
                case NL80211_CHAN_WIDTH_10:
-               case NL80211_CHAN_WIDTH_80P80:
-               case NL80211_CHAN_WIDTH_160:
                        phymode = MODE_UNKNOWN;
                        break;
                }
@@ -971,6 +975,7 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
        arg.vdev_id = vdev_id;
        arg.channel.freq = channel->center_freq;
        arg.channel.band_center_freq1 = chandef->center_freq1;
+       arg.channel.band_center_freq2 = chandef->center_freq2;
 
        /* TODO: set this up dynamically; what if we
           don't have any vifs? */
@@ -1227,6 +1232,36 @@ static int ath10k_monitor_recalc(struct ath10k *ar)
                return ath10k_monitor_stop(ar);
 }
 
+static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       if (!arvif->is_started) {
+               ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
+               return false;
+       }
+
+       return true;
+}
+
+static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
+{
+       struct ath10k *ar = arvif->ar;
+       u32 vdev_param;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       vdev_param = ar->wmi.vdev_param->protection_mode;
+
+       ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
+                  arvif->vdev_id, arvif->use_cts_prot);
+
+       return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+                                        arvif->use_cts_prot ? 1 : 0);
+}
+
 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
 {
        struct ath10k *ar = arvif->ar;
@@ -1245,6 +1280,9 @@ static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
                rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
                              WMI_RTSCTS_PROFILE);
 
+       ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
+                  arvif->vdev_id, rts_cts);
+
        return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
                                         rts_cts);
 }
@@ -1384,6 +1422,7 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
 
        arg.channel.freq = chandef->chan->center_freq;
        arg.channel.band_center_freq1 = chandef->center_freq1;
+       arg.channel.band_center_freq2 = chandef->center_freq2;
        arg.channel.mode = chan_to_phymode(chandef);
 
        arg.channel.min_power = 0;
@@ -1954,7 +1993,7 @@ static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
 {
        struct sk_buff *skb = data;
        struct ieee80211_mgmt *mgmt = (void *)skb->data;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
 
        if (vif->type != NL80211_IFTYPE_STATION)
                return;
@@ -1977,7 +2016,7 @@ static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
                                               struct ieee80211_vif *vif)
 {
        u32 *vdev_id = data;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct ath10k *ar = arvif->ar;
        struct ieee80211_hw *hw = ar->hw;
 
@@ -2044,7 +2083,7 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
                                      struct ieee80211_sta *sta,
                                      struct wmi_peer_assoc_complete_arg *arg)
 {
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        u32 aid;
 
        lockdep_assert_held(&ar->conf_mutex);
@@ -2120,7 +2159,7 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
                                      struct ieee80211_sta *sta,
                                      struct wmi_peer_assoc_complete_arg *arg)
 {
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
        struct cfg80211_chan_def def;
        const struct ieee80211_supported_band *sband;
@@ -2183,7 +2222,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
                                   struct wmi_peer_assoc_complete_arg *arg)
 {
        const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct cfg80211_chan_def def;
        enum nl80211_band band;
        const u8 *ht_mcs_mask;
@@ -2407,7 +2446,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
                                    struct wmi_peer_assoc_complete_arg *arg)
 {
        const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct cfg80211_chan_def def;
        enum nl80211_band band;
        const u16 *vht_mcs_mask;
@@ -2447,6 +2486,9 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
        if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
                arg->peer_flags |= ar->wmi.peer_flags->bw80;
 
+       if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+               arg->peer_flags |= ar->wmi.peer_flags->bw160;
+
        arg->peer_vht_rates.rx_max_rate =
                __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
        arg->peer_vht_rates.rx_mcs_set =
@@ -2465,7 +2507,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
                                    struct ieee80211_sta *sta,
                                    struct wmi_peer_assoc_complete_arg *arg)
 {
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
 
        switch (arvif->vdev_type) {
        case WMI_VDEV_TYPE_AP:
@@ -2500,12 +2542,39 @@ static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
               ATH10K_MAC_FIRST_OFDM_RATE_IDX;
 }
 
+static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar,
+                                                   struct ieee80211_sta *sta)
+{
+       if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+               switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+               case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+                       return MODE_11AC_VHT160;
+               case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
+                       return MODE_11AC_VHT80_80;
+               default:
+                       /* not sure whether this is a valid case */
+                       return MODE_11AC_VHT160;
+               }
+       }
+
+       if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+               return MODE_11AC_VHT80;
+
+       if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+               return MODE_11AC_VHT40;
+
+       if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+               return MODE_11AC_VHT20;
+
+       return MODE_UNKNOWN;
+}
+
 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
                                        struct ieee80211_vif *vif,
                                        struct ieee80211_sta *sta,
                                        struct wmi_peer_assoc_complete_arg *arg)
 {
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct cfg80211_chan_def def;
        enum nl80211_band band;
        const u8 *ht_mcs_mask;
@@ -2546,12 +2615,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
                 */
                if (sta->vht_cap.vht_supported &&
                    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
-                       if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
-                               phymode = MODE_11AC_VHT80;
-                       else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
-                               phymode = MODE_11AC_VHT40;
-                       else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
-                               phymode = MODE_11AC_VHT20;
+                       phymode = ath10k_mac_get_phymode_vht(ar, sta);
                } else if (sta->ht_cap.ht_supported &&
                           !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
                        if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
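The factored-out ath10k_mac_get_phymode_vht() keys its 160 MHz decision off the peer's advertised supported-channel-width field rather than bandwidth alone. A minimal sketch of decoding that field, using the real mac80211 constants but a hypothetical caller:

    #include <linux/ieee80211.h>

    /* Hypothetical: does this VHT capability word advertise any form
     * of 160 MHz operation?
     */
    static bool supports_160mhz(u32 vht_cap_info)
    {
            switch (vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
            case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
            case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
                    return true;
            default:
                    return false;
            }
    }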
@@ -2625,7 +2689,7 @@ static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
                                      struct ieee80211_vif *vif,
                                      struct ieee80211_sta_vht_cap vht_cap)
 {
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        int ret;
        u32 param;
        u32 value;
@@ -2692,7 +2756,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
                             struct ieee80211_bss_conf *bss_conf)
 {
        struct ath10k *ar = hw->priv;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct ieee80211_sta_ht_cap ht_cap;
        struct ieee80211_sta_vht_cap vht_cap;
        struct wmi_peer_assoc_complete_arg peer_arg;
@@ -2785,7 +2849,7 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif)
 {
        struct ath10k *ar = hw->priv;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct ieee80211_sta_vht_cap vht_cap = {};
        int ret;
 
@@ -2818,7 +2882,7 @@ static int ath10k_station_assoc(struct ath10k *ar,
                                struct ieee80211_sta *sta,
                                bool reassoc)
 {
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct wmi_peer_assoc_complete_arg peer_arg;
        int ret = 0;
 
@@ -2885,7 +2949,7 @@ static int ath10k_station_disassoc(struct ath10k *ar,
                                   struct ieee80211_vif *vif,
                                   struct ieee80211_sta *sta)
 {
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        int ret = 0;
 
        lockdep_assert_held(&ar->conf_mutex);
@@ -3111,7 +3175,7 @@ static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
                                      struct ieee80211_vif *vif)
 {
        struct ath10k *ar = data;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
 
        if (arvif->tx_paused)
                return;
@@ -3198,7 +3262,7 @@ struct ath10k_mac_tx_pause {
 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
                                            struct ieee80211_vif *vif)
 {
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct ath10k_mac_tx_pause *arg = data;
 
        if (arvif->vdev_id != arg->vdev_id)
@@ -3294,7 +3358,7 @@ static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
                return false;
 
        if (vif)
-               return !ath10k_vif_to_arvif(vif)->nohwcrypt;
+               return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt;
 
        return true;
 }
@@ -3359,7 +3423,7 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
                                       struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
 
        /* This is case only for P2P_GO */
        if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
@@ -3495,7 +3559,6 @@ static int ath10k_mac_tx_submit(struct ath10k *ar,
  */
 static int ath10k_mac_tx(struct ath10k *ar,
                         struct ieee80211_vif *vif,
-                        struct ieee80211_sta *sta,
                         enum ath10k_hw_txrx_mode txmode,
                         enum ath10k_mac_tx_path txpath,
                         struct sk_buff *skb)
@@ -3637,7 +3700,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
                txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
                txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
 
-               ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+               ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
                if (ret) {
                        ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
                                    ret);
@@ -3742,6 +3805,9 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
        if (!peer)
                return NULL;
 
+       if (peer->removed)
+               return NULL;
+
        if (peer->sta)
                return peer->sta->txq[tid];
        else if (peer->vif)
@@ -3824,7 +3890,7 @@ int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
                spin_unlock_bh(&ar->htt.tx_lock);
        }
 
-       ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+       ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
        if (unlikely(ret)) {
                ath10k_warn(ar, "failed to push frame: %d\n", ret);
 
@@ -4105,7 +4171,7 @@ static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
                spin_unlock_bh(&ar->htt.tx_lock);
        }
 
-       ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+       ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb);
        if (ret) {
                ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
                if (is_htt) {
@@ -4279,6 +4345,13 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
                vht_cap.cap |= val;
        }
 
+       /* The firmware currently seems to be buggy with 80+80 MHz, so
+        * don't enable that mode until the issue is resolved.
+        */
+       if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) &&
+           !(ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
+               vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+
        mcs_map = 0;
        for (i = 0; i < 8; i++) {
                if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
@@ -4669,7 +4742,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar)
        lockdep_assert_held(&ar->conf_mutex);
 
        list_for_each_entry(arvif, &ar->arvifs, list) {
-               WARN_ON(arvif->txpower < 0);
+               if (arvif->txpower <= 0)
+                       continue;
 
                if (txpower == -1)
                        txpower = arvif->txpower;
@@ -4677,8 +4751,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar)
                        txpower = min(txpower, arvif->txpower);
        }
 
-       if (WARN_ON(txpower == -1))
-               return -EINVAL;
+       if (txpower == -1)
+               return 0;
 
        ret = ath10k_mac_txpower_setup(ar, txpower);
        if (ret) {
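The recalc loop computes the minimum positive per-vif txpower, with -1 as an "unset" sentinel; this hunk downgrades the WARN_ONs to quiet skips, since a vif without a configured txpower is now a legitimate state (for example during teardown, where the recalc call added to ath10k_remove_interface() below runs). The sentinel pattern in isolation, over a hypothetical array:

    #include <linux/kernel.h>

    /* Hypothetical: lowest positive limit, or -1 if none is set. */
    static int min_positive(const int *limits, int n)
    {
            int result = -1;
            int i;

            for (i = 0; i < n; i++) {
                    if (limits[i] <= 0)
                            continue;       /* unset entries are skipped */
                    if (result == -1)
                            result = limits[i];
                    else
                            result = min(result, limits[i]);
            }

            return result;
    }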
@@ -4775,7 +4849,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif)
 {
        struct ath10k *ar = hw->priv;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct ath10k_peer *peer;
        enum wmi_sta_powersave_param param;
        int ret = 0;
@@ -5111,7 +5185,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif)
 {
        struct ath10k *ar = hw->priv;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct ath10k_peer *peer;
        int ret;
        int i;
@@ -5194,6 +5268,10 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
                        ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
        }
 
+       ret = ath10k_mac_txpower_recalc(ar);
+       if (ret)
+               ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+
        spin_lock_bh(&ar->htt.tx_lock);
        ath10k_mac_vif_tx_unlock_all(arvif);
        spin_unlock_bh(&ar->htt.tx_lock);
@@ -5242,7 +5320,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
                                    u32 changed)
 {
        struct ath10k *ar = hw->priv;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        int ret = 0;
        u32 vdev_param, pdev_param, slottime, preamble;
 
@@ -5328,20 +5406,18 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
 
        if (changed & BSS_CHANGED_ERP_CTS_PROT) {
                arvif->use_cts_prot = info->use_cts_prot;
-               ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
-                          arvif->vdev_id, info->use_cts_prot);
 
                ret = ath10k_recalc_rtscts_prot(arvif);
                if (ret)
                        ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
                                    arvif->vdev_id, ret);
 
-               vdev_param = ar->wmi.vdev_param->protection_mode;
-               ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
-                                               info->use_cts_prot ? 1 : 0);
-               if (ret)
-                       ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
-                                   info->use_cts_prot, arvif->vdev_id, ret);
+               if (ath10k_mac_can_set_cts_prot(arvif)) {
+                       ret = ath10k_mac_set_cts_prot(arvif);
+                       if (ret)
+                               ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
+                                           arvif->vdev_id, ret);
+               }
        }
 
        if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -5436,7 +5512,7 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
                          struct ieee80211_scan_request *hw_req)
 {
        struct ath10k *ar = hw->priv;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct cfg80211_scan_request *req = &hw_req->req;
        struct wmi_start_scan_arg arg;
        int ret = 0;
@@ -5568,7 +5644,7 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                          struct ieee80211_key_conf *key)
 {
        struct ath10k *ar = hw->priv;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct ath10k_peer *peer;
        const u8 *peer_addr;
        bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
@@ -5707,7 +5783,7 @@ static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
                                           int keyidx)
 {
        struct ath10k *ar = hw->priv;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        int ret;
 
        mutex_lock(&arvif->ar->conf_mutex);
@@ -5888,7 +5964,7 @@ static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
                                            struct ieee80211_vif *vif)
 {
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        int *num_tdls_vifs = data;
 
        if (vif->type != NL80211_IFTYPE_STATION)
@@ -5916,7 +5992,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
                            enum ieee80211_sta_state new_state)
 {
        struct ath10k *ar = hw->priv;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
        struct ath10k_peer *peer;
        int ret = 0;
@@ -6151,7 +6227,7 @@ exit:
 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
                                u16 ac, bool enable)
 {
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct wmi_sta_uapsd_auto_trig_arg arg = {};
        u32 prio = 0, acc = 0;
        u32 value = 0;
@@ -6259,7 +6335,7 @@ static int ath10k_conf_tx(struct ieee80211_hw *hw,
                          const struct ieee80211_tx_queue_params *params)
 {
        struct ath10k *ar = hw->priv;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct wmi_wmm_params_arg *p = NULL;
        int ret;
 
@@ -6333,7 +6409,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
                                    enum ieee80211_roc_type type)
 {
        struct ath10k *ar = hw->priv;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct wmi_start_scan_arg arg;
        int ret = 0;
        u32 scan_time_msec;
@@ -6833,7 +6909,7 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
                                          struct ieee80211_vif *vif,
                                          const struct cfg80211_bitrate_mask *mask)
 {
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct cfg80211_chan_def def;
        struct ath10k *ar = arvif->ar;
        enum nl80211_band band;
@@ -6934,6 +7010,9 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
                        bw = WMI_PEER_CHWIDTH_80MHZ;
                        break;
                case IEEE80211_STA_RX_BW_160:
+                       bw = WMI_PEER_CHWIDTH_160MHZ;
+                       break;
+               default:
                        ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
                                    sta->bandwidth, sta->addr);
                        bw = WMI_PEER_CHWIDTH_20MHZ;
@@ -6981,7 +7060,7 @@ static void ath10k_offset_tsf(struct ieee80211_hw *hw,
                              struct ieee80211_vif *vif, s64 tsf_offset)
 {
        struct ath10k *ar = hw->priv;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        u32 offset, vdev_param;
        int ret;
 
@@ -7006,7 +7085,7 @@ static int ath10k_ampdu_action(struct ieee80211_hw *hw,
                               struct ieee80211_ampdu_params *params)
 {
        struct ath10k *ar = hw->priv;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct ieee80211_sta *sta = params->sta;
        enum ieee80211_ampdu_mlme_action action = params->action;
        u16 tid = params->tid;
@@ -7104,7 +7183,7 @@ ath10k_mac_update_vif_chan(struct ath10k *ar,
                ath10k_monitor_stop(ar);
 
        for (i = 0; i < n_vifs; i++) {
-               arvif = ath10k_vif_to_arvif(vifs[i].vif);
+               arvif = (void *)vifs[i].vif->drv_priv;
 
                ath10k_dbg(ar, ATH10K_DBG_MAC,
                           "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
@@ -7137,7 +7216,7 @@ ath10k_mac_update_vif_chan(struct ath10k *ar,
        spin_unlock_bh(&ar->data_lock);
 
        for (i = 0; i < n_vifs; i++) {
-               arvif = ath10k_vif_to_arvif(vifs[i].vif);
+               arvif = (void *)vifs[i].vif->drv_priv;
 
                if (WARN_ON(!arvif->is_started))
                        continue;
@@ -7364,6 +7443,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
                arvif->is_up = true;
        }
 
+       if (ath10k_mac_can_set_cts_prot(arvif)) {
+               ret = ath10k_mac_set_cts_prot(arvif);
+               if (ret)
+                       ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
+                                   arvif->vdev_id, ret);
+       }
+
        mutex_unlock(&ar->conf_mutex);
        return 0;
 
@@ -7434,6 +7520,20 @@ ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
        return 0;
 }
 
+static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+                                            struct ieee80211_vif *vif,
+                                            struct ieee80211_sta *sta)
+{
+       struct ath10k *ar;
+       struct ath10k_peer *peer;
+
+       ar = hw->priv;
+
+       list_for_each_entry(peer, &ar->peers, list)
+               if (peer->sta == sta)
+                       peer->removed = true;
+}
+
 static const struct ieee80211_ops ath10k_ops = {
        .tx                             = ath10k_mac_op_tx,
        .wake_tx_queue                  = ath10k_mac_op_wake_tx_queue,
@@ -7474,6 +7574,7 @@ static const struct ieee80211_ops ath10k_ops = {
        .assign_vif_chanctx             = ath10k_mac_op_assign_vif_chanctx,
        .unassign_vif_chanctx           = ath10k_mac_op_unassign_vif_chanctx,
        .switch_vif_chanctx             = ath10k_mac_op_switch_vif_chanctx,
+       .sta_pre_rcu_remove             = ath10k_mac_op_sta_pre_rcu_remove,
 
        CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
 
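The new sta_pre_rcu_remove callback pairs with the peer->removed check added to ath10k_mac_txq_lookup() earlier in this file: mac80211 calls it just before a station is unlinked and freed under RCU, so flagging the matching peers keeps the txq lookup from returning a queue that belongs to a station about to disappear. The flag-then-skip pattern in miniature, with hypothetical types:

    #include <linux/list.h>
    #include <linux/types.h>

    /* Hypothetical peer bookkeeping, mirroring only the idea. */
    struct example_peer {
            struct list_head list;
            const void *sta;
            bool removed;
    };

    static void mark_removed(struct list_head *peers, const void *sta)
    {
            struct example_peer *peer;

            list_for_each_entry(peer, peers, list)
                    if (peer->sta == sta)
                            peer->removed = true;   /* lookups skip it */
    }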
@@ -7548,6 +7649,7 @@ static const struct ieee80211_channel ath10k_5ghz_channels[] = {
        CHAN5G(157, 5785, 0),
        CHAN5G(161, 5805, 0),
        CHAN5G(165, 5825, 0),
+       CHAN5G(169, 5845, 0),
 };
 
 struct ath10k *ath10k_mac_create(size_t priv_size)
@@ -7771,7 +7873,7 @@ static void ath10k_get_arvif_iter(void *data, u8 *mac,
                                  struct ieee80211_vif *vif)
 {
        struct ath10k_vif_iter *arvif_iter = data;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
 
        if (arvif->vdev_id == arvif_iter->vdev_id)
                arvif_iter->arvif = arvif;
index 1bd29ecfcdcc913ff8d3e447eb0d85c4d3c56ec2..553747bc19ed2d9eedd104fff3987e0c8e18c2a1 100644 (file)
@@ -83,17 +83,12 @@ struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
                                            u8 tid);
 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
 
-static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
-{
-       return (struct ath10k_vif *)vif->drv_priv;
-}
-
 static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
                                      struct sk_buff *skb)
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
 
        if (info->flags  & IEEE80211_TX_CTL_ASSIGN_SEQ) {
                if (arvif->tx_seq_no == 0)
index c0b6ffaf3ec1b22648fd7d32e0001d4f0fcdcfb1..7e621ee194e3d717706a3186afec2cee2b0ad5d3 100644 (file)
@@ -132,7 +132,7 @@ struct ath10k_p2p_noa_arg {
 static void ath10k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
                                            struct ieee80211_vif *vif)
 {
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
        struct ath10k_p2p_noa_arg *arg = data;
 
        if (arvif->vdev_id != arg->vdev_id)
index b541a1c74488531fcd39a7b8103acba95c3557f7..6094372307aae46b296cd1cff40498a06427d8a3 100644 (file)
@@ -840,31 +840,35 @@ void ath10k_pci_rx_replenish_retry(unsigned long ptr)
        ath10k_pci_rx_post(ar);
 }
 
-static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
 {
-       u32 val = 0;
+       u32 val = 0, region = addr & 0xfffff;
 
-       switch (ar->hw_rev) {
-       case ATH10K_HW_QCA988X:
-       case ATH10K_HW_QCA9887:
-       case ATH10K_HW_QCA6174:
-       case ATH10K_HW_QCA9377:
-               val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
-                                         CORE_CTRL_ADDRESS) &
-                      0x7ff) << 21;
-               break;
-       case ATH10K_HW_QCA9888:
-       case ATH10K_HW_QCA99X0:
-       case ATH10K_HW_QCA9984:
-       case ATH10K_HW_QCA4019:
-               val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
-               break;
-       }
+       val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
+                                & 0x7ff) << 21;
+       val |= 0x100000 | region;
+       return val;
+}
 
-       val |= 0x100000 | (addr & 0xfffff);
+static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+       u32 val = 0, region = addr & 0xfffff;
+
+       val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+       val |= 0x100000 | region;
        return val;
 }
 
+static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+       struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+       if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
+               return -ENOTSUPP;
+
+       return ar_pci->targ_cpu_to_ce_addr(ar, addr);
+}
+
 /*
  * Diagnostic read/write access is provided for startup/config/debug usage.
  * Caller must guarantee proper alignment, when applicable, and single user
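Replacing the per-call hw_rev switch with a targ_cpu_to_ce_addr method bound once at probe time (see the probe hunks below) keeps the address-translation hot path free of revision checks, and turns a chip with no translator into an explicit WARN rather than a silently wrong address. The dispatch shape in miniature, with hypothetical names:

    #include <linux/bug.h>
    #include <linux/types.h>

    /* Hypothetical device context with one chip-specific method. */
    struct example_dev {
            u32 (*to_ce_addr)(struct example_dev *dev, u32 addr);
    };

    static u32 to_ce_addr(struct example_dev *dev, u32 addr)
    {
            /* bound at probe time; a missing binding is a driver bug */
            if (WARN_ON_ONCE(!dev->to_ce_addr))
                    return 0;

            return dev->to_ce_addr(dev, addr);
    }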
@@ -896,7 +900,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
         */
        alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
 
-       data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+       data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev,
                                                       alloc_nbytes,
                                                       &ce_data_base,
                                                       GFP_ATOMIC);
@@ -905,7 +909,6 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                ret = -ENOMEM;
                goto done;
        }
-       memset(data_buf, 0, alloc_nbytes);
 
        remaining_bytes = nbytes;
        ce_data = ce_data_base;
@@ -1474,6 +1477,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
        ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
        ath10k_print_driver_info(ar);
        ath10k_pci_dump_registers(ar, crash_data);
+       ath10k_ce_dump_registers(ar, crash_data);
 
        spin_unlock_bh(&ar->data_lock);
 
@@ -1590,7 +1594,7 @@ void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
                /* TODO: Find appropriate register configuration for QCA99X0
                 *  to mask irq/MSI.
                 */
-                break;
+               break;
        }
 }
 
@@ -1647,6 +1651,8 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
 
        ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
 
+       napi_enable(&ar->napi);
+
        ath10k_pci_irq_enable(ar);
        ath10k_pci_rx_post(ar);
 
@@ -1937,7 +1943,7 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
 {
        u32 addr, val;
 
-       addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
+       addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
        val = ath10k_pci_read32(ar, addr);
        val |= CORE_CTRL_CPU_INTR_MASK;
        ath10k_pci_write32(ar, addr, val);
@@ -1973,7 +1979,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
                }
                break;
        case QCA9377_1_0_DEVICE_ID:
-               return 2;
+               return 4;
        }
 
        ath10k_warn(ar, "unknown number of banks, assuming 1\n");
@@ -2531,7 +2537,6 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
                ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
                goto err_ce;
        }
-       napi_enable(&ar->napi);
 
        return 0;
 
@@ -2799,7 +2804,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
        done = ath10k_htt_txrx_compl_task(ar, budget);
 
        if (done < budget) {
-               napi_complete(ctx);
+               napi_complete_done(ctx, done);
                /* In case of MSI, it is possible that interrupts are received
                 * while NAPI poll is inprogress. So pending interrupts that are
                 * received after processing all copy engine pipes by NAPI poll
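napi_complete_done() differs from plain napi_complete() in that it reports how much work the poll actually did, which the core can feed into interrupt-moderation and busy-poll heuristics. A bare-bones poll callback showing where the call sits, assuming a hypothetical process_rx() worker:

    #include <linux/netdevice.h>

    static int process_rx(int budget);      /* hypothetical RX worker */

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int done = process_rx(budget);

            /* only stop polling when the budget was not exhausted */
            if (done < budget)
                    napi_complete_done(napi, done);

            return done;
    }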
@@ -3132,7 +3137,7 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
        setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
                    (unsigned long)ar);
 
-       if (QCA_REV_6174(ar))
+       if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
                ath10k_pci_override_ce_config(ar);
 
        ret = ath10k_pci_alloc_pipes(ar);
@@ -3170,6 +3175,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        bool pci_ps;
        int (*pci_soft_reset)(struct ath10k *ar);
        int (*pci_hard_reset)(struct ath10k *ar);
+       u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
 
        switch (pci_dev->device) {
        case QCA988X_2_0_DEVICE_ID:
@@ -3177,12 +3183,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                pci_ps = false;
                pci_soft_reset = ath10k_pci_warm_reset;
                pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+               targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
                break;
        case QCA9887_1_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA9887;
                pci_ps = false;
                pci_soft_reset = ath10k_pci_warm_reset;
                pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+               targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
                break;
        case QCA6164_2_1_DEVICE_ID:
        case QCA6174_2_1_DEVICE_ID:
@@ -3190,30 +3198,35 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                pci_ps = true;
                pci_soft_reset = ath10k_pci_warm_reset;
                pci_hard_reset = ath10k_pci_qca6174_chip_reset;
+               targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
                break;
        case QCA99X0_2_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA99X0;
                pci_ps = false;
                pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
                pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+               targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
                break;
        case QCA9984_1_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA9984;
                pci_ps = false;
                pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
                pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+               targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
                break;
        case QCA9888_2_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA9888;
                pci_ps = false;
                pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
                pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+               targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
                break;
        case QCA9377_1_0_DEVICE_ID:
                hw_rev = ATH10K_HW_QCA9377;
                pci_ps = true;
                pci_soft_reset = NULL;
                pci_hard_reset = ath10k_pci_qca6174_chip_reset;
+               targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
                break;
        default:
                WARN_ON(1);
@@ -3240,6 +3253,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
        ar_pci->bus_ops = &ath10k_pci_bus_ops;
        ar_pci->pci_soft_reset = pci_soft_reset;
        ar_pci->pci_hard_reset = pci_hard_reset;
+       ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
 
        ar->id.vendor = pdev->vendor;
        ar->id.device = pdev->device;
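
The probe switch above picks a chip-specific translation from the target
CPU's address space to copy-engine (CE) address space and stores it in
struct ath10k_pci alongside the existing soft/hard reset ops. A hedged
sketch of how such an op is typically dispatched; only the struct field is
taken from this diff, the wrapper is illustrative and assumes the driver's
usual ath10k_pci_priv() accessor:

static u32 example_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        /* defer to whichever translation probe installed for this chip */
        return ar_pci->targ_cpu_to_ce_addr(ar, addr);
}
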
index 9854ad56b2dea362834c093f2ee13a28f33e2170..c1e08ad6394039559045d73d8500cdbf5ee9e521 100644 (file)
 #include "ce.h"
 #include "ahb.h"
 
-/*
- * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite
- */
-#define DIAG_TRANSFER_LIMIT 2048
-
 /*
  * maximum number of bytes that can be
  * handled atomically by DiagRead/DiagWrite
@@ -238,6 +233,11 @@ struct ath10k_pci {
        /* Chip specific pci full reset function */
        int (*pci_hard_reset)(struct ath10k *ar);
 
+       /* chip specific methods for converting target CPU virtual address
+        * space to CE address space
+        */
+       u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
+
        /* Keep this entry in the last, memory for struct ath10k_ahb is
         * allocated (ahb support enabled case) in the continuation of
         * this struct.
index 2ffc1fe4923b9696441b4411428ba5548a231783..c061d6958bd130f7d5756d6eaea6268397dd764e 100644 (file)
@@ -278,7 +278,7 @@ static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
 {
        struct ath10k *ar = file->private_data;
        char *mode = "";
-       unsigned int len;
+       size_t len;
        enum ath10k_spectral_mode spectral_mode;
 
        mutex_lock(&ar->conf_mutex);
@@ -370,7 +370,7 @@ static ssize_t read_file_spectral_count(struct file *file,
 {
        struct ath10k *ar = file->private_data;
        char buf[32];
-       unsigned int len;
+       size_t len;
        u8 spectral_count;
 
        mutex_lock(&ar->conf_mutex);
@@ -422,7 +422,8 @@ static ssize_t read_file_spectral_bins(struct file *file,
 {
        struct ath10k *ar = file->private_data;
        char buf[32];
-       unsigned int len, bins, fft_size, bin_scale;
+       unsigned int bins, fft_size, bin_scale;
+       size_t len;
 
        mutex_lock(&ar->conf_mutex);
 
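
The three hunks above change the debugfs length locals from unsigned int to
size_t: the computed length is ultimately handed to
simple_read_from_buffer(), whose count and available parameters are size_t,
so carrying size_t end to end avoids implicit conversions. A minimal sketch
of the pattern ('value' is illustrative):

        char buf[32];
        size_t len;

        len = scnprintf(buf, sizeof(buf), "%u\n", value);
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
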
index ed85f938e3c0799795ccf117e572330ea2635c78..8bb36c18a7491a6d51e75dfaeb3bf0f1d12ff37e 100644 (file)
@@ -150,7 +150,10 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar,
                 ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
 
        /* load utf firmware image */
-       ret = request_firmware(&fw_file->firmware, filename, ar->dev);
+       ret = request_firmware_direct(&fw_file->firmware, filename, ar->dev);
+       ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode fw request '%s': %d\n",
+                  filename, ret);
+
        if (ret) {
                ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
                            filename, ret);
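
request_firmware_direct() differs from request_firmware() in that it only
probes the filesystem and never falls back to the user-mode helper, so a
missing optional image, such as the UTF/testmode firmware here, fails fast
instead of waiting out the helper timeout; the added debug print records
the result either way. A hedged sketch (the path is illustrative, not a
name taken from this diff):

        ret = request_firmware_direct(&fw, "ath10k/QCA988X/hw2.0/utf.bin",
                                      dev);
        if (ret)
                pr_debug("optional utf firmware not present: %d\n", ret);
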
index f304f6632c4f469f917e92b77e2fcb29f5944039..f9188027a6f6fe73f9bd098c9c3c96ea3ee56404 100644 (file)
@@ -1105,8 +1105,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
                struct ath10k_fw_stats_pdev *dst;
 
                src = data;
-               if (data_len < sizeof(*src))
+               if (data_len < sizeof(*src)) {
+                       kfree(tb);
                        return -EPROTO;
+               }
 
                data += sizeof(*src);
                data_len -= sizeof(*src);
@@ -1126,8 +1128,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
                struct ath10k_fw_stats_vdev *dst;
 
                src = data;
-               if (data_len < sizeof(*src))
+               if (data_len < sizeof(*src)) {
+                       kfree(tb);
                        return -EPROTO;
+               }
 
                data += sizeof(*src);
                data_len -= sizeof(*src);
@@ -1145,8 +1149,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
                struct ath10k_fw_stats_peer *dst;
 
                src = data;
-               if (data_len < sizeof(*src))
+               if (data_len < sizeof(*src)) {
+                       kfree(tb);
                        return -EPROTO;
+               }
 
                data += sizeof(*src);
                data_len -= sizeof(*src);
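
Each of the three early returns above leaked the TLV pointer array 'tb'
allocated earlier in the function; the fix frees it on every error path.
A hedged sketch of the equivalent single-exit style often used for this
kind of cleanup (helper and bound are hypothetical):

static int example_parse_stats(const void *data, size_t data_len)
{
        void **tb;
        int ret = 0;

        tb = example_tlv_parse_alloc(data, data_len); /* hypothetical */
        if (!tb)
                return -ENOMEM;

        if (data_len < EXAMPLE_MIN_LEN) { /* hypothetical bound */
                ret = -EPROTO;
                goto out; /* the single exit below frees tb */
        }

        /* ... further validation and copy-out ... */
out:
        kfree(tb);
        return ret;
}
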
@@ -3631,6 +3637,7 @@ static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
        .vht = WMI_TLV_PEER_VHT,
        .bw80 = WMI_TLV_PEER_80MHZ,
        .pmf = WMI_TLV_PEER_PMF,
+       .bw160 = WMI_TLV_PEER_160MHZ,
 };
 
 /************/
index b8aa6000573cd1abfa2f2c79fa53310a85b13faf..22cf011e839afc190c55c65a387351787369e37d 100644 (file)
@@ -543,6 +543,7 @@ enum wmi_tlv_peer_flags {
        WMI_TLV_PEER_VHT = 0x02000000,
        WMI_TLV_PEER_80MHZ = 0x04000000,
        WMI_TLV_PEER_PMF = 0x08000000,
+       WMI_TLV_PEER_160MHZ = 0x20000000,
 };
 
 enum wmi_tlv_tag {
index 50d6ee6afe26fc362f843af5314c1644cf033e8f..2f1743e60fa1303331fb654a79c745e725cc9eaf 100644 (file)
@@ -28,6 +28,7 @@
 #include "wmi-ops.h"
 #include "p2p.h"
 #include "hw.h"
+#include "hif.h"
 
 #define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
 #define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
@@ -1574,6 +1575,7 @@ static const struct wmi_peer_flags_map wmi_peer_flags_map = {
        .bw80 = WMI_PEER_80MHZ,
        .vht_2g = WMI_PEER_VHT_2G,
        .pmf = WMI_PEER_PMF,
+       .bw160 = WMI_PEER_160MHZ,
 };
 
 static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = {
@@ -1591,6 +1593,7 @@ static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = {
        .spatial_mux = WMI_10X_PEER_SPATIAL_MUX,
        .vht = WMI_10X_PEER_VHT,
        .bw80 = WMI_10X_PEER_80MHZ,
+       .bw160 = WMI_10X_PEER_160MHZ,
 };
 
 static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
@@ -1610,6 +1613,7 @@ static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
        .bw80 = WMI_10_2_PEER_80MHZ,
        .vht_2g = WMI_10_2_PEER_VHT_2G,
        .pmf = WMI_10_2_PEER_PMF,
+       .bw160 = WMI_10_2_PEER_160MHZ,
 };
 
 void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
@@ -1634,7 +1638,10 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
 
        ch->mhz = __cpu_to_le32(arg->freq);
        ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
-       ch->band_center_freq2 = 0;
+       if (arg->mode == MODE_11AC_VHT80_80)
+               ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2);
+       else
+               ch->band_center_freq2 = 0;
        ch->min_power = arg->min_power;
        ch->max_power = arg->max_power;
        ch->reg_power = arg->max_reg_power;
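
An 80+80 MHz channel is described by two independent 80 MHz segment
centers, so band_center_freq2 is meaningful only in MODE_11AC_VHT80_80 and
stays zero otherwise. An illustrative argument block (the frequencies are
arbitrary examples, not values from this diff):

        struct wmi_channel_arg arg = {
                .freq              = 5180, /* primary 20 MHz channel */
                .band_center_freq1 = 5210, /* first 80 MHz segment center */
                .band_center_freq2 = 5775, /* second 80 MHz segment center */
                .mode              = MODE_11AC_VHT80_80,
        };
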
@@ -1772,7 +1779,7 @@ unlock:
 static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
                                       struct ieee80211_vif *vif)
 {
-       struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ath10k_vif *arvif = (void *)vif->drv_priv;
 
        ath10k_wmi_tx_beacon_nowait(arvif);
 }
@@ -2319,7 +2326,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
         */
        if (channel >= 1 && channel <= 14) {
                status->band = NL80211_BAND_2GHZ;
-       } else if (channel >= 36 && channel <= 165) {
+       } else if (channel >= 36 && channel <= 169) {
                status->band = NL80211_BAND_5GHZ;
        } else {
                /* Shouldn't happen unless list of advertised channels to
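
The band classification above extends the recognized 5 GHz range from
channel 165 to channel 169. A self-contained illustration of the mapping
(plain C, runnable outside the kernel):

#include <stdio.h>

static const char *chan_to_band(int channel)
{
        if (channel >= 1 && channel <= 14)
                return "2GHz";
        if (channel >= 36 && channel <= 169)
                return "5GHz";
        return "unknown";
}

int main(void)
{
        printf("165 -> %s, 169 -> %s\n",
               chan_to_band(165), chan_to_band(169));
        return 0;
}
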
index 5d3dff95b2e5afac3fc8d79530cbbafc0df60bdf..386aa51435f1c4d57e0affa556153c4989b2c8e8 100644 (file)
@@ -75,7 +75,7 @@ struct wmi_cmd_hdr {
 
 /*
  * There is no signed version of __le32, so for a temporary solution come
- * up with our own version. The idea is from fs/ntfs/types.h.
+ * up with our own version. The idea is from fs/ntfs/endian.h.
  *
  * Use a_ prefix so that it doesn't conflict if we get proper support to
  * linux/types.h.
@@ -1728,8 +1728,10 @@ enum wmi_phy_mode {
        MODE_11AC_VHT20_2G = 11,
        MODE_11AC_VHT40_2G = 12,
        MODE_11AC_VHT80_2G = 13,
-       MODE_UNKNOWN    = 14,
-       MODE_MAX        = 14
+       MODE_11AC_VHT80_80 = 14,
+       MODE_11AC_VHT160 = 15,
+       MODE_UNKNOWN    = 16,
+       MODE_MAX        = 16
 };
 
 static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
@@ -1757,6 +1759,10 @@ static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
                return "11ac-vht40";
        case MODE_11AC_VHT80:
                return "11ac-vht80";
+       case MODE_11AC_VHT160:
+               return "11ac-vht160";
+       case MODE_11AC_VHT80_80:
+               return "11ac-vht80+80";
        case MODE_11AC_VHT20_2G:
                return "11ac-vht20-2g";
        case MODE_11AC_VHT40_2G:
@@ -1811,6 +1817,7 @@ struct wmi_channel {
 struct wmi_channel_arg {
        u32 freq;
        u32 band_center_freq1;
+       u32 band_center_freq2;
        bool passive;
        bool allow_ibss;
        bool allow_ht;
@@ -1875,9 +1882,18 @@ enum wmi_channel_change_cause {
 #define WMI_VHT_CAP_MAX_MPDU_LEN_MASK            0x00000003
 #define WMI_VHT_CAP_RX_LDPC                      0x00000010
 #define WMI_VHT_CAP_SGI_80MHZ                    0x00000020
+#define WMI_VHT_CAP_SGI_160MHZ                   0x00000040
 #define WMI_VHT_CAP_TX_STBC                      0x00000080
 #define WMI_VHT_CAP_RX_STBC_MASK                 0x00000300
 #define WMI_VHT_CAP_RX_STBC_MASK_SHIFT           8
+#define WMI_VHT_CAP_SU_BFER                      0x00000800
+#define WMI_VHT_CAP_SU_BFEE                      0x00001000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK              0x0000E000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK_SHIFT        13
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK             0x00070000
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK_SHIFT       16
+#define WMI_VHT_CAP_MU_BFER                      0x00080000
+#define WMI_VHT_CAP_MU_BFEE                      0x00100000
 #define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP            0x03800000
 #define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT      23
 #define WMI_VHT_CAP_RX_FIXED_ANT                 0x10000000
@@ -1926,6 +1942,8 @@ enum {
        REGDMN_MODE_11AC_VHT40PLUS   = 0x40000, /* 5Ghz, VHT40 + channels */
        REGDMN_MODE_11AC_VHT40MINUS  = 0x80000, /* 5Ghz  VHT40 - channels */
        REGDMN_MODE_11AC_VHT80       = 0x100000, /* 5Ghz, VHT80 channels */
+       REGDMN_MODE_11AC_VHT160      = 0x200000,     /* 5Ghz, VHT160 channels */
+       REGDMN_MODE_11AC_VHT80_80    = 0x400000,     /* 5Ghz, VHT80+80 channels */
        REGDMN_MODE_ALL              = 0xffffffff
 };
 
@@ -5783,6 +5801,7 @@ enum wmi_peer_chwidth {
        WMI_PEER_CHWIDTH_20MHZ = 0,
        WMI_PEER_CHWIDTH_40MHZ = 1,
        WMI_PEER_CHWIDTH_80MHZ = 2,
+       WMI_PEER_CHWIDTH_160MHZ = 3,
 };
 
 enum wmi_peer_param {
@@ -5792,6 +5811,7 @@ enum wmi_peer_param {
        WMI_PEER_CHAN_WIDTH = 0x4,
        WMI_PEER_NSS        = 0x5,
        WMI_PEER_USE_4ADDR  = 0x6,
+       WMI_PEER_DEBUG      = 0xa,
        WMI_PEER_DUMMY_VAR  = 0xff, /* dummy parameter for STA PS workaround */
 };
 
@@ -5873,6 +5893,7 @@ struct wmi_peer_flags_map {
        u32 bw80;
        u32 vht_2g;
        u32 pmf;
+       u32 bw160;
 };
 
 enum wmi_peer_flags {
@@ -5892,6 +5913,7 @@ enum wmi_peer_flags {
        WMI_PEER_80MHZ = 0x04000000,
        WMI_PEER_VHT_2G = 0x08000000,
        WMI_PEER_PMF = 0x10000000,
+       WMI_PEER_160MHZ = 0x20000000
 };
 
 enum wmi_10x_peer_flags {
@@ -5909,6 +5931,7 @@ enum wmi_10x_peer_flags {
        WMI_10X_PEER_SPATIAL_MUX = 0x00200000,
        WMI_10X_PEER_VHT = 0x02000000,
        WMI_10X_PEER_80MHZ = 0x04000000,
+       WMI_10X_PEER_160MHZ = 0x20000000
 };
 
 enum wmi_10_2_peer_flags {
@@ -5928,6 +5951,7 @@ enum wmi_10_2_peer_flags {
        WMI_10_2_PEER_80MHZ = 0x04000000,
        WMI_10_2_PEER_VHT_2G = 0x08000000,
        WMI_10_2_PEER_PMF = 0x10000000,
+       WMI_10_2_PEER_160MHZ = 0x20000000
 };
 
 /*
@@ -6581,7 +6605,7 @@ struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
 int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
                               u32 cmd_id);
-void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
+void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *arg);
 
 void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
                                     struct ath10k_fw_stats_pdev *dst);
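
Each firmware branch lays out its peer flags differently, and the maps
above translate the abstract wmi_peer_flags_map fields (now including
bw160) into the branch-specific bit. A hedged sketch of how a caller stays
branch-agnostic, assuming the driver exposes the active map as
ar->wmi.peer_flags (the condition and the peer_arg name are illustrative):

        if (sta_supports_160mhz) /* illustrative condition */
                peer_arg->peer_flags |= ar->wmi.peer_flags->bw160;
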
index 2ca88b593e4c1b5ecc38338b1420da139594651e..c0794f5988b348a1877e3dda52777c0a05f5cb85 100644 (file)
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include <linux/module.h>
 #include <linux/nl80211.h>
 #include <linux/platform_device.h>
 #include <linux/etherdevice.h>
-#include <linux/export.h>
 #include <ath25_platform.h>
 #include "ath5k.h"
 #include "debug.h"
index dc44cfef75176e1710f6622fee85f1f5e9b97e11..16e052d02c94088006e380dd95bc9bec1ba28f88 100644 (file)
@@ -502,8 +502,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                        break;
                return -EOPNOTSUPP;
        default:
-               WARN_ON(1);
-               return -EINVAL;
+               return -EOPNOTSUPP;
        }
 
        mutex_lock(&ah->lock);
index ac25f1781b424e8b54cc74cff16e619f927fa20d..87e99c12d4baa373f6a1ce1b22713a98c823df27 100644 (file)
@@ -641,7 +641,6 @@ struct ath6kl_vif {
        u32 txe_intvl;
        u16 bg_scan_period;
        u8 assoc_bss_dtim_period;
-       struct net_device_stats net_stats;
        struct target_stats target_stats;
        struct wmi_connect_cmd profile;
        u16 rsn_capab;
index 1af3fed5a72caa203e9cbda00f677905ef966863..91ee542de3d79ec0903d56f2d45463af83b01a95 100644 (file)
@@ -1113,13 +1113,6 @@ static int ath6kl_close(struct net_device *dev)
        return 0;
 }
 
-static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
-{
-       struct ath6kl_vif *vif = netdev_priv(dev);
-
-       return &vif->net_stats;
-}
-
 static int ath6kl_set_features(struct net_device *dev,
                               netdev_features_t features)
 {
@@ -1285,7 +1278,6 @@ static const struct net_device_ops ath6kl_netdev_ops = {
        .ndo_open               = ath6kl_open,
        .ndo_stop               = ath6kl_close,
        .ndo_start_xmit         = ath6kl_data_tx,
-       .ndo_get_stats          = ath6kl_get_stats,
        .ndo_set_features       = ath6kl_set_features,
        .ndo_set_rx_mode        = ath6kl_set_multicast_list,
 };
index 8ec66e74d06de14f4d4dfb8b75463f823f566582..2195b1b7a8a63873989d95c655c2868d00371003 100644 (file)
@@ -713,7 +713,7 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
                 * that the packet is properly freed?
                 */
                if (s_req->busrequest) {
-                       s_req->busrequest->scat_req = 0;
+                       s_req->busrequest->scat_req = NULL;
                        ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
                }
                kfree(s_req->virt_dma_buf);
index 9df41d5e32491d3c934808df9c3c99d5de2d643f..a531e0c5c1e28514c0a8bb32f18c4f0808ec6f18 100644 (file)
@@ -405,7 +405,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
                        skb = skb_realloc_headroom(skb, dev->needed_headroom);
                        kfree_skb(tmp_skb);
                        if (skb == NULL) {
-                               vif->net_stats.tx_dropped++;
+                               dev->stats.tx_dropped++;
                                return 0;
                        }
                }
@@ -520,8 +520,8 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
 fail_tx:
        dev_kfree_skb(skb);
 
-       vif->net_stats.tx_dropped++;
-       vif->net_stats.tx_aborted_errors++;
+       dev->stats.tx_dropped++;
+       dev->stats.tx_aborted_errors++;
 
        return 0;
 }
@@ -767,7 +767,7 @@ void ath6kl_tx_complete(struct htc_target *target,
                                /* a packet was flushed  */
                                flushing[if_idx] = true;
 
-                       vif->net_stats.tx_errors++;
+                       vif->ndev->stats.tx_errors++;
 
                        if (status != -ENOSPC && status != -ECANCELED)
                                ath6kl_warn("tx complete error: %d\n", status);
@@ -783,8 +783,8 @@ void ath6kl_tx_complete(struct htc_target *target,
                                   eid, "OK");
 
                        flushing[if_idx] = false;
-                       vif->net_stats.tx_packets++;
-                       vif->net_stats.tx_bytes += skb->len;
+                       vif->ndev->stats.tx_packets++;
+                       vif->ndev->stats.tx_bytes += skb->len;
                }
 
                ath6kl_tx_clear_node_map(vif, eid, map_no);
@@ -1365,8 +1365,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
         */
        spin_lock_bh(&vif->if_lock);
 
-       vif->net_stats.rx_packets++;
-       vif->net_stats.rx_bytes += packet->act_len;
+       vif->ndev->stats.rx_packets++;
+       vif->ndev->stats.rx_bytes += packet->act_len;
 
        spin_unlock_bh(&vif->if_lock);
 
@@ -1395,8 +1395,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
            ((packet->act_len < min_hdr_len) ||
             (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
                ath6kl_info("frame len is too short or too long\n");
-               vif->net_stats.rx_errors++;
-               vif->net_stats.rx_length_errors++;
+               vif->ndev->stats.rx_errors++;
+               vif->ndev->stats.rx_length_errors++;
                dev_kfree_skb(skb);
                return;
        }
@@ -1619,7 +1619,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
                        return;
                }
        } else if (!is_broadcast_ether_addr(datap->h_dest)) {
-               vif->net_stats.multicast++;
+               vif->ndev->stats.multicast++;
        }
 
        ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
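
With the per-vif counters removed here and .ndo_get_stats dropped in the
netdev ops above, the driver counts directly into the statistics embedded
in struct net_device; the core's dev_get_stats() reads those by default
when no callback is installed, so no accessor is needed. The update sites
reduce to one-liners of the form:

        vif->ndev->stats.tx_packets++;
        vif->ndev->stats.tx_bytes += skb->len;
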
index 8f231c67dd516e3620e8b0b53203b736d8ee6521..783a38f1a626a435f5f2eb6b39f2ef9724292f06 100644 (file)
@@ -3,8 +3,8 @@ config ATH9K_HW
 config ATH9K_COMMON
        tristate
        select ATH_COMMON
-       select DEBUG_FS
-       select RELAY
+config ATH9K_COMMON_DEBUG
+       bool
 config ATH9K_DFS_DEBUGFS
        def_bool y
        depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED
@@ -60,12 +60,14 @@ config ATH9K_DEBUGFS
        bool "Atheros ath9k debugging"
        depends on ATH9K && DEBUG_FS
        select MAC80211_DEBUGFS
+       select ATH9K_COMMON_DEBUG
        select RELAY
        ---help---
          Say Y, if you need access to ath9k's statistics for
          interrupts, rate control, etc.
 
          Also required for changing debug message flags at run time.
+         As well as access to the FFT/spectral data and TX99.
 
 config ATH9K_STATION_STATISTICS
        bool "Detailed station statistics"
@@ -174,8 +176,11 @@ config ATH9K_HTC
 config ATH9K_HTC_DEBUGFS
        bool "Atheros ath9k_htc debugging"
        depends on ATH9K_HTC && DEBUG_FS
+       select ATH9K_COMMON_DEBUG
+       select RELAY
        ---help---
          Say Y, if you need access to ath9k_htc's statistics.
+         As well as access to the FFT/spectral data.
 
 config ATH9K_HWRNG
        bool "Random number generator support"
index 76f9dc37500b18f70e4de8516b3ecb98ef24b511..36a40ffdce151d107c11b1bd78f964a9ca20062a 100644 (file)
@@ -60,8 +60,9 @@ obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
 ath9k_common-y:=       common.o \
                        common-init.o \
                        common-beacon.o \
-                       common-debug.o \
-                       common-spectral.o
+
+ath9k_common-$(CONFIG_ATH9K_COMMON_DEBUG) += common-debug.o \
+                                            common-spectral.o
 
 ath9k_htc-y += htc_hst.o \
                hif_usb.o \
index 8eea8d22e72e5a4c9a43ccd2d3494614562a7a4f..7922550c2159bbd6269aae15abbab79df4d99017 100644 (file)
@@ -524,7 +524,7 @@ static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
                return true;
 
        /* Setup rf parameters */
-       eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
+       eepMinorRev = ah->eep_ops->get_eeprom_rev(ah);
 
        for (i = 0; i < ah->iniBank6.ia_rows; i++)
                ah->analogBank6Data[i] = INI_RA(&ah->iniBank6, i, modesIndex);
index d480d2f3e18588843696bc3801d6a821ffbefee4..ae68f674829b1943e7ec24a6c6e351c297c392aa 100644 (file)
@@ -108,8 +108,7 @@ static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
 {
        u32 rxgain_type;
 
-       if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
-           AR5416_EEP_MINOR_VER_17) {
+       if (ah->eep_ops->get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_17) {
                rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
 
                if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
@@ -129,8 +128,7 @@ static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
 
 static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah, u32 txgain_type)
 {
-       if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
-           AR5416_EEP_MINOR_VER_19) {
+       if (ah->eep_ops->get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_19) {
                if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
                        INIT_INI_ARRAY(&ah->iniModesTxGain,
                                       ar9280Modes_high_power_tx_gain_9280_2);
index f816909d9474e204c36abe20e73c16e0c259956b..4b3c9b10819766dc90cf28a4f972ae9e71099c7e 100644 (file)
@@ -220,8 +220,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
        ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
        ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
 
-       ACCESS_ONCE(ads->ds_link) = i->link;
-       ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
+       WRITE_ONCE(ads->ds_link, i->link);
+       WRITE_ONCE(ads->ds_data, i->buf_addr[0]);
 
        ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
        ctl6 = SM(i->keytype, AR_EncrType);
@@ -235,26 +235,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
 
        if ((i->is_first || i->is_last) &&
            i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
-               ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
+               WRITE_ONCE(ads->ds_ctl2, set11nTries(i->rates, 0)
                        | set11nTries(i->rates, 1)
                        | set11nTries(i->rates, 2)
                        | set11nTries(i->rates, 3)
                        | (i->dur_update ? AR_DurUpdateEna : 0)
-                       | SM(0, AR_BurstDur);
+                       | SM(0, AR_BurstDur));
 
-               ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
+               WRITE_ONCE(ads->ds_ctl3, set11nRate(i->rates, 0)
                        | set11nRate(i->rates, 1)
                        | set11nRate(i->rates, 2)
-                       | set11nRate(i->rates, 3);
+                       | set11nRate(i->rates, 3));
        } else {
-               ACCESS_ONCE(ads->ds_ctl2) = 0;
-               ACCESS_ONCE(ads->ds_ctl3) = 0;
+               WRITE_ONCE(ads->ds_ctl2, 0);
+               WRITE_ONCE(ads->ds_ctl3, 0);
        }
 
        if (!i->is_first) {
-               ACCESS_ONCE(ads->ds_ctl0) = 0;
-               ACCESS_ONCE(ads->ds_ctl1) = ctl1;
-               ACCESS_ONCE(ads->ds_ctl6) = ctl6;
+               WRITE_ONCE(ads->ds_ctl0, 0);
+               WRITE_ONCE(ads->ds_ctl1, ctl1);
+               WRITE_ONCE(ads->ds_ctl6, ctl6);
                return;
        }
 
@@ -279,7 +279,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
                break;
        }
 
-       ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
+       WRITE_ONCE(ads->ds_ctl0, (i->pkt_len & AR_FrameLen)
                | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
                | SM(i->txpower[0], AR_XmitPower0)
                | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
@@ -287,29 +287,29 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
                | (i->keyix != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
                | (i->flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
                | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
-                  (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
+                  (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0)));
 
-       ACCESS_ONCE(ads->ds_ctl1) = ctl1;
-       ACCESS_ONCE(ads->ds_ctl6) = ctl6;
+       WRITE_ONCE(ads->ds_ctl1, ctl1);
+       WRITE_ONCE(ads->ds_ctl6, ctl6);
 
        if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
                return;
 
-       ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
-               | set11nPktDurRTSCTS(i->rates, 1);
+       WRITE_ONCE(ads->ds_ctl4, set11nPktDurRTSCTS(i->rates, 0)
+               | set11nPktDurRTSCTS(i->rates, 1));
 
-       ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
-               | set11nPktDurRTSCTS(i->rates, 3);
+       WRITE_ONCE(ads->ds_ctl5, set11nPktDurRTSCTS(i->rates, 2)
+               | set11nPktDurRTSCTS(i->rates, 3));
 
-       ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
+       WRITE_ONCE(ads->ds_ctl7, set11nRateFlags(i->rates, 0)
                | set11nRateFlags(i->rates, 1)
                | set11nRateFlags(i->rates, 2)
                | set11nRateFlags(i->rates, 3)
-               | SM(i->rtscts_rate, AR_RTSCTSRate);
+               | SM(i->rtscts_rate, AR_RTSCTSRate));
 
-       ACCESS_ONCE(ads->ds_ctl9) = SM(i->txpower[1], AR_XmitPower1);
-       ACCESS_ONCE(ads->ds_ctl10) = SM(i->txpower[2], AR_XmitPower2);
-       ACCESS_ONCE(ads->ds_ctl11) = SM(i->txpower[3], AR_XmitPower3);
+       WRITE_ONCE(ads->ds_ctl9, SM(i->txpower[1], AR_XmitPower1));
+       WRITE_ONCE(ads->ds_ctl10, SM(i->txpower[2], AR_XmitPower2));
+       WRITE_ONCE(ads->ds_ctl11, SM(i->txpower[3], AR_XmitPower3));
 }
 
 static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
@@ -318,7 +318,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
        struct ar5416_desc *ads = AR5416DESC(ds);
        u32 status;
 
-       status = ACCESS_ONCE(ads->ds_txstatus9);
+       status = READ_ONCE(ads->ds_txstatus9);
        if ((status & AR_TxDone) == 0)
                return -EINPROGRESS;
 
@@ -332,7 +332,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
        ts->ts_rateindex = MS(status, AR_FinalTxIdx);
        ts->ts_seqnum = MS(status, AR_SeqNum);
 
-       status = ACCESS_ONCE(ads->ds_txstatus0);
+       status = READ_ONCE(ads->ds_txstatus0);
        ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
        ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
        ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02);
@@ -342,7 +342,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
                ts->ba_high = ads->AR_BaBitmapHigh;
        }
 
-       status = ACCESS_ONCE(ads->ds_txstatus1);
+       status = READ_ONCE(ads->ds_txstatus1);
        if (status & AR_FrmXmitOK)
                ts->ts_status |= ATH9K_TX_ACKED;
        else {
@@ -371,7 +371,7 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
        ts->ts_longretry = MS(status, AR_DataFailCnt);
        ts->ts_virtcol = MS(status, AR_VirtRetryCnt);
 
-       status = ACCESS_ONCE(ads->ds_txstatus5);
+       status = READ_ONCE(ads->ds_txstatus5);
        ts->ts_rssi = MS(status, AR_TxRSSICombined);
        ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10);
        ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11);
@@ -390,13 +390,13 @@ static int ar9002_hw_get_duration(struct ath_hw *ah, const void *ds, int index)
 
        switch (index) {
        case 0:
-               return MS(ACCESS_ONCE(ads->ds_ctl4), AR_PacketDur0);
+               return MS(READ_ONCE(ads->ds_ctl4), AR_PacketDur0);
        case 1:
-               return MS(ACCESS_ONCE(ads->ds_ctl4), AR_PacketDur1);
+               return MS(READ_ONCE(ads->ds_ctl4), AR_PacketDur1);
        case 2:
-               return MS(ACCESS_ONCE(ads->ds_ctl5), AR_PacketDur2);
+               return MS(READ_ONCE(ads->ds_ctl5), AR_PacketDur2);
        case 3:
-               return MS(ACCESS_ONCE(ads->ds_ctl5), AR_PacketDur3);
+               return MS(READ_ONCE(ads->ds_ctl5), AR_PacketDur3);
        default:
                return -1;
        }
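
The conversions in this file (and in ar9003_mac.c further down) follow one
mechanical rule: ACCESS_ONCE() used as an assignment target becomes
WRITE_ONCE(), and ACCESS_ONCE() used as a value becomes READ_ONCE().
Besides the clearer syntax, READ_ONCE()/WRITE_ONCE() also handle
non-scalar types, which the plain volatile cast behind ACCESS_ONCE() did
not do reliably with newer compilers. Two pairs taken from the pattern
above:

        /* store: old */ ACCESS_ONCE(ads->ds_link) = i->link;
        /* store: new */ WRITE_ONCE(ads->ds_link, i->link);

        /* load: old */  status = ACCESS_ONCE(ads->ds_txstatus9);
        /* load: new */  status = READ_ONCE(ads->ds_txstatus9);
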
index 08607d7fdb56adc865b4fc70564abca86a49488d..3dbfd86ebe36b3fe032e5814e1630232925b16b9 100644 (file)
@@ -53,7 +53,7 @@ static const struct ar9300_eeprom ar9300_default = {
                .txrxMask =  0x77, /* 4 bits tx and 4 bits rx */
                .opCapFlags = {
                        .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
-                       .eepMisc = 0,
+                       .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN,
                },
                .rfSilent = 0,
                .blueToothOptions = 0,
@@ -631,7 +631,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
                .txrxMask =  0x77, /* 4 bits tx and 4 bits rx */
                .opCapFlags = {
                        .opFlags = AR5416_OPFLAGS_11A,
-                       .eepMisc = 0,
+                       .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN,
                },
                .rfSilent = 0,
                .blueToothOptions = 0,
@@ -1210,7 +1210,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
                .txrxMask =  0x77, /* 4 bits tx and 4 bits rx */
                .opCapFlags = {
                        .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
-                       .eepMisc = 0,
+                       .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN,
                },
                .rfSilent = 0,
                .blueToothOptions = 0,
@@ -1789,7 +1789,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
                .txrxMask =  0x77, /* 4 bits tx and 4 bits rx */
                .opCapFlags = {
                        .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
-                       .eepMisc = 0,
+                       .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN,
                },
                .rfSilent = 0,
                .blueToothOptions = 0,
@@ -2367,7 +2367,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
                .txrxMask =  0x33, /* 4 bits tx and 4 bits rx */
                .opCapFlags = {
                        .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
-                       .eepMisc = 0,
+                       .eepMisc = AR9300_EEPMISC_LITTLE_ENDIAN,
                },
                .rfSilent = 0,
                .blueToothOptions = 0,
@@ -3468,7 +3468,8 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
                                        AR5416_OPFLAGS_N_5G_HT20));
        PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags.opFlags &
                                        AR5416_OPFLAGS_N_5G_HT40));
-       PR_EEP("Big Endian", !!(pBase->opCapFlags.eepMisc & 0x01));
+       PR_EEP("Big Endian", !!(pBase->opCapFlags.eepMisc &
+                               AR5416_EEPMISC_BIG_ENDIAN));
        PR_EEP("RF Silent", pBase->rfSilent);
        PR_EEP("BT option", pBase->blueToothOptions);
        PR_EEP("Device Cap", pBase->deviceCap);
@@ -5497,6 +5498,11 @@ unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
        }
 }
 
+static u8 ar9003_get_eepmisc(struct ath_hw *ah)
+{
+       return ah->eeprom.map4k.baseEepHeader.eepMisc;
+}
+
 const struct eeprom_ops eep_ar9300_ops = {
        .check_eeprom = ath9k_hw_ar9300_check_eeprom,
        .get_eeprom = ath9k_hw_ar9300_get_eeprom,
@@ -5507,5 +5513,6 @@ const struct eeprom_ops eep_ar9300_ops = {
        .set_board_values = ath9k_hw_ar9300_set_board_values,
        .set_addac = ath9k_hw_ar9300_set_addac,
        .set_txpower = ath9k_hw_ar9300_set_txpower,
-       .get_spur_channel = ath9k_hw_ar9300_get_spur_channel
+       .get_spur_channel = ath9k_hw_ar9300_get_spur_channel,
+       .get_eepmisc = ar9003_get_eepmisc
 };
index 107bcfbbe0fb31f494cb4e5a83f1dca9e290db62..bd2269c7de6bc14ed4e877b55b6df192e31bbd8b 100644 (file)
@@ -38,7 +38,6 @@
 #define AR9300_NUM_CTLS_2G           12
 #define AR9300_NUM_BAND_EDGES_5G     8
 #define AR9300_NUM_BAND_EDGES_2G     4
-#define AR9300_EEPMISC_BIG_ENDIAN    0x01
 #define AR9300_EEPMISC_WOW           0x02
 #define AR9300_CUSTOMER_DATA_SIZE    20
 
 #define AR9300_BASE_ADDR 0x3ff
 #define AR9300_BASE_ADDR_512 0x1ff
 
+/* AR5416_EEPMISC_BIG_ENDIAN not set indicates little endian */
+#define AR9300_EEPMISC_LITTLE_ENDIAN 0
+
 #define AR9300_OTP_BASE \
                ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000)
 #define AR9300_OTP_STATUS \
-               ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30018 : 0x15f18)
+               ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x31018 : 0x15f18)
 #define AR9300_OTP_STATUS_TYPE         0x7
 #define AR9300_OTP_STATUS_VALID                0x4
 #define AR9300_OTP_STATUS_ACCESS_BUSY  0x2
 #define AR9300_OTP_STATUS_SM_BUSY      0x1
 #define AR9300_OTP_READ_DATA \
-               ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3001c : 0x15f1c)
+               ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3101c : 0x15f1c)
 
 enum targetPowerHTRates {
        HT_TARGET_RATE_0_8_16,
index da84b705cbcdc476734ac5f2c41dc82443e744d6..cc5bb0a76baf3985c205f17f6db912a9eaad3b6e 100644 (file)
@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
              (i->qcu << AR_TxQcuNum_S) | desc_len;
 
        checksum += val;
-       ACCESS_ONCE(ads->info) = val;
+       WRITE_ONCE(ads->info, val);
 
        checksum += i->link;
-       ACCESS_ONCE(ads->link) = i->link;
+       WRITE_ONCE(ads->link, i->link);
 
        checksum += i->buf_addr[0];
-       ACCESS_ONCE(ads->data0) = i->buf_addr[0];
+       WRITE_ONCE(ads->data0, i->buf_addr[0]);
        checksum += i->buf_addr[1];
-       ACCESS_ONCE(ads->data1) = i->buf_addr[1];
+       WRITE_ONCE(ads->data1, i->buf_addr[1]);
        checksum += i->buf_addr[2];
-       ACCESS_ONCE(ads->data2) = i->buf_addr[2];
+       WRITE_ONCE(ads->data2, i->buf_addr[2]);
        checksum += i->buf_addr[3];
-       ACCESS_ONCE(ads->data3) = i->buf_addr[3];
+       WRITE_ONCE(ads->data3, i->buf_addr[3]);
 
        checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
-       ACCESS_ONCE(ads->ctl3) = val;
+       WRITE_ONCE(ads->ctl3, val);
        checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
-       ACCESS_ONCE(ads->ctl5) = val;
+       WRITE_ONCE(ads->ctl5, val);
        checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
-       ACCESS_ONCE(ads->ctl7) = val;
+       WRITE_ONCE(ads->ctl7, val);
        checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
-       ACCESS_ONCE(ads->ctl9) = val;
+       WRITE_ONCE(ads->ctl9, val);
 
        checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
-       ACCESS_ONCE(ads->ctl10) = checksum;
+       WRITE_ONCE(ads->ctl10, checksum);
 
        if (i->is_first || i->is_last) {
-               ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
+               WRITE_ONCE(ads->ctl13, set11nTries(i->rates, 0)
                        | set11nTries(i->rates, 1)
                        | set11nTries(i->rates, 2)
                        | set11nTries(i->rates, 3)
                        | (i->dur_update ? AR_DurUpdateEna : 0)
-                       | SM(0, AR_BurstDur);
+                       | SM(0, AR_BurstDur));
 
-               ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
+               WRITE_ONCE(ads->ctl14, set11nRate(i->rates, 0)
                        | set11nRate(i->rates, 1)
                        | set11nRate(i->rates, 2)
-                       | set11nRate(i->rates, 3);
+                       | set11nRate(i->rates, 3));
        } else {
-               ACCESS_ONCE(ads->ctl13) = 0;
-               ACCESS_ONCE(ads->ctl14) = 0;
+               WRITE_ONCE(ads->ctl13, 0);
+               WRITE_ONCE(ads->ctl14, 0);
        }
 
        ads->ctl20 = 0;
@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
 
        ctl17 = SM(i->keytype, AR_EncrType);
        if (!i->is_first) {
-               ACCESS_ONCE(ads->ctl11) = 0;
-               ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
-               ACCESS_ONCE(ads->ctl15) = 0;
-               ACCESS_ONCE(ads->ctl16) = 0;
-               ACCESS_ONCE(ads->ctl17) = ctl17;
-               ACCESS_ONCE(ads->ctl18) = 0;
-               ACCESS_ONCE(ads->ctl19) = 0;
+               WRITE_ONCE(ads->ctl11, 0);
+               WRITE_ONCE(ads->ctl12, i->is_last ? 0 : AR_TxMore);
+               WRITE_ONCE(ads->ctl15, 0);
+               WRITE_ONCE(ads->ctl16, 0);
+               WRITE_ONCE(ads->ctl17, ctl17);
+               WRITE_ONCE(ads->ctl18, 0);
+               WRITE_ONCE(ads->ctl19, 0);
                return;
        }
 
-       ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
+       WRITE_ONCE(ads->ctl11, (i->pkt_len & AR_FrameLen)
                | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
                | SM(i->txpower[0], AR_XmitPower0)
                | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
@@ -107,7 +107,7 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
                | (i->flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0)
                | (i->flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
                | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
-                  (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
+                  (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0)));
 
        ctl12 = (i->keyix != ATH9K_TXKEYIX_INVALID ?
                 SM(i->keyix, AR_DestIdx) : 0)
@@ -135,26 +135,26 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
        val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
        ctl12 |= SM(val, AR_PAPRDChainMask);
 
-       ACCESS_ONCE(ads->ctl12) = ctl12;
-       ACCESS_ONCE(ads->ctl17) = ctl17;
+       WRITE_ONCE(ads->ctl12, ctl12);
+       WRITE_ONCE(ads->ctl17, ctl17);
 
-       ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
-               | set11nPktDurRTSCTS(i->rates, 1);
+       WRITE_ONCE(ads->ctl15, set11nPktDurRTSCTS(i->rates, 0)
+               | set11nPktDurRTSCTS(i->rates, 1));
 
-       ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
-               | set11nPktDurRTSCTS(i->rates, 3);
+       WRITE_ONCE(ads->ctl16, set11nPktDurRTSCTS(i->rates, 2)
+               | set11nPktDurRTSCTS(i->rates, 3));
 
-       ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
+       WRITE_ONCE(ads->ctl18, set11nRateFlags(i->rates, 0)
                | set11nRateFlags(i->rates, 1)
                | set11nRateFlags(i->rates, 2)
                | set11nRateFlags(i->rates, 3)
-               | SM(i->rtscts_rate, AR_RTSCTSRate);
+               | SM(i->rtscts_rate, AR_RTSCTSRate));
 
-       ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
+       WRITE_ONCE(ads->ctl19, AR_Not_Sounding);
 
-       ACCESS_ONCE(ads->ctl20) = SM(i->txpower[1], AR_XmitPower1);
-       ACCESS_ONCE(ads->ctl21) = SM(i->txpower[2], AR_XmitPower2);
-       ACCESS_ONCE(ads->ctl22) = SM(i->txpower[3], AR_XmitPower3);
+       WRITE_ONCE(ads->ctl20, SM(i->txpower[1], AR_XmitPower1));
+       WRITE_ONCE(ads->ctl21, SM(i->txpower[2], AR_XmitPower2));
+       WRITE_ONCE(ads->ctl22, SM(i->txpower[3], AR_XmitPower3));
 }
 
 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
@@ -359,7 +359,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
 
        ads = &ah->ts_ring[ah->ts_tail];
 
-       status = ACCESS_ONCE(ads->status8);
+       status = READ_ONCE(ads->status8);
        if ((status & AR_TxDone) == 0)
                return -EINPROGRESS;
 
@@ -385,7 +385,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
 
        if (status & AR_TxOpExceeded)
                ts->ts_status |= ATH9K_TXERR_XTXOP;
-       status = ACCESS_ONCE(ads->status2);
+       status = READ_ONCE(ads->status2);
        ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
        ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
        ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02);
@@ -395,7 +395,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
                ts->ba_high = ads->status6;
        }
 
-       status = ACCESS_ONCE(ads->status3);
+       status = READ_ONCE(ads->status3);
        if (status & AR_ExcessiveRetries)
                ts->ts_status |= ATH9K_TXERR_XRETRY;
        if (status & AR_Filtered)
@@ -420,7 +420,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
        ts->ts_longretry = MS(status, AR_DataFailCnt);
        ts->ts_virtcol = MS(status, AR_VirtRetryCnt);
 
-       status = ACCESS_ONCE(ads->status7);
+       status = READ_ONCE(ads->status7);
        ts->ts_rssi = MS(status, AR_TxRSSICombined);
        ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10);
        ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11);
@@ -437,13 +437,13 @@ static int ar9003_hw_get_duration(struct ath_hw *ah, const void *ds, int index)
 
        switch (index) {
        case 0:
-               return MS(ACCESS_ONCE(adc->ctl15), AR_PacketDur0);
+               return MS(READ_ONCE(adc->ctl15), AR_PacketDur0);
        case 1:
-               return MS(ACCESS_ONCE(adc->ctl15), AR_PacketDur1);
+               return MS(READ_ONCE(adc->ctl15), AR_PacketDur1);
        case 2:
-               return MS(ACCESS_ONCE(adc->ctl16), AR_PacketDur2);
+               return MS(READ_ONCE(adc->ctl16), AR_PacketDur2);
        case 3:
-               return MS(ACCESS_ONCE(adc->ctl16), AR_PacketDur3);
+               return MS(READ_ONCE(adc->ctl16), AR_PacketDur3);
        default:
                return 0;
        }
index 378d3458fddba61435cf720261085d2ab3b544a6..cf076719c27ec983db66794a3f41a5deb6f640d2 100644 (file)
@@ -108,10 +108,12 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
 #define ATH_AGGR_MIN_QDEPTH        2
 /* minimum h/w qdepth for non-aggregated traffic */
 #define ATH_NON_AGGR_MIN_QDEPTH    8
-#define ATH_TX_COMPLETE_POLL_INT   1000
+#define ATH_HW_CHECK_POLL_INT      1000
 #define ATH_TXFIFO_DEPTH           8
 #define ATH_TX_ERROR               0x01
 
+#define ATH_AIRTIME_QUANTUM        300 /* usec */
+
 /* Stop tx traffic 1ms before the GO goes away */
 #define ATH_P2P_PS_STOP_TIME       1000
 
@@ -247,6 +249,9 @@ struct ath_atx_tid {
        bool has_queued;
 };
 
+void __ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid);
+void ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid);
+
 struct ath_node {
        struct ath_softc *sc;
        struct ieee80211_sta *sta; /* station struct we're part of */
@@ -258,9 +263,12 @@ struct ath_node {
 
        bool sleeping;
        bool no_ps_filter;
+       s64 airtime_deficit[IEEE80211_NUM_ACS];
+       u32 airtime_rx_start;
 
 #ifdef CONFIG_ATH9K_STATION_STATISTICS
        struct ath_rx_rate_stats rx_rate_stats;
+       struct ath_airtime_stats airtime_stats;
 #endif
        u8 key_idx[4];
 
@@ -317,10 +325,16 @@ struct ath_rx {
 /* Channel Context */
 /*******************/
 
+struct ath_acq {
+       struct list_head acq_new;
+       struct list_head acq_old;
+       spinlock_t lock;
+};
+
 struct ath_chanctx {
        struct cfg80211_chan_def chandef;
        struct list_head vifs;
-       struct list_head acq[IEEE80211_NUM_ACS];
+       struct ath_acq acq[IEEE80211_NUM_ACS];
        int hw_queue_base;
 
        /* do not dereference, use for comparison only */
@@ -555,6 +569,15 @@ static inline void ath_chanctx_check_active(struct ath_softc *sc,
 
 #endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
 
+static inline void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
+{
+       spin_lock_bh(&txq->axq_lock);
+}
+static inline void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
+{
+       spin_unlock_bh(&txq->axq_lock);
+}
+
 void ath_startrecv(struct ath_softc *sc);
 bool ath_stoprecv(struct ath_softc *sc);
 u32 ath_calcrxfilter(struct ath_softc *sc);
@@ -562,8 +585,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs);
 void ath_rx_cleanup(struct ath_softc *sc);
 int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
-void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq);
-void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq);
 void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq);
 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
 bool ath_drain_all_txq(struct ath_softc *sc);
@@ -575,6 +596,8 @@ void ath_txq_schedule_all(struct ath_softc *sc);
 int ath_tx_init(struct ath_softc *sc, int nbufs);
 int ath_txq_update(struct ath_softc *sc, int qnum,
                   struct ath9k_tx_queue_info *q);
+u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
+                    int width, int half_gi, bool shortPreamble);
 void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
 void ath_assign_seq(struct ath_common *common, struct sk_buff *skb);
 int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
@@ -722,7 +745,7 @@ void ath9k_csa_update(struct ath_softc *sc);
 #define ATH_PAPRD_TIMEOUT         100 /* msecs */
 #define ATH_PLL_WORK_INTERVAL     100
 
-void ath_tx_complete_poll_work(struct work_struct *work);
+void ath_hw_check_work(struct work_struct *work);
 void ath_reset_work(struct work_struct *work);
 bool ath_hw_check(struct ath_softc *sc);
 void ath_hw_pll_work(struct work_struct *work);
@@ -963,6 +986,11 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
 
 #define ATH9K_NUM_CHANCTX  2 /* supports 2 operating channels */
 
+#define AIRTIME_USE_TX         BIT(0)
+#define AIRTIME_USE_RX         BIT(1)
+#define AIRTIME_USE_NEW_QUEUES BIT(2)
+#define AIRTIME_ACTIVE(flags) (!!(flags & (AIRTIME_USE_TX|AIRTIME_USE_RX)))
+
 struct ath_softc {
        struct ieee80211_hw *hw;
        struct device *dev;
@@ -970,6 +998,7 @@ struct ath_softc {
        struct survey_info *cur_survey;
        struct survey_info survey[ATH9K_NUM_CHANNELS];
 
+       spinlock_t intr_lock;
        struct tasklet_struct intr_tq;
        struct tasklet_struct bcon_tasklet;
        struct ath_hw *sc_ah;
@@ -1005,6 +1034,8 @@ struct ath_softc {
        short nbcnvifs;
        unsigned long ps_usecount;
 
+       u16 airtime_flags; /* AIRTIME_* */
+
        struct ath_rx rx;
        struct ath_tx tx;
        struct ath_beacon beacon;
@@ -1023,7 +1054,7 @@ struct ath_softc {
 #ifdef CONFIG_ATH9K_DEBUGFS
        struct ath9k_debug debug;
 #endif
-       struct delayed_work tx_complete_work;
+       struct delayed_work hw_check_work;
        struct delayed_work hw_pll_work;
        struct timer_list sleep_timer;
 
index 929dd70f48eb19f0ffbcd9ee2b80913d168f47e2..b84539d89f1af23a066d2613a7a4a1d455c9929c 100644 (file)
@@ -118,8 +118,11 @@ void ath_chanctx_init(struct ath_softc *sc)
                INIT_LIST_HEAD(&ctx->vifs);
                ctx->txpower = ATH_TXPOWER_MAX;
                ctx->flush_timeout = HZ / 5; /* 200ms */
-               for (j = 0; j < ARRAY_SIZE(ctx->acq); j++)
-                       INIT_LIST_HEAD(&ctx->acq[j]);
+               for (j = 0; j < ARRAY_SIZE(ctx->acq); j++) {
+                       INIT_LIST_HEAD(&ctx->acq[j].acq_new);
+                       INIT_LIST_HEAD(&ctx->acq[j].acq_old);
+                       spin_lock_init(&ctx->acq[j].lock);
+               }
        }
 }
 
@@ -1345,8 +1348,11 @@ void ath9k_offchannel_init(struct ath_softc *sc)
        ctx->txpower = ATH_TXPOWER_MAX;
        cfg80211_chandef_create(&ctx->chandef, chan, NL80211_CHAN_HT20);
 
-       for (i = 0; i < ARRAY_SIZE(ctx->acq); i++)
-               INIT_LIST_HEAD(&ctx->acq[i]);
+       for (i = 0; i < ARRAY_SIZE(ctx->acq); i++) {
+               INIT_LIST_HEAD(&ctx->acq[i].acq_new);
+               INIT_LIST_HEAD(&ctx->acq[i].acq_old);
+               spin_lock_init(&ctx->acq[i].lock);
+       }
 
        sc->offchannel.chan.offchannel = true;
 }
index 7c9788490f7f3371286a473cc3d85cd5747e9465..3376990d3a24803cf190c736f1e0fefd1660d3d9 100644 (file)
@@ -60,6 +60,7 @@ struct ath_rx_stats {
        u32 rx_spectral;
 };
 
+#ifdef CONFIG_ATH9K_COMMON_DEBUG
 void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy,
                                  struct ath_hw *ah);
 void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy,
@@ -70,3 +71,29 @@ void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
                          struct ath_rx_stats *rxstats);
 void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy,
                             struct ath_rx_stats *rxstats);
+#else
+static inline void ath9k_cmn_debug_modal_eeprom(struct dentry *debugfs_phy,
+                                               struct ath_hw *ah)
+{
+}
+
+static inline void ath9k_cmn_debug_base_eeprom(struct dentry *debugfs_phy,
+                                              struct ath_hw *ah)
+{
+}
+
+static inline void ath9k_cmn_debug_stat_rx(struct ath_rx_stats *rxstats,
+                                          struct ath_rx_status *rs)
+{
+}
+
+static inline void ath9k_cmn_debug_recv(struct dentry *debugfs_phy,
+                                       struct ath_rx_stats *rxstats)
+{
+}
+
+static inline void ath9k_cmn_debug_phy_err(struct dentry *debugfs_phy,
+                                          struct ath_rx_stats *rxstats)
+{
+}
+#endif /* CONFIG_ATH9K_COMMON_DEBUG */
index eedf86b67cf517f4b9a400333cbc000ad28441d7..0ffa23a615682400905bf31f22b5c9942a8ce282 100644 (file)
@@ -482,7 +482,7 @@ ath_cmn_is_fft_buf_full(struct ath_spec_scan_priv *spec_priv)
        struct rchan *rc = spec_priv->rfs_chan_spec_scan;
 
        for_each_online_cpu(i)
-               ret += relay_buf_full(rc->buf[i]);
+               ret += relay_buf_full(*per_cpu_ptr(rc->buf, i));
 
        i = num_online_cpus();
 
@@ -1075,7 +1075,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = {
 
 void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
 {
-       if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) {
+       if (spec_priv->rfs_chan_spec_scan) {
                relay_close(spec_priv->rfs_chan_spec_scan);
                spec_priv->rfs_chan_spec_scan = NULL;
        }
index 998743be9c6724de510d1dc33d62eb67df32577d..5d1a51d83aa646006f20298e206bb391830b5a78 100644 (file)
@@ -151,6 +151,7 @@ static inline u8 spectral_bitmap_weight(u8 *bins)
        return bins[0] & 0x3f;
 }
 
+#ifdef CONFIG_ATH9K_COMMON_DEBUG
 void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv, struct dentry *debugfs_phy);
 void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv);
 
@@ -161,5 +162,27 @@ int ath9k_cmn_spectral_scan_config(struct ath_common *common,
                               enum spectral_mode spectral_mode);
 int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv, struct ieee80211_hdr *hdr,
                    struct ath_rx_status *rs, u64 tsf);
+#else
+static inline void ath9k_cmn_spectral_init_debug(struct ath_spec_scan_priv *spec_priv,
+                                                struct dentry *debugfs_phy)
+{
+}
+
+static inline void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
+{
+}
+
+static inline void ath9k_cmn_spectral_scan_trigger(struct ath_common *common,
+                                                  struct ath_spec_scan_priv *spec_priv)
+{
+}
+
+static inline int ath_cmn_process_fft(struct ath_spec_scan_priv *spec_priv,
+                                     struct ieee80211_hdr *hdr,
+                                     struct ath_rx_status *rs, u64 tsf)
+{
+       return 0;
+}
+#endif /* CONFIG_ATH9K_COMMON_DEBUG */
 
 #endif /* SPECTRAL_H */
index 89a94dd5f2cb4182e01ff0671d12c5c24a973c3d..43930c336987ee4fc802b3219a65fe33fd98073e 100644 (file)
@@ -1399,5 +1399,8 @@ int ath9k_init_debug(struct ath_hw *ah)
        debugfs_create_file("tpc", S_IRUSR | S_IWUSR,
                            sc->debug.debugfs_phy, sc, &fops_tpc);
 
+       debugfs_create_u16("airtime_flags", S_IRUSR | S_IWUSR,
+                          sc->debug.debugfs_phy, &sc->airtime_flags);
+
        return 0;
 }
index a078cdd3170da1f0dddfe93ce73ad3042c5fdc5d..249f8141cd0060dc08c9384b9370bc0e6420f870 100644 (file)
@@ -221,6 +221,11 @@ struct ath_rx_rate_stats {
        } cck_stats[4];
 };
 
+struct ath_airtime_stats {
+       u32 rx_airtime;
+       u32 tx_airtime;
+};
+
 #define ANT_MAIN 0
 #define ANT_ALT  1
 
@@ -314,12 +319,20 @@ ath9k_debug_sync_cause(struct ath_softc *sc, u32 sync_cause)
 void ath_debug_rate_stats(struct ath_softc *sc,
                          struct ath_rx_status *rs,
                          struct sk_buff *skb);
+void ath_debug_airtime(struct ath_softc *sc,
+                      struct ath_node *an,
+                      u32 rx, u32 tx);
 #else
 static inline void ath_debug_rate_stats(struct ath_softc *sc,
                                        struct ath_rx_status *rs,
                                        struct sk_buff *skb)
 {
 }
+static inline void ath_debug_airtime(struct ath_softc *sc,
+                             struct ath_node *an,
+                             u32 rx, u32 tx)
+{
+}
 #endif /* CONFIG_ATH9K_STATION_STATISTICS */
 
 #endif /* DEBUG_H */
index 2a3a3c4671bcdba5659a5d99937df68a658605e9..524cbf13ca9c25d4d6fbc80d50f601d67b2c3f97 100644 (file)
@@ -242,6 +242,59 @@ static const struct file_operations fops_node_recv = {
        .llseek = default_llseek,
 };
 
+void ath_debug_airtime(struct ath_softc *sc,
+               struct ath_node *an,
+               u32 rx,
+               u32 tx)
+{
+       struct ath_airtime_stats *astats = &an->airtime_stats;
+
+       astats->rx_airtime += rx;
+       astats->tx_airtime += tx;
+}
+
+static ssize_t read_airtime(struct file *file, char __user *user_buf,
+                       size_t count, loff_t *ppos)
+{
+       struct ath_node *an = file->private_data;
+       struct ath_airtime_stats *astats;
+       static const char *qname[4] = {
+               "VO", "VI", "BE", "BK"
+       };
+       u32 len = 0, size = 256;
+       char *buf;
+       size_t retval;
+       int i;
+
+       buf = kzalloc(size, GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+
+       astats = &an->airtime_stats;
+
+       len += scnprintf(buf + len, size - len, "RX: %u us\n", astats->rx_airtime);
+       len += scnprintf(buf + len, size - len, "TX: %u us\n", astats->tx_airtime);
+       len += scnprintf(buf + len, size - len, "Deficit: ");
+       for (i = 0; i < 4; i++)
+               len += scnprintf(buf+len, size - len, "%s: %lld us ", qname[i], an->airtime_deficit[i]);
+       if (len < size)
+               buf[len++] = '\n';
+
+       retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       kfree(buf);
+
+       return retval;
+}
+
+
+static const struct file_operations fops_airtime = {
+       .read = read_airtime,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+
 void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
                           struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta,
@@ -251,4 +304,5 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
 
        debugfs_create_file("node_aggr", S_IRUGO, dir, an, &fops_node_aggr);
        debugfs_create_file("node_recv", S_IRUGO, dir, an, &fops_node_recv);
+       debugfs_create_file("airtime", S_IRUGO, dir, an, &fops_airtime);
 }
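
read_airtime() above follows the stock debugfs read pattern: format
everything into a kernel buffer with scnprintf(), then let
simple_read_from_buffer() handle the offset bookkeeping and the copy to
user space. A minimal hedged template of the same shape:

static ssize_t example_read(struct file *file, char __user *ubuf,
                            size_t count, loff_t *ppos)
{
        char buf[64];
        size_t len;

        len = scnprintf(buf, sizeof(buf), "value: %d\n", 42);
        return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}
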
index a449588a8009eecf71a8bcc1a2959ef7c30c48de..fb80ec86e53d07690ea186148305a18bcc7fdc9e 100644 (file)
@@ -160,6 +160,7 @@ int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size)
        u16 magic;
        u16 *eepdata;
        int i;
+       bool needs_byteswap = false;
        struct ath_common *common = ath9k_hw_common(ah);
 
        if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
@@ -167,31 +168,40 @@ int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size)
                return -EIO;
        }
 
-       *swap_needed = false;
        if (swab16(magic) == AR5416_EEPROM_MAGIC) {
+               needs_byteswap = true;
+               ath_dbg(common, EEPROM,
+                       "EEPROM needs byte-swapping to correct endianness.\n");
+       } else if (magic != AR5416_EEPROM_MAGIC) {
+               if (ath9k_hw_use_flash(ah)) {
+                       ath_dbg(common, EEPROM,
+                               "Ignoring invalid EEPROM magic (0x%04x).\n",
+                               magic);
+               } else {
+                       ath_err(common,
+                               "Invalid EEPROM magic (0x%04x).\n", magic);
+                       return -EINVAL;
+               }
+       }
+
+       if (needs_byteswap) {
                if (ah->ah_flags & AH_NO_EEP_SWAP) {
                        ath_info(common,
                                 "Ignoring endianness difference in EEPROM magic bytes.\n");
                } else {
-                       *swap_needed = true;
-               }
-       } else if (magic != AR5416_EEPROM_MAGIC) {
-               if (ath9k_hw_use_flash(ah))
-                       return 0;
+                       eepdata = (u16 *)(&ah->eeprom);
 
-               ath_err(common,
-                       "Invalid EEPROM Magic (0x%04x).\n", magic);
-               return -EINVAL;
+                       for (i = 0; i < size; i++)
+                               eepdata[i] = swab16(eepdata[i]);
+               }
        }
 
-       eepdata = (u16 *)(&ah->eeprom);
-
-       if (*swap_needed) {
+       if (ah->eep_ops->get_eepmisc(ah) & AR5416_EEPMISC_BIG_ENDIAN) {
+               *swap_needed = true;
                ath_dbg(common, EEPROM,
-                       "EEPROM Endianness is not native.. Changing.\n");
-
-               for (i = 0; i < size; i++)
-                       eepdata[i] = swab16(eepdata[i]);
+                       "Big Endian EEPROM detected according to EEPMISC register.\n");
+       } else {
+               *swap_needed = false;
        }
 
        return 0;
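The rework above separates two decisions the old code conflated: a byte-swapped magic triggers a one-time swab16 of the whole image (unless AH_NO_EEP_SWAP is set), while the AR5416_EEPMISC_BIG_ENDIAN bit alone now drives *swap_needed for the per-field conversion done later, and an invalid magic is only fatal for non-flash devices. A self-contained model of that flow (constants and names are illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define EEPROM_MAGIC       0x5aa5u
#define EEPMISC_BIG_ENDIAN 0x01u

static uint16_t swab16(uint16_t v)
{
        return (uint16_t)((v << 8) | (v >> 8));
}

static int check_image(uint16_t *img, size_t words, uint8_t eepmisc,
                       bool *swap_needed)
{
        size_t i;

        if (swab16(img[0]) == EEPROM_MAGIC) {
                /* magic stored byte-swapped: fix up the whole image once */
                for (i = 0; i < words; i++)
                        img[i] = swab16(img[i]);
        } else if (img[0] != EEPROM_MAGIC) {
                return -1;      /* invalid magic */
        }

        /* content endianness is signalled separately, via EEPMISC */
        *swap_needed = eepmisc & EEPMISC_BIG_ENDIAN;
        return 0;
}

int main(void)
{
        uint16_t img[4] = { 0xa55a, 0x1234, 0x5678, 0x9abc };
        bool swap = false;

        if (check_image(img, 4, EEPMISC_BIG_ENDIAN, &swap))
                return 1;
        printf("magic ok, big endian content: %s\n", swap ? "yes" : "no");
        return 0;
}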
index 4465c6566f2053d78f095ce01b7c70c984d8c768..30bf722e33ede0f1a778e45d3a01875e66ff1dcb 100644 (file)
 #include <net/cfg80211.h>
 #include "ar9003_eeprom.h"
 
+/* Helpers to swap EEPROM fields, which are stored as __le16 or __le32.
+ * Since the stored width is known for certain, we __force the values to
+ * u16/u32 for the swab calls to silence the sparse checks. These macros
+ * are used when the EEPROM content is big endian (according to
+ * AR5416_EEPMISC_BIG_ENDIAN) and the fields need converting to
+ * __le16/__le32. */
+#define EEPROM_FIELD_SWAB16(field) \
+       (field = (__force __le16)swab16((__force u16)field))
+#define EEPROM_FIELD_SWAB32(field) \
+       (field = (__force __le32)swab32((__force u32)field))
+
 #ifdef __BIG_ENDIAN
 #define AR5416_EEPROM_MAGIC 0x5aa5
 #else
 #define FBIN2FREQ(x, y)                ((y) ? (2300 + x) : (4800 + 5 * x))
 #define ath9k_hw_use_flash(_ah)        (!(_ah->ah_flags & AH_USE_EEPROM))
 
-#define AR5416_VER_MASK (eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK)
 #define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \
                                 ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
 #define OLC_FOR_AR9287_10_LATER (AR_SREV_9287_11_OR_LATER(ah) && \
 
 #define AR5416_EEP_NO_BACK_VER       0x1
 #define AR5416_EEP_VER               0xE
+#define AR5416_EEP_VER_MAJOR_SHIFT   12
+#define AR5416_EEP_VER_MAJOR_MASK    0xF000
 #define AR5416_EEP_VER_MINOR_MASK    0x0FFF
 #define AR5416_EEP_MINOR_VER_2       0x2
 #define AR5416_EEP_MINOR_VER_3       0x3
 #define AR5416_EEP_TXGAIN_ORIGINAL         0
 #define AR5416_EEP_TXGAIN_HIGH_POWER       1
 
+/* Endianness of EEPROM content */
+#define AR5416_EEPMISC_BIG_ENDIAN          0x01
+
 #define AR5416_EEP4K_START_LOC                64
 #define AR5416_EEP4K_NUM_2G_CAL_PIERS         3
 #define AR5416_EEP4K_NUM_2G_CCK_TARGET_POWERS 3
 #define AR9280_TX_GAIN_TABLE_SIZE 22
 
 #define AR9287_EEP_VER               0xE
-#define AR9287_EEP_VER_MINOR_MASK    0xFFF
 #define AR9287_EEP_MINOR_VER_1       0x1
 #define AR9287_EEP_MINOR_VER_2       0x2
 #define AR9287_EEP_MINOR_VER_3       0x3
 #define AR9287_NUM_CTLS                12
 #define AR9287_NUM_BAND_EDGES          4
 #define AR9287_PD_GAIN_ICEPTS           1
-#define AR9287_EEPMISC_BIG_ENDIAN       0x01
 #define AR9287_EEPMISC_WOW              0x02
 #define AR9287_MAX_CHAINS               2
 #define AR9287_ANT_16S                  32
@@ -228,7 +241,6 @@ enum eeprom_param {
        EEP_DB_5,
        EEP_OB_2,
        EEP_DB_2,
-       EEP_MINOR_REV,
        EEP_TX_MASK,
        EEP_RX_MASK,
        EEP_FSTCLK_5G,
@@ -269,19 +281,19 @@ enum ath9k_hal_freq_band {
 };
 
 struct base_eep_header {
-       u16 length;
-       u16 checksum;
-       u16 version;
+       __le16 length;
+       __le16 checksum;
+       __le16 version;
        u8 opCapFlags;
        u8 eepMisc;
-       u16 regDmn[2];
+       __le16 regDmn[2];
        u8 macAddr[6];
        u8 rxMask;
        u8 txMask;
-       u16 rfSilent;
-       u16 blueToothOptions;
-       u16 deviceCap;
-       u32 binBuildNumber;
+       __le16 rfSilent;
+       __le16 blueToothOptions;
+       __le16 deviceCap;
+       __le32 binBuildNumber;
        u8 deviceType;
        u8 pwdclkind;
        u8 fastClk5g;
@@ -299,33 +311,33 @@ struct base_eep_header {
 } __packed;
 
 struct base_eep_header_4k {
-       u16 length;
-       u16 checksum;
-       u16 version;
+       __le16 length;
+       __le16 checksum;
+       __le16 version;
        u8 opCapFlags;
        u8 eepMisc;
-       u16 regDmn[2];
+       __le16 regDmn[2];
        u8 macAddr[6];
        u8 rxMask;
        u8 txMask;
-       u16 rfSilent;
-       u16 blueToothOptions;
-       u16 deviceCap;
-       u32 binBuildNumber;
+       __le16 rfSilent;
+       __le16 blueToothOptions;
+       __le16 deviceCap;
+       __le32 binBuildNumber;
        u8 deviceType;
        u8 txGainType;
 } __packed;
 
 
 struct spur_chan {
-       u16 spurChan;
+       __le16 spurChan;
        u8 spurRangeLow;
        u8 spurRangeHigh;
 } __packed;
 
 struct modal_eep_header {
-       u32 antCtrlChain[AR5416_MAX_CHAINS];
-       u32 antCtrlCommon;
+       __le32 antCtrlChain[AR5416_MAX_CHAINS];
+       __le32 antCtrlCommon;
        u8 antennaGainCh[AR5416_MAX_CHAINS];
        u8 switchSettling;
        u8 txRxAttenCh[AR5416_MAX_CHAINS];
@@ -360,7 +372,7 @@ struct modal_eep_header {
        u8 db_ch1;
        u8 lna_ctl;
        u8 miscBits;
-       u16 xpaBiasLvlFreq[3];
+       __le16 xpaBiasLvlFreq[3];
        u8 futureModal[6];
 
        struct spur_chan spurChans[AR_EEPROM_MODAL_SPURS];
@@ -374,8 +386,8 @@ struct calDataPerFreqOpLoop {
 } __packed;
 
 struct modal_eep_4k_header {
-       u32 antCtrlChain[AR5416_EEP4K_MAX_CHAINS];
-       u32 antCtrlCommon;
+       __le32 antCtrlChain[AR5416_EEP4K_MAX_CHAINS];
+       __le32 antCtrlCommon;
        u8 antennaGainCh[AR5416_EEP4K_MAX_CHAINS];
        u8 switchSettling;
        u8 txRxAttenCh[AR5416_EEP4K_MAX_CHAINS];
@@ -439,19 +451,19 @@ struct modal_eep_4k_header {
 } __packed;
 
 struct base_eep_ar9287_header {
-       u16 length;
-       u16 checksum;
-       u16 version;
+       __le16 length;
+       __le16 checksum;
+       __le16 version;
        u8 opCapFlags;
        u8 eepMisc;
-       u16 regDmn[2];
+       __le16 regDmn[2];
        u8 macAddr[6];
        u8 rxMask;
        u8 txMask;
-       u16 rfSilent;
-       u16 blueToothOptions;
-       u16 deviceCap;
-       u32 binBuildNumber;
+       __le16 rfSilent;
+       __le16 blueToothOptions;
+       __le16 deviceCap;
+       __le32 binBuildNumber;
        u8 deviceType;
        u8 openLoopPwrCntl;
        int8_t pwrTableOffset;
@@ -461,8 +473,8 @@ struct base_eep_ar9287_header {
 } __packed;
 
 struct modal_eep_ar9287_header {
-       u32 antCtrlChain[AR9287_MAX_CHAINS];
-       u32 antCtrlCommon;
+       __le32 antCtrlChain[AR9287_MAX_CHAINS];
+       __le32 antCtrlCommon;
        int8_t antennaGainCh[AR9287_MAX_CHAINS];
        u8 switchSettling;
        u8 txRxAttenCh[AR9287_MAX_CHAINS];
@@ -653,6 +665,7 @@ struct eeprom_ops {
                           u16 cfgCtl, u8 twiceAntennaReduction,
                           u8 powerLimit, bool test);
        u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz);
+       u8 (*get_eepmisc)(struct ath_hw *ah);
 };
 
 void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val);
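With the header fields retyped as __le16/__le32, every access site converts explicitly and sparse can verify the endianness handling. A portable userspace sketch of the idea, keeping wire-format little-endian bytes in a packed struct and converting on load (le16_load() is a stand-in for le16_to_cpu(); __attribute__((packed)) assumes GCC or Clang):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct base_header {
        uint8_t length[2];      /* little endian on the wire, like __le16 */
        uint8_t version[2];
} __attribute__((packed));

static uint16_t le16_load(const uint8_t b[2])
{
        return (uint16_t)(b[0] | ((uint16_t)b[1] << 8));
}

int main(void)
{
        const uint8_t raw[4] = { 0x40, 0x02,    /* length  = 0x0240 */
                                 0x0e, 0xe0 };  /* version = 0xe00e */
        struct base_header hdr;
        uint16_t version;

        memcpy(&hdr, raw, sizeof(hdr));
        version = le16_load(hdr.version);
        printf("length=%u major=%u minor=0x%03x\n",
               (unsigned int)le16_load(hdr.length),
               (unsigned int)((version & 0xF000) >> 12),
               (unsigned int)(version & 0x0FFF));
        return 0;
}

The same mask-and-shift pair (AR5416_EEP_VER_MAJOR_MASK/SHIFT and AR5416_EEP_VER_MINOR_MASK) is what the reworked get_eeprom_ver/get_eeprom_rev callbacks below use in place of the removed AR5416_VER_MASK macro.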
index 5da0826bf1be031472cb8c9a19743c7d2fe21154..b8c0a08066a010c22e4031f0254dde0f66749824 100644 (file)
 
 static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah)
 {
-       return ((ah->eeprom.map4k.baseEepHeader.version >> 12) & 0xF);
+       u16 version = le16_to_cpu(ah->eeprom.map4k.baseEepHeader.version);
+
+       return (version & AR5416_EEP_VER_MAJOR_MASK) >>
+               AR5416_EEP_VER_MAJOR_SHIFT;
 }
 
 static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
 {
-       return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF);
+       u16 version = le16_to_cpu(ah->eeprom.map4k.baseEepHeader.version);
+
+       return version & AR5416_EEP_VER_MINOR_MASK;
 }
 
 #define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
@@ -67,12 +72,12 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
                return __ath9k_hw_4k_fill_eeprom(ah);
 }
 
-#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS)
+#ifdef CONFIG_ATH9K_COMMON_DEBUG
 static u32 ath9k_dump_4k_modal_eeprom(char *buf, u32 len, u32 size,
                                      struct modal_eep_4k_header *modal_hdr)
 {
-       PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]);
-       PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon);
+       PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0]));
+       PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon));
        PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
        PR_EEP("Switch Settle", modal_hdr->switchSettling);
        PR_EEP("Chain0 TxRxAtten", modal_hdr->txRxAttenCh[0]);
@@ -127,6 +132,7 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
 {
        struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
        struct base_eep_header_4k *pBase = &eep->baseEepHeader;
+       u32 binBuildNumber = le32_to_cpu(pBase->binBuildNumber);
 
        if (!dump_base_hdr) {
                len += scnprintf(buf + len, size - len,
@@ -136,12 +142,12 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
                goto out;
        }
 
-       PR_EEP("Major Version", pBase->version >> 12);
-       PR_EEP("Minor Version", pBase->version & 0xFFF);
-       PR_EEP("Checksum", pBase->checksum);
-       PR_EEP("Length", pBase->length);
-       PR_EEP("RegDomain1", pBase->regDmn[0]);
-       PR_EEP("RegDomain2", pBase->regDmn[1]);
+       PR_EEP("Major Version", ath9k_hw_4k_get_eeprom_ver(ah));
+       PR_EEP("Minor Version", ath9k_hw_4k_get_eeprom_rev(ah));
+       PR_EEP("Checksum", le16_to_cpu(pBase->checksum));
+       PR_EEP("Length", le16_to_cpu(pBase->length));
+       PR_EEP("RegDomain1", le16_to_cpu(pBase->regDmn[0]));
+       PR_EEP("RegDomain2", le16_to_cpu(pBase->regDmn[1]));
        PR_EEP("TX Mask", pBase->txMask);
        PR_EEP("RX Mask", pBase->rxMask);
        PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
@@ -154,10 +160,10 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
                                        AR5416_OPFLAGS_N_5G_HT20));
        PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags &
                                        AR5416_OPFLAGS_N_5G_HT40));
-       PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01));
-       PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF);
-       PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF);
-       PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
+       PR_EEP("Big Endian", !!(pBase->eepMisc & AR5416_EEPMISC_BIG_ENDIAN));
+       PR_EEP("Cal Bin Major Ver", (binBuildNumber >> 24) & 0xFF);
+       PR_EEP("Cal Bin Minor Ver", (binBuildNumber >> 16) & 0xFF);
+       PR_EEP("Cal Bin Build", (binBuildNumber >> 8) & 0xFF);
        PR_EEP("TX Gain type", pBase->txGainType);
 
        len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
@@ -189,54 +195,31 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
                return err;
 
        if (need_swap)
-               el = swab16(eep->baseEepHeader.length);
+               el = swab16((__force u16)eep->baseEepHeader.length);
        else
-               el = eep->baseEepHeader.length;
+               el = le16_to_cpu(eep->baseEepHeader.length);
 
        el = min(el / sizeof(u16), SIZE_EEPROM_4K);
        if (!ath9k_hw_nvram_validate_checksum(ah, el))
                return -EINVAL;
 
        if (need_swap) {
-               u32 integer;
-               u16 word;
-
-               word = swab16(eep->baseEepHeader.length);
-               eep->baseEepHeader.length = word;
-
-               word = swab16(eep->baseEepHeader.checksum);
-               eep->baseEepHeader.checksum = word;
-
-               word = swab16(eep->baseEepHeader.version);
-               eep->baseEepHeader.version = word;
-
-               word = swab16(eep->baseEepHeader.regDmn[0]);
-               eep->baseEepHeader.regDmn[0] = word;
-
-               word = swab16(eep->baseEepHeader.regDmn[1]);
-               eep->baseEepHeader.regDmn[1] = word;
-
-               word = swab16(eep->baseEepHeader.rfSilent);
-               eep->baseEepHeader.rfSilent = word;
-
-               word = swab16(eep->baseEepHeader.blueToothOptions);
-               eep->baseEepHeader.blueToothOptions = word;
-
-               word = swab16(eep->baseEepHeader.deviceCap);
-               eep->baseEepHeader.deviceCap = word;
-
-               integer = swab32(eep->modalHeader.antCtrlCommon);
-               eep->modalHeader.antCtrlCommon = integer;
-
-               for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) {
-                       integer = swab32(eep->modalHeader.antCtrlChain[i]);
-                       eep->modalHeader.antCtrlChain[i] = integer;
-               }
-
-               for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
-                       word = swab16(eep->modalHeader.spurChans[i].spurChan);
-                       eep->modalHeader.spurChans[i].spurChan = word;
-               }
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.length);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.checksum);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.version);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[0]);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[1]);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.rfSilent);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.blueToothOptions);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.deviceCap);
+               EEPROM_FIELD_SWAB32(eep->modalHeader.antCtrlCommon);
+
+               for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++)
+                       EEPROM_FIELD_SWAB32(eep->modalHeader.antCtrlChain[i]);
+
+               for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++)
+                       EEPROM_FIELD_SWAB16(
+                               eep->modalHeader.spurChans[i].spurChan);
        }
 
        if (!ath9k_hw_nvram_check_version(ah, AR5416_EEP_VER,
@@ -254,9 +237,6 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
        struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
        struct modal_eep_4k_header *pModal = &eep->modalHeader;
        struct base_eep_header_4k *pBase = &eep->baseEepHeader;
-       u16 ver_minor;
-
-       ver_minor = pBase->version & AR5416_EEP_VER_MINOR_MASK;
 
        switch (param) {
        case EEP_NFTHRESH_2:
@@ -268,19 +248,17 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
        case EEP_MAC_MSW:
                return get_unaligned_be16(pBase->macAddr + 4);
        case EEP_REG_0:
-               return pBase->regDmn[0];
+               return le16_to_cpu(pBase->regDmn[0]);
        case EEP_OP_CAP:
-               return pBase->deviceCap;
+               return le16_to_cpu(pBase->deviceCap);
        case EEP_OP_MODE:
                return pBase->opCapFlags;
        case EEP_RF_SILENT:
-               return pBase->rfSilent;
+               return le16_to_cpu(pBase->rfSilent);
        case EEP_OB_2:
                return pModal->ob_0;
        case EEP_DB_2:
                return pModal->db1_1;
-       case EEP_MINOR_REV:
-               return ver_minor;
        case EEP_TX_MASK:
                return pBase->txMask;
        case EEP_RX_MASK:
@@ -319,14 +297,12 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
 
        xpdMask = pEepData->modalHeader.xpdGain;
 
-       if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
-           AR5416_EEP_MINOR_VER_2) {
+       if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2)
                pdGainOverlap_t2 =
                        pEepData->modalHeader.pdGainOverlap;
-       } else {
+       else
                pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5),
                                            AR_PHY_TPCRG5_PD_GAIN_OVERLAP));
-       }
 
        pCalBChans = pEepData->calFreqPier2G;
        numPiers = AR5416_EEP4K_NUM_2G_CAL_PIERS;
@@ -612,10 +588,8 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
 
        memset(ratesArray, 0, sizeof(ratesArray));
 
-       if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
-           AR5416_EEP_MINOR_VER_2) {
+       if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2)
                ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
-       }
 
        ath9k_hw_set_4k_power_per_rate_table(ah, chan,
                                             &ratesArray[0], cfgCtl,
@@ -728,15 +702,14 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
 {
        ENABLE_REG_RMW_BUFFER(ah);
        REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0,
-               pModal->antCtrlChain[0], 0);
+               le32_to_cpu(pModal->antCtrlChain[0]), 0);
 
        REG_RMW(ah, AR_PHY_TIMING_CTRL4(0),
                SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) |
                SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF),
                AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF);
 
-       if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
-           AR5416_EEP_MINOR_VER_3) {
+       if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_3) {
                txRxAttenLocal = pModal->txRxAttenCh[0];
 
                REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ,
@@ -795,7 +768,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
        pModal = &eep->modalHeader;
        txRxAttenLocal = 23;
 
-       REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon);
+       REG_WRITE(ah, AR_PHY_SWITCH_COM, le32_to_cpu(pModal->antCtrlCommon));
 
        /* Single chain for 4K EEPROM*/
        ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal);
@@ -1014,16 +987,14 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
        REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62,
                      pModal->thresh62);
 
-       if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
-                                               AR5416_EEP_MINOR_VER_2) {
+       if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) {
                REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_DATA_START,
                              pModal->txFrameToDataStart);
                REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON,
                              pModal->txFrameToPaOn);
        }
 
-       if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
-                                               AR5416_EEP_MINOR_VER_3) {
+       if (ath9k_hw_4k_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_3) {
                if (IS_CHAN_HT40(chan))
                        REG_RMW_FIELD(ah, AR_PHY_SETTLING,
                                      AR_PHY_SETTLING_SWITCH,
@@ -1061,7 +1032,12 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
 
 static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
 {
-       return ah->eeprom.map4k.modalHeader.spurChans[i].spurChan;
+       return le16_to_cpu(ah->eeprom.map4k.modalHeader.spurChans[i].spurChan);
+}
+
+static u8 ath9k_hw_4k_get_eepmisc(struct ath_hw *ah)
+{
+       return ah->eeprom.map4k.baseEepHeader.eepMisc;
 }
 
 const struct eeprom_ops eep_4k_ops = {
@@ -1073,5 +1049,6 @@ const struct eeprom_ops eep_4k_ops = {
        .get_eeprom_rev         = ath9k_hw_4k_get_eeprom_rev,
        .set_board_values       = ath9k_hw_4k_set_board_values,
        .set_txpower            = ath9k_hw_4k_set_txpower,
-       .get_spur_channel       = ath9k_hw_4k_get_spur_channel
+       .get_spur_channel       = ath9k_hw_4k_get_spur_channel,
+       .get_eepmisc            = ath9k_hw_4k_get_eepmisc
 };
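The dump hunk above converts binBuildNumber once with le32_to_cpu() and unpacks it locally; the layout it assumes is major version in bits 31:24, minor in 23:16 and build number in 15:8:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* illustrative value only */
        uint32_t bin = (2u << 24) | (7u << 16) | (143u << 8);

        printf("cal bin %u.%u build %u\n",
               (unsigned int)((bin >> 24) & 0xFF),
               (unsigned int)((bin >> 16) & 0xFF),
               (unsigned int)((bin >> 8) & 0xFF));
        return 0;
}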
index 1a019a39eda1048fdf07e969338de768616a97b9..3caa149b10131eca4b444306eff31688a525fa83 100644 (file)
 
 static int ath9k_hw_ar9287_get_eeprom_ver(struct ath_hw *ah)
 {
-       return (ah->eeprom.map9287.baseEepHeader.version >> 12) & 0xF;
+       u16 version = le16_to_cpu(ah->eeprom.map9287.baseEepHeader.version);
+
+       return (version & AR5416_EEP_VER_MAJOR_MASK) >>
+               AR5416_EEP_VER_MAJOR_SHIFT;
 }
 
 static int ath9k_hw_ar9287_get_eeprom_rev(struct ath_hw *ah)
 {
-       return (ah->eeprom.map9287.baseEepHeader.version) & 0xFFF;
+       u16 version = le16_to_cpu(ah->eeprom.map9287.baseEepHeader.version);
+
+       return version & AR5416_EEP_VER_MINOR_MASK;
 }
 
 static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
@@ -70,13 +75,13 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
                return __ath9k_hw_ar9287_fill_eeprom(ah);
 }
 
-#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS)
+#ifdef CONFIG_ATH9K_COMMON_DEBUG
 static u32 ar9287_dump_modal_eeprom(char *buf, u32 len, u32 size,
                                    struct modal_eep_ar9287_header *modal_hdr)
 {
-       PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]);
-       PR_EEP("Chain1 Ant. Control", modal_hdr->antCtrlChain[1]);
-       PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon);
+       PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0]));
+       PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1]));
+       PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon));
        PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
        PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]);
        PR_EEP("Switch Settle", modal_hdr->switchSettling);
@@ -123,6 +128,7 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
 {
        struct ar9287_eeprom *eep = &ah->eeprom.map9287;
        struct base_eep_ar9287_header *pBase = &eep->baseEepHeader;
+       u32 binBuildNumber = le32_to_cpu(pBase->binBuildNumber);
 
        if (!dump_base_hdr) {
                len += scnprintf(buf + len, size - len,
@@ -132,12 +138,12 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
                goto out;
        }
 
-       PR_EEP("Major Version", pBase->version >> 12);
-       PR_EEP("Minor Version", pBase->version & 0xFFF);
-       PR_EEP("Checksum", pBase->checksum);
-       PR_EEP("Length", pBase->length);
-       PR_EEP("RegDomain1", pBase->regDmn[0]);
-       PR_EEP("RegDomain2", pBase->regDmn[1]);
+       PR_EEP("Major Version", ath9k_hw_ar9287_get_eeprom_ver(ah));
+       PR_EEP("Minor Version", ath9k_hw_ar9287_get_eeprom_rev(ah));
+       PR_EEP("Checksum", le16_to_cpu(pBase->checksum));
+       PR_EEP("Length", le16_to_cpu(pBase->length));
+       PR_EEP("RegDomain1", le16_to_cpu(pBase->regDmn[0]));
+       PR_EEP("RegDomain2", le16_to_cpu(pBase->regDmn[1]));
        PR_EEP("TX Mask", pBase->txMask);
        PR_EEP("RX Mask", pBase->rxMask);
        PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
@@ -150,10 +156,10 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
                                        AR5416_OPFLAGS_N_5G_HT20));
        PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags &
                                        AR5416_OPFLAGS_N_5G_HT40));
-       PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01));
-       PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF);
-       PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF);
-       PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
+       PR_EEP("Big Endian", !!(pBase->eepMisc & AR5416_EEPMISC_BIG_ENDIAN));
+       PR_EEP("Cal Bin Major Ver", (binBuildNumber >> 24) & 0xFF);
+       PR_EEP("Cal Bin Minor Ver", (binBuildNumber >> 16) & 0xFF);
+       PR_EEP("Cal Bin Build", (binBuildNumber >> 8) & 0xFF);
        PR_EEP("Power Table Offset", pBase->pwrTableOffset);
        PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
 
@@ -177,8 +183,7 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
 
 static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
 {
-       u32 el, integer;
-       u16 word;
+       u32 el;
        int i, err;
        bool need_swap;
        struct ar9287_eeprom *eep = &ah->eeprom.map9287;
@@ -188,51 +193,31 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
                return err;
 
        if (need_swap)
-               el = swab16(eep->baseEepHeader.length);
+               el = swab16((__force u16)eep->baseEepHeader.length);
        else
-               el = eep->baseEepHeader.length;
+               el = le16_to_cpu(eep->baseEepHeader.length);
 
        el = min(el / sizeof(u16), SIZE_EEPROM_AR9287);
        if (!ath9k_hw_nvram_validate_checksum(ah, el))
                return -EINVAL;
 
        if (need_swap) {
-               word = swab16(eep->baseEepHeader.length);
-               eep->baseEepHeader.length = word;
-
-               word = swab16(eep->baseEepHeader.checksum);
-               eep->baseEepHeader.checksum = word;
-
-               word = swab16(eep->baseEepHeader.version);
-               eep->baseEepHeader.version = word;
-
-               word = swab16(eep->baseEepHeader.regDmn[0]);
-               eep->baseEepHeader.regDmn[0] = word;
-
-               word = swab16(eep->baseEepHeader.regDmn[1]);
-               eep->baseEepHeader.regDmn[1] = word;
-
-               word = swab16(eep->baseEepHeader.rfSilent);
-               eep->baseEepHeader.rfSilent = word;
-
-               word = swab16(eep->baseEepHeader.blueToothOptions);
-               eep->baseEepHeader.blueToothOptions = word;
-
-               word = swab16(eep->baseEepHeader.deviceCap);
-               eep->baseEepHeader.deviceCap = word;
-
-               integer = swab32(eep->modalHeader.antCtrlCommon);
-               eep->modalHeader.antCtrlCommon = integer;
-
-               for (i = 0; i < AR9287_MAX_CHAINS; i++) {
-                       integer = swab32(eep->modalHeader.antCtrlChain[i]);
-                       eep->modalHeader.antCtrlChain[i] = integer;
-               }
-
-               for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
-                       word = swab16(eep->modalHeader.spurChans[i].spurChan);
-                       eep->modalHeader.spurChans[i].spurChan = word;
-               }
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.length);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.checksum);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.version);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[0]);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[1]);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.rfSilent);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.blueToothOptions);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.deviceCap);
+               EEPROM_FIELD_SWAB32(eep->modalHeader.antCtrlCommon);
+
+               for (i = 0; i < AR9287_MAX_CHAINS; i++)
+                       EEPROM_FIELD_SWAB32(eep->modalHeader.antCtrlChain[i]);
+
+               for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++)
+                       EEPROM_FIELD_SWAB16(
+                               eep->modalHeader.spurChans[i].spurChan);
        }
 
        if (!ath9k_hw_nvram_check_version(ah, AR9287_EEP_VER,
@@ -250,9 +235,7 @@ static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah,
        struct ar9287_eeprom *eep = &ah->eeprom.map9287;
        struct modal_eep_ar9287_header *pModal = &eep->modalHeader;
        struct base_eep_ar9287_header *pBase = &eep->baseEepHeader;
-       u16 ver_minor;
-
-       ver_minor = pBase->version & AR9287_EEP_VER_MINOR_MASK;
+       u16 ver_minor = ath9k_hw_ar9287_get_eeprom_rev(ah);
 
        switch (param) {
        case EEP_NFTHRESH_2:
@@ -264,15 +247,13 @@ static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah,
        case EEP_MAC_MSW:
                return get_unaligned_be16(pBase->macAddr + 4);
        case EEP_REG_0:
-               return pBase->regDmn[0];
+               return le16_to_cpu(pBase->regDmn[0]);
        case EEP_OP_CAP:
-               return pBase->deviceCap;
+               return le16_to_cpu(pBase->deviceCap);
        case EEP_OP_MODE:
                return pBase->opCapFlags;
        case EEP_RF_SILENT:
-               return pBase->rfSilent;
-       case EEP_MINOR_REV:
-               return ver_minor;
+               return le16_to_cpu(pBase->rfSilent);
        case EEP_TX_MASK:
                return pBase->txMask;
        case EEP_RX_MASK:
@@ -387,8 +368,7 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
 
        xpdMask = pEepData->modalHeader.xpdGain;
 
-       if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >=
-           AR9287_EEP_MINOR_VER_2)
+       if (ath9k_hw_ar9287_get_eeprom_rev(ah) >= AR9287_EEP_MINOR_VER_2)
                pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap;
        else
                pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5),
@@ -737,8 +717,7 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
 
        memset(ratesArray, 0, sizeof(ratesArray));
 
-       if ((pEepData->baseEepHeader.version & AR9287_EEP_VER_MINOR_MASK) >=
-           AR9287_EEP_MINOR_VER_2)
+       if (ath9k_hw_ar9287_get_eeprom_rev(ah) >= AR9287_EEP_MINOR_VER_2)
                ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
 
        ath9k_hw_set_ar9287_power_per_rate_table(ah, chan,
@@ -879,13 +858,13 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
 
        pModal = &eep->modalHeader;
 
-       REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon);
+       REG_WRITE(ah, AR_PHY_SWITCH_COM, le32_to_cpu(pModal->antCtrlCommon));
 
        for (i = 0; i < AR9287_MAX_CHAINS; i++) {
                regChainOffset = i * 0x1000;
 
                REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
-                         pModal->antCtrlChain[i]);
+                         le32_to_cpu(pModal->antCtrlChain[i]));
 
                REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
                          (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset)
@@ -983,7 +962,14 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
 static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
                                            u16 i, bool is2GHz)
 {
-       return ah->eeprom.map9287.modalHeader.spurChans[i].spurChan;
+       __le16 spur_ch = ah->eeprom.map9287.modalHeader.spurChans[i].spurChan;
+
+       return le16_to_cpu(spur_ch);
+}
+
+static u8 ath9k_hw_ar9287_get_eepmisc(struct ath_hw *ah)
+{
+       return ah->eeprom.map9287.baseEepHeader.eepMisc;
 }
 
 const struct eeprom_ops eep_ar9287_ops = {
@@ -995,5 +981,6 @@ const struct eeprom_ops eep_ar9287_ops = {
        .get_eeprom_rev         = ath9k_hw_ar9287_get_eeprom_rev,
        .set_board_values       = ath9k_hw_ar9287_set_board_values,
        .set_txpower            = ath9k_hw_ar9287_set_txpower,
-       .get_spur_channel       = ath9k_hw_ar9287_get_spur_channel
+       .get_spur_channel       = ath9k_hw_ar9287_get_spur_channel,
+       .get_eepmisc            = ath9k_hw_ar9287_get_eepmisc
 };
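All three check_eeprom variants now funnel their field fix-ups through EEPROM_FIELD_SWAB16/32 instead of open-coded word/integer temporaries. A userspace model of the in-place swab (the macros evaluate their argument more than once, so a plain lvalue is expected; __builtin_bswap32 assumes GCC or Clang):

#include <stdio.h>
#include <stdint.h>

#define FIELD_SWAB16(f) ((f) = (uint16_t)(((f) << 8) | ((f) >> 8)))
#define FIELD_SWAB32(f) ((f) = __builtin_bswap32(f))

int main(void)
{
        uint16_t length = 0x4002;
        uint32_t ant    = 0x11223344;

        FIELD_SWAB16(length);
        FIELD_SWAB32(ant);
        /* prints length=0x0240 ant=0x44332211 */
        printf("length=0x%04x ant=0x%08x\n",
               (unsigned int)length, (unsigned int)ant);
        return 0;
}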
index 959682f7909c0b52324ec9cde6063883a11b02af..56b44fc7a8e6a2a5c9cf0808b776b79157214844 100644 (file)
@@ -79,12 +79,17 @@ static void ath9k_olc_get_pdadcs(struct ath_hw *ah,
 
 static int ath9k_hw_def_get_eeprom_ver(struct ath_hw *ah)
 {
-       return ((ah->eeprom.def.baseEepHeader.version >> 12) & 0xF);
+       u16 version = le16_to_cpu(ah->eeprom.def.baseEepHeader.version);
+
+       return (version & AR5416_EEP_VER_MAJOR_MASK) >>
+               AR5416_EEP_VER_MAJOR_SHIFT;
 }
 
 static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
 {
-       return ((ah->eeprom.def.baseEepHeader.version) & 0xFFF);
+       u16 version = le16_to_cpu(ah->eeprom.def.baseEepHeader.version);
+
+       return version & AR5416_EEP_VER_MINOR_MASK;
 }
 
 #define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
@@ -126,14 +131,14 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
                return __ath9k_hw_def_fill_eeprom(ah);
 }
 
-#if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS)
+#ifdef CONFIG_ATH9K_COMMON_DEBUG
 static u32 ath9k_def_dump_modal_eeprom(char *buf, u32 len, u32 size,
                                       struct modal_eep_header *modal_hdr)
 {
-       PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]);
-       PR_EEP("Chain1 Ant. Control", modal_hdr->antCtrlChain[1]);
-       PR_EEP("Chain2 Ant. Control", modal_hdr->antCtrlChain[2]);
-       PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon);
+       PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0]));
+       PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1]));
+       PR_EEP("Chain2 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[2]));
+       PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon));
        PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
        PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]);
        PR_EEP("Chain2 Ant. Gain", modal_hdr->antennaGainCh[2]);
@@ -189,9 +194,9 @@ static u32 ath9k_def_dump_modal_eeprom(char *buf, u32 len, u32 size,
        PR_EEP("Chain1 OutputBias", modal_hdr->ob_ch1);
        PR_EEP("Chain1 DriverBias", modal_hdr->db_ch1);
        PR_EEP("LNA Control", modal_hdr->lna_ctl);
-       PR_EEP("XPA Bias Freq0", modal_hdr->xpaBiasLvlFreq[0]);
-       PR_EEP("XPA Bias Freq1", modal_hdr->xpaBiasLvlFreq[1]);
-       PR_EEP("XPA Bias Freq2", modal_hdr->xpaBiasLvlFreq[2]);
+       PR_EEP("XPA Bias Freq0", le16_to_cpu(modal_hdr->xpaBiasLvlFreq[0]));
+       PR_EEP("XPA Bias Freq1", le16_to_cpu(modal_hdr->xpaBiasLvlFreq[1]));
+       PR_EEP("XPA Bias Freq2", le16_to_cpu(modal_hdr->xpaBiasLvlFreq[2]));
 
        return len;
 }
@@ -201,6 +206,7 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
 {
        struct ar5416_eeprom_def *eep = &ah->eeprom.def;
        struct base_eep_header *pBase = &eep->baseEepHeader;
+       u32 binBuildNumber = le32_to_cpu(pBase->binBuildNumber);
 
        if (!dump_base_hdr) {
                len += scnprintf(buf + len, size - len,
@@ -214,12 +220,12 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
                goto out;
        }
 
-       PR_EEP("Major Version", pBase->version >> 12);
-       PR_EEP("Minor Version", pBase->version & 0xFFF);
-       PR_EEP("Checksum", pBase->checksum);
-       PR_EEP("Length", pBase->length);
-       PR_EEP("RegDomain1", pBase->regDmn[0]);
-       PR_EEP("RegDomain2", pBase->regDmn[1]);
+       PR_EEP("Major Version", ath9k_hw_def_get_eeprom_ver(ah));
+       PR_EEP("Minor Version", ath9k_hw_def_get_eeprom_rev(ah));
+       PR_EEP("Checksum", le16_to_cpu(pBase->checksum));
+       PR_EEP("Length", le16_to_cpu(pBase->length));
+       PR_EEP("RegDomain1", le16_to_cpu(pBase->regDmn[0]));
+       PR_EEP("RegDomain2", le16_to_cpu(pBase->regDmn[1]));
        PR_EEP("TX Mask", pBase->txMask);
        PR_EEP("RX Mask", pBase->rxMask);
        PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A));
@@ -232,10 +238,10 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
                                        AR5416_OPFLAGS_N_5G_HT20));
        PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags &
                                        AR5416_OPFLAGS_N_5G_HT40));
-       PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01));
-       PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF);
-       PR_EEP("Cal Bin Minor Ver", (pBase->binBuildNumber >> 16) & 0xFF);
-       PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF);
+       PR_EEP("Big Endian", !!(pBase->eepMisc & AR5416_EEPMISC_BIG_ENDIAN));
+       PR_EEP("Cal Bin Major Ver", (binBuildNumber >> 24) & 0xFF);
+       PR_EEP("Cal Bin Minor Ver", (binBuildNumber >> 16) & 0xFF);
+       PR_EEP("Cal Bin Build", (binBuildNumber >> 8) & 0xFF);
        PR_EEP("OpenLoop Power Ctrl", pBase->openLoopPwrCntl);
 
        len += scnprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
@@ -268,61 +274,40 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
                return err;
 
        if (need_swap)
-               el = swab16(eep->baseEepHeader.length);
+               el = swab16((__force u16)eep->baseEepHeader.length);
        else
-               el = eep->baseEepHeader.length;
+               el = le16_to_cpu(eep->baseEepHeader.length);
 
        el = min(el / sizeof(u16), SIZE_EEPROM_DEF);
        if (!ath9k_hw_nvram_validate_checksum(ah, el))
                return -EINVAL;
 
        if (need_swap) {
-               u32 integer, j;
-               u16 word;
-
-               word = swab16(eep->baseEepHeader.length);
-               eep->baseEepHeader.length = word;
-
-               word = swab16(eep->baseEepHeader.checksum);
-               eep->baseEepHeader.checksum = word;
-
-               word = swab16(eep->baseEepHeader.version);
-               eep->baseEepHeader.version = word;
-
-               word = swab16(eep->baseEepHeader.regDmn[0]);
-               eep->baseEepHeader.regDmn[0] = word;
-
-               word = swab16(eep->baseEepHeader.regDmn[1]);
-               eep->baseEepHeader.regDmn[1] = word;
-
-               word = swab16(eep->baseEepHeader.rfSilent);
-               eep->baseEepHeader.rfSilent = word;
-
-               word = swab16(eep->baseEepHeader.blueToothOptions);
-               eep->baseEepHeader.blueToothOptions = word;
+               u32 j;
 
-               word = swab16(eep->baseEepHeader.deviceCap);
-               eep->baseEepHeader.deviceCap = word;
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.length);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.checksum);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.version);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[0]);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.regDmn[1]);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.rfSilent);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.blueToothOptions);
+               EEPROM_FIELD_SWAB16(eep->baseEepHeader.deviceCap);
 
                for (j = 0; j < ARRAY_SIZE(eep->modalHeader); j++) {
                        struct modal_eep_header *pModal =
                                &eep->modalHeader[j];
-                       integer = swab32(pModal->antCtrlCommon);
-                       pModal->antCtrlCommon = integer;
+                       EEPROM_FIELD_SWAB32(pModal->antCtrlCommon);
 
-                       for (i = 0; i < AR5416_MAX_CHAINS; i++) {
-                               integer = swab32(pModal->antCtrlChain[i]);
-                               pModal->antCtrlChain[i] = integer;
-                       }
-                       for (i = 0; i < 3; i++) {
-                               word = swab16(pModal->xpaBiasLvlFreq[i]);
-                               pModal->xpaBiasLvlFreq[i] = word;
-                       }
+                       for (i = 0; i < AR5416_MAX_CHAINS; i++)
+                               EEPROM_FIELD_SWAB32(pModal->antCtrlChain[i]);
 
-                       for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
-                               word = swab16(pModal->spurChans[i].spurChan);
-                               pModal->spurChans[i].spurChan = word;
-                       }
+                       for (i = 0; i < 3; i++)
+                               EEPROM_FIELD_SWAB16(pModal->xpaBiasLvlFreq[i]);
+
+                       for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++)
+                               EEPROM_FIELD_SWAB16(
+                                       pModal->spurChans[i].spurChan);
                }
        }
 
@@ -332,7 +317,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
 
        /* Enable fixup for AR_AN_TOP2 if necessary */
        if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
-           ((eep->baseEepHeader.version & 0xff) > 0x0a) &&
+           ((le16_to_cpu(eep->baseEepHeader.version) & 0xff) > 0x0a) &&
            (eep->baseEepHeader.pwdclkind == 0))
                ah->need_an_top2_fixup = true;
 
@@ -365,13 +350,13 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
        case EEP_MAC_MSW:
                return get_unaligned_be16(pBase->macAddr + 4);
        case EEP_REG_0:
-               return pBase->regDmn[0];
+               return le16_to_cpu(pBase->regDmn[0]);
        case EEP_OP_CAP:
-               return pBase->deviceCap;
+               return le16_to_cpu(pBase->deviceCap);
        case EEP_OP_MODE:
                return pBase->opCapFlags;
        case EEP_RF_SILENT:
-               return pBase->rfSilent;
+               return le16_to_cpu(pBase->rfSilent);
        case EEP_OB_5:
                return pModal[0].ob;
        case EEP_DB_5:
@@ -380,8 +365,6 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
                return pModal[1].ob;
        case EEP_DB_2:
                return pModal[1].db;
-       case EEP_MINOR_REV:
-               return AR5416_VER_MASK;
        case EEP_TX_MASK:
                return pBase->txMask;
        case EEP_RX_MASK:
@@ -393,27 +376,27 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
        case EEP_TXGAIN_TYPE:
                return pBase->txGainType;
        case EEP_OL_PWRCTRL:
-               if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19)
+               if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_19)
                        return pBase->openLoopPwrCntl ? true : false;
                else
                        return false;
        case EEP_RC_CHAIN_MASK:
-               if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19)
+               if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_19)
                        return pBase->rcChainMask;
                else
                        return 0;
        case EEP_DAC_HPWR_5G:
-               if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20)
+               if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_20)
                        return pBase->dacHiPwrMode_5G;
                else
                        return 0;
        case EEP_FRAC_N_5G:
-               if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_22)
+               if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_22)
                        return pBase->frac_n_5g;
                else
                        return 0;
        case EEP_PWR_TABLE_OFFSET:
-               if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_21)
+               if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_21)
                        return pBase->pwr_table_offset;
                else
                        return AR5416_PWR_TABLE_OFFSET_DB;
@@ -436,7 +419,7 @@ static void ath9k_hw_def_set_gain(struct ath_hw *ah,
                                  u8 txRxAttenLocal, int regChainOffset, int i)
 {
        ENABLE_REG_RMW_BUFFER(ah);
-       if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
+       if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_3) {
                txRxAttenLocal = pModal->txRxAttenCh[i];
 
                if (AR_SREV_9280_20_OR_LATER(ah)) {
@@ -487,11 +470,13 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
        struct ar5416_eeprom_def *eep = &ah->eeprom.def;
        int i, regChainOffset;
        u8 txRxAttenLocal;
+       u32 antCtrlCommon;
 
        pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
        txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44;
+       antCtrlCommon = le32_to_cpu(pModal->antCtrlCommon);
 
-       REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon & 0xffff);
+       REG_WRITE(ah, AR_PHY_SWITCH_COM, antCtrlCommon & 0xffff);
 
        for (i = 0; i < AR5416_MAX_CHAINS; i++) {
                if (AR_SREV_9280(ah)) {
@@ -505,7 +490,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
                        regChainOffset = i * 0x1000;
 
                REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset,
-                         pModal->antCtrlChain[i]);
+                         le32_to_cpu(pModal->antCtrlChain[i]));
 
                REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset,
                          (REG_READ(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset) &
@@ -605,7 +590,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
                              pModal->thresh62);
        }
 
-       if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_2) {
+       if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) {
                REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
                              AR_PHY_TX_END_DATA_START,
                              pModal->txFrameToDataStart);
@@ -613,7 +598,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
                              pModal->txFrameToPaOn);
        }
 
-       if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) {
+       if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_3) {
                if (IS_CHAN_HT40(chan))
                        REG_RMW_FIELD(ah, AR_PHY_SETTLING,
                                      AR_PHY_SETTLING_SWITCH,
@@ -621,13 +606,14 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
        }
 
        if (AR_SREV_9280_20_OR_LATER(ah) &&
-           AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_19)
+           ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_19)
                REG_RMW_FIELD(ah, AR_PHY_CCK_TX_CTRL,
                              AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK,
                              pModal->miscBits);
 
 
-       if (AR_SREV_9280_20(ah) && AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_20) {
+       if (AR_SREV_9280_20(ah) &&
+           ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_20) {
                if (IS_CHAN_2GHZ(chan))
                        REG_RMW_FIELD(ah, AR_AN_TOP1, AR_AN_TOP1_DACIPMODE,
                                        eep->baseEepHeader.dacLpMode);
@@ -651,7 +637,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
 static void ath9k_hw_def_set_addac(struct ath_hw *ah,
                                   struct ath9k_channel *chan)
 {
-#define XPA_LVL_FREQ(cnt) (pModal->xpaBiasLvlFreq[cnt])
+#define XPA_LVL_FREQ(cnt) (le16_to_cpu(pModal->xpaBiasLvlFreq[cnt]))
        struct modal_eep_header *pModal;
        struct ar5416_eeprom_def *eep = &ah->eeprom.def;
        u8 biaslevel;
@@ -798,8 +784,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
 
        pwr_table_offset = ah->eep_ops->get_eeprom(ah, EEP_PWR_TABLE_OFFSET);
 
-       if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
-           AR5416_EEP_MINOR_VER_2) {
+       if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2) {
                pdGainOverlap_t2 =
                        pEepData->modalHeader[modalIdx].pdGainOverlap;
        } else {
@@ -1171,10 +1156,8 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
 
        memset(ratesArray, 0, sizeof(ratesArray));
 
-       if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
-           AR5416_EEP_MINOR_VER_2) {
+       if (ath9k_hw_def_get_eeprom_rev(ah) >= AR5416_EEP_MINOR_VER_2)
                ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc;
-       }
 
        ath9k_hw_set_def_power_per_rate_table(ah, chan,
                                               &ratesArray[0], cfgCtl,
@@ -1314,7 +1297,14 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
 
 static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
 {
-       return ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan;
+       __le16 spch = ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan;
+
+       return le16_to_cpu(spch);
+}
+
+static u8 ath9k_hw_def_get_eepmisc(struct ath_hw *ah)
+{
+       return ah->eeprom.def.baseEepHeader.eepMisc;
 }
 
 const struct eeprom_ops eep_def_ops = {
@@ -1327,5 +1317,6 @@ const struct eeprom_ops eep_def_ops = {
        .set_board_values       = ath9k_hw_def_set_board_values,
        .set_addac              = ath9k_hw_def_set_addac,
        .set_txpower            = ath9k_hw_def_set_txpower,
-       .get_spur_channel       = ath9k_hw_def_get_spur_channel
+       .get_spur_channel       = ath9k_hw_def_get_spur_channel,
+       .get_eepmisc            = ath9k_hw_def_get_eepmisc
 };
index a35f78be8dec68d65f3fe7ff77a2f5edc244bd73..8c5c2dd8fa7f13c4d12ebb98fa354deff212e124 100644 (file)
@@ -731,7 +731,7 @@ u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
                udelay(100);
 
                if (WARN_ON_ONCE(i >= 100)) {
-                       ath_err(common, "PLL4 meaurement not done\n");
+                       ath_err(common, "PLL4 measurement not done\n");
                        break;
                }
 
@@ -1603,6 +1603,10 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
        int count = 50;
        u32 reg, last_val;
 
+       /* Check if chip failed to wake up */
+       if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
+               return false;
+
        if (AR_SREV_9300(ah))
                return !ath9k_hw_detect_mac_hang(ah);
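The early return added to ath9k_hw_check_alive() treats 0xdeadbeef from AR_CFG as "chip failed to wake up": reads from a powered-down or detached device come back as the bus glue's error pattern rather than real register contents, so the probe happens before any hang detection is trusted. A toy model of the sentinel check (the register stub is a stand-in; only the compare mirrors the patch):

#include <stdio.h>
#include <stdint.h>

#define DEAD_PATTERN 0xdeadbeefu

static uint32_t reg_read_stub(int awake)
{
        /* a failed wakeup returns the error pattern for every read */
        return awake ? 0x00000040u : DEAD_PATTERN;
}

static int chip_alive(int awake)
{
        return reg_read_stub(awake) != DEAD_PATTERN;
}

int main(void)
{
        printf("asleep: alive=%d\n", chip_alive(0));
        printf("awake:  alive=%d\n", chip_alive(1));
        return 0;
}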
 
index 20794660d6aec42cbb4a5dcc52944127512e40e7..fa4b3cc1ba22c8dcf3882dd51b738dd412ddea75 100644 (file)
@@ -620,6 +620,8 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
 
        /* Will be cleared in ath9k_start() */
        set_bit(ATH_OP_INVALID, &common->op_flags);
+       sc->airtime_flags = (AIRTIME_USE_TX | AIRTIME_USE_RX |
+                            AIRTIME_USE_NEW_QUEUES);
 
        sc->sc_ah = ah;
        sc->dfs_detector = dfs_pattern_detector_init(common, NL80211_DFS_UNSET);
@@ -667,6 +669,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
                common->bt_ant_diversity = 1;
 
        spin_lock_init(&common->cc_lock);
+       spin_lock_init(&sc->intr_lock);
        spin_lock_init(&sc->sc_serial_rw);
        spin_lock_init(&sc->sc_pm_lock);
        spin_lock_init(&sc->chan_lock);
@@ -679,6 +682,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
        INIT_WORK(&sc->hw_reset_work, ath_reset_work);
        INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
        INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
+       INIT_DELAYED_WORK(&sc->hw_check_work, ath_hw_check_work);
 
        ath9k_init_channel_context(sc);
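ath9k_init_softc() now seeds sc->airtime_flags so both RX and TX airtime are accounted and the new per-AC queues are used by default. A sketch of the bitmask policy (the flag names come from the hunk above; the bit values are assumptions, not taken from the header):

#include <stdio.h>

#define AIRTIME_USE_TX         (1u << 0)        /* assumed values */
#define AIRTIME_USE_RX         (1u << 1)
#define AIRTIME_USE_NEW_QUEUES (1u << 2)

int main(void)
{
        unsigned int airtime_flags = AIRTIME_USE_TX | AIRTIME_USE_RX |
                                     AIRTIME_USE_NEW_QUEUES;

        printf("tx=%d rx=%d new_queues=%d\n",
               !!(airtime_flags & AIRTIME_USE_TX),
               !!(airtime_flags & AIRTIME_USE_RX),
               !!(airtime_flags & AIRTIME_USE_NEW_QUEUES));
        return 0;
}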
 
index 5ad0feeebc8669c530e6ae4aeba536f4375840e7..27c50562dc47017f5f07f7dd9249f8b2f86c59bf 100644 (file)
  * TX polling - checks if the TX engine is stuck somewhere
  * and issues a chip reset if so.
  */
-void ath_tx_complete_poll_work(struct work_struct *work)
+static bool ath_tx_complete_check(struct ath_softc *sc)
 {
-       struct ath_softc *sc = container_of(work, struct ath_softc,
-                                           tx_complete_work.work);
        struct ath_txq *txq;
        int i;
-       bool needreset = false;
-
 
-       if (sc->tx99_state) {
-               ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
-                       "skip tx hung detection on tx99\n");
-               return;
-       }
+       if (sc->tx99_state)
+               return true;
 
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                txq = sc->tx.txq_map[i];
@@ -41,25 +34,36 @@ void ath_tx_complete_poll_work(struct work_struct *work)
                ath_txq_lock(sc, txq);
                if (txq->axq_depth) {
                        if (txq->axq_tx_inprogress) {
-                               needreset = true;
                                ath_txq_unlock(sc, txq);
-                               break;
-                       } else {
-                               txq->axq_tx_inprogress = true;
+                               goto reset;
                        }
+
+                       txq->axq_tx_inprogress = true;
                }
                ath_txq_unlock(sc, txq);
        }
 
-       if (needreset) {
-               ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
-                       "tx hung, resetting the chip\n");
-               ath9k_queue_reset(sc, RESET_TYPE_TX_HANG);
+       return true;
+
+reset:
+       ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+               "tx hung, resetting the chip\n");
+       ath9k_queue_reset(sc, RESET_TYPE_TX_HANG);
+       return false;
+
+}
+
+void ath_hw_check_work(struct work_struct *work)
+{
+       struct ath_softc *sc = container_of(work, struct ath_softc,
+                                           hw_check_work.work);
+
+       if (!ath_hw_check(sc) ||
+           !ath_tx_complete_check(sc))
                return;
-       }
 
-       ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
-                                    msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
+       ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,
+                                    msecs_to_jiffies(ATH_HW_CHECK_POLL_INT));
 }
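ath_hw_check_work() folds the old tx-complete poll and the liveness check into one delayed work that re-arms itself only when both checks pass; a failed check queues a reset and simply stops rescheduling. A toy model of the two-strike hang detection (names and the fixed queue depth are illustrative):

#include <stdio.h>
#include <stdbool.h>

static bool hw_check(void)
{
        return true;            /* pretend the chip responds */
}

static bool tx_complete_check(int depth, bool *inprogress)
{
        if (depth && *inprogress)
                return false;   /* still stuck since last pass: hung */
        if (depth)
                *inprogress = true;     /* first strike: flag and wait */
        return true;
}

int main(void)
{
        bool inprogress = false;
        int pass;

        for (pass = 1; pass <= 3; pass++) {
                if (!hw_check() || !tx_complete_check(2, &inprogress)) {
                        printf("pass %d: reset queued, poll stops\n", pass);
                        return 0;
                }
                printf("pass %d: ok, rearmed\n", pass);
        }
        return 0;
}

In the driver the corresponding axq_tx_inprogress flag is cleared as frames complete, so only a queue that stays busy without making progress across two polls triggers the reset.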
 
 /*
index bba85d1a6cd1bc44852d1845bede4918bc2b5ec7..d937c39b3a0b3569a7e4a67ec24973e17c9e4211 100644 (file)
@@ -805,21 +805,12 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah)
 }
 EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
 
-void ath9k_hw_enable_interrupts(struct ath_hw *ah)
+static void __ath9k_hw_enable_interrupts(struct ath_hw *ah)
 {
        struct ath_common *common = ath9k_hw_common(ah);
        u32 sync_default = AR_INTR_SYNC_DEFAULT;
        u32 async_mask;
 
-       if (!(ah->imask & ATH9K_INT_GLOBAL))
-               return;
-
-       if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
-               ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
-                       atomic_read(&ah->intr_ref_cnt));
-               return;
-       }
-
        if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
            AR_SREV_9561(ah))
                sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
@@ -841,6 +832,39 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
        ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
                REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
 }
+
+void ath9k_hw_resume_interrupts(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (!(ah->imask & ATH9K_INT_GLOBAL))
+               return;
+
+       if (atomic_read(&ah->intr_ref_cnt) != 0) {
+               ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
+                       atomic_read(&ah->intr_ref_cnt));
+               return;
+       }
+
+       __ath9k_hw_enable_interrupts(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_resume_interrupts);
+
+void ath9k_hw_enable_interrupts(struct ath_hw *ah)
+{
+       struct ath_common *common = ath9k_hw_common(ah);
+
+       if (!(ah->imask & ATH9K_INT_GLOBAL))
+               return;
+
+       if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
+               ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
+                       atomic_read(&ah->intr_ref_cnt));
+               return;
+       }
+
+       __ath9k_hw_enable_interrupts(ah);
+}
 EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
 
 void ath9k_hw_set_interrupts(struct ath_hw *ah)
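Splitting out __ath9k_hw_enable_interrupts() yields two entry points with different reference-count behaviour: ath9k_hw_enable_interrupts() takes a reference (atomic_inc_and_test) and arms the chip when the count reaches zero, while the new ath9k_hw_resume_interrupts() re-arms only if no disable reference is outstanding, without touching the count. That is what lets the tasklet's reset paths (main.c hunks below) drop their compensating atomic_inc() calls. A single-threaded userspace model of the counting (plain ints stand in for the atomics):

#include <stdio.h>

static int ref_cnt = -1;        /* -1: one disable outstanding */
static int hw_armed;

static void enable_intr(void)
{
        if (++ref_cnt == 0)     /* models atomic_inc_and_test() */
                hw_armed = 1;
}

static void resume_intr(void)
{
        if (ref_cnt == 0)       /* re-arm only with no refs held */
                hw_armed = 1;
}

static void disable_intr(void)
{
        hw_armed = 0;
        --ref_cnt;
}

int main(void)
{
        enable_intr();          /* -1 -> 0: armed */
        printf("armed=%d\n", hw_armed);
        disable_intr();         /* ISR masked the chip: 0 -> -1 */
        resume_intr();          /* disable ref still held: stays off */
        printf("armed=%d\n", hw_armed);
        enable_intr();          /* matching enable: -1 -> 0, armed */
        printf("armed=%d\n", hw_armed);
        return 0;
}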
index 3bab01435a8613263a0e6bd2365f9e0b37f44660..770fc11b41d183eae8f6b6a4599347732af88f1d 100644 (file)
@@ -744,6 +744,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah);
 void ath9k_hw_enable_interrupts(struct ath_hw *ah);
 void ath9k_hw_disable_interrupts(struct ath_hw *ah);
 void ath9k_hw_kill_interrupts(struct ath_hw *ah);
+void ath9k_hw_resume_interrupts(struct ath_hw *ah);
 
 void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
 
index 59e3bd0f4c2062b599a6a5527af27bbf63b87d58..9e65d14e7b1e4984e566ef2ed29b0a2fd7bbf56f 100644 (file)
@@ -70,10 +70,10 @@ static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq,
                goto out;
 
        if (txq->mac80211_qnum >= 0) {
-               struct list_head *list;
+               struct ath_acq *acq;
 
-               list = &sc->cur_chan->acq[txq->mac80211_qnum];
-               if (!list_empty(list))
+               acq = &sc->cur_chan->acq[txq->mac80211_qnum];
+               if (!list_empty(&acq->acq_new) || !list_empty(&acq->acq_old))
                        pending = true;
        }
 out:
@@ -181,7 +181,7 @@ void ath9k_ps_restore(struct ath_softc *sc)
 static void __ath_cancel_work(struct ath_softc *sc)
 {
        cancel_work_sync(&sc->paprd_work);
-       cancel_delayed_work_sync(&sc->tx_complete_work);
+       cancel_delayed_work_sync(&sc->hw_check_work);
        cancel_delayed_work_sync(&sc->hw_pll_work);
 
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
@@ -198,7 +198,8 @@ void ath_cancel_work(struct ath_softc *sc)
 
 void ath_restart_work(struct ath_softc *sc)
 {
-       ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
+       ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,
+                                    ATH_HW_CHECK_POLL_INT);
 
        if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
                ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
@@ -373,21 +374,20 @@ void ath9k_tasklet(unsigned long data)
        struct ath_common *common = ath9k_hw_common(ah);
        enum ath_reset_type type;
        unsigned long flags;
-       u32 status = sc->intrstatus;
+       u32 status;
        u32 rxmask;
 
+       spin_lock_irqsave(&sc->intr_lock, flags);
+       status = sc->intrstatus;
+       sc->intrstatus = 0;
+       spin_unlock_irqrestore(&sc->intr_lock, flags);
+
        ath9k_ps_wakeup(sc);
        spin_lock(&sc->sc_pcu_lock);
 
        if (status & ATH9K_INT_FATAL) {
                type = RESET_TYPE_FATAL_INT;
                ath9k_queue_reset(sc, type);
-
-               /*
-                * Increment the ref. counter here so that
-                * interrupts are enabled in the reset routine.
-                */
-               atomic_inc(&ah->intr_ref_cnt);
                ath_dbg(common, RESET, "FATAL: Skipping interrupts\n");
                goto out;
        }
@@ -403,11 +403,6 @@ void ath9k_tasklet(unsigned long data)
                        type = RESET_TYPE_BB_WATCHDOG;
                        ath9k_queue_reset(sc, type);
 
-                       /*
-                        * Increment the ref. counter here so that
-                        * interrupts are enabled in the reset routine.
-                        */
-                       atomic_inc(&ah->intr_ref_cnt);
                        ath_dbg(common, RESET,
                                "BB_WATCHDOG: Skipping interrupts\n");
                        goto out;
@@ -420,7 +415,6 @@ void ath9k_tasklet(unsigned long data)
                if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) {
                        type = RESET_TYPE_TX_GTT;
                        ath9k_queue_reset(sc, type);
-                       atomic_inc(&ah->intr_ref_cnt);
                        ath_dbg(common, RESET,
                                "GTT: Skipping interrupts\n");
                        goto out;
@@ -477,7 +471,7 @@ void ath9k_tasklet(unsigned long data)
        ath9k_btcoex_handle_interrupt(sc, status);
 
        /* re-enable hardware interrupt */
-       ath9k_hw_enable_interrupts(ah);
+       ath9k_hw_resume_interrupts(ah);
 out:
        spin_unlock(&sc->sc_pcu_lock);
        ath9k_ps_restore(sc);
@@ -541,7 +535,9 @@ irqreturn_t ath_isr(int irq, void *dev)
                return IRQ_NONE;
 
        /* Cache the status */
-       sc->intrstatus = status;
+       spin_lock(&sc->intr_lock);
+       sc->intrstatus |= status;
+       spin_unlock(&sc->intr_lock);
 
        if (status & SCHED_INTR)
                sched = true;
@@ -587,7 +583,7 @@ chip_reset:
 
        if (sched) {
                /* turn off every interrupt */
-               ath9k_hw_disable_interrupts(ah);
+               ath9k_hw_kill_interrupts(ah);
                tasklet_schedule(&sc->intr_tq);
        }
 
@@ -2091,7 +2087,7 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
        int timeout;
        bool drain_txq;
 
-       cancel_delayed_work_sync(&sc->tx_complete_work);
+       cancel_delayed_work_sync(&sc->hw_check_work);
 
        if (ah->ah_flags & AH_UNPLUGGED) {
                ath_dbg(common, ANY, "Device has been unplugged!\n");
@@ -2129,7 +2125,8 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
                ath9k_ps_restore(sc);
        }
 
-       ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
+       ieee80211_queue_delayed_work(hw, &sc->hw_check_work,
+                                    ATH_HW_CHECK_POLL_INT);
 }
 
 static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
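Two races are closed in this file. First, ath_isr() now ORs new events into sc->intrstatus under intr_lock instead of overwriting it, and ath9k_tasklet() atomically snapshots and clears the word, so interrupts that arrive between scheduling and running the tasklet are no longer lost. Second, the ISR masks with ath9k_hw_kill_interrupts() (no refcount change) and the tasklet unmasks with ath9k_hw_resume_interrupts(), keeping the enable/disable count balanced. The latch pattern, sketched on its own:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_SPINLOCK(intr_lock);
    static u32 intrstatus;

    /* hard-IRQ side: accumulate, never overwrite */
    static void isr_latch(u32 status)
    {
            spin_lock(&intr_lock);
            intrstatus |= status;
            spin_unlock(&intr_lock);
    }

    /* tasklet side: take ownership of everything pending */
    static u32 tasklet_fetch(void)
    {
            unsigned long flags;
            u32 status;

            spin_lock_irqsave(&intr_lock, flags);
            status = intrstatus;
            intrstatus = 0;
            spin_unlock_irqrestore(&intr_lock, flags);
            return status;
    }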
index fb4ba27d92b7b8010d1d6ce8c5b155635c088fbf..d79837fe333f45ebd3203649d039e891aa0d0125 100644 (file)
@@ -1002,6 +1002,70 @@ static void ath9k_apply_ampdu_details(struct ath_softc *sc,
        }
 }
 
+static void ath_rx_count_airtime(struct ath_softc *sc,
+                                struct ath_rx_status *rs,
+                                struct sk_buff *skb)
+{
+       struct ath_node *an;
+       struct ath_acq *acq;
+       struct ath_vif *avp;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
+       struct ieee80211_sta *sta;
+       struct ieee80211_rx_status *rxs;
+       const struct ieee80211_rate *rate;
+       bool is_sgi, is_40, is_sp;
+       int phy;
+       u16 len = rs->rs_datalen;
+       u32 airtime = 0;
+       u8 tidno, acno;
+
+       if (!ieee80211_is_data(hdr->frame_control))
+               return;
+
+       rcu_read_lock();
+
+       sta = ieee80211_find_sta_by_ifaddr(sc->hw, hdr->addr2, NULL);
+       if (!sta)
+               goto exit;
+       an = (struct ath_node *) sta->drv_priv;
+       avp = (struct ath_vif *) an->vif->drv_priv;
+       tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+       acno = TID_TO_WME_AC(tidno);
+       acq = &avp->chanctx->acq[acno];
+
+       rxs = IEEE80211_SKB_RXCB(skb);
+
+       is_sgi = !!(rxs->flag & RX_FLAG_SHORT_GI);
+       is_40 = !!(rxs->flag & RX_FLAG_40MHZ);
+       is_sp = !!(rxs->flag & RX_FLAG_SHORTPRE);
+
+       if (!!(rxs->flag & RX_FLAG_HT)) {
+               /* MCS rates */
+
+               airtime += ath_pkt_duration(sc, rxs->rate_idx, len,
+                                       is_40, is_sgi, is_sp);
+       } else {
+
+               phy = IS_CCK_RATE(rs->rs_rate) ? WLAN_RC_PHY_CCK : WLAN_RC_PHY_OFDM;
+               rate = &common->sbands[rxs->band].bitrates[rxs->rate_idx];
+               airtime += ath9k_hw_computetxtime(ah, phy, rate->bitrate * 100,
+                                               len, rxs->rate_idx, is_sp);
+       }
+
+       if (!!(sc->airtime_flags & AIRTIME_USE_RX)) {
+               spin_lock_bh(&acq->lock);
+               an->airtime_deficit[acno] -= airtime;
+               if (an->airtime_deficit[acno] <= 0)
+                       __ath_tx_queue_tid(sc, ATH_AN_2_TID(an, tidno));
+               spin_unlock_bh(&acq->lock);
+       }
+       ath_debug_airtime(sc, an, airtime, 0);
+exit:
+       rcu_read_unlock();
+}
+
 int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
 {
        struct ath_rxbuf *bf;
@@ -1148,6 +1212,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
                ath9k_antenna_check(sc, &rs);
                ath9k_apply_ampdu_details(sc, &rs, rxs);
                ath_debug_rate_stats(sc, &rs, skb);
+               ath_rx_count_airtime(sc, &rs, skb);
 
                hdr = (struct ieee80211_hdr *)skb->data;
                if (ieee80211_is_ack(hdr->frame_control))
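ath_rx_count_airtime() above estimates how long each received data frame held the medium — ath_pkt_duration() for HT/MCS rates, ath9k_hw_computetxtime() for legacy CCK/OFDM — and charges it to the sender's per-AC airtime deficit; once the deficit goes non-positive, the TID is re-queued so the scheduler can rotate that station out. As a rough illustration (not the driver's rate-table math, which also models guard interval, symbol padding and per-PHY preambles), legacy airtime is close to payload bits over data rate plus a fixed preamble cost:

    /* Back-of-envelope airtime in microseconds for a legacy OFDM frame.
     * rate is in units of 100 kb/s, matching mac80211's bitrate field.
     */
    static u32 est_airtime_us(u32 len_bytes, u32 rate_100kbps)
    {
            u32 preamble_us = 20;   /* OFDM preamble + PLCP header */
            u32 bits = len_bytes * 8;

            return preamble_us + bits * 10 / rate_100kbps;
    }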
index 4e2f3ac266c3750d069a2176b97dd1726b310f57..396bf05c6bf69da7c1f6b43ce20848d1dbdde230 100644 (file)
@@ -97,18 +97,6 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
        dev_kfree_skb(skb);
 }
 
-void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
-       __acquires(&txq->axq_lock)
-{
-       spin_lock_bh(&txq->axq_lock);
-}
-
-void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
-       __releases(&txq->axq_lock)
-{
-       spin_unlock_bh(&txq->axq_lock);
-}
-
 void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
        __releases(&txq->axq_lock)
 {
@@ -124,21 +112,44 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
                ath_tx_status(hw, skb);
 }
 
-static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
-                            struct ath_atx_tid *tid)
+void __ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-       struct list_head *list;
        struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
        struct ath_chanctx *ctx = avp->chanctx;
+       struct ath_acq *acq;
+       struct list_head *tid_list;
+       u8 acno = TID_TO_WME_AC(tid->tidno);
 
-       if (!ctx)
+       if (!ctx || !list_empty(&tid->list))
                return;
 
-       list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
-       if (list_empty(&tid->list))
-               list_add_tail(&tid->list, list);
+       acq = &ctx->acq[acno];
+       if ((sc->airtime_flags & AIRTIME_USE_NEW_QUEUES) &&
+           tid->an->airtime_deficit[acno] > 0)
+               tid_list = &acq->acq_new;
+       else
+               tid_list = &acq->acq_old;
+
+       list_add_tail(&tid->list, tid_list);
 }
 
+void ath_tx_queue_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
+{
+       struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
+       struct ath_chanctx *ctx = avp->chanctx;
+       struct ath_acq *acq;
+
+       if (!ctx || !list_empty(&tid->list))
+               return;
+
+       acq = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
+       spin_lock_bh(&acq->lock);
+       __ath_tx_queue_tid(sc, tid);
+       spin_unlock_bh(&acq->lock);
+}
+
 void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue)
 {
        struct ath_softc *sc = hw->priv;
@@ -153,7 +164,7 @@ void ath9k_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *queue)
        ath_txq_lock(sc, txq);
 
        tid->has_queued = true;
-       ath_tx_queue_tid(sc, txq, tid);
+       ath_tx_queue_tid(sc, tid);
        ath_txq_schedule(sc, txq);
 
        ath_txq_unlock(sc, txq);
@@ -660,7 +671,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
                skb_queue_splice_tail(&bf_pending, &tid->retry_q);
                if (!an->sleeping) {
-                       ath_tx_queue_tid(sc, txq, tid);
+                       ath_tx_queue_tid(sc, tid);
 
                        if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
                                tid->clear_ps_filter = true;
@@ -688,6 +699,33 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
     return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
 }
 
+static void ath_tx_count_airtime(struct ath_softc *sc, struct ath_node *an,
+                                struct ath_atx_tid *tid, struct ath_buf *bf,
+                                struct ath_tx_status *ts)
+{
+       struct ath_txq *txq = tid->txq;
+       u32 airtime = 0;
+       int i;
+
+       airtime += ts->duration * (ts->ts_longretry + 1);
+       for(i = 0; i < ts->ts_rateindex; i++) {
+               int rate_dur = ath9k_hw_get_duration(sc->sc_ah, bf->bf_desc, i);
+               airtime += rate_dur * bf->rates[i].count;
+       }
+
+       if (sc->airtime_flags & AIRTIME_USE_TX) {
+               int q = txq->mac80211_qnum;
+               struct ath_acq *acq = &sc->cur_chan->acq[q];
+
+               spin_lock_bh(&acq->lock);
+               an->airtime_deficit[q] -= airtime;
+               if (an->airtime_deficit[q] <= 0)
+                       __ath_tx_queue_tid(sc, tid);
+               spin_unlock_bh(&acq->lock);
+       }
+       ath_debug_airtime(sc, an, 0, airtime);
+}
+
 static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
                                  struct ath_tx_status *ts, struct ath_buf *bf,
                                  struct list_head *bf_head)
@@ -715,6 +753,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
        if (sta) {
                struct ath_node *an = (struct ath_node *)sta->drv_priv;
                tid = ath_get_skb_tid(sc, an, bf->bf_mpdu);
+               ath_tx_count_airtime(sc, an, tid, bf, ts);
                if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
                        tid->clear_ps_filter = true;
        }
@@ -1068,8 +1107,8 @@ finish:
  * width  - 0 for 20 MHz, 1 for 40 MHz
  * half_gi - to use 4us v/s 3.6 us for symbol time
  */
-static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
-                           int width, int half_gi, bool shortPreamble)
+u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
+                    int width, int half_gi, bool shortPreamble)
 {
        u32 nbits, nsymbits, duration, nsymbols;
        int streams;
@@ -1151,8 +1190,9 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
                if (is_40) {
                        u8 power_ht40delta;
                        struct ar5416_eeprom_def *eep = &ah->eeprom.def;
+                       u16 eeprom_rev = ah->eep_ops->get_eeprom_rev(ah);
 
-                       if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_2) {
+                       if (eeprom_rev >= AR5416_EEP_MINOR_VER_2) {
                                bool is_2ghz;
                                struct modal_eep_header *pmodal;
 
@@ -1467,7 +1507,7 @@ ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
 }
 
 static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
-                             struct ath_atx_tid *tid, bool *stop)
+                             struct ath_atx_tid *tid)
 {
        struct ath_buf *bf;
        struct ieee80211_tx_info *tx_info;
@@ -1489,7 +1529,6 @@ static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
        if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
            (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
                __skb_queue_tail(&tid->retry_q, bf->bf_mpdu);
-               *stop = true;
                return false;
        }
 
@@ -1613,7 +1652,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
                ath_txq_lock(sc, txq);
                tid->clear_ps_filter = true;
                if (ath_tid_has_buffered(tid)) {
-                       ath_tx_queue_tid(sc, txq, tid);
+                       ath_tx_queue_tid(sc, tid);
                        ath_txq_schedule(sc, txq);
                }
                ath_txq_unlock_complete(sc, txq);
@@ -1912,9 +1951,10 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 {
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       struct ath_atx_tid *tid, *last_tid;
+       struct ath_atx_tid *tid;
        struct list_head *tid_list;
-       bool sent = false;
+       struct ath_acq *acq;
+       bool active = AIRTIME_ACTIVE(sc->airtime_flags);
 
        if (txq->mac80211_qnum < 0)
                return;
@@ -1923,48 +1963,55 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
                return;
 
        spin_lock_bh(&sc->chan_lock);
-       tid_list = &sc->cur_chan->acq[txq->mac80211_qnum];
-
-       if (list_empty(tid_list)) {
-               spin_unlock_bh(&sc->chan_lock);
-               return;
-       }
-
        rcu_read_lock();
+       acq = &sc->cur_chan->acq[txq->mac80211_qnum];
 
-       last_tid = list_entry(tid_list->prev, struct ath_atx_tid, list);
-       while (!list_empty(tid_list)) {
-               bool stop = false;
-
-               if (sc->cur_chan->stopped)
-                       break;
-
-               tid = list_first_entry(tid_list, struct ath_atx_tid, list);
-               list_del_init(&tid->list);
+       if (sc->cur_chan->stopped)
+               goto out;
 
-               if (ath_tx_sched_aggr(sc, txq, tid, &stop))
-                       sent = true;
+begin:
+       tid_list = &acq->acq_new;
+       if (list_empty(tid_list)) {
+               tid_list = &acq->acq_old;
+               if (list_empty(tid_list))
+                       goto out;
+       }
+       tid = list_first_entry(tid_list, struct ath_atx_tid, list);
 
-               /*
-                * add tid to round-robin queue if more frames
-                * are pending for the tid
-                */
-               if (ath_tid_has_buffered(tid))
-                       ath_tx_queue_tid(sc, txq, tid);
+       if (active && tid->an->airtime_deficit[txq->mac80211_qnum] <= 0) {
+               spin_lock_bh(&acq->lock);
+               tid->an->airtime_deficit[txq->mac80211_qnum] += ATH_AIRTIME_QUANTUM;
+               list_move_tail(&tid->list, &acq->acq_old);
+               spin_unlock_bh(&acq->lock);
+               goto begin;
+       }
 
-               if (stop)
-                       break;
+       if (!ath_tid_has_buffered(tid)) {
+               spin_lock_bh(&acq->lock);
+               if ((tid_list == &acq->acq_new) && !list_empty(&acq->acq_old))
+                       list_move_tail(&tid->list, &acq->acq_old);
+               else {
+                       list_del_init(&tid->list);
+               }
+               spin_unlock_bh(&acq->lock);
+               goto begin;
+       }
 
-               if (tid == last_tid) {
-                       if (!sent)
-                               break;
 
-                       sent = false;
-                       last_tid = list_entry(tid_list->prev,
-                                             struct ath_atx_tid, list);
+       /*
+        * If we succeed in scheduling something, immediately restart to make
+        * sure we keep the HW busy.
+        */
+       if(ath_tx_sched_aggr(sc, txq, tid)) {
+               if (!active) {
+                       spin_lock_bh(&acq->lock);
+                       list_move_tail(&tid->list, &acq->acq_old);
+                       spin_unlock_bh(&acq->lock);
                }
+               goto begin;
        }
 
+out:
        rcu_read_unlock();
        spin_unlock_bh(&sc->chan_lock);
 }
@@ -2805,8 +2852,6 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
                return error;
        }
 
-       INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
-
        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                error = ath_tx_edma_init(sc);
 
@@ -2818,6 +2863,9 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
        struct ath_atx_tid *tid;
        int tidno, acno;
 
+       for (acno = 0; acno < IEEE80211_NUM_ACS; acno++)
+               an->airtime_deficit[acno] = ATH_AIRTIME_QUANTUM;
+
        for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
                tid = ath_node_to_tid(an, tidno);
                tid->an        = an;
index 591ebaea826564bf339f82b8152394dca022b5e8..4b83e87f0b9450bf431b48a168901371f1cab33c 100644 (file)
@@ -1,6 +1,8 @@
 config WCN36XX
        tristate "Qualcomm Atheros WCN3660/3680 support"
        depends on MAC80211 && HAS_DMA
+       depends on QCOM_WCNSS_CTRL || QCOM_WCNSS_CTRL=n
+       depends on QCOM_SMD || QCOM_SMD=n
        ---help---
          This module adds support for wireless adapters based on
          Qualcomm Atheros WCN3660 and WCN3680 mobile chipsets.
index 231fd022f0f54e96b58c522061d6441cf650c404..87dfdaf9044cdf8b41aba6b8345e9d2f9dd325a6 100644 (file)
@@ -23,6 +23,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/interrupt.h>
+#include <linux/soc/qcom/smem_state.h>
 #include "wcn36xx.h"
 #include "txrx.h"
 
@@ -151,9 +152,12 @@ int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
                goto out_err;
 
        /* Initialize SMSM state: clear TX Enable, set TX Rings Empty */
-       ret = wcn->ctrl_ops->smsm_change_state(
-               WCN36XX_SMSM_WLAN_TX_ENABLE,
-               WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
+       ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
+                                         WCN36XX_SMSM_WLAN_TX_ENABLE |
+                                         WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
+                                         WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
+       if (ret)
+               goto out_err;
 
        return 0;
 
@@ -678,9 +682,9 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
         * notify chip about new frame through SMSM bus.
         */
        if (is_low &&  vif_priv->pw_state == WCN36XX_BMPS) {
-               wcn->ctrl_ops->smsm_change_state(
-                                 0,
-                                 WCN36XX_SMSM_WLAN_TX_ENABLE);
+               qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
+                                           WCN36XX_SMSM_WLAN_TX_ENABLE,
+                                           WCN36XX_SMSM_WLAN_TX_ENABLE);
        } else {
                /* indicate End Of Packet and generate interrupt on descriptor
                 * done.
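The raw SMSM callback is replaced by the qcom_smem_state API, and the first conversion also starts checking the return value. qcom_smem_state_update_bits(state, mask, value) changes only the bits selected by mask, so one call can clear WLAN_TX_ENABLE and set WLAN_TX_RINGS_EMPTY at once. The read-modify-write convention behind update_bits-style interfaces, sketched:

    /* Only bits inside mask change; everything else is preserved. */
    static u32 update_bits(u32 old, u32 mask, u32 value)
    {
            return (old & ~mask) | (value & mask);
    }

    /*
     * e.g. clear TX_ENABLE, set RINGS_EMPTY in one shot:
     *   new = update_bits(cur, TX_ENABLE | RINGS_EMPTY, RINGS_EMPTY);
     */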
index 4f87ef1e1eb866808ecc0b071b28d7bd4bbaeff1..b765c647319dd6e3a4f20ed25554272f0f240eab 100644 (file)
@@ -350,6 +350,8 @@ enum wcn36xx_hal_host_msg_type {
 
        WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233,
 
+       WCN36XX_HAL_PRINT_REG_INFO_IND = 259,
+
        WCN36XX_HAL_MSG_MAX = WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE
 };
 
@@ -4703,4 +4705,18 @@ struct stats_class_b_ind {
        u32 rx_time_total;
 };
 
+/* WCN36XX_HAL_PRINT_REG_INFO_IND */
+struct wcn36xx_hal_print_reg_info_ind {
+       struct wcn36xx_hal_msg_header header;
+
+       u32 count;
+       u32 scenario;
+       u32 reason;
+
+       struct {
+               u32 addr;
+               u32 value;
+       } regs[];
+} __packed;
+
 #endif /* _HAL_H_ */
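wcn36xx_hal_print_reg_info_ind ends in a C99 flexible array member, so sizeof() covers only the fixed header and the regs[] payload length must be derived from the message size. The handler added in smd.c validates just len < sizeof(*rsp) before walking rsp->count entries; a stricter receiver would also bound count against the remaining bytes, along these lines (a sketch, not the in-tree check):

    static bool reg_info_len_ok(const struct wcn36xx_hal_print_reg_info_ind *rsp,
                                size_t len)
    {
            if (len < sizeof(*rsp))
                    return false;

            /* each entry is one {addr, value} pair */
            return (len - sizeof(*rsp)) / sizeof(rsp->regs[0]) >= rsp->count;
    }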
index e1d59da2ad20fca15deea2fb8c01e6830509d7da..7a0c2e7da7f643333506e595d39fd988b535825e 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/soc/qcom/smd.h>
+#include <linux/soc/qcom/smem_state.h>
+#include <linux/soc/qcom/wcnss_ctrl.h>
 #include "wcn36xx.h"
 
 unsigned int wcn36xx_dbg_mask;
@@ -564,23 +568,81 @@ out:
        return ret;
 }
 
-static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw,
-                                 struct ieee80211_vif *vif,
-                                 const u8 *mac_addr)
+static void wcn36xx_hw_scan_worker(struct work_struct *work)
 {
-       struct wcn36xx *wcn = hw->priv;
+       struct wcn36xx *wcn = container_of(work, struct wcn36xx, scan_work);
+       struct cfg80211_scan_request *req = wcn->scan_req;
+       u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX];
+       struct cfg80211_scan_info scan_info = {};
+       bool aborted = false;
+       int i;
+
+       wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 scan %d channels worker\n", req->n_channels);
+
+       for (i = 0; i < req->n_channels; i++)
+               channels[i] = req->channels[i]->hw_value;
+
+       wcn36xx_smd_update_scan_params(wcn, channels, req->n_channels);
 
        wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN);
-       wcn36xx_smd_start_scan(wcn);
+       for (i = 0; i < req->n_channels; i++) {
+               mutex_lock(&wcn->scan_lock);
+               aborted = wcn->scan_aborted;
+               mutex_unlock(&wcn->scan_lock);
+
+               if (aborted)
+                       break;
+
+               wcn->scan_freq = req->channels[i]->center_freq;
+               wcn->scan_band = req->channels[i]->band;
+
+               wcn36xx_smd_start_scan(wcn, req->channels[i]->hw_value);
+               msleep(30);
+               wcn36xx_smd_end_scan(wcn, req->channels[i]->hw_value);
+
+               wcn->scan_freq = 0;
+       }
+       wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);
+
+       scan_info.aborted = aborted;
+       ieee80211_scan_completed(wcn->hw, &scan_info);
+
+       mutex_lock(&wcn->scan_lock);
+       wcn->scan_req = NULL;
+       mutex_unlock(&wcn->scan_lock);
 }
 
-static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw,
-                                    struct ieee80211_vif *vif)
+static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
+                          struct ieee80211_vif *vif,
+                          struct ieee80211_scan_request *hw_req)
 {
        struct wcn36xx *wcn = hw->priv;
 
-       wcn36xx_smd_end_scan(wcn);
-       wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);
+       mutex_lock(&wcn->scan_lock);
+       if (wcn->scan_req) {
+               mutex_unlock(&wcn->scan_lock);
+               return -EBUSY;
+       }
+
+       wcn->scan_aborted = false;
+       wcn->scan_req = &hw_req->req;
+       mutex_unlock(&wcn->scan_lock);
+
+       schedule_work(&wcn->scan_work);
+
+       return 0;
+}
+
+static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
+                                  struct ieee80211_vif *vif)
+{
+       struct wcn36xx *wcn = hw->priv;
+
+       mutex_lock(&wcn->scan_lock);
+       wcn->scan_aborted = true;
+       mutex_unlock(&wcn->scan_lock);
+
+       cancel_work_sync(&wcn->scan_work);
 }
 
 static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
@@ -993,8 +1055,8 @@ static const struct ieee80211_ops wcn36xx_ops = {
        .configure_filter       = wcn36xx_configure_filter,
        .tx                     = wcn36xx_tx,
        .set_key                = wcn36xx_set_key,
-       .sw_scan_start          = wcn36xx_sw_scan_start,
-       .sw_scan_complete       = wcn36xx_sw_scan_complete,
+       .hw_scan                = wcn36xx_hw_scan,
+       .cancel_hw_scan         = wcn36xx_cancel_hw_scan,
        .bss_info_changed       = wcn36xx_bss_info_changed,
        .set_rts_threshold      = wcn36xx_set_rts_threshold,
        .sta_add                = wcn36xx_sta_add,
@@ -1019,6 +1081,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
        ieee80211_hw_set(wcn->hw, SUPPORTS_PS);
        ieee80211_hw_set(wcn->hw, SIGNAL_DBM);
        ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL);
+       ieee80211_hw_set(wcn->hw, SINGLE_SCAN_ON_ALL_BANDS);
 
        wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
                BIT(NL80211_IFTYPE_AP) |
@@ -1028,6 +1091,9 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
        wcn->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wcn_band_2ghz;
        wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz;
 
+       wcn->hw->wiphy->max_scan_ssids = WCN36XX_MAX_SCAN_SSIDS;
+       wcn->hw->wiphy->max_scan_ie_len = WCN36XX_MAX_SCAN_IE_LEN;
+
        wcn->hw->wiphy->cipher_suites = cipher_suites;
        wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
 
@@ -1058,8 +1124,7 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
        int ret;
 
        /* Set TX IRQ */
-       res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
-                                          "wcnss_wlantx_irq");
+       res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "tx");
        if (!res) {
                wcn36xx_err("failed to get tx_irq\n");
                return -ENOENT;
@@ -1067,14 +1132,29 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
        wcn->tx_irq = res->start;
 
        /* Set RX IRQ */
-       res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
-                                          "wcnss_wlanrx_irq");
+       res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "rx");
        if (!res) {
                wcn36xx_err("failed to get rx_irq\n");
                return -ENOENT;
        }
        wcn->rx_irq = res->start;
 
+       /* Acquire SMSM tx enable handle */
+       wcn->tx_enable_state = qcom_smem_state_get(&pdev->dev,
+                       "tx-enable", &wcn->tx_enable_state_bit);
+       if (IS_ERR(wcn->tx_enable_state)) {
+               wcn36xx_err("failed to get tx-enable state\n");
+               return PTR_ERR(wcn->tx_enable_state);
+       }
+
+       /* Acquire SMSM tx rings empty handle */
+       wcn->tx_rings_empty_state = qcom_smem_state_get(&pdev->dev,
+                       "tx-rings-empty", &wcn->tx_rings_empty_state_bit);
+       if (IS_ERR(wcn->tx_rings_empty_state)) {
+               wcn36xx_err("failed to get tx-rings-empty state\n");
+               return PTR_ERR(wcn->tx_rings_empty_state);
+       }
+
        mmio_node = of_parse_phandle(pdev->dev.parent->of_node, "qcom,mmio", 0);
        if (!mmio_node) {
                wcn36xx_err("failed to acquire qcom,mmio reference\n");
@@ -1115,11 +1195,14 @@ static int wcn36xx_probe(struct platform_device *pdev)
 {
        struct ieee80211_hw *hw;
        struct wcn36xx *wcn;
+       void *wcnss;
        int ret;
-       u8 addr[ETH_ALEN];
+       const u8 *addr;
 
        wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n");
 
+       wcnss = dev_get_drvdata(pdev->dev.parent);
+
        hw = ieee80211_alloc_hw(sizeof(struct wcn36xx), &wcn36xx_ops);
        if (!hw) {
                wcn36xx_err("failed to alloc hw\n");
@@ -1130,11 +1213,26 @@ static int wcn36xx_probe(struct platform_device *pdev)
        wcn = hw->priv;
        wcn->hw = hw;
        wcn->dev = &pdev->dev;
-       wcn->ctrl_ops = pdev->dev.platform_data;
-
        mutex_init(&wcn->hal_mutex);
+       mutex_init(&wcn->scan_lock);
 
-       if (!wcn->ctrl_ops->get_hw_mac(addr)) {
+       INIT_WORK(&wcn->scan_work, wcn36xx_hw_scan_worker);
+
+       wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process);
+       if (IS_ERR(wcn->smd_channel)) {
+               wcn36xx_err("failed to open WLAN_CTRL channel\n");
+               ret = PTR_ERR(wcn->smd_channel);
+               goto out_wq;
+       }
+
+       qcom_smd_set_drvdata(wcn->smd_channel, hw);
+
+       addr = of_get_property(pdev->dev.of_node, "local-mac-address", &ret);
+       if (addr && ret != ETH_ALEN) {
+               wcn36xx_err("invalid local-mac-address\n");
+               ret = -EINVAL;
+               goto out_wq;
+       } else if (addr) {
                wcn36xx_info("mac address: %pM\n", addr);
                SET_IEEE80211_PERM_ADDR(wcn->hw, addr);
        }
@@ -1158,6 +1256,7 @@ out_wq:
 out_err:
        return ret;
 }
+
 static int wcn36xx_remove(struct platform_device *pdev)
 {
        struct ieee80211_hw *hw = platform_get_drvdata(pdev);
@@ -1165,45 +1264,37 @@ static int wcn36xx_remove(struct platform_device *pdev)
        wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
 
        release_firmware(wcn->nv);
-       mutex_destroy(&wcn->hal_mutex);
 
        ieee80211_unregister_hw(hw);
+
+       qcom_smem_state_put(wcn->tx_enable_state);
+       qcom_smem_state_put(wcn->tx_rings_empty_state);
+
        iounmap(wcn->dxe_base);
        iounmap(wcn->ccu_base);
+
+       mutex_destroy(&wcn->hal_mutex);
        ieee80211_free_hw(hw);
 
        return 0;
 }
-static const struct platform_device_id wcn36xx_platform_id_table[] = {
-       {
-               .name = "wcn36xx",
-               .driver_data = 0
-       },
+
+static const struct of_device_id wcn36xx_of_match[] = {
+       { .compatible = "qcom,wcnss-wlan" },
        {}
 };
-MODULE_DEVICE_TABLE(platform, wcn36xx_platform_id_table);
+MODULE_DEVICE_TABLE(of, wcn36xx_of_match);
 
 static struct platform_driver wcn36xx_driver = {
        .probe      = wcn36xx_probe,
        .remove     = wcn36xx_remove,
        .driver         = {
                .name   = "wcn36xx",
+               .of_match_table = wcn36xx_of_match,
        },
-       .id_table    = wcn36xx_platform_id_table,
 };
 
-static int __init wcn36xx_init(void)
-{
-       platform_driver_register(&wcn36xx_driver);
-       return 0;
-}
-module_init(wcn36xx_init);
-
-static void __exit wcn36xx_exit(void)
-{
-       platform_driver_unregister(&wcn36xx_driver);
-}
-module_exit(wcn36xx_exit);
+module_platform_driver(wcn36xx_driver);
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
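Replacing the hand-rolled init/exit pair with module_platform_driver() also fixes a latent bug: the old wcn36xx_init() ignored the return value of platform_driver_register() and always reported success. The macro expands to roughly the following, now propagating registration failures:

    static int __init wcn36xx_driver_init(void)
    {
            return platform_driver_register(&wcn36xx_driver);
    }
    module_init(wcn36xx_driver_init);

    static void __exit wcn36xx_driver_exit(void)
    {
            platform_driver_unregister(&wcn36xx_driver);
    }
    module_exit(wcn36xx_driver_exit);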
index a443992320f2ed3aa2152b93d2d8ecb1819ec2fb..1c2966f7db7a3f7c41660027444e17f4a908c54d 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/etherdevice.h>
 #include <linux/firmware.h>
 #include <linux/bitops.h>
+#include <linux/soc/qcom/smd.h>
 #include "smd.h"
 
 struct wcn36xx_cfg_val {
@@ -253,7 +254,7 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
 
        init_completion(&wcn->hal_rsp_compl);
        start = jiffies;
-       ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
+       ret = qcom_smd_send(wcn->smd_channel, wcn->hal_buf, len);
        if (ret) {
                wcn36xx_err("HAL TX failed\n");
                goto out;
@@ -521,7 +522,7 @@ out:
        return ret;
 }
 
-int wcn36xx_smd_start_scan(struct wcn36xx *wcn)
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel)
 {
        struct wcn36xx_hal_start_scan_req_msg msg_body;
        int ret = 0;
@@ -529,7 +530,7 @@ int wcn36xx_smd_start_scan(struct wcn36xx *wcn)
        mutex_lock(&wcn->hal_mutex);
        INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
 
-       msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+       msg_body.scan_channel = scan_channel;
 
        PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
 
@@ -551,7 +552,7 @@ out:
        return ret;
 }
 
-int wcn36xx_smd_end_scan(struct wcn36xx *wcn)
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel)
 {
        struct wcn36xx_hal_end_scan_req_msg msg_body;
        int ret = 0;
@@ -559,7 +560,7 @@ int wcn36xx_smd_end_scan(struct wcn36xx *wcn)
        mutex_lock(&wcn->hal_mutex);
        INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
 
-       msg_body.scan_channel = WCN36XX_HW_CHANNEL(wcn);
+       msg_body.scan_channel = scan_channel;
 
        PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
 
@@ -2108,6 +2109,30 @@ static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
        return -ENOENT;
 }
 
+static int wcn36xx_smd_print_reg_info_ind(struct wcn36xx *wcn,
+                                         void *buf,
+                                         size_t len)
+{
+       struct wcn36xx_hal_print_reg_info_ind *rsp = buf;
+       int i;
+
+       if (len < sizeof(*rsp)) {
+               wcn36xx_warn("Corrupted print reg info indication\n");
+               return -EIO;
+       }
+
+       wcn36xx_dbg(WCN36XX_DBG_HAL,
+                   "reginfo indication, scenario: 0x%x reason: 0x%x\n",
+                   rsp->scenario, rsp->reason);
+
+       for (i = 0; i < rsp->count; i++) {
+               wcn36xx_dbg(WCN36XX_DBG_HAL, "\t0x%x: 0x%x\n",
+                           rsp->regs[i].addr, rsp->regs[i].value);
+       }
+
+       return 0;
+}
+
 int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
 {
        struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
@@ -2180,9 +2205,12 @@ out:
        return ret;
 }
 
-static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
+int wcn36xx_smd_rsp_process(struct qcom_smd_channel *channel,
+                           const void *buf, size_t len)
 {
-       struct wcn36xx_hal_msg_header *msg_header = buf;
+       const struct wcn36xx_hal_msg_header *msg_header = buf;
+       struct ieee80211_hw *hw = qcom_smd_get_drvdata(channel);
+       struct wcn36xx *wcn = hw->priv;
        struct wcn36xx_hal_ind_msg *msg_ind;
        wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len);
 
@@ -2233,15 +2261,12 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
        case WCN36XX_HAL_OTA_TX_COMPL_IND:
        case WCN36XX_HAL_MISSED_BEACON_IND:
        case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
-               msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_KERNEL);
+       case WCN36XX_HAL_PRINT_REG_INFO_IND:
+               msg_ind = kmalloc(sizeof(*msg_ind) + len, GFP_ATOMIC);
                if (!msg_ind) {
-                       /*
-                        * FIXME: Do something smarter then just
-                        * printing an error.
-                        */
                        wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
                                    msg_header->msg_type);
-                       break;
+                       return -ENOMEM;
                }
 
                msg_ind->msg_len = len;
@@ -2257,6 +2282,8 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
                wcn36xx_err("SMD_EVENT (%d) not supported\n",
                              msg_header->msg_type);
        }
+
+       return 0;
 }
 static void wcn36xx_ind_smd_work(struct work_struct *work)
 {
@@ -2294,6 +2321,11 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
                                                   hal_ind_msg->msg,
                                                   hal_ind_msg->msg_len);
                break;
+       case WCN36XX_HAL_PRINT_REG_INFO_IND:
+               wcn36xx_smd_print_reg_info_ind(wcn,
+                                              hal_ind_msg->msg,
+                                              hal_ind_msg->msg_len);
+               break;
        default:
                wcn36xx_err("SMD_EVENT (%d) not supported\n",
                              msg_header->msg_type);
@@ -2315,22 +2347,13 @@ int wcn36xx_smd_open(struct wcn36xx *wcn)
        INIT_LIST_HEAD(&wcn->hal_ind_queue);
        spin_lock_init(&wcn->hal_ind_lock);
 
-       ret = wcn->ctrl_ops->open(wcn, wcn36xx_smd_rsp_process);
-       if (ret) {
-               wcn36xx_err("failed to open control channel\n");
-               goto free_wq;
-       }
-
-       return ret;
+       return 0;
 
-free_wq:
-       destroy_workqueue(wcn->hal_ind_wq);
 out:
        return ret;
 }
 
 void wcn36xx_smd_close(struct wcn36xx *wcn)
 {
-       wcn->ctrl_ops->close();
        destroy_workqueue(wcn->hal_ind_wq);
 }
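wcn36xx_smd_rsp_process() is now invoked directly as the SMD channel callback, which can run in atomic context — hence the switch from GFP_KERNEL to GFP_ATOMIC and the queue-and-defer structure: indications are copied, queued under hal_ind_lock and processed later in wcn36xx_ind_smd_work(), where sleeping is allowed. The general shape of that deferral, with illustrative names:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>
    #include <linux/workqueue.h>

    struct ind {
            struct list_head node;
            size_t len;
            u8 data[];
    };

    static LIST_HEAD(ind_queue);
    static DEFINE_SPINLOCK(ind_lock);
    static struct work_struct ind_work;     /* INIT_WORK()ed elsewhere */

    static int on_msg(const void *buf, size_t len)
    {
            unsigned long flags;
            struct ind *ind = kmalloc(sizeof(*ind) + len, GFP_ATOMIC);

            if (!ind)
                    return -ENOMEM;

            ind->len = len;
            memcpy(ind->data, buf, len);

            spin_lock_irqsave(&ind_lock, flags);
            list_add_tail(&ind->node, &ind_queue);
            spin_unlock_irqrestore(&ind_lock, flags);

            schedule_work(&ind_work);       /* heavy lifting happens there */
            return 0;
    }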
index df80cbbd9d1bed9fea3daca6f65b87b8f053d7d3..8892ccd67b144903ae25cde3287e79f951e5a8c6 100644 (file)
@@ -51,6 +51,7 @@ struct wcn36xx_hal_ind_msg {
 };
 
 struct wcn36xx;
+struct qcom_smd_channel;
 
 int wcn36xx_smd_open(struct wcn36xx *wcn);
 void wcn36xx_smd_close(struct wcn36xx *wcn);
@@ -59,8 +60,8 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn);
 int wcn36xx_smd_start(struct wcn36xx *wcn);
 int wcn36xx_smd_stop(struct wcn36xx *wcn);
 int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode);
-int wcn36xx_smd_start_scan(struct wcn36xx *wcn);
-int wcn36xx_smd_end_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel);
+int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel);
 int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
                            enum wcn36xx_hal_sys_mode mode);
 int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count);
@@ -127,6 +128,10 @@ int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
 int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
 
 int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
+
+int wcn36xx_smd_rsp_process(struct qcom_smd_channel *channel,
+                           const void *buf, size_t len);
+
 int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
                            struct ieee80211_vif *vif,
                            struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp);
index 1f34c2e912d7d9d7e7e39d4684972d4173a405b0..8c387a0a3c091c474e8f7dd948dc31b9060c0813 100644 (file)
@@ -45,9 +45,20 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
        skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
        skb_pull(skb, bd->pdu.mpdu_header_off);
 
+       hdr = (struct ieee80211_hdr *) skb->data;
+       fc = __le16_to_cpu(hdr->frame_control);
+       sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
+
+       /* While scanning, associate beacons with the channel being scanned */
+       if (ieee80211_is_beacon(hdr->frame_control) && wcn->scan_freq) {
+               status.freq = wcn->scan_freq;
+               status.band = wcn->scan_band;
+       } else {
+               status.freq = WCN36XX_CENTER_FREQ(wcn);
+               status.band = WCN36XX_BAND(wcn);
+       }
+
        status.mactime = 10;
-       status.freq = WCN36XX_CENTER_FREQ(wcn);
-       status.band = WCN36XX_BAND(wcn);
        status.signal = -get_rssi0(bd);
        status.antenna = 1;
        status.rate_idx = 1;
@@ -61,10 +72,6 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
 
        memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
 
-       hdr = (struct ieee80211_hdr *) skb->data;
-       fc = __le16_to_cpu(hdr->frame_control);
-       sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
-
        if (ieee80211_is_beacon(hdr->frame_control)) {
                wcn36xx_dbg(WCN36XX_DBG_BEACON, "beacon skb %p len %d fc %04x sn %d\n",
                            skb, skb->len, fc, sn);
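Since the firmware only reports the current operating channel, the RX path now parses the 802.11 header first and, while the scan worker is parked on a foreign channel, stamps beacons with wcn->scan_freq/scan_band instead of the operating channel — otherwise cfg80211 would file off-channel BSS entries under the wrong frequency. The selection logic, isolated (parameter names are illustrative):

    static void tag_rx_channel(struct ieee80211_rx_status *status,
                               bool is_beacon, int scan_freq, int scan_band,
                               int oper_freq, int oper_band)
    {
            if (is_beacon && scan_freq) {   /* mid-scan: report probed channel */
                    status->freq = scan_freq;
                    status->band = scan_band;
            } else {
                    status->freq = oper_freq;
                    status->band = oper_band;
            }
    }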
index 22242d18e1fe3b44f7e66ff5f30ab914838e35e8..7423998ddeb492cab624134fcb5d363db42093d9 100644 (file)
@@ -35,6 +35,9 @@
 /* How many frames until we start a-mpdu TX session */
 #define WCN36XX_AMPDU_START_THRESH     20
 
+#define WCN36XX_MAX_SCAN_SSIDS         9
+#define WCN36XX_MAX_SCAN_IE_LEN                500
+
 extern unsigned int wcn36xx_dbg_mask;
 
 enum wcn36xx_debug_mask {
@@ -103,19 +106,6 @@ struct nv_data {
        u8      table;
 };
 
-/* Interface for platform control path
- *
- * @open: hook must be called when wcn36xx wants to open control channel.
- * @tx: sends a buffer.
- */
-struct wcn36xx_platform_ctrl_ops {
-       int (*open)(void *drv_priv, void *rsp_cb);
-       void (*close)(void);
-       int (*tx)(char *buf, size_t len);
-       int (*get_hw_mac)(u8 *addr);
-       int (*smsm_change_state)(u32 clear_mask, u32 set_mask);
-};
-
 /**
  * struct wcn36xx_vif - holds VIF related fields
  *
@@ -205,7 +195,13 @@ struct wcn36xx {
        void __iomem            *ccu_base;
        void __iomem            *dxe_base;
 
-       struct wcn36xx_platform_ctrl_ops *ctrl_ops;
+       struct qcom_smd_channel *smd_channel;
+
+       struct qcom_smem_state  *tx_enable_state;
+       unsigned                tx_enable_state_bit;
+       struct qcom_smem_state  *tx_rings_empty_state;
+       unsigned                tx_rings_empty_state_bit;
+
        /*
         * smd_buf must be protected with smd_mutex to guarantee
         * that all messages are sent one after another
@@ -219,6 +215,13 @@ struct wcn36xx {
        spinlock_t              hal_ind_lock;
        struct list_head        hal_ind_queue;
 
+       struct work_struct      scan_work;
+       struct cfg80211_scan_request *scan_req;
+       int                     scan_freq;
+       int                     scan_band;
+       struct mutex            scan_lock;
+       bool                    scan_aborted;
+
        /* DXE channels */
        struct wcn36xx_dxe_ch   dxe_tx_l_ch;    /* TX low */
        struct wcn36xx_dxe_ch   dxe_tx_h_ch;    /* TX high */
index 6aa3ff4240a9b541c209ac8f926193e7a9bc8632..83155b5ddbfb1fe8018e59760a7c2b571215b0ae 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
  */
 
 #include <linux/etherdevice.h>
+#include <linux/moduleparam.h>
 #include "wil6210.h"
 #include "wmi.h"
 
 #define WIL_MAX_ROC_DURATION_MS 5000
 
+bool disable_ap_sme;
+module_param(disable_ap_sme, bool, 0444);
+MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
+
 #define CHAN60G(_channel, _flags) {                            \
        .band                   = NL80211_BAND_60GHZ,           \
        .center_freq            = 56160 + (2160 * (_channel)),  \
@@ -62,9 +67,16 @@ wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
        },
        [NL80211_IFTYPE_AP] = {
                .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
-               BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+               BIT(IEEE80211_STYPE_PROBE_RESP >> 4) |
+               BIT(IEEE80211_STYPE_ASSOC_RESP >> 4) |
+               BIT(IEEE80211_STYPE_DISASSOC >> 4),
                .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
-               BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+               BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+               BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
+               BIT(IEEE80211_STYPE_DISASSOC >> 4) |
+               BIT(IEEE80211_STYPE_AUTH >> 4) |
+               BIT(IEEE80211_STYPE_DEAUTH >> 4) |
+               BIT(IEEE80211_STYPE_REASSOC_REQ >> 4)
        },
        [NL80211_IFTYPE_P2P_CLIENT] = {
                .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
@@ -194,7 +206,7 @@ static int wil_cfg80211_get_station(struct wiphy *wiphy,
 
        int cid = wil_find_cid(wil, mac);
 
-       wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+       wil_dbg_misc(wil, "get_station: %pM CID %d\n", mac, cid);
        if (cid < 0)
                return cid;
 
@@ -233,7 +245,7 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy,
                return -ENOENT;
 
        ether_addr_copy(mac, wil->sta[cid].addr);
-       wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+       wil_dbg_misc(wil, "dump_station: %pM CID %d\n", mac, cid);
 
        rc = wil_cid_fill_sinfo(wil, cid, sinfo);
 
@@ -250,16 +262,15 @@ wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name,
        struct net_device *ndev = wil_to_ndev(wil);
        struct wireless_dev *p2p_wdev;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "add_iface\n");
 
        if (type != NL80211_IFTYPE_P2P_DEVICE) {
-               wil_err(wil, "%s: unsupported iftype %d\n", __func__, type);
+               wil_err(wil, "unsupported iftype %d\n", type);
                return ERR_PTR(-EINVAL);
        }
 
        if (wil->p2p_wdev) {
-               wil_err(wil, "%s: P2P_DEVICE interface already created\n",
-                       __func__);
+               wil_err(wil, "P2P_DEVICE interface already created\n");
                return ERR_PTR(-EINVAL);
        }
 
@@ -282,11 +293,10 @@ static int wil_cfg80211_del_iface(struct wiphy *wiphy,
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "del_iface\n");
 
        if (wdev != wil->p2p_wdev) {
-               wil_err(wil, "%s: delete of incorrect interface 0x%p\n",
-                       __func__, wdev);
+               wil_err(wil, "delete of incorrect interface 0x%p\n", wdev);
                return -EINVAL;
        }
 
@@ -304,7 +314,7 @@ static int wil_cfg80211_change_iface(struct wiphy *wiphy,
        struct wireless_dev *wdev = wil_to_wdev(wil);
        int rc;
 
-       wil_dbg_misc(wil, "%s() type=%d\n", __func__, type);
+       wil_dbg_misc(wil, "change_iface: type=%d\n", type);
 
        if (netif_running(wil_to_ndev(wil)) && !wil_is_recovery_blocked(wil)) {
                wil_dbg_misc(wil, "interface is up. resetting...\n");
@@ -351,8 +361,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
        uint i, n;
        int rc;
 
-       wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
-                    __func__, wdev, wdev->iftype);
+       wil_dbg_misc(wil, "scan: wdev=0x%p iftype=%d\n", wdev, wdev->iftype);
 
        /* check we are client side */
        switch (wdev->iftype) {
@@ -557,7 +566,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        int rc = 0;
        enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "connect\n");
        wil_print_connect_params(wil, sme);
 
        if (test_bit(wil_status_fwconnecting, wil->status) ||
@@ -593,6 +602,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
                goto out;
        }
        wil->privacy = sme->privacy;
+       wil->pbss = sme->pbss;
 
        if (wil->privacy) {
                /* For secure assoc, remove old keys */
@@ -689,12 +699,11 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
        int rc;
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
-       wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code);
+       wil_dbg_misc(wil, "disconnect: reason=%d\n", reason_code);
 
        if (!(test_bit(wil_status_fwconnecting, wil->status) ||
              test_bit(wil_status_fwconnected, wil->status))) {
-               wil_err(wil, "%s: Disconnect was called while disconnected\n",
-                       __func__);
+               wil_err(wil, "Disconnect was called while disconnected\n");
                return 0;
        }
 
@@ -702,7 +711,7 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
                      WMI_DISCONNECT_EVENTID, NULL, 0,
                      WIL6210_DISCONNECT_TO_MS);
        if (rc)
-               wil_err(wil, "%s: disconnect error %d\n", __func__, rc);
+               wil_err(wil, "disconnect error %d\n", rc);
 
        return rc;
 }
@@ -750,7 +759,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
         * different from currently "listened" channel and fail if it is.
         */
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "mgmt_tx\n");
        print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len);
 
        cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
@@ -811,7 +820,7 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
                        break;
                }
        }
-       wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]);
+       wil_dbg_misc(wil, "detect_key_usage: -> %s\n", key_usage_str[rc]);
 
        return rc;
 }
@@ -916,13 +925,13 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
                return -EINVAL;
        }
 
-       wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__,
+       wil_dbg_misc(wil, "add_key: %pM %s[%d] PN %*phN\n",
                     mac_addr, key_usage_str[key_usage], key_index,
                     params->seq_len, params->seq);
 
        if (IS_ERR(cs)) {
-               wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n",
-                       __func__, mac_addr, key_usage_str[key_usage], key_index,
+               wil_err(wil, "Not connected, %pM %s[%d] PN %*phN\n",
+                       mac_addr, key_usage_str[key_usage], key_index,
                        params->seq_len, params->seq);
                return -EINVAL;
        }
@@ -931,8 +940,8 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
 
        if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
                wil_err(wil,
-                       "Wrong PN len %d, %s(%pM %s[%d] PN %*phN)\n",
-                       params->seq_len, __func__, mac_addr,
+                       "Wrong PN len %d, %pM %s[%d] PN %*phN\n",
+                       params->seq_len, mac_addr,
                        key_usage_str[key_usage], key_index,
                        params->seq_len, params->seq);
                return -EINVAL;
@@ -956,11 +965,11 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
        struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage,
                                                            mac_addr);
 
-       wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr,
+       wil_dbg_misc(wil, "del_key: %pM %s[%d]\n", mac_addr,
                     key_usage_str[key_usage], key_index);
 
        if (IS_ERR(cs))
-               wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__,
+               wil_info(wil, "Not connected, %pM %s[%d]\n",
                         mac_addr, key_usage_str[key_usage], key_index);
 
        if (!IS_ERR_OR_NULL(cs))
@@ -977,7 +986,7 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
-       wil_dbg_misc(wil, "%s: entered\n", __func__);
+       wil_dbg_misc(wil, "set_default_key: entered\n");
        return 0;
 }
 
@@ -990,8 +999,9 @@ static int wil_remain_on_channel(struct wiphy *wiphy,
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
        int rc;
 
-       wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d iftype=%d\n",
-                    __func__, chan->center_freq, duration, wdev->iftype);
+       wil_dbg_misc(wil,
+                    "remain_on_channel: center_freq=%d, duration=%d iftype=%d\n",
+                    chan->center_freq, duration, wdev->iftype);
 
        rc = wil_p2p_listen(wil, wdev, duration, chan, cookie);
        return rc;
@@ -1003,7 +1013,7 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "cancel_remain_on_channel\n");
 
        return wil_p2p_cancel_listen(wil, cookie);
 }
@@ -1159,9 +1169,9 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
        if (pbss)
                wmi_nettype = WMI_NETTYPE_P2P;
 
-       wil_dbg_misc(wil, "%s: is_go=%d\n", __func__, is_go);
+       wil_dbg_misc(wil, "start_ap: is_go=%d\n", is_go);
        if (is_go && !pbss) {
-               wil_err(wil, "%s: P2P GO must be in PBSS\n", __func__);
+               wil_err(wil, "P2P GO must be in PBSS\n");
                return -ENOTSUPP;
        }
 
@@ -1216,7 +1226,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
        int rc;
        u32 privacy = 0;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "change_beacon\n");
        wil_print_bcon_data(bcon);
 
        if (bcon->tail &&
@@ -1255,7 +1265,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
        struct cfg80211_crypto_settings *crypto = &info->crypto;
        u8 hidden_ssid;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "start_ap\n");
 
        if (!channel) {
                wil_err(wil, "AP: No channel???\n");
@@ -1306,7 +1316,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "stop_ap\n");
 
        netif_carrier_off(ndev);
        wil_set_recovery_state(wil, fw_recovery_idle);
@@ -1322,13 +1332,35 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
        return 0;
 }
 
+static int wil_cfg80211_add_station(struct wiphy *wiphy,
+                                   struct net_device *dev,
+                                   const u8 *mac,
+                                   struct station_parameters *params)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+       wil_dbg_misc(wil, "add station %pM aid %d\n", mac, params->aid);
+
+       if (!disable_ap_sme) {
+               wil_err(wil, "not supported with AP SME enabled\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (params->aid > WIL_MAX_DMG_AID) {
+               wil_err(wil, "invalid aid\n");
+               return -EINVAL;
+       }
+
+       return wmi_new_sta(wil, mac, params->aid);
+}
+
 static int wil_cfg80211_del_station(struct wiphy *wiphy,
                                    struct net_device *dev,
                                    struct station_del_parameters *params)
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
-       wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac,
+       wil_dbg_misc(wil, "del_station: %pM, reason=%d\n", params->mac,
                     params->reason_code);
 
        mutex_lock(&wil->mutex);
@@ -1338,6 +1370,52 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy,
        return 0;
 }
 
+static int wil_cfg80211_change_station(struct wiphy *wiphy,
+                                      struct net_device *dev,
+                                      const u8 *mac,
+                                      struct station_parameters *params)
+{
+       struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+       int authorize;
+       int cid, i;
+       struct vring_tx_data *txdata = NULL;
+
+       wil_dbg_misc(wil, "change station %pM mask 0x%x set 0x%x\n", mac,
+                    params->sta_flags_mask, params->sta_flags_set);
+
+       if (!disable_ap_sme) {
+               wil_dbg_misc(wil, "not supported with AP SME enabled\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
+               return 0;
+
+       cid = wil_find_cid(wil, mac);
+       if (cid < 0) {
+               wil_err(wil, "station not found\n");
+               return -ENOLINK;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(wil->vring2cid_tid); i++)
+               if (wil->vring2cid_tid[i][0] == cid) {
+                       txdata = &wil->vring_tx_data[i];
+                       break;
+               }
+
+       if (!txdata) {
+               wil_err(wil, "vring data not found\n");
+               return -ENOLINK;
+       }
+
+       authorize = params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED);
+       txdata->dot1x_open = authorize ? 1 : 0;
+       wil_dbg_misc(wil, "cid %d vring %d authorize %d\n", cid, i,
+                    txdata->dot1x_open);
+
+       return 0;
+}
+
 /* probe_client handling */
 static void wil_probe_client_handle(struct wil6210_priv *wil,
                                    struct wil_probe_client_req *req)
@@ -1387,7 +1465,7 @@ void wil_probe_client_flush(struct wil6210_priv *wil)
 {
        struct wil_probe_client_req *req, *t;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "probe_client_flush\n");
 
        mutex_lock(&wil->probe_client_mutex);
 
@@ -1407,7 +1485,7 @@ static int wil_cfg80211_probe_client(struct wiphy *wiphy,
        struct wil_probe_client_req *req;
        int cid = wil_find_cid(wil, peer);
 
-       wil_dbg_misc(wil, "%s(%pM => CID %d)\n", __func__, peer, cid);
+       wil_dbg_misc(wil, "probe_client: %pM => CID %d\n", peer, cid);
 
        if (cid < 0)
                return -ENOLINK;
@@ -1435,7 +1513,7 @@ static int wil_cfg80211_change_bss(struct wiphy *wiphy,
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
        if (params->ap_isolate >= 0) {
-               wil_dbg_misc(wil, "%s(ap_isolate %d => %d)\n", __func__,
+               wil_dbg_misc(wil, "change_bss: ap_isolate %d => %d\n",
                             wil->ap_isolate, params->ap_isolate);
                wil->ap_isolate = params->ap_isolate;
        }
@@ -1448,7 +1526,7 @@ static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy,
 {
        struct wil6210_priv *wil = wiphy_to_wil(wiphy);
 
-       wil_dbg_misc(wil, "%s: entered\n", __func__);
+       wil_dbg_misc(wil, "start_p2p_device: entered\n");
        wil->p2p.p2p_dev_started = 1;
        return 0;
 }
@@ -1462,7 +1540,7 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
        if (!p2p->p2p_dev_started)
                return;
 
-       wil_dbg_misc(wil, "%s: entered\n", __func__);
+       wil_dbg_misc(wil, "stop_p2p_device: entered\n");
        mutex_lock(&wil->mutex);
        mutex_lock(&wil->p2p_wdev_mutex);
        wil_p2p_stop_radio_operations(wil);
@@ -1499,7 +1577,7 @@ static int wil_cfg80211_set_power_mgmt(struct wiphy *wiphy,
        return rc;
 }
 
-static struct cfg80211_ops wil_cfg80211_ops = {
+static const struct cfg80211_ops wil_cfg80211_ops = {
        .add_virtual_intf = wil_cfg80211_add_iface,
        .del_virtual_intf = wil_cfg80211_del_iface,
        .scan = wil_cfg80211_scan,
@@ -1521,7 +1599,9 @@ static struct cfg80211_ops wil_cfg80211_ops = {
        .change_beacon = wil_cfg80211_change_beacon,
        .start_ap = wil_cfg80211_start_ap,
        .stop_ap = wil_cfg80211_stop_ap,
+       .add_station = wil_cfg80211_add_station,
        .del_station = wil_cfg80211_del_station,
+       .change_station = wil_cfg80211_change_station,
        .probe_client = wil_cfg80211_probe_client,
        .change_bss = wil_cfg80211_change_bss,
        /* P2P device */
@@ -1542,10 +1622,11 @@ static void wil_wiphy_init(struct wiphy *wiphy)
                                 BIT(NL80211_IFTYPE_P2P_GO) |
                                 BIT(NL80211_IFTYPE_P2P_DEVICE) |
                                 BIT(NL80211_IFTYPE_MONITOR);
-       wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
-                       WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+       wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
                        WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
                        WIPHY_FLAG_PS_ON_BY_DEFAULT;
+       if (!disable_ap_sme)
+               wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME;
        dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
                __func__, wiphy->flags);
        wiphy->probe_resp_offload =
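
The AP-SME gating added above follows one pattern throughout: the station
ops are always wired into wil_cfg80211_ops, each handler bails out unless
host AP SME was requested, and WIPHY_FLAG_HAVE_AP_SME is advertised only
while firmware keeps the SME. A minimal sketch of that shape, with
disable_ap_sme standing in for the module parameter defined elsewhere in
this series:

    /* sketch only: gate cfg80211 station ops on a host-SME switch */
    static bool disable_ap_sme;     /* assumed module parameter */

    static int demo_add_station(struct wiphy *wiphy, struct net_device *dev,
                                const u8 *mac,
                                struct station_parameters *params)
    {
            if (!disable_ap_sme)    /* firmware SME owns station management */
                    return -EOPNOTSUPP;
            /* validate params->aid here, then hand the station to firmware */
            return 0;
    }

    static void demo_wiphy_init(struct wiphy *wiphy)
    {
            if (!disable_ap_sme)
                    wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME;
    }
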
index 5e4058a4037b414d1c5c2dc764f96960e6cd3c14..3e8cdf12fedad5251a36aaedecf7470f80ef4d7e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -364,13 +364,13 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
 }
 
 static const struct dbg_off isr_off[] = {
-       {"ICC", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICC), doff_io32},
-       {"ICR", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICR), doff_io32},
-       {"ICM", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICM), doff_io32},
-       {"ICS",           S_IWUSR, offsetof(struct RGF_ICR, ICS), doff_io32},
-       {"IMV", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, IMV), doff_io32},
-       {"IMS",           S_IWUSR, offsetof(struct RGF_ICR, IMS), doff_io32},
-       {"IMC",           S_IWUSR, offsetof(struct RGF_ICR, IMC), doff_io32},
+       {"ICC", 0644, offsetof(struct RGF_ICR, ICC), doff_io32},
+       {"ICR", 0644, offsetof(struct RGF_ICR, ICR), doff_io32},
+       {"ICM", 0644, offsetof(struct RGF_ICR, ICM), doff_io32},
+       {"ICS", 0244, offsetof(struct RGF_ICR, ICS), doff_io32},
+       {"IMV", 0644, offsetof(struct RGF_ICR, IMV), doff_io32},
+       {"IMS", 0244, offsetof(struct RGF_ICR, IMS), doff_io32},
+       {"IMC", 0244, offsetof(struct RGF_ICR, IMC), doff_io32},
        {},
 };
 
@@ -390,9 +390,9 @@ static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil,
 }
 
 static const struct dbg_off pseudo_isr_off[] = {
-       {"CAUSE",   S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32},
-       {"MASK_SW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32},
-       {"MASK_FW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32},
+       {"CAUSE",   0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32},
+       {"MASK_SW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32},
+       {"MASK_FW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32},
        {},
 };
 
@@ -411,40 +411,40 @@ static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
 }
 
 static const struct dbg_off lgc_itr_cnt_off[] = {
-       {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32},
-       {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32},
-       {"CTL",  S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32},
+       {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32},
+       {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32},
+       {"CTL",  0644, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32},
        {},
 };
 
 static const struct dbg_off tx_itr_cnt_off[] = {
-       {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH),
+       {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH),
         doff_io32},
-       {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA),
+       {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA),
         doff_io32},
-       {"CTL",  S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL),
+       {"CTL",  0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL),
         doff_io32},
-       {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH),
+       {"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH),
         doff_io32},
-       {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA),
+       {"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA),
         doff_io32},
-       {"IDL_CTL",  S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL),
+       {"IDL_CTL",  0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL),
         doff_io32},
        {},
 };
 
 static const struct dbg_off rx_itr_cnt_off[] = {
-       {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH),
+       {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH),
         doff_io32},
-       {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA),
+       {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA),
         doff_io32},
-       {"CTL",  S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL),
+       {"CTL",  0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL),
         doff_io32},
-       {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH),
+       {"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH),
         doff_io32},
-       {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA),
+       {"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA),
         doff_io32},
-       {"IDL_CTL",  S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL),
+       {"IDL_CTL",  0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL),
         doff_io32},
        {},
 };
@@ -813,7 +813,7 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
        rc = wil_cfg80211_mgmt_tx(wiphy, wdev, &params, NULL);
 
        kfree(frame);
-       wil_info(wil, "%s() -> %d\n", __func__, rc);
+       wil_info(wil, "-> %d\n", rc);
 
        return len;
 }
@@ -855,7 +855,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
        rc1 = wmi_send(wil, cmdid, cmd, cmdlen);
        kfree(wmi);
 
-       wil_info(wil, "%s(0x%04x[%d]) -> %d\n", __func__, cmdid, cmdlen, rc1);
+       wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1);
 
        return rc;
 }
@@ -1379,6 +1379,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
        for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
                struct wil_sta_info *p = &wil->sta[i];
                char *status = "unknown";
+               u8 aid = 0;
 
                switch (p->status) {
                case wil_sta_unused:
@@ -1389,9 +1390,10 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
                        break;
                case wil_sta_connected:
                        status = "connected";
+                       aid = p->aid;
                        break;
                }
-               seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
+               seq_printf(s, "[%d] %pM %s AID %d\n", i, p->addr, status, aid);
 
                if (p->status == wil_sta_connected) {
                        spin_lock_bh(&p->tid_rx_lock);
@@ -1622,7 +1624,7 @@ static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
                blob->data = (void * __force)wil->csr + HOSTADDR(map->host);
                blob->size = map->to - map->from;
                snprintf(name, sizeof(name), "blob_%s", map->name);
-               wil_debugfs_create_ioblob(name, S_IRUGO, dbg, wil_blob);
+               wil_debugfs_create_ioblob(name, 0444, dbg, wil_blob);
        }
 }
 
@@ -1632,29 +1634,29 @@ static const struct {
        umode_t mode;
        const struct file_operations *fops;
 } dbg_files[] = {
-       {"mbox",        S_IRUGO,                &fops_mbox},
-       {"vrings",      S_IRUGO,                &fops_vring},
-       {"stations",    S_IRUGO,                &fops_sta},
-       {"desc",        S_IRUGO,                &fops_txdesc},
-       {"bf",          S_IRUGO,                &fops_bf},
-       {"ssid",        S_IRUGO | S_IWUSR,      &fops_ssid},
-       {"mem_val",     S_IRUGO,                &fops_memread},
-       {"reset",                 S_IWUSR,      &fops_reset},
-       {"rxon",                  S_IWUSR,      &fops_rxon},
-       {"tx_mgmt",               S_IWUSR,      &fops_txmgmt},
-       {"wmi_send",              S_IWUSR,      &fops_wmi},
-       {"back",        S_IRUGO | S_IWUSR,      &fops_back},
-       {"pmccfg",      S_IRUGO | S_IWUSR,      &fops_pmccfg},
-       {"pmcdata",     S_IRUGO,                &fops_pmcdata},
-       {"temp",        S_IRUGO,                &fops_temp},
-       {"freq",        S_IRUGO,                &fops_freq},
-       {"link",        S_IRUGO,                &fops_link},
-       {"info",        S_IRUGO,                &fops_info},
-       {"recovery",    S_IRUGO | S_IWUSR,      &fops_recovery},
-       {"led_cfg",     S_IRUGO | S_IWUSR,      &fops_led_cfg},
-       {"led_blink_time",      S_IRUGO | S_IWUSR,      &fops_led_blink_time},
-       {"fw_capabilities",     S_IRUGO,        &fops_fw_capabilities},
-       {"fw_version",  S_IRUGO,                &fops_fw_version},
+       {"mbox",        0444,           &fops_mbox},
+       {"vrings",      0444,           &fops_vring},
+       {"stations", 0444,              &fops_sta},
+       {"desc",        0444,           &fops_txdesc},
+       {"bf",          0444,           &fops_bf},
+       {"ssid",        0644,           &fops_ssid},
+       {"mem_val",     0644,           &fops_memread},
+       {"reset",       0244,           &fops_reset},
+       {"rxon",        0244,           &fops_rxon},
+       {"tx_mgmt",     0244,           &fops_txmgmt},
+       {"wmi_send", 0244,              &fops_wmi},
+       {"back",        0644,           &fops_back},
+       {"pmccfg",      0644,           &fops_pmccfg},
+       {"pmcdata",     0444,           &fops_pmcdata},
+       {"temp",        0444,           &fops_temp},
+       {"freq",        0444,           &fops_freq},
+       {"link",        0444,           &fops_link},
+       {"info",        0444,           &fops_info},
+       {"recovery", 0644,              &fops_recovery},
+       {"led_cfg",     0644,           &fops_led_cfg},
+       {"led_blink_time",      0644,   &fops_led_blink_time},
+       {"fw_capabilities",     0444,   &fops_fw_capabilities},
+       {"fw_version",  0444,           &fops_fw_version},
 };
 
 static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1693,30 +1695,32 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
 
 /* fields in struct wil6210_priv */
 static const struct dbg_off dbg_wil_off[] = {
-       WIL_FIELD(privacy,      S_IRUGO,                doff_u32),
-       WIL_FIELD(status[0],    S_IRUGO | S_IWUSR,      doff_ulong),
-       WIL_FIELD(hw_version,   S_IRUGO,                doff_x32),
-       WIL_FIELD(recovery_count, S_IRUGO,              doff_u32),
-       WIL_FIELD(ap_isolate,   S_IRUGO,                doff_u32),
-       WIL_FIELD(discovery_mode, S_IRUGO | S_IWUSR,    doff_u8),
+       WIL_FIELD(privacy,        0444, doff_u32),
+       WIL_FIELD(status[0],      0644, doff_ulong),
+       WIL_FIELD(hw_version,     0444, doff_x32),
+       WIL_FIELD(recovery_count, 0444, doff_u32),
+       WIL_FIELD(ap_isolate,     0444, doff_u32),
+       WIL_FIELD(discovery_mode, 0644, doff_u8),
+       WIL_FIELD(chip_revision,  0444, doff_u8),
+       WIL_FIELD(abft_len,       0644, doff_u8),
        {},
 };
 
 static const struct dbg_off dbg_wil_regs[] = {
-       {"RGF_MAC_MTRL_COUNTER_0", S_IRUGO, HOSTADDR(RGF_MAC_MTRL_COUNTER_0),
+       {"RGF_MAC_MTRL_COUNTER_0", 0444, HOSTADDR(RGF_MAC_MTRL_COUNTER_0),
                doff_io32},
-       {"RGF_USER_USAGE_1", S_IRUGO, HOSTADDR(RGF_USER_USAGE_1), doff_io32},
+       {"RGF_USER_USAGE_1", 0444, HOSTADDR(RGF_USER_USAGE_1), doff_io32},
        {},
 };
 
 /* static parameters */
 static const struct dbg_off dbg_statics[] = {
-       {"desc_index",  S_IRUGO | S_IWUSR, (ulong)&dbg_txdesc_index, doff_u32},
-       {"vring_index", S_IRUGO | S_IWUSR, (ulong)&dbg_vring_index, doff_u32},
-       {"mem_addr",    S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32},
-       {"vring_idle_trsh", S_IRUGO | S_IWUSR, (ulong)&vring_idle_trsh,
+       {"desc_index",  0644, (ulong)&dbg_txdesc_index, doff_u32},
+       {"vring_index", 0644, (ulong)&dbg_vring_index, doff_u32},
+       {"mem_addr",    0644, (ulong)&mem_addr, doff_u32},
+       {"vring_idle_trsh", 0644, (ulong)&vring_idle_trsh,
         doff_u32},
-       {"led_polarity", S_IRUGO | S_IWUSR, (ulong)&led_polarity, doff_u8},
+       {"led_polarity", 0644, (ulong)&led_polarity, doff_u8},
        {},
 };
 
index 7053b62ca8d313ac593143bb4619ed13db252de7..adcfef4dabf756da33acd4d6bab05f09f9afdb7e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -27,7 +27,7 @@ static int wil_ethtoolops_begin(struct net_device *ndev)
 
        mutex_lock(&wil->mutex);
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "ethtoolops_begin\n");
 
        return 0;
 }
@@ -36,7 +36,7 @@ static void wil_ethtoolops_complete(struct net_device *ndev)
 {
        struct wil6210_priv *wil = ndev_to_wil(ndev);
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "ethtoolops_complete\n");
 
        mutex_unlock(&wil->mutex);
 }
@@ -48,7 +48,7 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
        u32 tx_itr_en, tx_itr_val = 0;
        u32 rx_itr_en, rx_itr_val = 0;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "ethtoolops_get_coalesce\n");
 
        tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
        if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
@@ -68,7 +68,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
 {
        struct wil6210_priv *wil = ndev_to_wil(ndev);
 
-       wil_dbg_misc(wil, "%s(rx %d usec, tx %d usec)\n", __func__,
+       wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n",
                     cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
 
        if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
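
For reference, get_coalesce above reports a moderation value only while
the matching enable bit is set in the control register. A sketch of that
convention, with placeholder names:

    #include <linux/ethtool.h>

    /* report moderation only while its enable bit is set */
    static void demo_report_coalesce(struct ethtool_coalesce *cp,
                                     u32 tx_ctl, u32 tx_usecs, u32 en_bit)
    {
            cp->tx_coalesce_usecs = (tx_ctl & en_bit) ? tx_usecs : 0;
    }
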
index 82aae2d705b41803fee7b44ab174be875b159f96..540fc20984d8fe8c65ef56b074f47030da518084 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2015,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -19,8 +19,9 @@
 #include "wil6210.h"
 #include "fw.h"
 
-MODULE_FIRMWARE(WIL_FW_NAME);
-MODULE_FIRMWARE(WIL_FW2_NAME);
+MODULE_FIRMWARE(WIL_FW_NAME_DEFAULT);
+MODULE_FIRMWARE(WIL_FW_NAME_SPARROW_PLUS);
+MODULE_FIRMWARE(WIL_BOARD_FILE_NAME);
 
 static
 void wil_memset_toio_32(volatile void __iomem *dst, u32 val,
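
MODULE_FIRMWARE() records names in module metadata (readable via
"modinfo -F firmware", which initramfs generators use to bundle the
right files); it loads nothing at runtime, so the optional Sparrow+
image and the board file each need their own entry. Illustratively,
with hypothetical names:

    #include <linux/module.h>

    MODULE_FIRMWARE("example/default.fw");  /* always requested */
    MODULE_FIRMWARE("example/variant.fw");  /* probed, optional */
    MODULE_FIRMWARE("example/board.brd");   /* board parameters */
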
index 8f40eb301924b1b0100af4f09732c556236734dc..f4901587c0057a9fc749e29a4e5bd7f0ccc5b1f3 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -537,3 +537,22 @@ out:
        release_firmware(fw);
        return rc;
 }
+
+/**
+ * wil_fw_verify_file_exists - check whether a firmware file exists
+ *
+ * @wil: driver context
+ * @name: firmware file name
+ *
+ * Return: true if the firmware file exists, false otherwise
+ */
+bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name)
+{
+       const struct firmware *fw;
+       int rc;
+
+       rc = request_firmware(&fw, name, wil_to_dev(wil));
+       if (!rc)
+               release_firmware(fw);
+       return rc != -ENOENT;
+}
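
A usage sketch for the new helper, mirroring how the capability code in
pcie_bus.c picks a per-stepping image: probe for the optional file and
fall back to the default when it is absent. The helper deliberately
returns true for any error other than -ENOENT, so a later
request_firmware() can still fail and report the real cause.

    /* sketch: prefer an optional image when the file is present */
    const char *fw_name = WIL_FW_NAME_DEFAULT;

    if (wil_fw_verify_file_exists(wil, WIL_FW_NAME_SPARROW_PLUS))
            fw_name = WIL_FW_NAME_SPARROW_PLUS;
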
index 64046e0bd0a228ae724da21aa4592a6cdd8dea4e..cab1e5c0e3747de4ee4c12fe30852d9631610b89 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -94,7 +94,7 @@ static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
 
 static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
 {
-       wil_dbg_irq(wil, "%s: mask_halp(%s)\n", __func__,
+       wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n",
                    mask_halp ? "true" : "false");
 
        wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
@@ -103,7 +103,7 @@ static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
 
 void wil6210_mask_halp(struct wil6210_priv *wil)
 {
-       wil_dbg_irq(wil, "%s()\n", __func__);
+       wil_dbg_irq(wil, "mask_halp\n");
 
        wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
              BIT_DMA_EP_MISC_ICR_HALP);
@@ -111,7 +111,7 @@ void wil6210_mask_halp(struct wil6210_priv *wil)
 
 static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
 {
-       wil_dbg_irq(wil, "%s()\n", __func__);
+       wil_dbg_irq(wil, "mask_irq_pseudo\n");
 
        wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE);
 
@@ -134,7 +134,7 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
 
 static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
 {
-       wil_dbg_irq(wil, "%s: unmask_halp(%s)\n", __func__,
+       wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n",
                    unmask_halp ? "true" : "false");
 
        wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
@@ -143,7 +143,7 @@ static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
 
 static void wil6210_unmask_halp(struct wil6210_priv *wil)
 {
-       wil_dbg_irq(wil, "%s()\n", __func__);
+       wil_dbg_irq(wil, "unmask_halp\n");
 
        wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
              BIT_DMA_EP_MISC_ICR_HALP);
@@ -151,7 +151,7 @@ static void wil6210_unmask_halp(struct wil6210_priv *wil)
 
 static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
 {
-       wil_dbg_irq(wil, "%s()\n", __func__);
+       wil_dbg_irq(wil, "unmask_irq_pseudo\n");
 
        set_bit(wil_status_irqen, wil->status);
 
@@ -160,7 +160,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
 
 void wil_mask_irq(struct wil6210_priv *wil)
 {
-       wil_dbg_irq(wil, "%s()\n", __func__);
+       wil_dbg_irq(wil, "mask_irq\n");
 
        wil6210_mask_irq_tx(wil);
        wil6210_mask_irq_rx(wil);
@@ -170,7 +170,7 @@ void wil_mask_irq(struct wil6210_priv *wil)
 
 void wil_unmask_irq(struct wil6210_priv *wil)
 {
-       wil_dbg_irq(wil, "%s()\n", __func__);
+       wil_dbg_irq(wil, "unmask_irq\n");
 
        wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
              WIL_ICR_ICC_VALUE);
@@ -187,7 +187,7 @@ void wil_unmask_irq(struct wil6210_priv *wil)
 
 void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
 {
-       wil_dbg_irq(wil, "%s()\n", __func__);
+       wil_dbg_irq(wil, "configure_interrupt_moderation\n");
 
        /* disable interrupt moderation for monitor
         * to get better timestamp precision
@@ -400,7 +400,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
        }
 
        if (isr & BIT_DMA_EP_MISC_ICR_HALP) {
-               wil_dbg_irq(wil, "%s: HALP IRQ invoked\n", __func__);
+               wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
                wil6210_mask_halp(wil);
                isr &= ~BIT_DMA_EP_MISC_ICR_HALP;
                complete(&wil->halp.comp);
@@ -599,7 +599,7 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
 
 void wil6210_set_halp(struct wil6210_priv *wil)
 {
-       wil_dbg_irq(wil, "%s()\n", __func__);
+       wil_dbg_irq(wil, "set_halp\n");
 
        wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS),
              BIT_DMA_EP_MISC_ICR_HALP);
@@ -607,7 +607,7 @@ void wil6210_set_halp(struct wil6210_priv *wil)
 
 void wil6210_clear_halp(struct wil6210_priv *wil)
 {
-       wil_dbg_irq(wil, "%s()\n", __func__);
+       wil_dbg_irq(wil, "clear_halp\n");
 
        wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR),
              BIT_DMA_EP_MISC_ICR_HALP);
@@ -618,7 +618,7 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
 {
        int rc;
 
-       wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? "MSI" : "INTx");
+       wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx");
 
        rc = request_threaded_irq(irq, wil6210_hardirq,
                                  wil6210_thread_irq,
@@ -629,7 +629,7 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
 
 void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
 {
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "fini_irq:\n");
 
        wil_mask_irq(wil);
        free_irq(irq, wil);
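
The masking helpers in this file all rely on the same ICR block layout:
each interrupt group exposes write-one-to-act registers at fixed
offsets, so masking or unmasking is a single register write. A sketch
using the accessors seen above (wil_w() and struct RGF_ICR):

    /* sketch: mask/unmask one cause bit in an ICR register block */
    static void demo_mask(struct wil6210_priv *wil, u32 block, u32 bit)
    {
            wil_w(wil, block + offsetof(struct RGF_ICR, IMS), bit);
    }

    static void demo_unmask(struct wil6210_priv *wil, u32 block, u32 bit)
    {
            wil_w(wil, block + offsetof(struct RGF_ICR, IMC), bit);
    }
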
index e2e021bcaa03d0020e0c7b966083df24a4989e1d..efb1f59aafd9956e5995eede2d606f202cc639ca 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
 #define WAIT_FOR_SCAN_ABORT_MS 1000
 
 bool debug_fw; /* = false; */
-module_param(debug_fw, bool, S_IRUGO);
+module_param(debug_fw, bool, 0444);
 MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
 
 static bool oob_mode;
-module_param(oob_mode, bool, S_IRUGO);
+module_param(oob_mode, bool, 0444);
 MODULE_PARM_DESC(oob_mode,
                 " enable out of the box (OOB) mode in FW, for diagnostics and certification");
 
 bool no_fw_recovery;
-module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
+module_param(no_fw_recovery, bool, 0644);
 MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
 
 /* if not set via modparam, will be set to default value of 1/8 of
  * rx ring size during init flow
  */
 unsigned short rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_INIT;
-module_param(rx_ring_overflow_thrsh, ushort, S_IRUGO);
+module_param(rx_ring_overflow_thrsh, ushort, 0444);
 MODULE_PARM_DESC(rx_ring_overflow_thrsh,
                 " RX ring overflow threshold in descriptors.");
 
@@ -73,7 +73,7 @@ static const struct kernel_param_ops mtu_max_ops = {
        .get = param_get_uint,
 };
 
-module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, S_IRUGO);
+module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, 0444);
 MODULE_PARM_DESC(mtu_max, " Max MTU value.");
 
 static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT;
@@ -102,11 +102,11 @@ static const struct kernel_param_ops ring_order_ops = {
        .get = param_get_uint,
 };
 
-module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, S_IRUGO);
+module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, 0444);
 MODULE_PARM_DESC(rx_ring_order, " Rx ring order; size = 1 << order");
-module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, S_IRUGO);
+module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, 0444);
 MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order");
-module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, S_IRUGO);
+module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444);
 MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order");
 
 #define RST_DELAY (20) /* msec, for loop in @wil_target_reset */
@@ -172,12 +172,16 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
        struct wil_sta_info *sta = &wil->sta[cid];
 
        might_sleep();
-       wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
-                    sta->status);
+       wil_dbg_misc(wil, "disconnect_cid: CID %d, status %d\n",
+                    cid, sta->status);
        /* inform upper/lower layers */
        if (sta->status != wil_sta_unused) {
-               if (!from_event)
-                       wmi_disconnect_sta(wil, sta->addr, reason_code, true);
+               if (!from_event) {
+                       bool del_sta = (wdev->iftype == NL80211_IFTYPE_AP) ?
+                                               disable_ap_sme : false;
+                       wmi_disconnect_sta(wil, sta->addr, reason_code,
+                                          true, del_sta);
+               }
 
                switch (wdev->iftype) {
                case NL80211_IFTYPE_AP:
@@ -237,7 +241,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
                return;
 
        might_sleep();
-       wil_info(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid,
+       wil_info(wil, "bssid=%pM, reason=%d, ev%s\n", bssid,
                 reason_code, from_event ? "+" : "-");
 
        /* Cases are:
@@ -347,7 +351,7 @@ static int wil_wait_for_recovery(struct wil6210_priv *wil)
 
 void wil_set_recovery_state(struct wil6210_priv *wil, int state)
 {
-       wil_dbg_misc(wil, "%s(%d -> %d)\n", __func__,
+       wil_dbg_misc(wil, "set_recovery_state: %d -> %d\n",
                     wil->recovery_state, state);
 
        wil->recovery_state = state;
@@ -489,7 +493,7 @@ int wil_priv_init(struct wil6210_priv *wil)
 {
        uint i;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "priv_init\n");
 
        memset(wil->sta, 0, sizeof(wil->sta));
        for (i = 0; i < WIL6210_MAX_CID; i++)
@@ -564,7 +568,7 @@ out_wmi_wq:
 void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
                        u16 reason_code, bool from_event)
 {
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "disconnect\n");
 
        del_timer_sync(&wil->connect_timer);
        _wil6210_disconnect(wil, bssid, reason_code, from_event);
@@ -572,7 +576,7 @@ void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
 
 void wil_priv_deinit(struct wil6210_priv *wil)
 {
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "priv_deinit\n");
 
        wil_set_recovery_state(wil, fw_recovery_idle);
        del_timer_sync(&wil->scan_timer);
@@ -605,7 +609,7 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
 
 static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable)
 {
-       wil_info(wil, "%s: enable=%d\n", __func__, enable);
+       wil_info(wil, "enable=%d\n", enable);
        if (enable)
                wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
        else
@@ -861,7 +865,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 {
        int rc;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "reset\n");
 
        WARN_ON(!mutex_is_locked(&wil->mutex));
        WARN_ON(test_bit(wil_status_napi_en, wil->status));
@@ -884,9 +888,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
                rc = wil->platform_ops.notify(wil->platform_handle,
                                              WIL_PLATFORM_EVT_PRE_RESET);
                if (rc)
-                       wil_err(wil,
-                               "%s: PRE_RESET platform notify failed, rc %d\n",
-                               __func__, rc);
+                       wil_err(wil, "PRE_RESET platform notify failed, rc %d\n",
+                               rc);
        }
 
        set_bit(wil_status_resetting, wil->status);
@@ -915,7 +918,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
        flush_workqueue(wil->wmi_wq);
 
        wil_bl_crash_info(wil, false);
+       wil_disable_irq(wil);
        rc = wil_target_reset(wil);
+       wil6210_clear_irq(wil);
+       wil_enable_irq(wil);
        wil_rx_fini(wil);
        if (rc) {
                wil_bl_crash_info(wil, true);
@@ -930,16 +936,16 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
 
        wil_set_oob_mode(wil, oob_mode);
        if (load_fw) {
-               wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME,
-                        WIL_FW2_NAME);
+               wil_info(wil, "Use firmware <%s> + board <%s>\n",
+                        wil->wil_fw_name, WIL_BOARD_FILE_NAME);
 
                wil_halt_cpu(wil);
                memset(wil->fw_version, 0, sizeof(wil->fw_version));
                /* Loading f/w from the file */
-               rc = wil_request_firmware(wil, WIL_FW_NAME, true);
+               rc = wil_request_firmware(wil, wil->wil_fw_name, true);
                if (rc)
                        return rc;
-               rc = wil_request_firmware(wil, WIL_FW2_NAME, true);
+               rc = wil_request_firmware(wil, WIL_BOARD_FILE_NAME, true);
                if (rc)
                        return rc;
 
@@ -976,8 +982,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
                /* check FW is responsive */
                rc = wmi_echo(wil);
                if (rc) {
-                       wil_err(wil, "%s: wmi_echo failed, rc %d\n",
-                               __func__, rc);
+                       wil_err(wil, "wmi_echo failed, rc %d\n", rc);
                        return rc;
                }
 
@@ -987,9 +992,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
                        rc = wil->platform_ops.notify(wil->platform_handle,
                                                      WIL_PLATFORM_EVT_FW_RDY);
                        if (rc) {
-                               wil_err(wil,
-                                       "%s: FW_RDY notify failed, rc %d\n",
-                                       __func__, rc);
+                               wil_err(wil, "FW_RDY notify failed, rc %d\n",
+                                       rc);
                                rc = 0;
                        }
                }
@@ -1073,7 +1077,7 @@ int wil_up(struct wil6210_priv *wil)
 {
        int rc;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "up\n");
 
        mutex_lock(&wil->mutex);
        rc = __wil_up(wil);
@@ -1113,7 +1117,7 @@ int wil_down(struct wil6210_priv *wil)
 {
        int rc;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "down\n");
 
        wil_set_recovery_state(wil, fw_recovery_idle);
        mutex_lock(&wil->mutex);
@@ -1146,25 +1150,24 @@ void wil_halp_vote(struct wil6210_priv *wil)
 
        mutex_lock(&wil->halp.lock);
 
-       wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+       wil_dbg_irq(wil, "halp_vote: start, HALP ref_cnt (%d)\n",
                    wil->halp.ref_cnt);
 
        if (++wil->halp.ref_cnt == 1) {
                wil6210_set_halp(wil);
                rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
                if (!rc) {
-                       wil_err(wil, "%s: HALP vote timed out\n", __func__);
+                       wil_err(wil, "HALP vote timed out\n");
                        /* Mask HALP as done in case the interrupt is raised */
                        wil6210_mask_halp(wil);
                } else {
                        wil_dbg_irq(wil,
-                                   "%s: HALP vote completed after %d ms\n",
-                                   __func__,
+                                   "halp_vote: HALP vote completed after %d ms\n",
                                    jiffies_to_msecs(to_jiffies - rc));
                }
        }
 
-       wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+       wil_dbg_irq(wil, "halp_vote: end, HALP ref_cnt (%d)\n",
                    wil->halp.ref_cnt);
 
        mutex_unlock(&wil->halp.lock);
@@ -1176,15 +1179,15 @@ void wil_halp_unvote(struct wil6210_priv *wil)
 
        mutex_lock(&wil->halp.lock);
 
-       wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+       wil_dbg_irq(wil, "halp_unvote: start, HALP ref_cnt (%d)\n",
                    wil->halp.ref_cnt);
 
        if (--wil->halp.ref_cnt == 0) {
                wil6210_clear_halp(wil);
-               wil_dbg_irq(wil, "%s: HALP unvote\n", __func__);
+               wil_dbg_irq(wil, "HALP unvote\n");
        }
 
-       wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+       wil_dbg_irq(wil, "halp_unvote:end, HALP ref_cnt (%d)\n",
                    wil->halp.ref_cnt);
 
        mutex_unlock(&wil->halp.lock);
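
Among the print cleanups, a notable functional change in this file is
bracketing the target reset with interrupt control so that causes
latched while the device is down cannot fire on re-enable. The shape,
condensed from wil_reset() above:

    /* sketch of the reset bracketing added to wil_reset() */
    wil_disable_irq(wil);           /* quiesce the line */
    rc = wil_target_reset(wil);
    wil6210_clear_irq(wil);         /* drop causes latched during reset */
    wil_enable_irq(wil);
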
index 6676001dcbcadcd689099a42658640c2a2b8aea9..708facd5f667d991127dc189e229b36d387891f5 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -22,10 +22,11 @@ static int wil_open(struct net_device *ndev)
 {
        struct wil6210_priv *wil = ndev_to_wil(ndev);
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "open\n");
 
-       if (debug_fw) {
-               wil_err(wil, "%s() while in debug_fw mode\n", __func__);
+       if (debug_fw ||
+           test_bit(WMI_FW_CAPABILITY_WMI_ONLY, wil->fw_capabilities)) {
+               wil_err(wil, "while in debug_fw or wmi_only mode\n");
                return -EINVAL;
        }
 
@@ -36,7 +37,7 @@ static int wil_stop(struct net_device *ndev)
 {
        struct wil6210_priv *wil = ndev_to_wil(ndev);
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "stop\n");
 
        return wil_down(wil);
 }
@@ -68,7 +69,7 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
        done = budget - quota;
 
        if (done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, done);
                wil6210_unmask_irq_rx(wil);
                wil_dbg_txrx(wil, "NAPI RX complete\n");
        }
@@ -132,7 +133,7 @@ void *wil_if_alloc(struct device *dev)
        wil->wdev = wdev;
        wil->radio_wdev = wdev;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "if_alloc\n");
 
        rc = wil_priv_init(wil);
        if (rc) {
@@ -179,7 +180,7 @@ void wil_if_free(struct wil6210_priv *wil)
 {
        struct net_device *ndev = wil_to_ndev(wil);
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "if_free\n");
 
        if (!ndev)
                return;
@@ -234,7 +235,7 @@ void wil_if_remove(struct wil6210_priv *wil)
        struct net_device *ndev = wil_to_ndev(wil);
        struct wireless_dev *wdev = wil_to_wdev(wil);
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "if_remove\n");
 
        unregister_netdev(ndev);
        wiphy_unregister(wdev->wiphy);
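
The napi_complete() to napi_complete_done() switch above feeds the
actual work count back to the core, which can use it, for example, for
deferred GRO flushing. The canonical poll shape, with the two demo_*
helpers as placeholders for the driver's RX processing and IRQ
unmasking:

    static int demo_poll(struct napi_struct *napi, int budget)
    {
            int done = demo_process_rx(napi, budget);

            if (done < budget) {
                    /* hand back the real count, not just "complete" */
                    napi_complete_done(napi, done);
                    demo_unmask_rx_irq(napi);
            }
            return done;
    }
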
index fbae99525e0104498e2ed1c89d0fb1b4021093d3..792484756654bcd5e27b831409411f2840c5bd8e 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -69,7 +69,7 @@ void wil_p2p_discovery_timer_fn(ulong x)
 {
        struct wil6210_priv *wil = (void *)x;
 
-       wil_dbg_misc(wil, "%s\n", __func__);
+       wil_dbg_misc(wil, "p2p_discovery_timer_fn\n");
 
        schedule_work(&wil->p2p.discovery_expired_work);
 }
@@ -80,27 +80,25 @@ int wil_p2p_search(struct wil6210_priv *wil,
        int rc;
        struct wil_p2p_info *p2p = &wil->p2p;
 
-       wil_dbg_misc(wil, "%s: channel %d\n",
-                    __func__, P2P_DMG_SOCIAL_CHANNEL);
+       wil_dbg_misc(wil, "p2p_search: channel %d\n", P2P_DMG_SOCIAL_CHANNEL);
 
        lockdep_assert_held(&wil->mutex);
 
        if (p2p->discovery_started) {
-               wil_err(wil, "%s: search failed. discovery already ongoing\n",
-                       __func__);
+               wil_err(wil, "search failed. discovery already ongoing\n");
                rc = -EBUSY;
                goto out;
        }
 
        rc = wmi_p2p_cfg(wil, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI);
        if (rc) {
-               wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__);
+               wil_err(wil, "wmi_p2p_cfg failed\n");
                goto out;
        }
 
        rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
        if (rc) {
-               wil_err(wil, "%s: wmi_set_ssid failed\n", __func__);
+               wil_err(wil, "wmi_set_ssid failed\n");
                goto out_stop;
        }
 
@@ -108,8 +106,7 @@ int wil_p2p_search(struct wil6210_priv *wil,
        rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ,
                        request->ie_len, request->ie);
        if (rc) {
-               wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n",
-                       __func__);
+               wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n");
                goto out_stop;
        }
 
@@ -119,14 +116,13 @@ int wil_p2p_search(struct wil6210_priv *wil,
        rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP,
                        request->ie_len, request->ie);
        if (rc) {
-               wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n",
-                       __func__);
+               wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n");
                goto out_stop;
        }
 
        rc = wmi_start_search(wil);
        if (rc) {
-               wil_err(wil, "%s: wmi_start_search failed\n", __func__);
+               wil_err(wil, "wmi_start_search failed\n");
                goto out_stop;
        }
 
@@ -153,12 +149,12 @@ int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev,
        if (!chan)
                return -EINVAL;
 
-       wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration);
+       wil_dbg_misc(wil, "p2p_listen: duration %d\n", duration);
 
        mutex_lock(&wil->mutex);
 
        if (p2p->discovery_started) {
-               wil_err(wil, "%s: discovery already ongoing\n", __func__);
+               wil_err(wil, "discovery already ongoing\n");
                rc = -EBUSY;
                goto out;
        }
@@ -220,8 +216,8 @@ int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie)
        mutex_lock(&wil->mutex);
 
        if (cookie != p2p->cookie) {
-               wil_info(wil, "%s: Cookie mismatch: 0x%016llx vs. 0x%016llx\n",
-                        __func__, p2p->cookie, cookie);
+               wil_info(wil, "Cookie mismatch: 0x%016llx vs. 0x%016llx\n",
+                        p2p->cookie, cookie);
                mutex_unlock(&wil->mutex);
                return -ENOENT;
        }
@@ -231,7 +227,7 @@ int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie)
        mutex_unlock(&wil->mutex);
 
        if (!started) {
-               wil_err(wil, "%s: listen not started\n", __func__);
+               wil_err(wil, "listen not started\n");
                return -ENOENT;
        }
 
@@ -253,7 +249,7 @@ void wil_p2p_listen_expired(struct work_struct *work)
                        struct wil6210_priv, p2p);
        u8 started;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "p2p_listen_expired\n");
 
        mutex_lock(&wil->mutex);
        started = wil_p2p_stop_discovery(wil);
@@ -279,7 +275,7 @@ void wil_p2p_search_expired(struct work_struct *work)
                        struct wil6210_priv, p2p);
        u8 started;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "p2p_search_expired\n");
 
        mutex_lock(&wil->mutex);
        started = wil_p2p_stop_discovery(wil);
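
The listen and cancel paths above pair on a cookie: the value handed to
cfg80211 when the listen starts must round-trip unchanged through
cancel_remain_on_channel, so a stale cancel is rejected early.
Condensed from wil_p2p_cancel_listen() above:

    /* sketch of the cookie check */
    if (cookie != p2p->cookie) {
            mutex_unlock(&wil->mutex);
            return -ENOENT;         /* cancel targets a stale listen */
    }
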
index 44746ca0d2e6ae86a3581c32d055bfb57586ca9b..874c787727fe5a013acfc558a7804679ee5aa39e 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/rtnetlink.h>
 
 static bool use_msi = true;
-module_param(use_msi, bool, S_IRUGO);
+module_param(use_msi, bool, 0444);
 MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
 
 #ifdef CONFIG_PM
@@ -36,18 +36,38 @@ static int wil6210_pm_notify(struct notifier_block *notify_block,
 static
 void wil_set_capabilities(struct wil6210_priv *wil)
 {
-       u32 rev_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
+       u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
+       u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
+                           RGF_USER_REVISION_ID_MASK);
 
        bitmap_zero(wil->hw_capabilities, hw_capability_last);
        bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
-
-       switch (rev_id) {
-       case JTAG_DEV_ID_SPARROW_B0:
-               wil->hw_name = "Sparrow B0";
-               wil->hw_version = HW_VER_SPARROW_B0;
+       wil->wil_fw_name = WIL_FW_NAME_DEFAULT;
+       wil->chip_revision = chip_revision;
+
+       switch (jtag_id) {
+       case JTAG_DEV_ID_SPARROW:
+               switch (chip_revision) {
+               case REVISION_ID_SPARROW_D0:
+                       wil->hw_name = "Sparrow D0";
+                       wil->hw_version = HW_VER_SPARROW_D0;
+                       if (wil_fw_verify_file_exists(wil,
+                                                     WIL_FW_NAME_SPARROW_PLUS))
+                               wil->wil_fw_name = WIL_FW_NAME_SPARROW_PLUS;
+                       break;
+               case REVISION_ID_SPARROW_B0:
+                       wil->hw_name = "Sparrow B0";
+                       wil->hw_version = HW_VER_SPARROW_B0;
+                       break;
+               default:
+                       wil->hw_name = "Unknown";
+                       wil->hw_version = HW_VER_UNKNOWN;
+                       break;
+               }
                break;
        default:
-               wil_err(wil, "Unknown board hardware 0x%08x\n", rev_id);
+               wil_err(wil, "Unknown board hardware, chip_id 0x%08x, chip_revision 0x%08x\n",
+                       jtag_id, chip_revision);
                wil->hw_name = "Unknown";
                wil->hw_version = HW_VER_UNKNOWN;
        }
@@ -55,7 +75,7 @@ void wil_set_capabilities(struct wil6210_priv *wil)
        wil_info(wil, "Board hardware is %s\n", wil->hw_name);
 
        /* extract FW capabilities from file without loading the FW */
-       wil_request_firmware(wil, WIL_FW_NAME, false);
+       wil_request_firmware(wil, wil->wil_fw_name, false);
 }
 
 void wil_disable_irq(struct wil6210_priv *wil)
@@ -79,8 +99,10 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
         */
        int msi_only = pdev->msi_enabled;
        bool _use_msi = use_msi;
+       bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY,
+                                wil->fw_capabilities);
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "if_pcie_enable, wmi_only %d\n", wmi_only);
 
        pdev->msi_enabled = 0;
 
@@ -103,9 +125,11 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
        if (rc)
                goto stop_master;
 
-       /* need reset here to obtain MAC */
+       /* need reset here to obtain MAC or in case of WMI-only FW, full reset
+        * and fw loading takes place
+        */
        mutex_lock(&wil->mutex);
-       rc = wil_reset(wil, false);
+       rc = wil_reset(wil, wmi_only);
        mutex_unlock(&wil->mutex);
        if (rc)
                goto release_irq;
@@ -125,7 +149,7 @@ static int wil_if_pcie_disable(struct wil6210_priv *wil)
 {
        struct pci_dev *pdev = wil->pdev;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "if_pcie_disable\n");
 
        pci_clear_master(pdev);
        /* disable and release IRQ */
@@ -289,7 +313,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
        struct wil6210_priv *wil = pci_get_drvdata(pdev);
        void __iomem *csr = wil->csr;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "pcie_remove\n");
 
 #ifdef CONFIG_PM
 #ifdef CONFIG_PM_SLEEP
@@ -327,8 +351,7 @@ static int wil6210_suspend(struct device *dev, bool is_runtime)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct wil6210_priv *wil = pci_get_drvdata(pdev);
 
-       wil_dbg_pm(wil, "%s(%s)\n", __func__,
-                  is_runtime ? "runtime" : "system");
+       wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
 
        rc = wil_can_suspend(wil, is_runtime);
        if (rc)
@@ -354,8 +377,7 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct wil6210_priv *wil = pci_get_drvdata(pdev);
 
-       wil_dbg_pm(wil, "%s(%s)\n", __func__,
-                  is_runtime ? "runtime" : "system");
+       wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
 
        /* allow master */
        pci_set_master(pdev);
@@ -375,7 +397,7 @@ static int wil6210_pm_notify(struct notifier_block *notify_block,
        int rc = 0;
        enum wil_platform_event evt;
 
-       wil_dbg_pm(wil, "%s: mode (%ld)\n", __func__, mode);
+       wil_dbg_pm(wil, "pm_notify: mode (%ld)\n", mode);
 
        switch (mode) {
        case PM_HIBERNATION_PREPARE:
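
Hardware detection above becomes two-level: the JTAG device id selects
the chip family, a separate revision register selects the stepping, and
the firmware file name is keyed to the stepping, falling back to the
default when the Sparrow+ image is absent. The decision, condensed from
wil_set_capabilities() above:

    /* sketch of the firmware selection logic */
    wil->wil_fw_name = WIL_FW_NAME_DEFAULT;

    if (jtag_id == JTAG_DEV_ID_SPARROW &&
        chip_revision == REVISION_ID_SPARROW_D0 &&
        wil_fw_verify_file_exists(wil, WIL_FW_NAME_SPARROW_PLUS))
            wil->wil_fw_name = WIL_FW_NAME_SPARROW_PLUS;
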
index 11ee24d509e53f058cad2f7cbc1ebbf4b7f94db8..a0acb2d0cb7933e21f62c98d6b96a80cb51f9ef7 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -21,8 +21,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
        int rc = 0;
        struct wireless_dev *wdev = wil->wdev;
 
-       wil_dbg_pm(wil, "%s(%s)\n", __func__,
-                  is_runtime ? "runtime" : "system");
+       wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system");
 
        if (!netif_running(wil_to_ndev(wil))) {
                /* can always sleep when down */
@@ -59,7 +58,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
        }
 
 out:
-       wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__,
+       wil_dbg_pm(wil, "can_suspend: %s => %s (%d)\n",
                   is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
 
        return rc;
@@ -70,8 +69,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
        int rc = 0;
        struct net_device *ndev = wil_to_ndev(wil);
 
-       wil_dbg_pm(wil, "%s(%s)\n", __func__,
-                  is_runtime ? "runtime" : "system");
+       wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
 
        /* if netif up, hardware is alive, shut it down */
        if (ndev->flags & IFF_UP) {
@@ -86,7 +84,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
                rc = wil->platform_ops.suspend(wil->platform_handle);
 
 out:
-       wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+       wil_dbg_pm(wil, "suspend: %s => %d\n",
                   is_runtime ? "runtime" : "system", rc);
        return rc;
 }
@@ -96,8 +94,7 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
        int rc = 0;
        struct net_device *ndev = wil_to_ndev(wil);
 
-       wil_dbg_pm(wil, "%s(%s)\n", __func__,
-                  is_runtime ? "runtime" : "system");
+       wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
 
        if (wil->platform_ops.resume) {
                rc = wil->platform_ops.resume(wil->platform_handle);
@@ -115,7 +112,7 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
                rc = wil_up(wil);
 
 out:
-       wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+       wil_dbg_pm(wil, "resume: %s => %d\n",
                   is_runtime ? "runtime" : "system", rc);
        return rc;
 }
index b9faae0278c9b33d85a4666bc94e847f3672dd26..3ff4f4ce9feffe24c46754c258c3deb811e69056 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -60,7 +60,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
 
        if (wil_is_pmc_allocated(pmc)) {
                /* sanity check */
-               wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__);
+               wil_err(wil, "ERROR pmc is already allocated\n");
                goto no_release_err;
        }
        if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
@@ -90,21 +90,20 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
        pmc->num_descriptors = num_descriptors;
        pmc->descriptor_size = descriptor_size;
 
-       wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n",
-                    __func__, num_descriptors, descriptor_size);
+       wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
+                    num_descriptors, descriptor_size);
 
        /* allocate descriptors info list in pmc context*/
        pmc->descriptors = kcalloc(num_descriptors,
                                  sizeof(struct desc_alloc_info),
                                  GFP_KERNEL);
        if (!pmc->descriptors) {
-               wil_err(wil, "%s: ERROR allocating pmc skb list\n", __func__);
+               wil_err(wil, "ERROR allocating pmc skb list\n");
                goto no_release_err;
        }
 
-       wil_dbg_misc(wil,
-                    "%s: allocated descriptors info list %p\n",
-                    __func__, pmc->descriptors);
+       wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
+                    pmc->descriptors);
 
        /* Allocate pring buffer and descriptors.
         * vring->va should be aligned on its size rounded up to power of 2
@@ -116,15 +115,14 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
                        GFP_KERNEL);
 
        wil_dbg_misc(wil,
-                    "%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
-                    __func__,
+                    "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
                     pmc->pring_va, &pmc->pring_pa,
                     sizeof(struct vring_tx_desc),
                     num_descriptors,
                     sizeof(struct vring_tx_desc) * num_descriptors);
 
        if (!pmc->pring_va) {
-               wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__);
+               wil_err(wil, "ERROR allocating pmc pring\n");
                goto release_pmc_skb_list;
        }
 
@@ -143,9 +141,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
                        GFP_KERNEL);
 
                if (unlikely(!pmc->descriptors[i].va)) {
-                       wil_err(wil,
-                               "%s: ERROR allocating pmc descriptor %d",
-                               __func__, i);
+                       wil_err(wil, "ERROR allocating pmc descriptor %d", i);
                        goto release_pmc_skbs;
                }
 
@@ -165,21 +161,21 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
                *_d = *d;
        }
 
-       wil_dbg_misc(wil, "%s: allocated successfully\n", __func__);
+       wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");
 
        pmc_cmd.op = WMI_PMC_ALLOCATE;
        pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
        pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);
 
-       wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__);
+       wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
        pmc->last_cmd_status = wmi_send(wil,
                                        WMI_PMC_CMDID,
                                        &pmc_cmd,
                                        sizeof(pmc_cmd));
        if (pmc->last_cmd_status) {
                wil_err(wil,
-                       "%s: WMI_PMC_CMD with ALLOCATE op failed with status %d",
-                       __func__, pmc->last_cmd_status);
+                       "WMI_PMC_CMD with ALLOCATE op failed with status %d",
+                       pmc->last_cmd_status);
                goto release_pmc_skbs;
        }
 
@@ -188,7 +184,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
        return;
 
 release_pmc_skbs:
-       wil_err(wil, "%s: exit on error: Releasing skbs...\n", __func__);
+       wil_err(wil, "exit on error: Releasing skbs...\n");
        for (i = 0; pmc->descriptors[i].va && i < num_descriptors; i++) {
                dma_free_coherent(dev,
                                  descriptor_size,
@@ -197,7 +193,7 @@ release_pmc_skbs:
 
                pmc->descriptors[i].va = NULL;
        }
-       wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__);
+       wil_err(wil, "exit on error: Releasing pring...\n");
 
        dma_free_coherent(dev,
                          sizeof(struct vring_tx_desc) * num_descriptors,
@@ -207,8 +203,7 @@ release_pmc_skbs:
        pmc->pring_va = NULL;
 
 release_pmc_skb_list:
-       wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n",
-               __func__);
+       wil_err(wil, "exit on error: Releasing descriptors info list...\n");
        kfree(pmc->descriptors);
        pmc->descriptors = NULL;
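
One pre-existing detail in the context lines of this error path: the loop condition tests pmc->descriptors[i].va before i < num_descriptors, so when every descriptor was successfully allocated the terminating iteration reads descriptors[num_descriptors].va, one element beyond the array, before the bounds test can stop it. The conventional ordering bounds the index first (a sketch with a hypothetical desc_info type, not a patch from this commit):

#include <linux/device.h>
#include <linux/dma-mapping.h>

struct desc_info {	/* hypothetical, mirrors the fields used above */
	void *va;
	dma_addr_t pa;
};

static void demo_release_descriptors(struct device *dev,
				     struct desc_info *descs,
				     int num, size_t desc_size)
{
	int i;

	/* index check first, then the array access it guards */
	for (i = 0; i < num && descs[i].va; i++) {
		dma_free_coherent(dev, desc_size, descs[i].va, descs[i].pa);
		descs[i].va = NULL;
	}
}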
 
@@ -232,24 +227,23 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
        pmc->last_cmd_status = 0;
 
        if (!wil_is_pmc_allocated(pmc)) {
-               wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n",
-                            __func__);
+               wil_dbg_misc(wil,
+                            "pmc_free: Error, can't free - not allocated\n");
                pmc->last_cmd_status = -EPERM;
                mutex_unlock(&pmc->lock);
                return;
        }
 
        if (send_pmc_cmd) {
-               wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n",
-                            __func__);
+               wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
                pmc_cmd.op = WMI_PMC_RELEASE;
                pmc->last_cmd_status =
                                wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
                                         sizeof(pmc_cmd));
                if (pmc->last_cmd_status) {
                        wil_err(wil,
-                               "%s WMI_PMC_CMD with RELEASE op failed, status %d",
-                               __func__, pmc->last_cmd_status);
+                               "WMI_PMC_CMD with RELEASE op failed, status %d",
+                               pmc->last_cmd_status);
                        /* There's nothing we can do with this error.
                         * Normally, it should never occur.
                         * Continue to freeing all memory allocated for pmc.
@@ -261,8 +255,8 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
                size_t buf_size = sizeof(struct vring_tx_desc) *
                                  pmc->num_descriptors;
 
-               wil_dbg_misc(wil, "%s: free pring va %p\n",
-                            __func__, pmc->pring_va);
+               wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
+                            pmc->pring_va);
                dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);
 
                pmc->pring_va = NULL;
@@ -281,11 +275,11 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
                                          pmc->descriptors[i].pa);
                        pmc->descriptors[i].va = NULL;
                }
-               wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n",
-                            __func__, i, pmc->num_descriptors);
+               wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
+                            pmc->num_descriptors);
                wil_dbg_misc(wil,
-                            "%s: free pmc descriptors info list %p\n",
-                            __func__, pmc->descriptors);
+                            "pmc_free: free pmc descriptors info list %p\n",
+                            pmc->descriptors);
                kfree(pmc->descriptors);
                pmc->descriptors = NULL;
        } else {
@@ -301,7 +295,7 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
  */
 int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
 {
-       wil_dbg_misc(wil, "%s: status %d\n", __func__,
+       wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
                     wil->pmc.last_cmd_status);
 
        return wil->pmc.last_cmd_status;
@@ -324,7 +318,7 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
        mutex_lock(&pmc->lock);
 
        if (!wil_is_pmc_allocated(pmc)) {
-               wil_err(wil, "%s: error, pmc is not allocated!\n", __func__);
+               wil_err(wil, "error, pmc is not allocated!\n");
                pmc->last_cmd_status = -EPERM;
                mutex_unlock(&pmc->lock);
                return -EPERM;
@@ -333,8 +327,8 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
        pmc_size = pmc->descriptor_size * pmc->num_descriptors;
 
        wil_dbg_misc(wil,
-                    "%s: size %u, pos %lld\n",
-                    __func__, (unsigned)count, *f_pos);
+                    "pmc_read: size %u, pos %lld\n",
+                    (u32)count, *f_pos);
 
        pmc->last_cmd_status = 0;
 
@@ -343,15 +337,16 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
        offset = *f_pos - (idx * pmc->descriptor_size);
 
        if (*f_pos >= pmc_size) {
-               wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n",
-                            __func__, *f_pos, (unsigned)pmc_size);
+               wil_dbg_misc(wil,
+                            "pmc_read: reached end of pmc buf: %lld >= %u\n",
+                            *f_pos, (u32)pmc_size);
                pmc->last_cmd_status = -ERANGE;
                goto out;
        }
 
        wil_dbg_misc(wil,
-                    "%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
-                    __func__, *f_pos, idx, offset, count);
+                    "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
+                    *f_pos, idx, offset, count);
 
        /* if no errors, return the copied byte count */
        retval = simple_read_from_buffer(buf,
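
wil_pmc_read() finishes by handing the bounded request to simple_read_from_buffer(), the libfs helper that clamps count to the bytes available, performs the copy_to_user(), and advances *f_pos. The same pattern in a self-contained sketch (demo_read and its data are hypothetical):

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t demo_read(struct file *filp, char __user *buf,
			 size_t count, loff_t *f_pos)
{
	static const char data[] = "pmc descriptor bytes";

	/* clamps count, copies to user space, updates *f_pos */
	return simple_read_from_buffer(buf, count, f_pos,
				       data, sizeof(data));
}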
index 19ed127d4d055703d14e018a1f278a5b8d9dbd89..7404b6f39c6aff08495dd53c6f463d5c20f0e15c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -349,8 +349,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
        rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status,
                               agg_amsdu, agg_wsize, agg_timeout);
        if (rc || (status != WLAN_STATUS_SUCCESS)) {
-               wil_err(wil, "%s: do not apply ba, rc(%d), status(%d)\n",
-                       __func__, rc, status);
+               wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc,
+                       status);
                goto out;
        }
 
@@ -387,7 +387,7 @@ int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
        txdata->addba_in_progress = true;
        rc = wmi_addba(wil, ringid, agg_wsize, agg_timeout);
        if (rc) {
-               wil_err(wil, "%s: wmi_addba failed, rc (%d)", __func__, rc);
+               wil_err(wil, "wmi_addba failed, rc (%d)", rc);
                txdata->addba_in_progress = false;
        }
 
index c1b4bb03e997b31fe8798a8b3543dd117d1fbe26..072182e527e69afe888b4ef54789495ba37d4c6a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
 #include "trace.h"
 
 static bool rtap_include_phy_info;
-module_param(rtap_include_phy_info, bool, S_IRUGO);
+module_param(rtap_include_phy_info, bool, 0444);
 MODULE_PARM_DESC(rtap_include_phy_info,
                 " Include PHY info in the radiotap header, default - no");
 
 bool rx_align_2;
-module_param(rx_align_2, bool, S_IRUGO);
+module_param(rx_align_2, bool, 0444);
 MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
 
 static inline uint wil_rx_snaplen(void)
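
The permission conversions in this hunk are bit-for-bit equivalent: S_IRUGO expands to 0444 and S_IRUGO | S_IWUSR to 0644, and checkpatch prefers the literal octal form. A standalone sketch with a hypothetical parameter:

#include <linux/module.h>

static bool demo_flag;
module_param(demo_flag, bool, 0444);	/* same bits as S_IRUGO */
MODULE_PARM_DESC(demo_flag, " read-only demo parameter");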
@@ -112,7 +112,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
        size_t sz = vring->size * sizeof(vring->va[0]);
        uint i;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "vring_alloc:\n");
 
        BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
 
@@ -745,7 +745,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
                wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
                return;
        }
-       wil_dbg_txrx(wil, "%s()\n", __func__);
+       wil_dbg_txrx(wil, "rx_handle\n");
        while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
                (*quota)--;
 
@@ -768,7 +768,7 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
        struct vring *vring = &wil->vring_rx;
        int rc;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "rx_init\n");
 
        if (vring->va) {
                wil_err(wil, "Rx ring already allocated\n");
@@ -799,7 +799,7 @@ void wil_rx_fini(struct wil6210_priv *wil)
 {
        struct vring *vring = &wil->vring_rx;
 
-       wil_dbg_misc(wil, "%s()\n", __func__);
+       wil_dbg_misc(wil, "rx_fini\n");
 
        if (vring->va)
                wil_vring_free(wil, vring, 0);
@@ -851,7 +851,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
        struct vring *vring = &wil->vring_tx[id];
        struct vring_tx_data *txdata = &wil->vring_tx_data[id];
 
-       wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
+       wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
                     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
        lockdep_assert_held(&wil->mutex);
 
@@ -931,7 +931,7 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
        struct vring *vring = &wil->vring_tx[id];
        struct vring_tx_data *txdata = &wil->vring_tx_data[id];
 
-       wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
+       wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
                     cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
        lockdep_assert_held(&wil->mutex);
 
@@ -993,7 +993,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
        if (!vring->va)
                return;
 
-       wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);
+       wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
 
        spin_lock_bh(&txdata->lock);
        txdata->dot1x_open = false;
@@ -1032,12 +1032,14 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
                        struct vring *v = &wil->vring_tx[i];
                        struct vring_tx_data *txdata = &wil->vring_tx_data[i];
 
-                       wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
-                                    __func__, eth->h_dest, i);
+                       wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
+                                    eth->h_dest, i);
                        if (v->va && txdata->enabled) {
                                return v;
                        } else {
-                               wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
+                               wil_dbg_txrx(wil,
+                                            "find_tx_ucast: vring[%d] not valid\n",
+                                            i);
                                return NULL;
                        }
                }
@@ -1193,17 +1195,6 @@ found:
        return v;
 }
 
-static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
-                                      struct sk_buff *skb)
-{
-       struct wireless_dev *wdev = wil->wdev;
-
-       if (wdev->iftype != NL80211_IFTYPE_AP)
-               return wil_find_tx_bcast_2(wil, skb);
-
-       return wil_find_tx_bcast_1(wil, skb);
-}
-
 static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
                           int vring_index)
 {
@@ -1373,8 +1364,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
        int gso_type;
        int rc = -EINVAL;
 
-       wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
-                    __func__, skb->len, vring_index);
+       wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
+                    vring_index);
 
        if (unlikely(!txdata->enabled))
                return -EINVAL;
@@ -1643,8 +1634,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
        bool mcast = (vring_index == wil->bcast_vring);
        uint len = skb_headlen(skb);
 
-       wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
-                    __func__, skb->len, vring_index);
+       wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len,
+                    vring_index);
 
        if (unlikely(!txdata->enabled))
                return -EINVAL;
@@ -1884,7 +1875,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        static bool pr_once_fw;
        int rc;
 
-       wil_dbg_txrx(wil, "%s()\n", __func__);
+       wil_dbg_txrx(wil, "start_xmit\n");
        if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
                if (!pr_once_fw) {
                        wil_err(wil, "FW not ready\n");
@@ -1903,12 +1894,26 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        pr_once_fw = false;
 
        /* find vring */
-       if (wil->wdev->iftype == NL80211_IFTYPE_STATION) {
-               /* in STA mode (ESS), all to same VRING */
+       if (wil->wdev->iftype == NL80211_IFTYPE_STATION && !wil->pbss) {
+               /* in STA mode (ESS), all to same VRING (to AP) */
                vring = wil_find_tx_vring_sta(wil, skb);
-       } else { /* direct communication, find matching VRING */
-               vring = bcast ? wil_find_tx_bcast(wil, skb) :
-                               wil_find_tx_ucast(wil, skb);
+       } else if (bcast) {
+               if (wil->pbss)
+                       /* in pbss, no bcast VRING - duplicate skb in
+                        * all stations' VRINGs
+                        */
+                       vring = wil_find_tx_bcast_2(wil, skb);
+               else if (wil->wdev->iftype == NL80211_IFTYPE_AP)
+                       /* AP has a dedicated bcast VRING */
+                       vring = wil_find_tx_bcast_1(wil, skb);
+               else
+                       /* unexpected combination, fall back to duplicating
+                        * the skb in all stations' VRINGs
+                        */
+                       vring = wil_find_tx_bcast_2(wil, skb);
+       } else {
+               /* unicast, find specific VRING by dest. address */
+               vring = wil_find_tx_ucast(wil, skb);
        }
        if (unlikely(!vring)) {
                wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
@@ -1982,7 +1987,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
                return 0;
        }
 
-       wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
+       wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);
 
        used_before_complete = wil_vring_used_tx(vring);
 
index 237e1666df2da1efcb2832515e1df8f16adfcade..085a2dbfa21d615887c458e6385ac06cae8dcca3 100644 (file)
@@ -33,10 +33,12 @@ extern int agg_wsize;
 extern u32 vring_idle_trsh;
 extern bool rx_align_2;
 extern bool debug_fw;
+extern bool disable_ap_sme;
 
 #define WIL_NAME "wil6210"
-#define WIL_FW_NAME "wil6210.fw" /* code */
-#define WIL_FW2_NAME "wil6210.brd" /* board & radio parameters */
+#define WIL_FW_NAME_DEFAULT "wil6210.fw" /* code Sparrow B0 */
+#define WIL_FW_NAME_SPARROW_PLUS "wil6210_sparrow_plus.fw" /* code Sparrow D0 */
+#define WIL_BOARD_FILE_NAME "wil6210.brd" /* board & radio parameters */
 
 #define WIL_MAX_BUS_REQUEST_KBPS 800000 /* ~6.1Gbps */
 
@@ -98,6 +100,9 @@ static inline u32 wil_mtu2macbuf(u32 mtu)
 #define WIL6210_RX_HIGH_TRSH_INIT              (0)
 #define WIL6210_RX_HIGH_TRSH_DEFAULT \
                                (1 << (WIL_RX_RING_SIZE_ORDER_DEFAULT - 3))
+#define WIL_MAX_DMG_AID 254 /* for DMG only 1-254 allowed (see
+                            * 802.11REVmc/D5.0, section 9.4.1.8)
+                            */
 /* Hardware definitions begin */
 
 /*
@@ -249,7 +254,12 @@ struct RGF_ICR {
        #define BIT_CAF_OSC_DIG_XTAL_STABLE     BIT(0)
 
 #define RGF_USER_JTAG_DEV_ID   (0x880b34) /* device ID */
-       #define JTAG_DEV_ID_SPARROW_B0  (0x2632072f)
+       #define JTAG_DEV_ID_SPARROW     (0x2632072f)
+
+#define RGF_USER_REVISION_ID           (0x88afe4)
+#define RGF_USER_REVISION_ID_MASK      (3)
+       #define REVISION_ID_SPARROW_B0  (0x0)
+       #define REVISION_ID_SPARROW_D0  (0x3)
 
 /* crash codes for FW/Ucode stored here */
 #define RGF_FW_ASSERT_CODE             (0x91f020)
@@ -257,7 +267,8 @@ struct RGF_ICR {
 
 enum {
        HW_VER_UNKNOWN,
-       HW_VER_SPARROW_B0, /* JTAG_DEV_ID_SPARROW_B0 */
+       HW_VER_SPARROW_B0, /* REVISION_ID_SPARROW_B0 */
+       HW_VER_SPARROW_D0, /* REVISION_ID_SPARROW_D0 */
 };
 
 /* popular locations */
@@ -512,6 +523,7 @@ struct wil_sta_info {
        unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)];
        struct wil_tid_crypto_rx tid_crypto_rx[WIL_STA_TID_NUM];
        struct wil_tid_crypto_rx group_crypto_rx;
+       u8 aid; /* 1-254; 0 if unknown/not reported */
 };
 
 enum {
@@ -583,7 +595,9 @@ struct wil6210_priv {
        DECLARE_BITMAP(status, wil_status_last);
        u8 fw_version[ETHTOOL_FWVERS_LEN];
        u32 hw_version;
+       u8 chip_revision;
        const char *hw_name;
+       const char *wil_fw_name;
        DECLARE_BITMAP(hw_capabilities, hw_capability_last);
        DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
        u8 n_mids; /* number of additional MIDs as reported by FW */
@@ -653,6 +667,7 @@ struct wil6210_priv {
        struct dentry *debug;
        struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
        u8 discovery_mode;
+       u8 abft_len;
 
        void *platform_handle;
        struct wil_platform_ops platform_ops;
@@ -816,8 +831,8 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
 int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
 int wmi_rxon(struct wil6210_priv *wil, bool on);
 int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
-int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
-                      bool full_disconnect);
+int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac,
+                      u16 reason, bool full_disconnect, bool del_sta);
 int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout);
 int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason);
 int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason);
@@ -827,6 +842,7 @@ int wmi_ps_dev_profile_cfg(struct wil6210_priv *wil,
                           enum wmi_ps_profile_type ps_profile);
 int wmi_set_mgmt_retry(struct wil6210_priv *wil, u8 retry_short);
 int wmi_get_mgmt_retry(struct wil6210_priv *wil, u8 *retry_short);
+int wmi_new_sta(struct wil6210_priv *wil, const u8 *mac, u8 aid);
 int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
                         u8 dialog_token, __le16 ba_param_set,
                         __le16 ba_timeout, __le16 ba_seq_ctrl);
@@ -918,6 +934,7 @@ int wil_iftype_nl2wmi(enum nl80211_iftype type);
 int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd);
 int wil_request_firmware(struct wil6210_priv *wil, const char *name,
                         bool load);
+bool wil_fw_verify_file_exists(struct wil6210_priv *wil, const char *name);
 
 int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime);
 int wil_suspend(struct wil6210_priv *wil, bool is_runtime);
index d051eea47a54ec58af7a154f411d5c0c9e6d3288..e53cf0cf70315ed3b32b3851fc5724c985c00de2 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -62,13 +62,13 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
        u32 host_min, dump_size, offset, len;
 
        if (wil_fw_get_crash_dump_bounds(wil, &dump_size, &host_min)) {
-               wil_err(wil, "%s: fail to obtain crash dump size\n", __func__);
+               wil_err(wil, "fail to obtain crash dump size\n");
                return -EINVAL;
        }
 
        if (dump_size > size) {
-               wil_err(wil, "%s: not enough space for dump. Need %d have %d\n",
-                       __func__, dump_size, size);
+               wil_err(wil, "not enough space for dump. Need %d have %d\n",
+                       dump_size, size);
                return -EINVAL;
        }
 
@@ -83,8 +83,9 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
                len = map->to - map->from;
                offset = map->host - host_min;
 
-               wil_dbg_misc(wil, "%s() - dump %s, size %d, offset %d\n",
-                            __func__, fw_mapping[i].name, len, offset);
+               wil_dbg_misc(wil,
+                            "fw_copy_crash_dump: - dump %s, size %d, offset %d\n",
+                            fw_mapping[i].name, len, offset);
 
                wil_memcpy_fromio_32((void * __force)(dest + offset),
                                     (const void __iomem * __force)data, len);
@@ -99,7 +100,7 @@ void wil_fw_core_dump(struct wil6210_priv *wil)
        u32 fw_dump_size;
 
        if (wil_fw_get_crash_dump_bounds(wil, &fw_dump_size, NULL)) {
-               wil_err(wil, "%s: fail to get fw dump size\n", __func__);
+               wil_err(wil, "fail to get fw dump size\n");
                return;
        }
 
@@ -115,6 +116,5 @@ void wil_fw_core_dump(struct wil6210_priv *wil)
         * after 5 min
         */
        dev_coredumpv(wil_to_dev(wil), fw_dump_data, fw_dump_size, GFP_KERNEL);
-       wil_info(wil, "%s: fw core dumped, size %d bytes\n", __func__,
-                fw_dump_size);
+       wil_info(wil, "fw core dumped, size %d bytes\n", fw_dump_size);
 }
index 7585003bef67cb867263ab5460aaef90c6d01d47..1f22c19696b11914e7e79a470882f7de692753c0 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
 #include "trace.h"
 
 static uint max_assoc_sta = WIL6210_MAX_CID;
-module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR);
+module_param(max_assoc_sta, uint, 0644);
 MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP");
 
 int agg_wsize; /* = 0; */
-module_param(agg_wsize, int, S_IRUGO | S_IWUSR);
+module_param(agg_wsize, int, 0644);
 MODULE_PARM_DESC(agg_wsize, " Window size for Tx Block Ack after connect;"
                 " 0 - use default; < 0 - don't auto-establish");
 
 u8 led_id = WIL_LED_INVALID_ID;
-module_param(led_id, byte, S_IRUGO);
+module_param(led_id, byte, 0444);
 MODULE_PARM_DESC(led_id,
                 " 60G device led enablement. Set the led ID (0-2) to enable");
 
@@ -495,8 +495,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
        }
 
        ch = evt->channel + 1;
-       wil_info(wil, "Connect %pM channel [%d] cid %d\n",
-                evt->bssid, ch, evt->cid);
+       wil_info(wil, "Connect %pM channel [%d] cid %d aid %d\n",
+                evt->bssid, ch, evt->cid, evt->aid);
        wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
                         evt->assoc_info, len - sizeof(*evt), true);
 
@@ -539,8 +539,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
        } else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
                   (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
                if (wil->sta[evt->cid].status != wil_sta_unused) {
-                       wil_err(wil, "%s: AP: Invalid status %d for CID %d\n",
-                               __func__, wil->sta[evt->cid].status, evt->cid);
+                       wil_err(wil, "AP: Invalid status %d for CID %d\n",
+                               wil->sta[evt->cid].status, evt->cid);
                        mutex_unlock(&wil->mutex);
                        return;
                }
@@ -553,22 +553,19 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
 
        rc = wil_tx_init(wil, evt->cid);
        if (rc) {
-               wil_err(wil, "%s: config tx vring failed for CID %d, rc (%d)\n",
-                       __func__, evt->cid, rc);
+               wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n",
+                       evt->cid, rc);
                wmi_disconnect_sta(wil, wil->sta[evt->cid].addr,
-                                  WLAN_REASON_UNSPECIFIED, false);
+                                  WLAN_REASON_UNSPECIFIED, false, false);
        } else {
-               wil_info(wil, "%s: successful connection to CID %d\n",
-                        __func__, evt->cid);
+               wil_info(wil, "successful connection to CID %d\n", evt->cid);
        }
 
        if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
            (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) {
                if (rc) {
                        netif_carrier_off(ndev);
-                       wil_err(wil,
-                               "%s: cfg80211_connect_result with failure\n",
-                               __func__);
+                       wil_err(wil, "cfg80211_connect_result with failure\n");
                        cfg80211_connect_result(ndev, evt->bssid, NULL, 0,
                                                NULL, 0,
                                                WLAN_STATUS_UNSPECIFIED_FAILURE,
@@ -583,8 +580,12 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
                }
        } else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
                   (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
-               if (rc)
+               if (rc) {
+                       if (disable_ap_sme)
+                               /* notify new_sta has failed */
+                               cfg80211_del_sta(ndev, evt->bssid, GFP_KERNEL);
                        goto out;
+               }
 
                memset(&sinfo, 0, sizeof(sinfo));
 
@@ -597,12 +598,13 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
 
                cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
        } else {
-               wil_err(wil, "%s: unhandled iftype %d for CID %d\n",
-                       __func__, wdev->iftype, evt->cid);
+               wil_err(wil, "unhandled iftype %d for CID %d\n", wdev->iftype,
+                       evt->cid);
                goto out;
        }
 
        wil->sta[evt->cid].status = wil_sta_connected;
+       wil->sta[evt->cid].aid = evt->aid;
        set_bit(wil_status_fwconnected, wil->status);
        wil_update_net_queues_bh(wil, NULL, false);
 
@@ -687,6 +689,7 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len)
 {
        struct wmi_vring_en_event *evt = d;
        u8 vri = evt->vring_index;
+       struct wireless_dev *wdev = wil_to_wdev(wil);
 
        wil_dbg_wmi(wil, "Enable vring %d\n", vri);
 
@@ -694,7 +697,12 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len)
                wil_err(wil, "Enable for invalid vring %d\n", vri);
                return;
        }
-       wil->vring_tx_data[vri].dot1x_open = true;
+
+       if (wdev->iftype != NL80211_IFTYPE_AP || !disable_ap_sme)
+               /* in AP mode with disable_ap_sme, this is done by
+                * wil_cfg80211_change_station()
+                */
+               wil->vring_tx_data[vri].dot1x_open = true;
        if (vri == wil->bcast_vring) /* no BA for bcast */
                return;
        if (agg_wsize >= 0)
@@ -919,8 +927,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                      offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail);
 
                if (immed_reply) {
-                       wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n",
-                                   __func__, wil->reply_id);
+                       wil_dbg_wmi(wil, "recv_cmd: Complete WMI 0x%04x\n",
+                                   wil->reply_id);
                        kfree(evt);
                        num_immed_reply++;
                        complete(&wil->wmi_call);
@@ -934,7 +942,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
                }
        }
        /* normally, 1 event per IRQ should be processed */
-       wil_dbg_wmi(wil, "%s -> %d events queued, %d completed\n", __func__,
+       wil_dbg_wmi(wil, "recv_cmd: -> %d events queued, %d completed\n",
                    n - num_immed_reply, num_immed_reply);
 }
 
@@ -950,6 +958,7 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
        wil->reply_id = reply_id;
        wil->reply_buf = reply;
        wil->reply_size = reply_size;
+       reinit_completion(&wil->wmi_call);
        spin_unlock(&wil->wmi_ev_lock);
 
        rc = __wmi_send(wil, cmdid, buf, len);
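
The reinit_completion() added under the lock re-arms wil->wmi_call before the command goes out, so a stale complete() from an earlier call that timed out cannot satisfy this call's wait. The pattern in isolation (demo_ctx is hypothetical; the lock and completion are assumed initialized at probe time):

#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct demo_ctx {
	spinlock_t lock;
	struct completion reply;
};

static int demo_call(struct demo_ctx *c)
{
	spin_lock(&c->lock);
	reinit_completion(&c->reply);	/* discard any stale complete() */
	spin_unlock(&c->lock);

	/* ... issue the command to firmware here ... */

	if (!wait_for_completion_timeout(&c->reply,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;	/* reply handler never ran */
	return 0;
}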
@@ -1069,6 +1078,8 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
                .pcp_max_assoc_sta = max_assoc_sta,
                .hidden_ssid = hidden_ssid,
                .is_go = is_go,
+               .disable_ap_sme = disable_ap_sme,
+               .abft_len = wil->abft_len,
        };
        struct {
                struct wmi_cmd_hdr wmi;
@@ -1086,6 +1097,13 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
                cmd.pcp_max_assoc_sta = WIL6210_MAX_CID;
        }
 
+       if (disable_ap_sme &&
+           !test_bit(WMI_FW_CAPABILITY_DISABLE_AP_SME,
+                     wil->fw_capabilities)) {
+               wil_err(wil, "disable_ap_sme not supported by FW\n");
+               return -EOPNOTSUPP;
+       }
+
        /*
         * Processing time may be huge, in case of secure AP it takes about
         * 3500ms for FW to start AP
@@ -1352,7 +1370,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on)
                struct wmi_listen_started_event evt;
        } __packed reply;
 
-       wil_info(wil, "%s(%s)\n", __func__, on ? "on" : "off");
+       wil_info(wil, "(%s)\n", on ? "on" : "off");
 
        if (on) {
                rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0,
@@ -1456,12 +1474,15 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
        return 0;
 }
 
-int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
-                      bool full_disconnect)
+int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac,
+                      u16 reason, bool full_disconnect, bool del_sta)
 {
        int rc;
        u16 reason_code;
-       struct wmi_disconnect_sta_cmd cmd = {
+       struct wmi_disconnect_sta_cmd disc_sta_cmd = {
+               .disconnect_reason = cpu_to_le16(reason),
+       };
+       struct wmi_del_sta_cmd del_sta_cmd = {
                .disconnect_reason = cpu_to_le16(reason),
        };
        struct {
@@ -1469,12 +1490,19 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
                struct wmi_disconnect_event evt;
        } __packed reply;
 
-       ether_addr_copy(cmd.dst_mac, mac);
-
-       wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
+       wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason);
 
-       rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &cmd, sizeof(cmd),
-                     WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000);
+       if (del_sta) {
+               ether_addr_copy(del_sta_cmd.dst_mac, mac);
+               rc = wmi_call(wil, WMI_DEL_STA_CMDID, &del_sta_cmd,
+                             sizeof(del_sta_cmd), WMI_DISCONNECT_EVENTID,
+                             &reply, sizeof(reply), 1000);
+       } else {
+               ether_addr_copy(disc_sta_cmd.dst_mac, mac);
+               rc = wmi_call(wil, WMI_DISCONNECT_STA_CMDID, &disc_sta_cmd,
+                             sizeof(disc_sta_cmd), WMI_DISCONNECT_EVENTID,
+                             &reply, sizeof(reply), 1000);
+       }
        /* failure to disconnect in reasonable time treated as FW error */
        if (rc) {
                wil_fw_error_recovery(wil);
@@ -1507,8 +1535,8 @@ int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout)
                .amsdu = 0,
        };
 
-       wil_dbg_wmi(wil, "%s(ring %d size %d timeout %d)\n", __func__,
-                   ringid, size, timeout);
+       wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d)\n", ringid, size,
+                   timeout);
 
        return wmi_send(wil, WMI_VRING_BA_EN_CMDID, &cmd, sizeof(cmd));
 }
@@ -1520,8 +1548,7 @@ int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason)
                .reason = cpu_to_le16(reason),
        };
 
-       wil_dbg_wmi(wil, "%s(ring %d reason %d)\n", __func__,
-                   ringid, reason);
+       wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason);
 
        return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, &cmd, sizeof(cmd));
 }
@@ -1533,8 +1560,8 @@ int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason)
                .reason = cpu_to_le16(reason),
        };
 
-       wil_dbg_wmi(wil, "%s(CID %d TID %d reason %d)\n", __func__,
-                   cidxtid & 0xf, (cidxtid >> 4) & 0xf, reason);
+       wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cidxtid & 0xf,
+                   (cidxtid >> 4) & 0xf, reason);
 
        return wmi_send(wil, WMI_RCP_DELBA_CMDID, &cmd, sizeof(cmd));
 }
@@ -1686,11 +1713,29 @@ int wmi_abort_scan(struct wil6210_priv *wil)
        return rc;
 }
 
+int wmi_new_sta(struct wil6210_priv *wil, const u8 *mac, u8 aid)
+{
+       int rc;
+       struct wmi_new_sta_cmd cmd = {
+               .aid = aid,
+       };
+
+       wil_dbg_wmi(wil, "new sta %pM, aid %d\n", mac, aid);
+
+       ether_addr_copy(cmd.dst_mac, mac);
+
+       rc = wmi_send(wil, WMI_NEW_STA_CMDID, &cmd, sizeof(cmd));
+       if (rc)
+               wil_err(wil, "Failed to send new sta (%d)\n", rc);
+
+       return rc;
+}
+
 void wmi_event_flush(struct wil6210_priv *wil)
 {
        struct pending_wmi_event *evt, *t;
 
-       wil_dbg_wmi(wil, "%s()\n", __func__);
+       wil_dbg_wmi(wil, "event_flush\n");
 
        list_for_each_entry_safe(evt, t, &wil->pending_wmi_ev, list) {
                list_del(&evt->list);
@@ -1731,8 +1776,8 @@ static void wmi_event_handle(struct wil6210_priv *wil,
                        WARN_ON(wil->reply_buf);
                        wmi_evt_call_handler(wil, id, evt_data,
                                             len - sizeof(*wmi));
-                       wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n",
-                                   __func__, id);
+                       wil_dbg_wmi(wil, "event_handle: Complete WMI 0x%04x\n",
+                                   id);
                        complete(&wil->wmi_call);
                        return;
                }
@@ -1779,11 +1824,11 @@ void wmi_event_worker(struct work_struct *work)
        struct pending_wmi_event *evt;
        struct list_head *lh;
 
-       wil_dbg_wmi(wil, "Start %s\n", __func__);
+       wil_dbg_wmi(wil, "event_worker: Start\n");
        while ((lh = next_wmi_ev(wil)) != NULL) {
                evt = list_entry(lh, struct pending_wmi_event, list);
                wmi_event_handle(wil, &evt->event.hdr);
                kfree(evt);
        }
-       wil_dbg_wmi(wil, "Finished %s\n", __func__);
+       wil_dbg_wmi(wil, "event_worker: Finished\n");
 }
index d93a4d490d24e67444e2d27f03ad74e4e77cd70f..7c9fee57aa9110695e10dc96535169a2f988c62a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
  * Copyright (c) 2006-2012 Wilocity
  *
  * Permission to use, copy, modify, and/or distribute this software for any
@@ -56,6 +56,8 @@ enum wmi_fw_capability {
        WMI_FW_CAPABILITY_PS_CONFIG             = 1,
        WMI_FW_CAPABILITY_RF_SECTORS            = 2,
        WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT      = 3,
+       WMI_FW_CAPABILITY_DISABLE_AP_SME        = 4,
+       WMI_FW_CAPABILITY_WMI_ONLY              = 5,
        WMI_FW_CAPABILITY_MAX,
 };
 
@@ -185,8 +187,11 @@ enum wmi_command_id {
        WMI_RS_CFG_CMDID                                = 0x921,
        WMI_GET_DETAILED_RS_RES_CMDID                   = 0x922,
        WMI_AOA_MEAS_CMDID                              = 0x923,
+       WMI_BRP_SET_ANT_LIMIT_CMDID                     = 0x924,
        WMI_SET_MGMT_RETRY_LIMIT_CMDID                  = 0x930,
        WMI_GET_MGMT_RETRY_LIMIT_CMDID                  = 0x931,
+       WMI_NEW_STA_CMDID                               = 0x935,
+       WMI_DEL_STA_CMDID                               = 0x936,
        WMI_TOF_SESSION_START_CMDID                     = 0x991,
        WMI_TOF_GET_CAPABILITIES_CMDID                  = 0x992,
        WMI_TOF_SET_LCR_CMDID                           = 0x993,
@@ -543,7 +548,10 @@ struct wmi_pcp_start_cmd {
        u8 pcp_max_assoc_sta;
        u8 hidden_ssid;
        u8 is_go;
-       u8 reserved0[7];
+       u8 reserved0[5];
+       /* abft_len override if non-0 */
+       u8 abft_len;
+       u8 disable_ap_sme;
        u8 network_type;
        u8 channel;
        u8 disable_sec_offload;
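
The two new members are carved out of reserved0[]: the pad shrinks from 7 to 5 bytes while abft_len and disable_ap_sme take their place, so the packed command keeps its size and the offsets of the existing fields. Illustrated on a hypothetical structure:

#include <linux/types.h>

struct demo_cmd_old {
	u8 flags;
	u8 reserved[7];
} __packed;			/* 8 bytes on the wire */

struct demo_cmd_new {
	u8 flags;
	u8 reserved[5];		/* pad shrunk by two bytes */
	u8 abft_len;		/* new, carved from reserved */
	u8 disable_ap_sme;	/* new, carved from reserved */
} __packed;			/* still 8 bytes on the wire */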
@@ -902,6 +910,18 @@ struct wmi_set_mgmt_retry_limit_cmd {
        u8 reserved[3];
 } __packed;
 
+/* WMI_NEW_STA_CMDID */
+struct wmi_new_sta_cmd {
+       u8 dst_mac[WMI_MAC_LEN];
+       u8 aid;
+} __packed;
+
+/* WMI_DEL_STA_CMDID */
+struct wmi_del_sta_cmd {
+       u8 dst_mac[WMI_MAC_LEN];
+       __le16 disconnect_reason;
+} __packed;
+
 enum wmi_tof_burst_duration {
        WMI_TOF_BURST_DURATION_250_USEC         = 2,
        WMI_TOF_BURST_DURATION_500_USEC         = 3,
@@ -1067,6 +1087,7 @@ enum wmi_event_id {
        WMI_RS_CFG_DONE_EVENTID                         = 0x1921,
        WMI_GET_DETAILED_RS_RES_EVENTID                 = 0x1922,
        WMI_AOA_MEAS_EVENTID                            = 0x1923,
+       WMI_BRP_SET_ANT_LIMIT_EVENTID                   = 0x1924,
        WMI_SET_MGMT_RETRY_LIMIT_EVENTID                = 0x1930,
        WMI_GET_MGMT_RETRY_LIMIT_EVENTID                = 0x1931,
        WMI_TOF_SESSION_END_EVENTID                     = 0x1991,
@@ -1287,12 +1308,13 @@ struct wmi_connect_event {
        u8 assoc_req_len;
        u8 assoc_resp_len;
        u8 cid;
-       u8 reserved2[3];
+       u8 aid;
+       u8 reserved2[2];
        /* not in use */
        u8 assoc_info[0];
 } __packed;
 
-/* WMI_DISCONNECT_EVENTID */
+/* disconnect_reason */
 enum wmi_disconnect_reason {
        WMI_DIS_REASON_NO_NETWORK_AVAIL         = 0x01,
        /* bmiss */
@@ -1310,6 +1332,7 @@ enum wmi_disconnect_reason {
        WMI_DIS_REASON_IBSS_MERGE               = 0x0E,
 };
 
+/* WMI_DISCONNECT_EVENTID */
 struct wmi_disconnect_event {
        /* reason code, see 802.11 spec. */
        __le16 protocol_reason_status;
@@ -1759,6 +1782,42 @@ struct wmi_get_detailed_rs_res_event {
        u8 reserved[3];
 } __packed;
 
+/* BRP antenna limit mode */
+enum wmi_brp_ant_limit_mode {
+       /* Disable BRP force antenna limit */
+       WMI_BRP_ANT_LIMIT_MODE_DISABLE          = 0x00,
+       /* Define maximal antennas limit. Only effective antennas will be
+        * actually used
+        */
+       WMI_BRP_ANT_LIMIT_MODE_EFFECTIVE        = 0x01,
+       /* Force a specific number of antennas */
+       WMI_BRP_ANT_LIMIT_MODE_FORCE            = 0x02,
+       /* number of BRP antenna limit modes */
+       WMI_BRP_ANT_LIMIT_MODES_NUM             = 0x03,
+};
+
+/* WMI_BRP_SET_ANT_LIMIT_CMDID */
+struct wmi_brp_set_ant_limit_cmd {
+       /* connection id */
+       u8 cid;
+       /* enum wmi_brp_ant_limit_mode */
+       u8 limit_mode;
+       /* antenna limit count, 1-27
+        * disable_mode - ignored
+        * effective_mode - upper limit to number of antennas to be used
+        * force_mode - exact number of antennas to be used
+        */
+       u8 ant_limit;
+       u8 reserved;
+} __packed;
+
+/* WMI_BRP_SET_ANT_LIMIT_EVENTID */
+struct wmi_brp_set_ant_limit_event {
+       /* wmi_fw_status */
+       u8 status;
+       u8 reserved[3];
+} __packed;
+
 /* broadcast connection ID */
 #define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST    (0xFFFFFFFF)
 
index 72139b579b1892d71a4ff4741ee1ad1bd66478be..5bc2ba214735af2a8f44394834e1c40c45820487 100644 (file)
@@ -1104,6 +1104,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
+       BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
        BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
        { /* end: all zeroes */ }
index e21f7600122b7cbd0c98980dab6ccac0e543c0d6..76693df347425951397a211c863f19046dbf07f5 100644 (file)
@@ -218,9 +218,6 @@ int brcmf_bus_get_memdump(struct brcmf_bus *bus, void *data, size_t len)
  * interface functions from common layer
  */
 
-bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
-                     int prec);
-
 /* Receive frame for delivery to OS.  Callee disposes of rxp. */
 void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event);
 /* Receive async event packet from firmware. Callee disposes of rxp. */
@@ -241,13 +238,12 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success);
 /* Configure the "global" bus state used by upper layers */
 void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state);
 
-int brcmf_bus_start(struct device *dev);
+int brcmf_bus_started(struct device *dev);
 s32 brcmf_iovar_data_set(struct device *dev, char *name, void *data, u32 len);
 void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
 
 #ifdef CONFIG_BRCMFMAC_SDIO
 void brcmf_sdio_exit(void);
-void brcmf_sdio_init(void);
 void brcmf_sdio_register(void);
 #endif
 #ifdef CONFIG_BRCMFMAC_USB
index 7ffc4aba5bab0683fe993d1d5eef9e8e3d8389d0..10098b7586f3c95de1d2a5a5abc4396f5b13e319 100644 (file)
@@ -138,7 +138,6 @@ static struct ieee80211_rate __wl_rates[] = {
        .band                   = NL80211_BAND_2GHZ,            \
        .center_freq            = (_freq),                      \
        .hw_value               = (_channel),                   \
-       .flags                  = IEEE80211_CHAN_DISABLED,      \
        .max_antenna_gain       = 0,                            \
        .max_power              = 30,                           \
 }
@@ -147,7 +146,6 @@ static struct ieee80211_rate __wl_rates[] = {
        .band                   = NL80211_BAND_5GHZ,            \
        .center_freq            = 5000 + (5 * (_channel)),      \
        .hw_value               = (_channel),                   \
-       .flags                  = IEEE80211_CHAN_DISABLED,      \
        .max_antenna_gain       = 0,                            \
        .max_power              = 30,                           \
 }
@@ -328,7 +326,7 @@ u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
  * triples, returning a pointer to the substring whose first element
  * matches tag
  */
-const struct brcmf_tlv *
+static const struct brcmf_tlv *
 brcmf_parse_tlvs(const void *buf, int buflen, uint key)
 {
        const struct brcmf_tlv *elt = buf;
@@ -3332,7 +3330,6 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp,
                goto out_err;
        }
 
-       data += sizeof(struct brcmf_pno_scanresults_le);
        netinfo_start = brcmf_get_netinfo_array(pfn_result);
 
        for (i = 0; i < result_count; i++) {
@@ -3480,8 +3477,7 @@ brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e,
                return -EINVAL;
        }
 
-       data += sizeof(struct brcmf_pno_scanresults_le);
-       netinfo = (struct brcmf_pno_net_info_le *)data;
+       netinfo = brcmf_get_netinfo_array(pfn_result);
        memcpy(cfg->wowl.nd->ssid.ssid, netinfo->SSID, netinfo->SSID_len);
        cfg->wowl.nd->ssid.ssid_len = netinfo->SSID_len;
        cfg->wowl.nd->n_channels = 1;
@@ -3971,7 +3967,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
                        pval |= AES_ENABLED;
                        break;
                default:
-                       brcmf_err("Ivalid unicast security info\n");
+                       brcmf_err("Invalid unicast security info\n");
                }
                offset++;
        }
@@ -4015,7 +4011,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
                        wpa_auth |= WPA2_AUTH_1X_SHA256;
                        break;
                default:
-                       brcmf_err("Ivalid key mgmt info\n");
+                       brcmf_err("Invalid key mgmt info\n");
                }
                offset++;
        }
@@ -5071,6 +5067,29 @@ static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy,
        return ret;
 }
 
+static int
+brcmf_cfg80211_update_conn_params(struct wiphy *wiphy,
+                                 struct net_device *ndev,
+                                 struct cfg80211_connect_params *sme,
+                                 u32 changed)
+{
+       struct brcmf_if *ifp;
+       int err;
+
+       if (!(changed & UPDATE_ASSOC_IES))
+               return 0;
+
+       ifp = netdev_priv(ndev);
+       err = brcmf_vif_set_mgmt_ie(ifp->vif, BRCMF_VNDR_IE_ASSOCREQ_FLAG,
+                                   sme->ie, sme->ie_len);
+       if (err)
+               brcmf_err("Set Assoc REQ IE Failed\n");
+       else
+               brcmf_dbg(TRACE, "Applied Vndr IEs for Assoc request\n");
+
+       return err;
+}
+
 #ifdef CONFIG_PM
 static int
 brcmf_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *ndev,
@@ -5138,6 +5157,7 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
        .crit_proto_start = brcmf_cfg80211_crit_proto_start,
        .crit_proto_stop = brcmf_cfg80211_crit_proto_stop,
        .tdls_oper = brcmf_cfg80211_tdls_oper,
+       .update_connect_params = brcmf_cfg80211_update_conn_params,
 };
 
 struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
@@ -5825,7 +5845,6 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
        u32 i, j;
        u32 total;
        u32 chaninfo;
-       u32 index;
 
        pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
 
@@ -5873,33 +5892,39 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
                    ch.bw == BRCMU_CHAN_BW_80)
                        continue;
 
-               channel = band->channels;
-               index = band->n_channels;
+               channel = NULL;
                for (j = 0; j < band->n_channels; j++) {
-                       if (channel[j].hw_value == ch.control_ch_num) {
-                               index = j;
+                       if (band->channels[j].hw_value == ch.control_ch_num) {
+                               channel = &band->channels[j];
                                break;
                        }
                }
-               channel[index].center_freq =
-                       ieee80211_channel_to_frequency(ch.control_ch_num,
-                                                      band->band);
-               channel[index].hw_value = ch.control_ch_num;
+               if (!channel) {
+                       /* It seems firmware supports some channel we never
+                        * considered. Something new in IEEE standard?
+                        */
+                       brcmf_err("Ignoring unexpected firmware channel %d\n",
+                                 ch.control_ch_num);
+                       continue;
+               }
+
+               if (channel->orig_flags & IEEE80211_CHAN_DISABLED)
+                       continue;
 
                /* assuming the chanspecs order is HT20,
                 * HT40 upper, HT40 lower, and VHT80.
                 */
                if (ch.bw == BRCMU_CHAN_BW_80) {
-                       channel[index].flags &= ~IEEE80211_CHAN_NO_80MHZ;
+                       channel->flags &= ~IEEE80211_CHAN_NO_80MHZ;
                } else if (ch.bw == BRCMU_CHAN_BW_40) {
-                       brcmf_update_bw40_channel_flag(&channel[index], &ch);
+                       brcmf_update_bw40_channel_flag(channel, &ch);
                } else {
                        /* enable the channel and disable other bandwidths
                         * for now as mentioned order assure they are enabled
                         * for subsequent chanspecs.
                         */
-                       channel[index].flags = IEEE80211_CHAN_NO_HT40 |
-                                              IEEE80211_CHAN_NO_80MHZ;
+                       channel->flags = IEEE80211_CHAN_NO_HT40 |
+                                        IEEE80211_CHAN_NO_80MHZ;
                        ch.bw = BRCMU_CHAN_BW_20;
                        cfg->d11inf.encchspec(&ch);
                        chaninfo = ch.chspec;
@@ -5907,11 +5932,11 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
                                                       &chaninfo);
                        if (!err) {
                                if (chaninfo & WL_CHAN_RADAR)
-                                       channel[index].flags |=
+                                       channel->flags |=
                                                (IEEE80211_CHAN_RADAR |
                                                 IEEE80211_CHAN_NO_IR);
                                if (chaninfo & WL_CHAN_PASSIVE)
-                                       channel[index].flags |=
+                                       channel->flags |=
                                                IEEE80211_CHAN_NO_IR;
                        }
                }
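
This rework closes an out-of-bounds write: previously, a chanspec with no matching entry left index equal to band->n_channels, and the subsequent updates landed one slot past the channels array. Resolving a pointer first and skipping NULL makes unknown firmware channels harmless. The lookup in isolation (demo_find_channel is hypothetical):

#include <net/cfg80211.h>

/* Return the registered channel for hw_value, or NULL so the caller
 * can skip chanspecs the driver never registered.
 */
static struct ieee80211_channel *
demo_find_channel(struct ieee80211_supported_band *band, u16 hw_value)
{
	int i;

	for (i = 0; i < band->n_channels; i++)
		if (band->channels[i].hw_value == hw_value)
			return &band->channels[i];
	return NULL;
}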
@@ -6341,7 +6366,7 @@ static void brcmf_wiphy_pno_params(struct wiphy *wiphy)
 }
 
 #ifdef CONFIG_PM
-static struct wiphy_wowlan_support brcmf_wowlan_support = {
+static const struct wiphy_wowlan_support brcmf_wowlan_support = {
        .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
        .n_patterns = BRCMF_WOWL_MAXPATTERNS,
        .pattern_max_len = BRCMF_WOWL_MAXPATTERNSIZE,
@@ -6354,19 +6379,29 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy, struct brcmf_if *ifp)
 {
 #ifdef CONFIG_PM
        struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
+       struct wiphy_wowlan_support *wowl;
+
+       wowl = kmemdup(&brcmf_wowlan_support, sizeof(brcmf_wowlan_support),
+                      GFP_KERNEL);
+       if (!wowl) {
+               brcmf_err("only support basic wowlan features\n");
+               wiphy->wowlan = &brcmf_wowlan_support;
+               return;
+       }
 
        if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) {
                if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_ND)) {
-                       brcmf_wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
+                       wowl->flags |= WIPHY_WOWLAN_NET_DETECT;
+                       wowl->max_nd_match_sets = BRCMF_PNO_MAX_PFN_COUNT;
                        init_waitqueue_head(&cfg->wowl.nd_data_wait);
                }
        }
        if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK)) {
-               brcmf_wowlan_support.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY;
-               brcmf_wowlan_support.flags |= WIPHY_WOWLAN_GTK_REKEY_FAILURE;
+               wowl->flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY;
+               wowl->flags |= WIPHY_WOWLAN_GTK_REKEY_FAILURE;
        }
 
-       wiphy->wowlan = &brcmf_wowlan_support;
+       wiphy->wowlan = wowl;
 #endif
 }
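
Making the template const and duplicating it per device fixes shared mutable state: with more than one adapter, each probe used to OR its feature flags into the single static structure. A miniature of the pattern (demo names are hypothetical); note that the matching hunk in brcmf_free_wiphy() below frees the copy only when it is not the shared template:

#include <linux/slab.h>
#include <linux/string.h>
#include <net/cfg80211.h>

static const struct wiphy_wowlan_support demo_tmpl = {
	.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
};

static void demo_setup_wowl(struct wiphy *wiphy, bool has_nd)
{
	struct wiphy_wowlan_support *w;

	w = kmemdup(&demo_tmpl, sizeof(demo_tmpl), GFP_KERNEL);
	if (!w) {
		wiphy->wowlan = &demo_tmpl;	/* immutable fallback */
		return;
	}
	if (has_nd)
		w->flags |= WIPHY_WOWLAN_NET_DETECT;
	wiphy->wowlan = w;			/* per-device copy */
}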
 
@@ -6477,8 +6512,10 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
                        wiphy->bands[NL80211_BAND_5GHZ] = band;
                }
        }
-       err = brcmf_setup_wiphybands(wiphy);
-       return err;
+
+       wiphy_read_of_freq_limits(wiphy);
+
+       return 0;
 }
 
 static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg)
@@ -6748,6 +6785,10 @@ static void brcmf_free_wiphy(struct wiphy *wiphy)
                kfree(wiphy->bands[NL80211_BAND_5GHZ]->channels);
                kfree(wiphy->bands[NL80211_BAND_5GHZ]);
        }
+#if IS_ENABLED(CONFIG_PM)
+       if (wiphy->wowlan != &brcmf_wowlan_support)
+               kfree(wiphy->wowlan);
+#endif
        wiphy_free(wiphy);
 }
 
@@ -6843,6 +6884,12 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
                goto priv_out;
        }
 
+       err = brcmf_setup_wiphybands(wiphy);
+       if (err) {
+               brcmf_err("Setting wiphy bands failed (%d)\n", err);
+               goto wiphy_unreg_out;
+       }
+
        /* If cfg80211 didn't disable 40MHz HT CAP in wiphy_register(),
         * setup 40MHz in 2GHz band and enable OBSS scanning.
         */
index 0c9a7081fca912005474be5f5015d3fb2d667067..8f19d95d4175da0742fbecb17c3fd6fc19453dff 100644 (file)
@@ -396,8 +396,6 @@ void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
 s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
                          const u8 *vndr_ie_buf, u32 vndr_ie_len);
 s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
-const struct brcmf_tlv *
-brcmf_parse_tlvs(const void *buf, int buflen, uint key);
 u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
                        struct ieee80211_channel *ch);
 bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg,
index 3e15d64c64813513bc22202dd9e468588699abb0..33b133f7e63aad3b5a6bb14018fd461ec4fb90c6 100644 (file)
@@ -74,7 +74,7 @@ module_param_named(roamoff, brcmf_roamoff, int, S_IRUSR);
 MODULE_PARM_DESC(roamoff, "Do not use internal roaming engine");
 
 #ifdef DEBUG
-/* always succeed brcmf_bus_start() */
+/* always succeed brcmf_bus_started() */
 static int brcmf_ignore_probe_fail;
 module_param_named(ignore_probe_fail, brcmf_ignore_probe_fail, int, 0);
 MODULE_PARM_DESC(ignore_probe_fail, "always succeed probe for debugging");
@@ -218,6 +218,22 @@ done:
        return err;
 }
 
+#ifndef CONFIG_BRCM_TRACING
+void __brcmf_err(const char *func, const char *fmt, ...)
+{
+       struct va_format vaf;
+       va_list args;
+
+       va_start(args, fmt);
+
+       vaf.fmt = fmt;
+       vaf.va = &args;
+       pr_err("%s: %pV", func, &vaf);
+
+       va_end(args);
+}
+#endif
+
 #if defined(CONFIG_BRCM_TRACING) || defined(CONFIG_BRCMDBG)
 void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...)
 {
@@ -299,11 +315,9 @@ struct brcmf_mp_device *brcmf_get_module_param(struct device *dev,
                        }
                }
        }
-       if ((bus_type == BRCMF_BUSTYPE_SDIO) && (!found)) {
-               /* No platform data for this device. In case of SDIO try OF
-                * (Open Firwmare) Device Tree.
-                */
-               brcmf_of_probe(dev, &settings->bus.sdio);
+       if (!found) {
+               /* No platform data for this device, try OF (Open Firmware) */
+               brcmf_of_probe(dev, bus_type, settings);
        }
        return settings;
 }
index bd095abca39340cc75490673755a07593dc66d93..a62f8e70b32078ef3d83e38c3eca4b3362c44aba 100644 (file)
@@ -65,6 +65,8 @@ struct brcmf_mp_device {
        } bus;
 };
 
+void brcmf_c_set_joinpref_default(struct brcmf_if *ifp);
+
 struct brcmf_mp_device *brcmf_get_module_param(struct device *dev,
                                               enum brcmf_bus_type bus_type,
                                               u32 chip, u32 chiprev);
index 9e6f60a0ec3eac7adac493b9fb337ffae62b6b9f..60da86a8d95b0190b22c76e99cfc88543939e1a1 100644 (file)
@@ -249,10 +249,10 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
 
 done:
        if (ret) {
-               ifp->stats.tx_dropped++;
+               ndev->stats.tx_dropped++;
        } else {
-               ifp->stats.tx_packets++;
-               ifp->stats.tx_bytes += skb->len;
+               ndev->stats.tx_packets++;
+               ndev->stats.tx_bytes += skb->len;
        }
 
        /* Return ok: we always eat the packet */
@@ -296,15 +296,15 @@ void brcmf_txflowblock(struct device *dev, bool state)
 void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
 {
        if (skb->pkt_type == PACKET_MULTICAST)
-               ifp->stats.multicast++;
+               ifp->ndev->stats.multicast++;
 
        if (!(ifp->ndev->flags & IFF_UP)) {
                brcmu_pkt_buf_free_skb(skb);
                return;
        }
 
-       ifp->stats.rx_bytes += skb->len;
-       ifp->stats.rx_packets++;
+       ifp->ndev->stats.rx_bytes += skb->len;
+       ifp->ndev->stats.rx_packets++;
 
        brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
        if (in_interrupt())
@@ -327,7 +327,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
 
        if (ret || !(*ifp) || !(*ifp)->ndev) {
                if (ret != -ENODATA && *ifp)
-                       (*ifp)->stats.rx_errors++;
+                       (*ifp)->ndev->stats.rx_errors++;
                brcmu_pkt_buf_free_skb(skb);
                return -ENODATA;
        }
@@ -388,7 +388,7 @@ void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
        }
 
        if (!success)
-               ifp->stats.tx_errors++;
+               ifp->ndev->stats.tx_errors++;
 
        brcmu_pkt_buf_free_skb(txp);
 }
@@ -411,15 +411,6 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
        }
 }
 
-static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
-{
-       struct brcmf_if *ifp = netdev_priv(ndev);
-
-       brcmf_dbg(TRACE, "Enter, bsscfgidx=%d\n", ifp->bsscfgidx);
-
-       return &ifp->stats;
-}
-
 static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
                                    struct ethtool_drvinfo *info)
 {
@@ -492,7 +483,6 @@ static int brcmf_netdev_open(struct net_device *ndev)
 static const struct net_device_ops brcmf_netdev_ops_pri = {
        .ndo_open = brcmf_netdev_open,
        .ndo_stop = brcmf_netdev_stop,
-       .ndo_get_stats = brcmf_netdev_get_stats,
        .ndo_start_xmit = brcmf_netdev_start_xmit,
        .ndo_set_mac_address = brcmf_netdev_set_mac_address,
        .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
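
The stats hunks in this file all make the same substitution: the counters move from a driver-private struct net_device_stats into the stats member that struct net_device already embeds, which is why .ndo_get_stats and its accessor can be deleted outright; with no .ndo_get_stats/.ndo_get_stats64 callback, dev_get_stats() falls back to those embedded fields. A minimal sketch of the resulting xmit accounting, with my_xmit/send_to_hardware as hypothetical names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int send_to_hardware(struct sk_buff *skb);	/* hypothetical, consumes skb */

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	unsigned int len = skb->len;	/* cache before handing the skb off */

	/* Count straight into the embedded counters; no ndo_get_stats
	 * callback is needed for these to show up in ip -s link.
	 */
	if (send_to_hardware(skb)) {
		ndev->stats.tx_dropped++;
	} else {
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += len;
	}
	return NETDEV_TX_OK;	/* the packet is always consumed */
}
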
@@ -966,7 +956,7 @@ static int brcmf_revinfo_read(struct seq_file *s, void *data)
        return 0;
 }
 
-int brcmf_bus_start(struct device *dev)
+int brcmf_bus_started(struct device *dev)
 {
        int ret = -1;
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
@@ -1075,16 +1065,6 @@ void brcmf_bus_add_txhdrlen(struct device *dev, uint len)
        }
 }
 
-static void brcmf_bus_detach(struct brcmf_pub *drvr)
-{
-       brcmf_dbg(TRACE, "Enter\n");
-
-       if (drvr) {
-               /* Stop the bus module */
-               brcmf_bus_stop(drvr->bus_if);
-       }
-}
-
 void brcmf_dev_reset(struct device *dev)
 {
        struct brcmf_bus *bus_if = dev_get_drvdata(dev);
@@ -1131,7 +1111,7 @@ void brcmf_detach(struct device *dev)
 
        brcmf_fws_deinit(drvr);
 
-       brcmf_bus_detach(drvr);
+       brcmf_bus_stop(drvr->bus_if);
 
        brcmf_proto_detach(drvr);
 
index c94dcab260d0009bc960e1eef119f66c2187e718..6aecd8dfd824d9656890b96359734aa80a03ca35 100644 (file)
@@ -171,7 +171,6 @@ enum brcmf_netif_stop_reason {
  * @drvr: points to device related information.
  * @vif: points to cfg80211 specific interface information.
  * @ndev: associated network device.
- * @stats: interface specific network statistics.
  * @multicast_work: worker object for multicast provisioning.
  * @ndoffload_work: worker object for neighbor discovery offload configuration.
  * @fws_desc: interface specific firmware-signalling descriptor.
@@ -187,7 +186,6 @@ struct brcmf_if {
        struct brcmf_pub *drvr;
        struct brcmf_cfg80211_vif *vif;
        struct net_device *ndev;
-       struct net_device_stats stats;
        struct work_struct multicast_work;
        struct work_struct ndoffload_work;
        struct brcmf_fws_mac_descriptor *fws_desc;
@@ -216,7 +214,6 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
 void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
 void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
 void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
-void brcmf_c_set_joinpref_default(struct brcmf_if *ifp);
 int __init brcmf_core_init(void);
 void __exit brcmf_core_exit(void);
 
index e64557c35553fe4a53a933196d923902cb989270..f4644cf371c7e058401fcba2797fb9bd11f63993 100644 (file)
@@ -32,16 +32,25 @@ static int brcmf_debug_create_memdump(struct brcmf_bus *bus, const void *data,
 {
        void *dump;
        size_t ramsize;
+       int err;
 
        ramsize = brcmf_bus_get_ramsize(bus);
-       if (ramsize) {
-               dump = vzalloc(len + ramsize);
-               if (!dump)
-                       return -ENOMEM;
-               memcpy(dump, data, len);
-               brcmf_bus_get_memdump(bus, dump + len, ramsize);
-               dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
+       if (!ramsize)
+               return -ENOTSUPP;
+
+       dump = vzalloc(len + ramsize);
+       if (!dump)
+               return -ENOMEM;
+
+       memcpy(dump, data, len);
+       err = brcmf_bus_get_memdump(bus, dump + len, ramsize);
+       if (err) {
+               vfree(dump);
+               return err;
        }
+
+       dev_coredumpv(bus->dev, dump, len + ramsize, GFP_KERNEL);
+
        return 0;
 }
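
The rewritten brcmf_debug_create_memdump() above flattens a nested success path into guard clauses and, more importantly, now checks brcmf_bus_get_memdump() and vfree()s the buffer on failure instead of passing a bad dump to dev_coredumpv(). The shape of that pattern, sketched with hypothetical names (read_ram is a stand-in for the fallible dump read):

#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/errno.h>

static int read_ram(void *dst, size_t len);	/* hypothetical */

static int create_dump(const void *hdr, size_t hdr_len, size_t ram_len)
{
	void *buf;
	int err;

	if (!ram_len)
		return -ENOTSUPP;	/* guard clause: nothing to dump */

	buf = vzalloc(hdr_len + ram_len);
	if (!buf)
		return -ENOMEM;

	memcpy(buf, hdr, hdr_len);
	err = read_ram(buf + hdr_len, ram_len);
	if (err) {
		vfree(buf);		/* free on the error path, no leak */
		return err;
	}

	/* on success, hand buf off to a consumer that takes ownership,
	 * e.g. dev_coredumpv() in the driver above
	 */
	return 0;
}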
 
@@ -49,10 +58,18 @@ static int brcmf_debug_psm_watchdog_notify(struct brcmf_if *ifp,
                                           const struct brcmf_event_msg *evtmsg,
                                           void *data)
 {
+       int err;
+
        brcmf_dbg(TRACE, "enter: bsscfgidx=%d\n", ifp->bsscfgidx);
 
-       return brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
-                                         evtmsg->datalen);
+       brcmf_err("PSM's watchdog has fired!\n");
+
+       err = brcmf_debug_create_memdump(ifp->drvr->bus_if, data,
+                                        evtmsg->datalen);
+       if (err)
+               brcmf_err("Failed to get memory dump, %d\n", err);
+
+       return err;
 }
 
 void brcmf_debugfs_init(void)
index 6687812770cc18a14b29e4266fcbc74ca097523c..066126123e9663e89277a28d0dd2fa48ad9f0a72 100644 (file)
 #undef pr_fmt
 #define pr_fmt(fmt)            KBUILD_MODNAME ": " fmt
 
-/* Macro for error messages. net_ratelimit() is used when driver
- * debugging is not selected. When debugging the driver error
- * messages are as important as other tracing or even more so.
+__printf(2, 3)
+void __brcmf_err(const char *func, const char *fmt, ...);
+/* Macro for error messages. When debugging / tracing the driver, all error
+ * messages are important to us.
  */
-#ifndef CONFIG_BRCM_TRACING
-#ifdef CONFIG_BRCMDBG
-#define brcmf_err(fmt, ...)    pr_err("%s: " fmt, __func__, ##__VA_ARGS__)
-#else
 #define brcmf_err(fmt, ...)                                            \
        do {                                                            \
-               if (net_ratelimit())                                    \
-                       pr_err("%s: " fmt, __func__, ##__VA_ARGS__);    \
+               if (IS_ENABLED(CONFIG_BRCMDBG) ||                       \
+                   IS_ENABLED(CONFIG_BRCM_TRACING) ||                  \
+                   net_ratelimit())                                    \
+                       __brcmf_err(__func__, fmt, ##__VA_ARGS__);      \
        } while (0)
-#endif
-#else
-__printf(2, 3)
-void __brcmf_err(const char *func, const char *fmt, ...);
-#define brcmf_err(fmt, ...) \
-       __brcmf_err(__func__, fmt, ##__VA_ARGS__)
-#endif
 
 #if defined(DEBUG) || defined(CONFIG_BRCM_TRACING)
 __printf(3, 4)
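
The brcmf_err() rework above collapses three config-dependent macro variants into one by exploiting that IS_ENABLED() expands to a compile-time 0 or 1: in debug or tracing builds the || chain is constant-true and the compiler elides the net_ratelimit() call, while plain builds keep the rate limiting. The trick in isolation, assuming a hypothetical CONFIG_MYDBG Kconfig symbol:

#include <linux/kconfig.h>	/* IS_ENABLED() */
#include <linux/printk.h>
#include <linux/net.h>		/* net_ratelimit() */

/* One definition for all builds: the condition folds to "always
 * print" when CONFIG_MYDBG=y and to "rate-limited" otherwise.
 */
#define my_err(fmt, ...)						\
	do {								\
		if (IS_ENABLED(CONFIG_MYDBG) || net_ratelimit())	\
			pr_err("%s: " fmt, __func__, ##__VA_ARGS__);	\
	} while (0)
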
index 425c41dc0a59bd5167eb429f37395273d06d2403..aee6e5937c41cd3afc763893b3a147e7097430e1 100644 (file)
 #include "common.h"
 #include "of.h"
 
-void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio)
+void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
+                   struct brcmf_mp_device *settings)
 {
+       struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio;
        struct device_node *np = dev->of_node;
        int irq;
        u32 irqf;
        u32 val;
 
-       if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
+       if (!np || bus_type != BRCMF_BUSTYPE_SDIO ||
+           !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
                return;
 
        if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0)
index a9d94c15d0f5ef422fdbad2656db1fd3064d0e6c..95b7032d54b199f55650d5d9a4c8d23c7f0c83d6 100644 (file)
  * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 #ifdef CONFIG_OF
-void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio);
+void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
+                   struct brcmf_mp_device *settings);
 #else
-static void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio)
+static void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
+                          struct brcmf_mp_device *settings)
 {
 }
 #endif /* CONFIG_OF */
index 048027f2085bbd293abe878ba2b2821ba6452804..6fae4cf3f6ab2876eb6ae2c3b5b07d02bcb0d3ca 100644 (file)
@@ -601,7 +601,6 @@ static void brcmf_pcie_attach(struct brcmf_pciedev_info *devinfo)
 {
        u32 config;
 
-       brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
        /* BAR1 window may not be sized properly */
        brcmf_pcie_select_core(devinfo, BCMA_CORE_PCIE2);
        brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_CONFIGADDR, 0x4e0);
@@ -1572,7 +1571,7 @@ static int brcmf_pcie_attach_bus(struct brcmf_pciedev_info *devinfo)
        if (ret) {
                brcmf_err("brcmf_attach failed\n");
        } else {
-               ret = brcmf_bus_start(&devinfo->pdev->dev);
+               ret = brcmf_bus_started(&devinfo->pdev->dev);
                if (ret)
                        brcmf_err("dongle is not responding\n");
        }
index dfb0658713d9b31714566b5003564a9a53eb5e97..c5744b45ec8fbc6bb67539647d6db4a8afa4a465 100644 (file)
@@ -1661,7 +1661,7 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
                                           pfirst->len, pfirst->next,
                                           pfirst->prev);
                        skb_unlink(pfirst, &bus->glom);
-                       if (brcmf_sdio_fromevntchan(pfirst->data))
+                       if (brcmf_sdio_fromevntchan(&dptr[SDPCM_HWHDR_LEN]))
                                brcmf_rx_event(bus->sdiodev->dev, pfirst);
                        else
                                brcmf_rx_frame(bus->sdiodev->dev, pfirst,
@@ -4065,7 +4065,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev,
 
        sdio_release_host(sdiodev->func[1]);
 
-       err = brcmf_bus_start(dev);
+       err = brcmf_bus_started(dev);
        if (err != 0) {
                brcmf_err("dongle is not responding\n");
                goto fail;
index 2f978a39b58a49485209f1da9f4f1a5c22647307..d93ebbdc773757adda218b16b816c30202792973 100644 (file)
@@ -1148,7 +1148,7 @@ static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo)
        if (ret)
                goto fail;
 
-       ret = brcmf_bus_start(devinfo->dev);
+       ret = brcmf_bus_started(devinfo->dev);
        if (ret)
                goto fail;
 
index 466912eb2d874a667fff2394022a748f943d3712..e8e65115feba4ca892623975a6b55e901df545aa 100644 (file)
@@ -3469,7 +3469,7 @@ static struct attribute_group il3945_attribute_group = {
        .attrs = il3945_sysfs_entries,
 };
 
-static struct ieee80211_ops il3945_mac_ops __read_mostly = {
+static struct ieee80211_ops il3945_mac_ops __ro_after_init = {
        .tx = il3945_mac_tx,
        .start = il3945_mac_start,
        .stop = il3945_mac_stop,
@@ -3627,15 +3627,6 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        il->cmd_queue = IL39_CMD_QUEUE_NUM;
 
-       /*
-        * Disabling hardware scan means that mac80211 will perform scans
-        * "the hard way", rather than using device's scan.
-        */
-       if (il3945_mod_params.disable_hw_scan) {
-               D_INFO("Disabling hw_scan\n");
-               il3945_mac_ops.hw_scan = NULL;
-       }
-
        D_INFO("*** LOAD DRIVER ***\n");
        il->cfg = cfg;
        il->ops = &il3945_ops;
@@ -3913,6 +3904,15 @@ il3945_init(void)
        pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
        pr_info(DRV_COPYRIGHT "\n");
 
+       /*
+        * Disabling hardware scan means that mac80211 will perform scans
+        * "the hard way", rather than using device's scan.
+        */
+       if (il3945_mod_params.disable_hw_scan) {
+               pr_info("hw_scan is disabled\n");
+               il3945_mac_ops.hw_scan = NULL;
+       }
+
        ret = il3945_rate_control_register();
        if (ret) {
                pr_err("Unable to register rate control algorithm: %d\n", ret);
index b64db47b31bbfecf15631dc632c880285b0271ec..c5f2ddf9b0fe5fafb8633ce7a7905bd5f8fb61d8 100644 (file)
@@ -90,13 +90,16 @@ config IWLWIFI_BCAST_FILTERING
 
 config IWLWIFI_PCIE_RTPM
        bool "Enable runtime power management mode for PCIe devices"
-       depends on IWLMVM && PM
+       depends on IWLMVM && PM && EXPERT
        default false
        help
          Say Y here to enable runtime power management for PCIe
          devices.  If enabled, the device will go into low power mode
          when idle for a short period of time, allowing for improved
-         power saving during runtime.
+         power saving during runtime. Note that this feature requires
+         tight integration with the platform. It is not recommended
+         to enable this feature without proper validation on the
+         specific target platform.
 
         If unsure, say N.
 
index affe760c8c224a3a0b009fcf54a4ef0139d229e4..376c79337a0e04f86796494565749b7b333ab1ff 100644 (file)
@@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
 {
        struct iwl_priv *priv = file->private_data;
        bool restart_fw = iwlwifi_mod_params.restart_fw;
-       int ret;
+       int __maybe_unused ret;
 
        iwlwifi_mod_params.restart_fw = true;
 
index 8c0719468d00514572a0cc6e76c4f084f850a335..2a04d0cd71aefc5b09022daee9e6470e924075a7 100644 (file)
@@ -163,7 +163,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
                                       REGULATORY_DISABLE_BEACON_HINTS;
 
 #ifdef CONFIG_PM_SLEEP
-       if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+       if (priv->fw->img[IWL_UCODE_WOWLAN].num_sec &&
            priv->trans->ops->d3_suspend &&
            priv->trans->ops->d3_resume &&
            device_can_wakeup(priv->trans->dev)) {
index b95c2d76db33c52cf6c5c9e1957ead160917f772..ff44ebc5829d664d10f52c1125f0205a958364e1 100644 (file)
@@ -364,7 +364,7 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
 /*
        get the traffic load value for tid
 */
-static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
+static void rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
 {
        u32 curr_time = jiffies_to_msecs(jiffies);
        u32 time_diff;
@@ -372,14 +372,14 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
        struct iwl_traffic_load *tl = NULL;
 
        if (tid >= IWL_MAX_TID_COUNT)
-               return 0;
+               return;
 
        tl = &(lq_data->load[tid]);
 
        curr_time -= curr_time % TID_ROUND_VALUE;
 
        if (!(tl->queue_count))
-               return 0;
+               return;
 
        time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
        index = time_diff / TID_QUEUE_CELL_SPACING;
@@ -388,8 +388,6 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
        /* TID_MAX_TIME_DIFF */
        if (index >= TID_QUEUE_MAX_SIZE)
                rs_tl_rm_old_stats(tl, curr_time);
-
-       return tl->total;
 }
 
 static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
@@ -397,7 +395,6 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
                                      struct ieee80211_sta *sta)
 {
        int ret = -EAGAIN;
-       u32 load;
 
        /*
         * Don't create TX aggregation sessions when in high
@@ -410,7 +407,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
                return ret;
        }
 
-       load = rs_tl_get_load(lq_data, tid);
+       rs_tl_get_load(lq_data, tid);
 
        IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
                        sta->addr, tid);
@@ -743,7 +740,10 @@ static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
 
                /* Find the previous rate that is in the rate mask */
                i = index - 1;
-               for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+               if (i >= 0)
+                       mask = BIT(i);
+
+               for (; i >= 0; i--, mask >>= 1) {
                        if (rate_mask & mask) {
                                low = i;
                                break;
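
The rs_get_adjacent_rate() hunk above fixes undefined behaviour rather than changing logic: when index is 0, the old loop initializer evaluated 1 << -1 before the i >= 0 condition was ever tested, and a negative shift count is UB in C. Hoisting the mask computation behind an i >= 0 check keeps the shift in range. A condensed before/after with hypothetical names:

#include <linux/bits.h>		/* BIT() */
#include <linux/types.h>

/* Buggy: with idx == 0, i starts at -1 and the initializer computes
 * 1 << -1 (undefined behaviour) even though the body never runs.
 */
static int find_prev_set_bit_bad(u32 bits, u8 idx)
{
	int i = idx - 1;
	u32 mask;

	for (mask = 1 << i; i >= 0; i--, mask >>= 1)
		if (bits & mask)
			return i;
	return -1;
}

/* Fixed: only form the mask once the index is known to be valid. */
static int find_prev_set_bit(u32 bits, u8 idx)
{
	int i = idx - 1;
	u32 mask = 0;

	if (i >= 0)
		mask = BIT(i);

	for (; i >= 0; i--, mask >>= 1)
		if (bits & mask)
			return i;
	return -1;
}
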
index c7509c51e9d94cfa18c856b468ca1a809a48c299..d6013bfe991cde5f28b694ff72a0bb46812b0081 100644 (file)
@@ -407,7 +407,7 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
        lockdep_assert_held(&priv->mutex);
 
        /* No init ucode required? Curious, but maybe ok */
-       if (!priv->fw->img[IWL_UCODE_INIT].sec[0].len)
+       if (!priv->fw->img[IWL_UCODE_INIT].num_sec)
                return 0;
 
        iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
index 0b9f6a7bc83439eeb8665c2176a0efb2655d79d1..39335b7b0c165c69a4d1aa004069c70184ed9675 100644 (file)
@@ -371,4 +371,4 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
 MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
index d4b73dedf89b1a9bfdbe02cdd418311a5a062939..a72e58623d3ab013137f7fdf4a469de3d2d28a40 100644 (file)
@@ -73,8 +73,8 @@
 /* Highest firmware API version supported */
 #define IWL7260_UCODE_API_MAX  17
 #define IWL7265_UCODE_API_MAX  17
-#define IWL7265D_UCODE_API_MAX 26
-#define IWL3168_UCODE_API_MAX  26
+#define IWL7265D_UCODE_API_MAX 28
+#define IWL3168_UCODE_API_MAX  28
 
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN  17
index 8d3e53fac1dabc01ed875b6f8c2863bb908f770c..b7953bf55f6fea72b45e7a44685ec421cca2cc94 100644 (file)
@@ -70,8 +70,8 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX  26
-#define IWL8265_UCODE_API_MAX  26
+#define IWL8000_UCODE_API_MAX  28
+#define IWL8265_UCODE_API_MAX  28
 
 /* Lowest firmware API version supported */
 #define IWL8000_UCODE_API_MIN  17
index ff850410d89719f77c67c8132f6f2b3e554af3a1..a5f0c0bf85ec8dd7f271a8954cd6d0823acbc9b1 100644 (file)
@@ -55,7 +55,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL9000_UCODE_API_MAX  26
+#define IWL9000_UCODE_API_MAX  28
 
 /* Lowest firmware API version supported */
 #define IWL9000_UCODE_API_MIN  17
index ea16185258788a2235d1c7055d8bdae107b0caa1..15dd7f6137c8fff1bae35a9b8eb165ab8ce7d3b6 100644 (file)
@@ -55,7 +55,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL_A000_UCODE_API_MAX 26
+#define IWL_A000_UCODE_API_MAX 28
 
 /* Lowest firmware API version supported */
 #define IWL_A000_UCODE_API_MIN 24
 #define IWL_A000_SMEM_OFFSET           0x400000
 #define IWL_A000_SMEM_LEN              0x68000
 
-#define IWL_A000_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
-#define IWL_A000_MODULE_FIRMWARE(api) \
-       IWL_A000_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
+#define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
+
+#define IWL_A000_HR_MODULE_FIRMWARE(api) \
+       IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL_A000_JF_MODULE_FIRMWARE(api) \
+       IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode"
 
 #define NVM_HW_SECTION_NUM_FAMILY_A000         10
 
@@ -116,11 +120,22 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
        .mq_rx_supported = true,                                        \
        .vht_mu_mimo_supported = true,                                  \
        .mac_addr_from_csr = true,                                      \
-       .use_tfh = true
+       .use_tfh = true,                                                \
+       .rf_id = true
+
+const struct iwl_cfg iwla000_2ac_cfg_hr = {
+               .name = "Intel(R) Dual Band Wireless AC a000",
+               .fw_name_pre = IWL_A000_HR_FW_PRE,
+               IWL_DEVICE_A000,
+               .ht_params = &iwl_a000_ht_params,
+               .nvm_ver = IWL_A000_NVM_VERSION,
+               .nvm_calib_ver = IWL_A000_TX_POWER_VERSION,
+               .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
 
-const struct iwl_cfg iwla000_2ac_cfg = {
+const struct iwl_cfg iwla000_2ac_cfg_jf = {
                .name = "Intel(R) Dual Band Wireless AC a000",
-               .fw_name_pre = IWL_A000_FW_PRE,
+               .fw_name_pre = IWL_A000_JF_FW_PRE,
                IWL_DEVICE_A000,
                .ht_params = &iwl_a000_ht_params,
                .nvm_ver = IWL_A000_NVM_VERSION,
@@ -128,4 +143,5 @@ const struct iwl_cfg iwla000_2ac_cfg = {
                .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
 };
 
-MODULE_FIRMWARE(IWL_A000_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_A000_HR_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_A000_JF_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX));
index 2660cc4b9f8ca848a5bc865525dc0afbf7ce1683..94f8a51b633eb25e7c5731944c5e5f00b5673743 100644 (file)
@@ -455,7 +455,8 @@ extern const struct iwl_cfg iwl9260_2ac_cfg;
 extern const struct iwl_cfg iwl9270_2ac_cfg;
 extern const struct iwl_cfg iwl9460_2ac_cfg;
 extern const struct iwl_cfg iwl9560_2ac_cfg;
-extern const struct iwl_cfg iwla000_2ac_cfg;
+extern const struct iwl_cfg iwla000_2ac_cfg_hr;
+extern const struct iwl_cfg iwla000_2ac_cfg_jf;
 #endif /* CONFIG_IWLMVM */
 
 #endif /* __IWL_CONFIG_H__ */
index d73e9d436027dec465d1146269b9fd9df3257359..4ee3b621ec27ab58c899e91c6552f15fc0d5a57a 100644 (file)
@@ -349,6 +349,7 @@ enum {
 /* RF_ID value */
 #define CSR_HW_RF_ID_TYPE_JF           (0x00105000)
 #define CSR_HW_RF_ID_TYPE_LC           (0x00101000)
+#define CSR_HW_RF_ID_TYPE_HR           (0x00109000)
 
 /* EEPROM REG */
 #define CSR_EEPROM_REG_READ_VALID_MSK  (0x00000001)
index 45b2f679e4d8ec624be0090081aad2cbab2bec5a..0e0293d42b5d3464586e12cae7a7741d54d16ed0 100644 (file)
@@ -102,7 +102,6 @@ static struct dentry *iwl_dbgfs_root;
  * @op_mode: the running op_mode
  * @trans: transport layer
  * @dev: for debug prints only
- * @cfg: configuration struct
  * @fw_index: firmware revision to try loading
  * @firmware_name: composite filename of ucode file to load
  * @request_firmware_complete: the firmware has been obtained from user space
@@ -114,7 +113,6 @@ struct iwl_drv {
        struct iwl_op_mode *op_mode;
        struct iwl_trans *trans;
        struct device *dev;
-       const struct iwl_cfg *cfg;
 
        int fw_index;                   /* firmware we're trying to load */
        char firmware_name[64];         /* name of firmware file to load */
@@ -166,8 +164,9 @@ static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
 static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img)
 {
        int i;
-       for (i = 0; i < IWL_UCODE_SECTION_MAX; i++)
+       for (i = 0; i < img->num_sec; i++)
                iwl_free_fw_desc(drv, &img->sec[i]);
+       kfree(img->sec);
 }
 
 static void iwl_dealloc_ucode(struct iwl_drv *drv)
@@ -179,8 +178,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
                kfree(drv->fw.dbg_conf_tlv[i]);
        for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
                kfree(drv->fw.dbg_trigger_tlv[i]);
-       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++)
-               kfree(drv->fw.dbg_mem_tlv[i]);
+       kfree(drv->fw.dbg_mem_tlv);
 
        for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
                iwl_free_fw_img(drv, drv->fw.img + i);
@@ -213,18 +211,18 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw,
 
 static int iwl_request_firmware(struct iwl_drv *drv, bool first)
 {
-       const char *name_pre = drv->cfg->fw_name_pre;
+       const char *name_pre = drv->trans->cfg->fw_name_pre;
        char tag[8];
 
        if (first) {
-               drv->fw_index = drv->cfg->ucode_api_max;
+               drv->fw_index = drv->trans->cfg->ucode_api_max;
                sprintf(tag, "%d", drv->fw_index);
        } else {
                drv->fw_index--;
                sprintf(tag, "%d", drv->fw_index);
        }
 
-       if (drv->fw_index < drv->cfg->ucode_api_min) {
+       if (drv->fw_index < drv->trans->cfg->ucode_api_min) {
                IWL_ERR(drv, "no suitable firmware found!\n");
                return -ENOENT;
        }
@@ -241,7 +239,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
 }
 
 struct fw_img_parsing {
-       struct fw_sec sec[IWL_UCODE_SECTION_MAX];
+       struct fw_sec *sec;
        int sec_counter;
 };
 
@@ -276,7 +274,8 @@ struct iwl_firmware_pieces {
        size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
        struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
        size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
-       struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
+       struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
+       size_t n_dbg_mem_tlv;
 };
 
 /*
@@ -290,11 +289,33 @@ static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces,
        return &pieces->img[type].sec[sec];
 }
 
+static void alloc_sec_data(struct iwl_firmware_pieces *pieces,
+                          enum iwl_ucode_type type,
+                          int sec)
+{
+       struct fw_img_parsing *img = &pieces->img[type];
+       struct fw_sec *sec_memory;
+       int size = sec + 1;
+       size_t alloc_size = sizeof(*img->sec) * size;
+
+       if (img->sec && img->sec_counter >= size)
+               return;
+
+       sec_memory = krealloc(img->sec, alloc_size, GFP_KERNEL);
+       if (!sec_memory)
+               return;
+
+       img->sec = sec_memory;
+       img->sec_counter = size;
+}
+
 static void set_sec_data(struct iwl_firmware_pieces *pieces,
                         enum iwl_ucode_type type,
                         int sec,
                         const void *data)
 {
+       alloc_sec_data(pieces, type, sec);
+
        pieces->img[type].sec[sec].data = data;
 }
 
@@ -303,6 +324,8 @@ static void set_sec_size(struct iwl_firmware_pieces *pieces,
                         int sec,
                         size_t size)
 {
+       alloc_sec_data(pieces, type, sec);
+
        pieces->img[type].sec[sec].size = size;
 }
 
@@ -318,6 +341,8 @@ static void set_sec_offset(struct iwl_firmware_pieces *pieces,
                           int sec,
                           u32 offset)
 {
+       alloc_sec_data(pieces, type, sec);
+
        pieces->img[type].sec[sec].offset = offset;
 }
 
@@ -383,6 +408,7 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
        struct fw_img_parsing *img;
        struct fw_sec *sec;
        struct fw_sec_parsing *sec_parse;
+       size_t alloc_size;
 
        if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
                return -1;
@@ -390,6 +416,13 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
        sec_parse = (struct fw_sec_parsing *)data;
 
        img = &pieces->img[type];
+
+       alloc_size = sizeof(*img->sec) * (img->sec_counter + 1);
+       sec = krealloc(img->sec, alloc_size, GFP_KERNEL);
+       if (!sec)
+               return -ENOMEM;
+       img->sec = sec;
+
        sec = &img->sec[img->sec_counter];
 
        sec->offset = le32_to_cpu(sec_parse->offset);
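
iwl_store_ucode_sec() above now grows the section array one element at a time instead of relying on a fixed IWL_UCODE_SECTION_MAX, which is what lets the rest of this series delete that constant. The krealloc idiom it uses, sketched with hypothetical names; note the temporary pointer, since krealloc() leaves the old buffer intact on failure:

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>

struct item {
	u32 offset;
	u32 size;
};

struct item_vec {
	struct item *data;
	int count;
};

/* Append one element; on allocation failure the vector is unchanged. */
static int item_vec_push(struct item_vec *v, const struct item *it)
{
	struct item *grown;

	grown = krealloc(v->data, sizeof(*grown) * (v->count + 1),
			 GFP_KERNEL);
	if (!grown)
		return -ENOMEM;		/* v->data is still valid here */

	v->data = grown;
	v->data[v->count++] = *it;
	return 0;
}
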
@@ -1009,31 +1042,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                        struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
                                (void *)tlv_data;
                        u32 type;
+                       size_t size;
+                       struct iwl_fw_dbg_mem_seg_tlv *n;
 
                        if (tlv_len != (sizeof(*dbg_mem)))
                                goto invalid_tlv_len;
 
                        type = le32_to_cpu(dbg_mem->data_type);
-                       drv->fw.dbg_dynamic_mem = true;
 
-                       if (type >= ARRAY_SIZE(drv->fw.dbg_mem_tlv)) {
-                               IWL_ERR(drv,
-                                       "Skip unknown dbg mem segment: %u\n",
-                                       dbg_mem->data_type);
-                               break;
-                       }
+                       IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
+                                      dbg_mem->data_type);
 
-                       if (pieces->dbg_mem_tlv[type]) {
-                               IWL_ERR(drv,
-                                       "Ignore duplicate mem segment: %u\n",
-                                       dbg_mem->data_type);
+                       switch (type & FW_DBG_MEM_TYPE_MASK) {
+                       case FW_DBG_MEM_TYPE_REGULAR:
+                       case FW_DBG_MEM_TYPE_PRPH:
+                               /* we know how to handle these */
                                break;
+                       default:
+                               IWL_ERR(drv,
+                                       "Found debug memory segment with invalid type: 0x%x\n",
+                                       type);
+                               return -EINVAL;
                        }
 
-                       IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
-                                      dbg_mem->data_type);
-
-                       pieces->dbg_mem_tlv[type] = dbg_mem;
+                       size = sizeof(*pieces->dbg_mem_tlv) *
+                              (pieces->n_dbg_mem_tlv + 1);
+                       n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL);
+                       if (!n)
+                               return -ENOMEM;
+                       pieces->dbg_mem_tlv = n;
+                       pieces->dbg_mem_tlv[pieces->n_dbg_mem_tlv] = *dbg_mem;
+                       pieces->n_dbg_mem_tlv++;
                        break;
                        }
                default:
@@ -1083,12 +1122,18 @@ static int iwl_alloc_ucode(struct iwl_drv *drv,
                           enum iwl_ucode_type type)
 {
        int i;
-       for (i = 0;
-            i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i);
-            i++)
-               if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]),
-                                     get_sec(pieces, type, i)))
+       struct fw_desc *sec;
+
+       sec = kcalloc(pieces->img[type].sec_counter, sizeof(*sec), GFP_KERNEL);
+       if (!sec)
+               return -ENOMEM;
+       drv->fw.img[type].sec = sec;
+       drv->fw.img[type].num_sec = pieces->img[type].sec_counter;
+
+       for (i = 0; i < pieces->img[type].sec_counter; i++)
+               if (iwl_alloc_fw_desc(drv, &sec[i], get_sec(pieces, type, i)))
                        return -ENOMEM;
+
        return 0;
 }
 
@@ -1160,7 +1205,7 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
        dbgfs_dir = drv->dbgfs_op_mode;
 #endif
 
-       op_mode = ops->start(drv->trans, drv->cfg, &drv->fw, dbgfs_dir);
+       op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        if (!op_mode) {
@@ -1200,8 +1245,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
        struct iwlwifi_opmode_table *op;
        int err;
        struct iwl_firmware_pieces *pieces;
-       const unsigned int api_max = drv->cfg->ucode_api_max;
-       const unsigned int api_min = drv->cfg->ucode_api_min;
+       const unsigned int api_max = drv->trans->cfg->ucode_api_max;
+       const unsigned int api_min = drv->trans->cfg->ucode_api_min;
        size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
        u32 api_ver;
        int i;
@@ -1263,7 +1308,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
         * In mvm uCode there is no difference between data and instructions
         * sections.
         */
-       if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces, drv->cfg))
+       if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces,
+                                                        drv->trans->cfg))
                goto try_again;
 
        /* Allocate ucode buffers for card's bus-master loading ... */
@@ -1345,19 +1391,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                }
        }
 
-       for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) {
-               if (pieces->dbg_mem_tlv[i]) {
-                       drv->fw.dbg_mem_tlv[i] =
-                               kmemdup(pieces->dbg_mem_tlv[i],
-                                       sizeof(*drv->fw.dbg_mem_tlv[i]),
-                                       GFP_KERNEL);
-                       if (!drv->fw.dbg_mem_tlv[i])
-                               goto out_free_fw;
-               }
-       }
-
        /* Now that we can no longer fail, copy information */
 
+       drv->fw.dbg_mem_tlv = pieces->dbg_mem_tlv;
+       pieces->dbg_mem_tlv = NULL;
+       drv->fw.n_dbg_mem_tlv = pieces->n_dbg_mem_tlv;
+
        /*
         * The (size - 16) / 12 formula is based on the information recorded
         * for each event, which is of mode 1 (including timestamp) for all
@@ -1368,14 +1407,14 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12;
        else
                fw->init_evtlog_size =
-                       drv->cfg->base_params->max_event_log_size;
+                       drv->trans->cfg->base_params->max_event_log_size;
        fw->init_errlog_ptr = pieces->init_errlog_ptr;
        fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr;
        if (pieces->inst_evtlog_size)
                fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12;
        else
                fw->inst_evtlog_size =
-                       drv->cfg->base_params->max_event_log_size;
+                       drv->trans->cfg->base_params->max_event_log_size;
        fw->inst_errlog_ptr = pieces->inst_errlog_ptr;
 
        /*
@@ -1441,29 +1480,30 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
                                op->name, err);
 #endif
        }
-       kfree(pieces);
-       return;
+       goto free;
 
  try_again:
        /* try next, if any */
        release_firmware(ucode_raw);
        if (iwl_request_firmware(drv, false))
                goto out_unbind;
-       kfree(pieces);
-       return;
+       goto free;
 
  out_free_fw:
        IWL_ERR(drv, "failed to allocate pci memory\n");
        iwl_dealloc_ucode(drv);
        release_firmware(ucode_raw);
  out_unbind:
-       kfree(pieces);
        complete(&drv->request_firmware_complete);
        device_release_driver(drv->trans->dev);
+ free:
+       for (i = 0; i < ARRAY_SIZE(pieces->img); i++)
+               kfree(pieces->img[i].sec);
+       kfree(pieces->dbg_mem_tlv);
+       kfree(pieces);
 }
 
-struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
-                             const struct iwl_cfg *cfg)
+struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
 {
        struct iwl_drv *drv;
        int ret;
@@ -1476,7 +1516,6 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
 
        drv->trans = trans;
        drv->dev = trans->dev;
-       drv->cfg = cfg;
 
        init_completion(&drv->request_firmware_complete);
        INIT_LIST_HEAD(&drv->list);
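
The tail of iwl_req_fw_callback() above replaces three separate "kfree(pieces); return;" exits with a single free: label that the success, retry, and error paths all reach, so the krealloc'ed per-image arrays and pieces itself are released exactly once. The usual goto-unwind shape for a temporary parse buffer, with hypothetical names:

#include <linux/slab.h>
#include <linux/errno.h>

struct scratch {
	void *sections;		/* hypothetical nested allocation */
};

static int do_parse(struct scratch *s);	/* assumed helper */

static int parse_blob(void)
{
	struct scratch *tmp;
	int err;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	err = do_parse(tmp);
	if (err)
		goto free;

	/* ... copy out anything that must outlive tmp ... */

free:
	/* every path ends here; kfree(NULL) is a safe no-op */
	kfree(tmp->sections);
	kfree(tmp);
	return err;
}
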
index f6eacfdbc2657979164090da64cc294cbcdeda27..6c537e04864eff3492345f526fac7609b022dbbb 100644 (file)
@@ -118,15 +118,13 @@ struct iwl_cfg;
  * iwl_drv_start - start the drv
  *
  * @trans: the transport layer
- * @cfg: device specific constants / virtual functions
  *
  * starts the driver: fetches the firmware. This should be called by bus
  * specific system flows implementations. For example, the bus specific probe
  * function should do bus related operations only, and then call to this
  * function. It returns the driver object or %NULL if an error occurred.
  */
-struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
-                             const struct iwl_cfg *cfg);
+struct iwl_drv *iwl_drv_start(struct iwl_trans *trans);
 
 /**
  * iwl_drv_stop - stop the drv
index 84813b550ef196566821c545ded9eca0b0568d09..d01701ee477702272d4597a33d7c5710510f4fda 100644 (file)
@@ -379,7 +379,6 @@ enum iwl_ucode_tlv_capa {
  * For 16.0 uCode and above, there is no differentiation between sections,
  * just an offset to the HW address.
  */
-#define IWL_UCODE_SECTION_MAX 16
 #define CPU1_CPU2_SEPARATOR_SECTION    0xFFFFCCCC
 #define PAGING_SEPARATOR_SECTION       0xAAAABBBB
 
@@ -489,25 +488,22 @@ enum iwl_fw_dbg_monitor_mode {
 };
 
 /**
- * enum iwl_fw_mem_seg_type - data types for dumping on error
- *
- * @FW_DBG_MEM_SMEM: the data type is SMEM
- * @FW_DBG_MEM_DCCM_LMAC: the data type is DCCM_LMAC
- * @FW_DBG_MEM_DCCM_UMAC: the data type is DCCM_UMAC
+ * enum iwl_fw_mem_seg_type - memory segment type
+ * @FW_DBG_MEM_TYPE_MASK: mask for the type indication
+ * @FW_DBG_MEM_TYPE_REGULAR: regular memory
+ * @FW_DBG_MEM_TYPE_PRPH: periphery memory (requires special reading)
  */
-enum iwl_fw_dbg_mem_seg_type {
-       FW_DBG_MEM_DCCM_LMAC = 0,
-       FW_DBG_MEM_DCCM_UMAC,
-       FW_DBG_MEM_SMEM,
-
-       /* Must be last */
-       FW_DBG_MEM_MAX,
+enum iwl_fw_mem_seg_type {
+       FW_DBG_MEM_TYPE_MASK    = 0xff000000,
+       FW_DBG_MEM_TYPE_REGULAR = 0x00000000,
+       FW_DBG_MEM_TYPE_PRPH    = 0x01000000,
 };
 
 /**
  * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments
  *
- * @data_type: enum %iwl_fw_mem_seg_type
+ * @data_type: the memory segment type to record, see &enum iwl_fw_mem_seg_type
+ *     for what we care about
  * @ofs: the memory segment offset
  * @len: the memory segment length, in bytes
  *
index 5f229556339a45f9d621e195974c8c5e9bb534b1..d323b70b510a1f5d795a56b82be267567b71360f 100644 (file)
@@ -132,7 +132,8 @@ struct fw_desc {
 };
 
 struct fw_img {
-       struct fw_desc sec[IWL_UCODE_SECTION_MAX];
+       struct fw_desc *sec;
+       int num_sec;
        bool is_dual_cpus;
        u32 paging_mem_size;
 };
@@ -295,8 +296,8 @@ struct iwl_fw {
        struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
        size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
        struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
-       struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
-       bool dbg_dynamic_mem;
+       struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
+       size_t n_dbg_mem_tlv;
        size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
        u8 dbg_dest_reg_num;
        struct iwl_gscan_capabilities gscan_capa;
index b88e2048ae0baa207406972c618a28c7db4026e4..c7eb1983c4f9a191fc20d3cec151f069e7704b5d 100644 (file)
@@ -91,7 +91,7 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
        memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
        memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
        mvmvif->rekey_data.replay_ctr =
-               cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
+               cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
        mvmvif->rekey_data.valid = true;
 
        mutex_unlock(&mvm->mutex);
@@ -1262,12 +1262,15 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
        iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
  out:
        if (ret < 0) {
-               iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
-               if (mvm->restart_fw > 0) {
-                       mvm->restart_fw--;
-                       ieee80211_restart_hw(mvm->hw);
-               }
                iwl_mvm_free_nd(mvm);
+
+               if (!unified_image) {
+                       iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+                       if (mvm->restart_fw > 0) {
+                               mvm->restart_fw--;
+                               ieee80211_restart_hw(mvm->hw);
+                       }
+               }
        }
  out_noreset:
        mutex_unlock(&mvm->mutex);
@@ -1738,7 +1741,7 @@ out:
 static struct iwl_wowlan_status *
 iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
-       u32 base = mvm->error_event_table;
+       u32 base = mvm->error_event_table[0];
        struct error_table_start {
                /* cf. struct iwl_error_event_table */
                u32 valid;
index 7b7d2a146e3020a286da8f7acc9a97d005142ce8..a260cd5032005bcbf520e98f8be188750c987439 100644 (file)
@@ -798,7 +798,7 @@ static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file,
 static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
                                          size_t count, loff_t *ppos)
 {
-       int ret;
+       int __maybe_unused ret;
 
        mutex_lock(&mvm->mutex);
 
index 0246506ab595c4105b393ce8b24d25a3aff4af95..480a54af453477ac7194766d9a8ada9b3f10c923 100644 (file)
 #define __fw_api_mac_h__
 
 /*
- * The first MAC indices (starting from 0)
- * are available to the driver, AUX follows
+ * The first MAC indices (starting from 0) are available to the driver,
+ * AUX indices follow: 1 for non-CDB, 2 for CDB.
  */
 #define MAC_INDEX_AUX          4
 #define MAC_INDEX_MIN_DRIVER   0
 #define NUM_MAC_INDEX_DRIVER   MAC_INDEX_AUX
-#define NUM_MAC_INDEX          (MAC_INDEX_AUX + 1)
+#define NUM_MAC_INDEX          (NUM_MAC_INDEX_DRIVER + 1)
+#define NUM_MAC_INDEX_CDB      (NUM_MAC_INDEX_DRIVER + 2)
 
 #define IWL_MVM_STATION_COUNT  16
 #define IWL_MVM_TDLS_STA_COUNT 4
index 0c294c9f98e95a2a23d725a1e0caa73d8873b94f..c78a0c49945981de464cda0d1d863a95a0173178 100644 (file)
@@ -453,6 +453,8 @@ enum scan_config_flags {
        SCAN_CONFIG_FLAG_CLEAR_CAM_MODE                 = BIT(19),
        SCAN_CONFIG_FLAG_SET_PROMISC_MODE               = BIT(20),
        SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE             = BIT(21),
+       SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED           = BIT(22),
+       SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED         = BIT(23),
 
        /* Bits 26-31 are for num of channels in channel_array */
 #define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26)
@@ -485,6 +487,20 @@ enum iwl_channel_flags {
        IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE        = BIT(3),
 };
 
+/**
+ * struct iwl_scan_dwell
+ * @active:            default dwell time for active scan
+ * @passive:           default dwell time for passive scan
+ * @fragmented:                default dwell time for fragmented scan
+ * @extended:          default dwell time for channels 1, 6 and 11
+ */
+struct iwl_scan_dwell {
+       u8 active;
+       u8 passive;
+       u8 fragmented;
+       u8 extended;
+} __packed;
+
 /**
  * struct iwl_scan_config
  * @flags:                     enum scan_config_flags
@@ -493,10 +509,7 @@ enum iwl_channel_flags {
  * @legacy_rates:              default legacy rates - enum scan_config_rates
  * @out_of_channel_time:       default max out of serving channel time
  * @suspend_time:              default max suspend time
- * @dwell_active:              default dwell time for active scan
- * @dwell_passive:             default dwell time for passive scan
- * @dwell_fragmented:          default dwell time for fragmented scan
- * @dwell_extended:            default dwell time for channels 1, 6 and 11
+ * @dwell:                     dwells for the scan
  * @mac_addr:                  default mac address to be used in probes
  * @bcast_sta_id:              the index of the station in the fw
  * @channel_flags:             default channel flags - enum iwl_channel_flags
@@ -510,16 +523,29 @@ struct iwl_scan_config {
        __le32 legacy_rates;
        __le32 out_of_channel_time;
        __le32 suspend_time;
-       u8 dwell_active;
-       u8 dwell_passive;
-       u8 dwell_fragmented;
-       u8 dwell_extended;
+       struct iwl_scan_dwell dwell;
        u8 mac_addr[ETH_ALEN];
        u8 bcast_sta_id;
        u8 channel_flags;
        u8 channel_array[];
 } __packed; /* SCAN_CONFIG_DB_CMD_API_S */
 
+#define SCAN_TWO_LMACS 2
+
+struct iwl_scan_config_cdb {
+       __le32 flags;
+       __le32 tx_chains;
+       __le32 rx_chains;
+       __le32 legacy_rates;
+       __le32 out_of_channel_time[SCAN_TWO_LMACS];
+       __le32 suspend_time[SCAN_TWO_LMACS];
+       struct iwl_scan_dwell dwell;
+       u8 mac_addr[ETH_ALEN];
+       u8 bcast_sta_id;
+       u8 channel_flags;
+       u8 channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */
+
 /**
  * iwl_umac_scan_flags
  *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
@@ -540,17 +566,18 @@ enum iwl_umac_scan_uid_offsets {
 };
 
 enum iwl_umac_scan_general_flags {
-       IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC        = BIT(0),
-       IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT         = BIT(1),
-       IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL        = BIT(2),
-       IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE         = BIT(3),
-       IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT     = BIT(4),
-       IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE   = BIT(5),
-       IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID   = BIT(6),
-       IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED      = BIT(7),
-       IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED     = BIT(8),
-       IWL_UMAC_SCAN_GEN_FLAGS_MATCH           = BIT(9),
-       IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL  = BIT(10),
+       IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC                = BIT(0),
+       IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT                 = BIT(1),
+       IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL                = BIT(2),
+       IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE                 = BIT(3),
+       IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT             = BIT(4),
+       IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE           = BIT(5),
+       IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID           = BIT(6),
+       IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED              = BIT(7),
+       IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED             = BIT(8),
+       IWL_UMAC_SCAN_GEN_FLAGS_MATCH                   = BIT(9),
+       IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL          = BIT(10),
+       IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED        = BIT(11),
 };
 
 /**
@@ -610,8 +637,9 @@ struct iwl_scan_req_umac_tail {
  * @active_dwell: dwell time for active scan
  * @passive_dwell: dwell time for passive scan
  * @fragmented_dwell: dwell time for fragmented passive scan
- * @max_out_time: max out of serving channel time
- * @suspend_time: max suspend time
+ * @max_out_time: max out of serving channel time, per LMAC - for CDB there
+ *     are 2 LMACs
+ * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
  * @scan_priority: scan internal prioritization &enum iwl_scan_priority
  * @channel_flags: &enum iwl_scan_channel_flags
  * @n_channels: num of channels in scan request
@@ -631,15 +659,33 @@ struct iwl_scan_req_umac {
        u8 active_dwell;
        u8 passive_dwell;
        u8 fragmented_dwell;
-       __le32 max_out_time;
-       __le32 suspend_time;
-       __le32 scan_priority;
-       /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
-       u8 channel_flags;
-       u8 n_channels;
-       __le16 reserved;
-       u8 data[];
-} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+       union {
+               struct {
+                       __le32 max_out_time;
+                       __le32 suspend_time;
+                       __le32 scan_priority;
+                       /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+                       u8 channel_flags;
+                       u8 n_channels;
+                       __le16 reserved;
+                       u8 data[];
+               } no_cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+               struct {
+                       __le32 max_out_time[SCAN_TWO_LMACS];
+                       __le32 suspend_time[SCAN_TWO_LMACS];
+                       __le32 scan_priority;
+                       /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+                       u8 channel_flags;
+                       u8 n_channels;
+                       __le16 reserved;
+                       u8 data[];
+               } cdb; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_5 */
+       };
+} __packed;
+
+#define IWL_SCAN_REQ_UMAC_SIZE_CDB sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE (sizeof(struct iwl_scan_req_umac) - \
+                               2 * sizeof(__le32))
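
The union above lets one host-side structure describe both firmware layouts, and the two size macros then pick how many bytes actually go to the device: the CDB variant carries one extra __le32 in each of its two per-LMAC arrays, hence the - 2 * sizeof(__le32) for the non-CDB command length. The general shape, sketched with hypothetical names:

#include <linux/types.h>

#define TWO_LMACS	2

struct my_scan_cmd {
	__le32 flags;			/* fields common to both layouts */
	union {
		struct {		/* single-LMAC firmware */
			__le32 out_time;
			__le32 suspend_time;
			u8 data[];
		} v1;
		struct {		/* CDB: one value per LMAC */
			__le32 out_time[TWO_LMACS];
			__le32 suspend_time[TWO_LMACS];
			u8 data[];
		} v2;
	};
} __packed;

/* v2 is the full structure; v1 drops the second element of each
 * per-LMAC array, i.e. two __le32 less on the wire.
 */
#define MY_SCAN_CMD_SIZE_CDB	sizeof(struct my_scan_cmd)
#define MY_SCAN_CMD_SIZE	(sizeof(struct my_scan_cmd) - \
				 2 * sizeof(__le32))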
 
 /**
  * struct iwl_umac_scan_abort
index 4e638a44babb81e81015b604883cfbb6232cb25a..6371c342b96dc4795da26cde564fab562e365c8f 100644 (file)
@@ -220,7 +220,7 @@ struct mvm_statistics_bt_activity {
        __le32 lo_priority_rx_denied_cnt;
 } __packed;  /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
 
-struct mvm_statistics_general_v8 {
+struct mvm_statistics_general_common {
        __le32 radio_temperature;
        __le32 radio_voltage;
        struct mvm_statistics_dbg dbg;
@@ -248,11 +248,22 @@ struct mvm_statistics_general_v8 {
        __le64 on_time_rf;
        __le64 on_time_scan;
        __le64 tx_time;
+} __packed;
+
+struct mvm_statistics_general_v8 {
+       struct mvm_statistics_general_common common;
        __le32 beacon_counter[NUM_MAC_INDEX];
        u8 beacon_average_energy[NUM_MAC_INDEX];
        u8 reserved[4 - (NUM_MAC_INDEX % 4)];
 } __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
 
+struct mvm_statistics_general_cdb {
+       struct mvm_statistics_general_common common;
+       __le32 beacon_counter[NUM_MAC_INDEX_CDB];
+       u8 beacon_average_energy[NUM_MAC_INDEX_CDB];
+       u8 reserved[4 - (NUM_MAC_INDEX_CDB % 4)];
+} __packed; /* STATISTICS_GENERAL_API_S_VER_9 */
+
 /**
  * struct mvm_statistics_load - RX statistics for multi-queue devices
  * @air_time: accumulated air time, per mac
@@ -267,6 +278,13 @@ struct mvm_statistics_load {
        u8 avg_energy[IWL_MVM_STATION_COUNT];
 } __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */
 
+struct mvm_statistics_load_cdb {
+       __le32 air_time[NUM_MAC_INDEX_CDB];
+       __le32 byte_count[NUM_MAC_INDEX_CDB];
+       __le32 pkt_count[NUM_MAC_INDEX_CDB];
+       u8 avg_energy[IWL_MVM_STATION_COUNT];
+} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_2 */
+
 struct mvm_statistics_rx {
        struct mvm_statistics_rx_phy ofdm;
        struct mvm_statistics_rx_phy cck;
@@ -281,6 +299,7 @@ struct mvm_statistics_rx {
  * while associated.  To disable this behavior, set DISABLE_NOTIF flag in the
  * STATISTICS_CMD (0x9c), below.
  */
+
 struct iwl_notif_statistics_v10 {
        __le32 flag;
        struct mvm_statistics_rx rx;
@@ -296,6 +315,14 @@ struct iwl_notif_statistics_v11 {
        struct mvm_statistics_load load_stats;
 } __packed; /* STATISTICS_NTFY_API_S_VER_11 */
 
+struct iwl_notif_statistics_cdb {
+       __le32 flag;
+       struct mvm_statistics_rx rx;
+       struct mvm_statistics_tx tx;
+       struct mvm_statistics_general_cdb general;
+       struct mvm_statistics_load_cdb load_stats;
+} __packed; /* STATISTICS_NTFY_API_S_VER_12 */
+
 #define IWL_STATISTICS_FLG_CLEAR               0x1
 #define IWL_STATISTICS_FLG_DISABLE_NOTIF       0x2
 
index 59ca97a11b2b74c9b6a4b4880f0cf3d2c610bf12..b38cc073adcc7b0eb876bb4773dc2ad22762a8f9 100644 (file)
@@ -672,8 +672,7 @@ struct iwl_mac_beacon_cmd_v6 {
 } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */
 
 /**
- * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA
- * @tx: the tx commands associated with the beacon frame
+ * struct iwl_mac_beacon_cmd_data - data of beacon template with offloaded CSA
  * @template_id: currently equal to the mac context id of the corresponding
  *  mac.
  * @tim_idx: the offset of the tim IE in the beacon
@@ -682,16 +681,38 @@ struct iwl_mac_beacon_cmd_v6 {
  * @csa_offset: offset to the CSA IE if present
  * @frame: the template of the beacon frame
  */
-struct iwl_mac_beacon_cmd {
-       struct iwl_tx_cmd tx;
+struct iwl_mac_beacon_cmd_data {
        __le32 template_id;
        __le32 tim_idx;
        __le32 tim_size;
        __le32 ecsa_offset;
        __le32 csa_offset;
        struct ieee80211_hdr frame[0];
+};
+
+/**
+ * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA
+ * @tx: the tx commands associated with the beacon frame
+ * @data: see &iwl_mac_beacon_cmd_data
+ */
+struct iwl_mac_beacon_cmd_v7 {
+       struct iwl_tx_cmd tx;
+       struct iwl_mac_beacon_cmd_data data;
 } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */
 
+/**
+ * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA
+ * @byte_cnt: byte count of the beacon frame
+ * @flags: for future use
+ * @data: see &iwl_mac_beacon_cmd_data
+ */
+struct iwl_mac_beacon_cmd {
+       __le16 byte_cnt;
+       __le16 flags;
+       __le64 reserved;
+       struct iwl_mac_beacon_cmd_data data;
+} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_8 */
+
 struct iwl_beacon_notif {
        struct iwl_mvm_tx_resp beacon_notify_hdr;
        __le64 tsf;
index ae12badc0c2ab7dd08451d3432528274936125b8..cf2b836f38881364c440be63cc7feb76ae738bdc 100644 (file)
@@ -341,6 +341,10 @@ enum iwl_prot_offload_subcmd_ids {
        STORED_BEACON_NTF = 0xFF,
 };
 
+enum iwl_regulatory_and_nvm_subcmd_ids {
+       NVM_ACCESS_COMPLETE = 0x0,
+};
+
 enum iwl_fmac_debug_cmds {
        LMAC_RD_WR = 0x0,
        UMAC_RD_WR = 0x1,
@@ -355,6 +359,7 @@ enum {
        PHY_OPS_GROUP = 0x4,
        DATA_PATH_GROUP = 0x5,
        PROT_OFFLOAD_GROUP = 0xb,
+       REGULATORY_AND_NVM_GROUP = 0xc,
        DEBUG_GROUP = 0xf,
 };
 
@@ -593,60 +598,7 @@ enum {
 
 #define IWL_ALIVE_FLG_RFKILL   BIT(0)
 
-struct mvm_alive_resp_ver1 {
-       __le16 status;
-       __le16 flags;
-       u8 ucode_minor;
-       u8 ucode_major;
-       __le16 id;
-       u8 api_minor;
-       u8 api_major;
-       u8 ver_subtype;
-       u8 ver_type;
-       u8 mac;
-       u8 opt;
-       __le16 reserved2;
-       __le32 timestamp;
-       __le32 error_event_table_ptr;   /* SRAM address for error log */
-       __le32 log_event_table_ptr;     /* SRAM address for event log */
-       __le32 cpu_register_ptr;
-       __le32 dbgm_config_ptr;
-       __le32 alive_counter_ptr;
-       __le32 scd_base_ptr;            /* SRAM address for SCD */
-} __packed; /* ALIVE_RES_API_S_VER_1 */
-
-struct mvm_alive_resp_ver2 {
-       __le16 status;
-       __le16 flags;
-       u8 ucode_minor;
-       u8 ucode_major;
-       __le16 id;
-       u8 api_minor;
-       u8 api_major;
-       u8 ver_subtype;
-       u8 ver_type;
-       u8 mac;
-       u8 opt;
-       __le16 reserved2;
-       __le32 timestamp;
-       __le32 error_event_table_ptr;   /* SRAM address for error log */
-       __le32 log_event_table_ptr;     /* SRAM address for LMAC event log */
-       __le32 cpu_register_ptr;
-       __le32 dbgm_config_ptr;
-       __le32 alive_counter_ptr;
-       __le32 scd_base_ptr;            /* SRAM address for SCD */
-       __le32 st_fwrd_addr;            /* pointer to Store and forward */
-       __le32 st_fwrd_size;
-       u8 umac_minor;                  /* UMAC version: minor */
-       u8 umac_major;                  /* UMAC version: major */
-       __le16 umac_id;                 /* UMAC version: id */
-       __le32 error_info_addr;         /* SRAM address for UMAC error log */
-       __le32 dbg_print_buff_addr;
-} __packed; /* ALIVE_RES_API_S_VER_2 */
-
-struct mvm_alive_resp {
-       __le16 status;
-       __le16 flags;
+struct iwl_lmac_alive {
        __le32 ucode_minor;
        __le32 ucode_major;
        u8 ver_subtype;
@@ -662,12 +614,29 @@ struct mvm_alive_resp {
        __le32 scd_base_ptr;            /* SRAM address for SCD */
        __le32 st_fwrd_addr;            /* pointer to Store and forward */
        __le32 st_fwrd_size;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
+
+struct iwl_umac_alive {
        __le32 umac_minor;              /* UMAC version: minor */
        __le32 umac_major;              /* UMAC version: major */
        __le32 error_info_addr;         /* SRAM address for UMAC error log */
        __le32 dbg_print_buff_addr;
+} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
+
+struct mvm_alive_resp_v3 {
+       __le16 status;
+       __le16 flags;
+       struct iwl_lmac_alive lmac_data;
+       struct iwl_umac_alive umac_data;
 } __packed; /* ALIVE_RES_API_S_VER_3 */
 
+struct mvm_alive_resp {
+       __le16 status;
+       __le16 flags;
+       struct iwl_lmac_alive lmac_data[2];
+       struct iwl_umac_alive umac_data;
+} __packed; /* ALIVE_RES_API_S_VER_4 */
+
 /* Error response/notification */
 enum {
        FW_ERR_UNKNOWN_CMD = 0x0,
@@ -708,7 +677,6 @@ struct iwl_error_resp {
 #define MAX_MACS_IN_BINDING    (3)
 #define MAX_BINDINGS           (4)
 #define AUX_BINDING_INDEX      (3)
-#define MAX_PHYS               (4)
 
 /* Used to extract ID and color from the context dword */
 #define FW_CTXT_ID_POS   (0)
@@ -1251,13 +1219,16 @@ struct iwl_missed_beacons_notif {
  * @external_ver: external image version
  * @status: MFUART loading status
  * @duration: MFUART loading time
+ * @image_size: MFUART image size in bytes
 */
 struct iwl_mfuart_load_notif {
        __le32 installed_ver;
        __le32 external_ver;
        __le32 status;
        __le32 duration;
-} __packed; /*MFU_LOADER_NTFY_API_S_VER_1*/
+       /* image_size is valid only in v2 of the notification */
+       __le32 image_size;
+} __packed; /* MFU_LOADER_NTFY_API_S_VER_2 */
 
 /**
  * struct iwl_set_calib_default_cmd - set default value for calibration.
@@ -2075,7 +2046,7 @@ struct iwl_mu_group_mgmt_notif {
  * @system_time: system time on air rise
  * @tsf: TSF on air rise
  * @beacon_timestamp: beacon on air rise
- * @phy_flags: general phy flags: band, modulation, etc.
+ * @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition
  * @channel: channel this beacon was received on
  * @rates: rate in ucode internal format
  * @byte_count: frame's byte count
@@ -2084,12 +2055,12 @@ struct iwl_stored_beacon_notif {
        __le32 system_time;
        __le64 tsf;
        __le32 beacon_timestamp;
-       __le16 phy_flags;
+       __le16 band;
        __le16 channel;
        __le32 rates;
        __le32 byte_count;
        u8 data[MAX_STORED_BEACON_SIZE];
-} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */
+} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */
 
 #define LQM_NUMBER_OF_STATIONS_IN_REPORT 16
 
@@ -2200,4 +2171,11 @@ struct iwl_dbg_mem_access_rsp {
        __le32 data[];
 } __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */
 
+/**
+ * struct iwl_nvm_access_complete_cmd - tell the firmware that NVM access is complete
+ */
+struct iwl_nvm_access_complete_cmd {
+       __le32 reserved;
+} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
+
 #endif /* __fw_api_h__ */
index 2e8e3e8e30a329afc9c5b962739990ebea97fb1e..a027b11bbdb38b5040d679982762959c03df97d2 100644 (file)
@@ -406,46 +406,63 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
        { .start = 0x00a02400, .end = 0x00a02758 },
 };
 
-static u32 iwl_dump_prph(struct iwl_trans *trans,
-                        struct iwl_fw_error_dump_data **data,
-                        const struct iwl_prph_range *iwl_prph_dump_addr,
-                        u32 range_len)
+static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start,
+                                u32 len_bytes, __le32 *data)
+{
+       u32 i;
+
+       for (i = 0; i < len_bytes; i += 4)
+               *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
+}
+
+static bool iwl_read_prph_block(struct iwl_trans *trans, u32 start,
+                               u32 len_bytes, __le32 *data)
+{
+       unsigned long flags;
+       bool success = false;
+
+       if (iwl_trans_grab_nic_access(trans, &flags)) {
+               success = true;
+               _iwl_read_prph_block(trans, start, len_bytes, data);
+               iwl_trans_release_nic_access(trans, &flags);
+       }
+
+       return success;
+}
+
+static void iwl_dump_prph(struct iwl_trans *trans,
+                         struct iwl_fw_error_dump_data **data,
+                         const struct iwl_prph_range *iwl_prph_dump_addr,
+                         u32 range_len)
 {
        struct iwl_fw_error_dump_prph *prph;
        unsigned long flags;
-       u32 prph_len = 0, i;
+       u32 i;
 
        if (!iwl_trans_grab_nic_access(trans, &flags))
-               return 0;
+               return;
 
        for (i = 0; i < range_len; i++) {
                /* The range includes both boundaries */
                int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
                         iwl_prph_dump_addr[i].start + 4;
-               int reg;
-               __le32 *val;
-
-               prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk;
 
                (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
                (*data)->len = cpu_to_le32(sizeof(*prph) +
                                        num_bytes_in_chunk);
                prph = (void *)(*data)->data;
                prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
-               val = (void *)prph->data;
 
-               for (reg = iwl_prph_dump_addr[i].start;
-                    reg <= iwl_prph_dump_addr[i].end;
-                    reg += 4)
-                       *val++ = cpu_to_le32(iwl_read_prph_no_grab(trans,
-                                                                  reg));
+               _iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
+                                    /* our range is inclusive, hence + 4 */
+                                    iwl_prph_dump_addr[i].end -
+                                    iwl_prph_dump_addr[i].start + 4,
+                                    (void *)prph->data);
 
                *data = iwl_fw_error_next_data(*data);
        }
 
        iwl_trans_release_nic_access(trans, &flags);
-
-       return prph_len;
 }
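The refactor above splits the PRPH dump into a lockless worker (_iwl_read_prph_block) and a wrapper that grabs NIC access and reports success to the caller. A minimal standalone sketch of that grab/work/release shape; grab_access() and read_block() are illustrative stand-ins, not the real transport API:

#include <stdbool.h>
#include <stdio.h>

static bool grab_access(void)    { return true; }
static void release_access(void) { }

/* Mirrors _iwl_read_prph_block(): assumes access is already held. */
static void read_block_locked(int n)
{
        int i;

        for (i = 0; i < n; i++)
                printf("read word %d\n", i);
}

/* Mirrors iwl_read_prph_block(): wrap the lockless worker and
 * report whether access could be grabbed at all. */
static bool read_block(int n)
{
        if (!grab_access())
                return false;
        read_block_locked(n);
        release_access();
        return true;
}

int main(void)
{
        return read_block(3) ? 0 : 1;
}

Callers that already hold access (like the loop in iwl_dump_prph) use the lockless variant directly; one-shot callers use the wrapper.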
 
 /*
@@ -495,11 +512,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        struct iwl_mvm_dump_ptrs *fw_error_dump;
        struct scatterlist *sg_dump_data;
        u32 sram_len, sram_ofs;
-       struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem =
-               mvm->fw->dbg_mem_tlv;
+       const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = mvm->fw->dbg_mem_tlv;
        u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
-       u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len;
-       u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->dccm2_len;
+       u32 smem_len = mvm->fw->n_dbg_mem_tlv ? 0 : mvm->cfg->smem_len;
+       u32 sram2_len = mvm->fw->n_dbg_mem_tlv ? 0 : mvm->cfg->dccm2_len;
        bool monitor_dump_only = false;
        int i;
 
@@ -624,10 +640,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
 
        /* Make room for MEM segments */
-       for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
-               if (fw_dbg_mem[i])
-                       file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
-                               le32_to_cpu(fw_dbg_mem[i]->len);
+       for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) {
+               file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+                           le32_to_cpu(fw_dbg_mem[i].len);
        }
 
        /* Make room for fw's virtual image pages, if it exists */
@@ -656,7 +671,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
                            mvm->fw_dump_desc->len;
 
-       if (!mvm->fw->dbg_dynamic_mem)
+       if (!mvm->fw->n_dbg_mem_tlv)
                file_len += sram_len + sizeof(*dump_mem);
 
        dump_file = vzalloc(file_len);
@@ -708,7 +723,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        if (monitor_dump_only)
                goto dump_trans_data;
 
-       if (!mvm->fw->dbg_dynamic_mem) {
+       if (!mvm->fw->n_dbg_mem_tlv) {
                dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
                dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
                dump_mem = (void *)dump_data->data;
@@ -719,22 +734,39 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                dump_data = iwl_fw_error_next_data(dump_data);
        }
 
-       for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
-               if (fw_dbg_mem[i]) {
-                       u32 len = le32_to_cpu(fw_dbg_mem[i]->len);
-                       u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs);
-
-                       dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
-                       dump_data->len = cpu_to_le32(len +
-                                       sizeof(*dump_mem));
-                       dump_mem = (void *)dump_data->data;
-                       dump_mem->type = fw_dbg_mem[i]->data_type;
-                       dump_mem->offset = cpu_to_le32(ofs);
+       for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) {
+               u32 len = le32_to_cpu(fw_dbg_mem[i].len);
+               u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
+               bool success;
+
+               dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+               dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
+               dump_mem = (void *)dump_data->data;
+               dump_mem->type = fw_dbg_mem[i].data_type;
+               dump_mem->offset = cpu_to_le32(ofs);
+
+               switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) {
+               case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR):
                        iwl_trans_read_mem_bytes(mvm->trans, ofs,
                                                 dump_mem->data,
                                                 len);
-                       dump_data = iwl_fw_error_next_data(dump_data);
+                       success = true;
+                       break;
+               case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH):
+                       success = iwl_read_prph_block(mvm->trans, ofs, len,
+                                                     (void *)dump_mem->data);
+                       break;
+               default:
+                       /*
+                        * We shouldn't get here: this kind of TLV
+                        * should have been ignored during TLV parsing.
+                        */
+                       WARN_ON(1);
+                       success = false;
                }
+
+               if (success)
+                       dump_data = iwl_fw_error_next_data(dump_data);
        }
 
        if (smem_len) {
@@ -779,12 +811,16 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                        struct iwl_fw_error_dump_paging *paging;
                        struct page *pages =
                                mvm->fw_paging_db[i].fw_paging_block;
+                       dma_addr_t addr = mvm->fw_paging_db[i].fw_paging_phys;
 
                        dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
                        dump_data->len = cpu_to_le32(sizeof(*paging) +
                                                     PAGING_BLOCK_SIZE);
                        paging = (void *)dump_data->data;
                        paging->index = cpu_to_le32(i);
+                       dma_sync_single_for_cpu(mvm->trans->dev, addr,
+                                               PAGING_BLOCK_SIZE,
+                                               DMA_BIDIRECTIONAL);
                        memcpy(paging->data, page_address(pages),
                               PAGING_BLOCK_SIZE);
                        dump_data = iwl_fw_error_next_data(dump_data);
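The dma_sync_single_for_cpu() call added above hands ownership of the streaming DMA buffer back to the CPU before memcpy() reads it; the paging hunks later in this patch do the inverse with dma_sync_single_for_device() after CPU writes. A userspace sketch of the ownership rule, where sync_for_cpu()/sync_for_device() are no-op stand-ins rather than the kernel API:

#include <stdio.h>
#include <string.h>

static void sync_for_cpu(void)    { /* device -> CPU handoff */ }
static void sync_for_device(void) { /* CPU -> device handoff */ }

static char dma_buf[16] = "from-device";

int main(void)
{
        char copy[16];

        sync_for_cpu();                 /* before the CPU reads the buffer */
        memcpy(copy, dma_buf, sizeof(copy));

        memcpy(dma_buf, "to-device", 10);
        sync_for_device();              /* after CPU writes, before HW use */

        printf("%s\n", copy);
        return 0;
}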
@@ -816,11 +852,12 @@ dump_trans_data:
                                     sg_nents(sg_dump_data),
                                     fw_error_dump->op_mode_ptr,
                                     fw_error_dump->op_mode_len, 0);
-               sg_pcopy_from_buffer(sg_dump_data,
-                                    sg_nents(sg_dump_data),
-                                    fw_error_dump->trans_ptr->data,
-                                    fw_error_dump->trans_ptr->len,
-                                    fw_error_dump->op_mode_len);
+               if (fw_error_dump->trans_ptr)
+                       sg_pcopy_from_buffer(sg_dump_data,
+                                            sg_nents(sg_dump_data),
+                                            fw_error_dump->trans_ptr->data,
+                                            fw_error_dump->trans_ptr->len,
+                                            fw_error_dump->op_mode_len);
                dev_coredumpsg(mvm->trans->dev, sg_dump_data, file_len,
                               GFP_KERNEL);
        }
index 872066317fa5a88c5c21584560aa81c7b67ae239..45cb4f476e761b18dd7a2326ff01cc571e9b3df9 100644 (file)
@@ -190,7 +190,7 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
         * CPU2 paging CSS
         * CPU2 paging image (including instruction and data)
         */
-       for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
+       for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
                if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
                        sec_idx++;
                        break;
@@ -201,7 +201,7 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
         * If paging is enabled there should be at least 2 more sections left
         * (one for CSS and one for Paging data)
         */
-       if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
+       if (sec_idx >= image->num_sec - 1) {
                IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
                iwl_free_fw_paging(mvm);
                return -EINVAL;
@@ -214,6 +214,10 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
        memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
               image->sec[sec_idx].data,
               mvm->fw_paging_db[0].fw_paging_size);
+       dma_sync_single_for_device(mvm->trans->dev,
+                                  mvm->fw_paging_db[0].fw_paging_phys,
+                                  mvm->fw_paging_db[0].fw_paging_size,
+                                  DMA_BIDIRECTIONAL);
 
        IWL_DEBUG_FW(mvm,
                     "Paging: copied %d CSS bytes to first block\n",
@@ -228,9 +232,16 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
         * loop stop at num_of_paging_blk since that last block is not full.
         */
        for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
-               memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+               struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
+
+               memcpy(page_address(block->fw_paging_block),
                       image->sec[sec_idx].data + offset,
-                      mvm->fw_paging_db[idx].fw_paging_size);
+                      block->fw_paging_size);
+               dma_sync_single_for_device(mvm->trans->dev,
+                                          block->fw_paging_phys,
+                                          block->fw_paging_size,
+                                          DMA_BIDIRECTIONAL);
 
                IWL_DEBUG_FW(mvm,
                             "Paging: copied %d paging bytes to block %d\n",
@@ -242,9 +253,15 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
 
        /* copy the last paging block */
        if (mvm->num_of_pages_in_last_blk > 0) {
-               memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
+               struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];
+
+               memcpy(page_address(block->fw_paging_block),
                       image->sec[sec_idx].data + offset,
                       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
+               dma_sync_single_for_device(mvm->trans->dev,
+                                          block->fw_paging_phys,
+                                          block->fw_paging_size,
+                                          DMA_BIDIRECTIONAL);
 
                IWL_DEBUG_FW(mvm,
                             "Paging: copied %d pages in the last block %d\n",
@@ -259,9 +276,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
 {
        struct page *block;
        dma_addr_t phys = 0;
-       int blk_idx = 0;
-       int order, num_of_pages;
-       int dma_enabled;
+       int blk_idx, order, num_of_pages, size, dma_enabled;
 
        if (mvm->fw_paging_db[0].fw_paging_block)
                return 0;
@@ -272,9 +287,8 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
        BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
 
        num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
-       mvm->num_of_paging_blk = ((num_of_pages - 1) /
-                                   NUM_OF_PAGE_PER_GROUP) + 1;
-
+       mvm->num_of_paging_blk =
+               DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
        mvm->num_of_pages_in_last_blk =
                num_of_pages -
                NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
@@ -284,46 +298,13 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
                     mvm->num_of_paging_blk,
                     mvm->num_of_pages_in_last_blk);
 
-       /* allocate block of 4Kbytes for paging CSS */
-       order = get_order(FW_PAGING_SIZE);
-       block = alloc_pages(GFP_KERNEL, order);
-       if (!block) {
-               /* free all the previous pages since we failed */
-               iwl_free_fw_paging(mvm);
-               return -ENOMEM;
-       }
-
-       mvm->fw_paging_db[blk_idx].fw_paging_block = block;
-       mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
-
-       if (dma_enabled) {
-               phys = dma_map_page(mvm->trans->dev, block, 0,
-                                   PAGE_SIZE << order, DMA_BIDIRECTIONAL);
-               if (dma_mapping_error(mvm->trans->dev, phys)) {
-                       /*
-                        * free the previous pages and the current one since
-                        * we failed to map_page.
-                        */
-                       iwl_free_fw_paging(mvm);
-                       return -ENOMEM;
-               }
-               mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
-       } else {
-               mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
-                       blk_idx << BLOCK_2_EXP_SIZE;
-       }
-
-       IWL_DEBUG_FW(mvm,
-                    "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
-                    order);
-
        /*
-        * allocate blocks in dram.
-        * since that CSS allocated in fw_paging_db[0] loop start from index 1
+        * Allocate CSS and paging blocks in DRAM.
         */
-       for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
-               /* allocate block of PAGING_BLOCK_SIZE (32K) */
-               order = get_order(PAGING_BLOCK_SIZE);
+       for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
+               /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
+               size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
+               order = get_order(size);
                block = alloc_pages(GFP_KERNEL, order);
                if (!block) {
                        /* free all the previous pages since we failed */
@@ -332,7 +313,7 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
                }
 
                mvm->fw_paging_db[blk_idx].fw_paging_block = block;
-               mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
+               mvm->fw_paging_db[blk_idx].fw_paging_size = size;
 
                if (dma_enabled) {
                        phys = dma_map_page(mvm->trans->dev, block, 0,
@@ -353,9 +334,14 @@ static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
                                blk_idx << BLOCK_2_EXP_SIZE;
                }
 
-               IWL_DEBUG_FW(mvm,
-                            "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
-                            order);
+               if (!blk_idx)
+                       IWL_DEBUG_FW(mvm,
+                                    "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
+                                    order);
+               else
+                       IWL_DEBUG_FW(mvm,
+                                    "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
+                                    order);
        }
 
        return 0;
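The open-coded ((num_of_pages - 1) / NUM_OF_PAGE_PER_GROUP) + 1 above is replaced with DIV_ROUND_UP(). A small standalone check that the two forms agree for positive inputs; the macro body matches the kernel's definition:

#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int n;

        /* For positive n and d this matches the old open-coded form
         * used to compute num_of_paging_blk. */
        for (n = 1; n <= 100; n++)
                assert(DIV_ROUND_UP(n, 8) == ((n - 1) / 8) + 1);

        printf("33 pages / 8 per group -> %d blocks\n", DIV_ROUND_UP(33, 8));
        return 0;
}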
@@ -475,80 +461,60 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
        struct iwl_mvm *mvm =
                container_of(notif_wait, struct iwl_mvm, notif_wait);
        struct iwl_mvm_alive_data *alive_data = data;
-       struct mvm_alive_resp_ver1 *palive1;
-       struct mvm_alive_resp_ver2 *palive2;
+       struct mvm_alive_resp_v3 *palive3;
        struct mvm_alive_resp *palive;
+       struct iwl_umac_alive *umac;
+       struct iwl_lmac_alive *lmac1;
+       struct iwl_lmac_alive *lmac2 = NULL;
+       u16 status;
+
+       if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
+               palive = (void *)pkt->data;
+               umac = &palive->umac_data;
+               lmac1 = &palive->lmac_data[0];
+               lmac2 = &palive->lmac_data[1];
+               status = le16_to_cpu(palive->status);
+       } else {
+               palive3 = (void *)pkt->data;
+               umac = &palive3->umac_data;
+               lmac1 = &palive3->lmac_data;
+               status = le16_to_cpu(palive3->status);
+       }
 
-       if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
-               palive1 = (void *)pkt->data;
+       mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr);
+       if (lmac2)
+               mvm->error_event_table[1] =
+                       le32_to_cpu(lmac2->error_event_table_ptr);
+       mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr);
+       mvm->sf_space.addr = le32_to_cpu(lmac1->st_fwrd_addr);
+       mvm->sf_space.size = le32_to_cpu(lmac1->st_fwrd_size);
 
-               mvm->support_umac_log = false;
-               mvm->error_event_table =
-                       le32_to_cpu(palive1->error_event_table_ptr);
-               mvm->log_event_table =
-                       le32_to_cpu(palive1->log_event_table_ptr);
-               alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);
+       mvm->umac_error_event_table = le32_to_cpu(umac->error_info_addr);
 
-               alive_data->valid = le16_to_cpu(palive1->status) ==
-                                   IWL_ALIVE_STATUS_OK;
-               IWL_DEBUG_FW(mvm,
-                            "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
-                            le16_to_cpu(palive1->status), palive1->ver_type,
-                            palive1->ver_subtype, palive1->flags);
-       } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
-               palive2 = (void *)pkt->data;
-
-               mvm->error_event_table =
-                       le32_to_cpu(palive2->error_event_table_ptr);
-               mvm->log_event_table =
-                       le32_to_cpu(palive2->log_event_table_ptr);
-               alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
-               mvm->umac_error_event_table =
-                       le32_to_cpu(palive2->error_info_addr);
-               mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
-               mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);
-
-               alive_data->valid = le16_to_cpu(palive2->status) ==
-                                   IWL_ALIVE_STATUS_OK;
-               if (mvm->umac_error_event_table)
-                       mvm->support_umac_log = true;
+       alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
+       alive_data->valid = status == IWL_ALIVE_STATUS_OK;
+       if (mvm->umac_error_event_table)
+               mvm->support_umac_log = true;
 
-               IWL_DEBUG_FW(mvm,
-                            "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
-                            le16_to_cpu(palive2->status), palive2->ver_type,
-                            palive2->ver_subtype, palive2->flags);
+       IWL_DEBUG_FW(mvm,
+                    "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
+                    status, lmac1->ver_type, lmac1->ver_subtype);
 
-               IWL_DEBUG_FW(mvm,
-                            "UMAC version: Major - 0x%x, Minor - 0x%x\n",
-                            palive2->umac_major, palive2->umac_minor);
-       } else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
-               palive = (void *)pkt->data;
+       if (lmac2)
+               IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");
 
-               mvm->error_event_table =
-                       le32_to_cpu(palive->error_event_table_ptr);
-               mvm->log_event_table =
-                       le32_to_cpu(palive->log_event_table_ptr);
-               alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
-               mvm->umac_error_event_table =
-                       le32_to_cpu(palive->error_info_addr);
-               mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
-               mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);
-
-               alive_data->valid = le16_to_cpu(palive->status) ==
-                                   IWL_ALIVE_STATUS_OK;
-               if (mvm->umac_error_event_table)
-                       mvm->support_umac_log = true;
+       IWL_DEBUG_FW(mvm,
+                    "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+                    le32_to_cpu(umac->umac_major),
+                    le32_to_cpu(umac->umac_minor));
 
-               IWL_DEBUG_FW(mvm,
-                            "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
-                            le16_to_cpu(palive->status), palive->ver_type,
-                            palive->ver_subtype, palive->flags);
+       return true;
+}
 
-               IWL_DEBUG_FW(mvm,
-                            "UMAC version: Major - 0x%x, Minor - 0x%x\n",
-                            le32_to_cpu(palive->umac_major),
-                            le32_to_cpu(palive->umac_minor));
-       }
+static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
+                                  struct iwl_rx_packet *pkt, void *data)
+{
+       WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
 
        return true;
 }
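iwl_alive_fn() now distinguishes the v3 and v4 alive layouts purely by payload size: v4 carries two LMAC blocks (CDB), v3 one. A compact standalone sketch of that dispatch, with deliberately simplified struct contents; the field sets here are illustrative, not the full API:

#include <stdint.h>
#include <stdio.h>

struct lmac { uint32_t error_event_table_ptr; };
struct umac { uint32_t error_info_addr; };
struct alive_v3 { uint16_t status, flags; struct lmac lmac1; struct umac u; };
struct alive_v4 { uint16_t status, flags; struct lmac lmac[2]; struct umac u; };

static void handle_alive(const void *payload, size_t len)
{
        const struct lmac *lmac1, *lmac2 = NULL;
        const struct umac *umac;
        uint16_t status;

        if (len == sizeof(struct alive_v4)) {   /* v4: two LMACs (CDB) */
                const struct alive_v4 *a = payload;

                lmac1 = &a->lmac[0];
                lmac2 = &a->lmac[1];
                umac = &a->u;
                status = a->status;
        } else {                                /* v3: single LMAC */
                const struct alive_v3 *a = payload;

                lmac1 = &a->lmac1;
                umac = &a->u;
                status = a->status;
        }

        printf("status %u, lmac1 err 0x%x, umac err 0x%x, cdb: %s\n",
               (unsigned)status,
               (unsigned)lmac1->error_event_table_ptr,
               (unsigned)umac->error_info_addr,
               lmac2 ? "yes" : "no");
}

int main(void)
{
        struct alive_v4 a = { .status = 1 };

        handle_alive(&a, sizeof(a));
        return 0;
}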
@@ -568,6 +534,48 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
        return false;
 }
 
+static int iwl_mvm_init_paging(struct iwl_mvm *mvm)
+{
+       const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode];
+       int ret;
+
+       /*
+        * Configure and operate fw paging mechanism.
+        * The driver configures the paging flow only once.
+        * The CPU2 paging image is included in the IWL_UCODE_INIT image.
+        */
+       if (!fw->paging_mem_size)
+               return 0;
+
+       /*
+        * When dma is not enabled, the driver needs to copy / write
+        * the downloaded / uploaded page to / from the smem.
+        * This gets the location where the pages are
+        * stored.
+        */
+       if (!is_device_dma_capable(mvm->trans->dev)) {
+               ret = iwl_trans_get_paging_item(mvm);
+               if (ret) {
+                       IWL_ERR(mvm, "failed to get FW paging item\n");
+                       return ret;
+               }
+       }
+
+       ret = iwl_save_fw_paging(mvm, fw);
+       if (ret) {
+               IWL_ERR(mvm, "failed to save the FW paging image\n");
+               return ret;
+       }
+
+       ret = iwl_send_paging_cmd(mvm, fw);
+       if (ret) {
+               IWL_ERR(mvm, "failed to send the paging cmd\n");
+               iwl_free_fw_paging(mvm);
+               return ret;
+       }
+
+       return 0;
+}
+
 static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
                                         enum iwl_ucode_type ucode_type)
 {
@@ -638,40 +646,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 
        iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
 
-       /*
-        * configure and operate fw paging mechanism.
-        * driver configures the paging flow only once, CPU2 paging image
-        * included in the IWL_UCODE_INIT image.
-        */
-       if (fw->paging_mem_size) {
-               /*
-                * When dma is not enabled, the driver needs to copy / write
-                * the downloaded / uploaded page to / from the smem.
-                * This gets the location of the place were the pages are
-                * stored.
-                */
-               if (!is_device_dma_capable(mvm->trans->dev)) {
-                       ret = iwl_trans_get_paging_item(mvm);
-                       if (ret) {
-                               IWL_ERR(mvm, "failed to get FW paging item\n");
-                               return ret;
-                       }
-               }
-
-               ret = iwl_save_fw_paging(mvm, fw);
-               if (ret) {
-                       IWL_ERR(mvm, "failed to save the FW paging image\n");
-                       return ret;
-               }
-
-               ret = iwl_send_paging_cmd(mvm, fw);
-               if (ret) {
-                       IWL_ERR(mvm, "failed to send the paging cmd\n");
-                       iwl_free_fw_paging(mvm);
-                       return ret;
-               }
-       }
-
        /*
         * Note: all the queues are enabled as part of the interface
         * initialization, but in firmware restart scenarios they
@@ -829,6 +803,75 @@ out:
        return ret;
 }
 
+int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+{
+       struct iwl_notification_wait init_wait;
+       struct iwl_nvm_access_complete_cmd nvm_complete = {};
+       static const u16 init_complete[] = {
+               INIT_COMPLETE_NOTIF,
+       };
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       iwl_init_notification_wait(&mvm->notif_wait,
+                                  &init_wait,
+                                  init_complete,
+                                  ARRAY_SIZE(init_complete),
+                                  iwl_wait_init_complete,
+                                  NULL);
+
+       /* Will also start the device */
+       ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+       if (ret) {
+               IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+               goto error;
+       }
+
+       /* TODO: remove when integrating context info */
+       ret = iwl_mvm_init_paging(mvm);
+       if (ret) {
+               IWL_ERR(mvm, "Failed to init paging: %d\n",
+                       ret);
+               goto error;
+       }
+
+       /* Read the NVM only at driver load time, no need to do this twice */
+       if (read_nvm) {
+               /* Read nvm */
+               ret = iwl_nvm_init(mvm, true);
+               if (ret) {
+                       IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+                       goto error;
+               }
+       }
+
+       /* In case we read the NVM from an external file, load it to the NIC */
+       if (mvm->nvm_file_name)
+               iwl_mvm_load_nvm_to_nic(mvm);
+
+       ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
+       if (WARN_ON(ret))
+               goto error;
+
+       ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+                                               NVM_ACCESS_COMPLETE), 0,
+                                  sizeof(nvm_complete), &nvm_complete);
+       if (ret) {
+               IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
+                       ret);
+               goto error;
+       }
+
+       /* We wait for the INIT complete notification */
+       return iwl_wait_notification(&mvm->notif_wait, &init_wait,
+                                    MVM_UCODE_ALIVE_TIMEOUT);
+
+error:
+       iwl_remove_notification(&mvm->notif_wait, &init_wait);
+       return ret;
+}
+
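iwl_run_unified_mvm_ucode() follows the usual notification-wait discipline: arm the waiter first, run the work that eventually triggers the notification, wait with a timeout on success, and disarm the waiter on every error path. A toy standalone model of that flow; all names are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct waiter { bool armed; bool fired; };

static void wait_init(struct waiter *w)   { w->armed = true; w->fired = false; }
static void wait_remove(struct waiter *w) { w->armed = false; }

static int trigger_work(struct waiter *w)
{
        w->fired = true;        /* stands in for the INIT_COMPLETE notif */
        return 0;               /* nonzero would take the error path */
}

static int wait_for(const struct waiter *w)
{
        return w->fired ? 0 : -1;       /* real code blocks with a timeout */
}

int main(void)
{
        struct waiter w;
        int ret;

        wait_init(&w);          /* arm the waiter before triggering */
        ret = trigger_work(&w); /* load ucode, init paging, NVM, ... */
        if (ret) {
                wait_remove(&w);        /* error path must disarm it */
                return 1;
        }
        return wait_for(&w) ? 1 : 0;
}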
 static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
                                          struct iwl_rx_packet *pkt)
 {
@@ -1089,23 +1132,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
        return ret;
 }
 
-int iwl_mvm_up(struct iwl_mvm *mvm)
+static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
 {
-       int ret, i;
-       struct ieee80211_channel *chan;
-       struct cfg80211_chan_def chandef;
-
-       lockdep_assert_held(&mvm->mutex);
+       int ret;
 
-       ret = iwl_trans_start_hw(mvm->trans);
-       if (ret)
-               return ret;
+       if (iwl_mvm_has_new_tx_api(mvm))
+               return iwl_run_unified_mvm_ucode(mvm, false);
 
-       /*
-        * If we haven't completed the run of the init ucode during
-        * module loading, load init ucode now
-        * (for example, if we were in RFKILL)
-        */
        ret = iwl_run_init_mvm_ucode(mvm, false);
 
        if (iwlmvm_mod_params.init_dbg)
@@ -1116,7 +1149,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                /* this can't happen */
                if (WARN_ON(ret > 0))
                        ret = -ERFKILL;
-               goto error;
+               return ret;
        }
 
        /*
@@ -1127,9 +1160,28 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        _iwl_trans_stop_device(mvm->trans, false);
        ret = _iwl_trans_start_hw(mvm->trans, false);
        if (ret)
-               goto error;
+               return ret;
 
        ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+       if (ret)
+               return ret;
+
+       return iwl_mvm_init_paging(mvm);
+}
+
+int iwl_mvm_up(struct iwl_mvm *mvm)
+{
+       int ret, i;
+       struct ieee80211_channel *chan;
+       struct cfg80211_chan_def chandef;
+
+       lockdep_assert_held(&mvm->mutex);
+
+       ret = iwl_trans_start_hw(mvm->trans);
+       if (ret)
+               return ret;
+
+       ret = iwl_mvm_load_rt_fw(mvm);
        if (ret) {
                IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
                goto error;
@@ -1156,13 +1208,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                goto error;
 
        /* Send phy db control command and then phy db calibration*/
-       ret = iwl_send_phy_db_data(mvm->phy_db);
-       if (ret)
-               goto error;
+       if (!iwl_mvm_has_new_tx_api(mvm)) {
+               ret = iwl_send_phy_db_data(mvm->phy_db);
+               if (ret)
+                       goto error;
 
-       ret = iwl_send_phy_cfg_cmd(mvm);
-       if (ret)
-               goto error;
+               ret = iwl_send_phy_cfg_cmd(mvm);
+               if (ret)
+                       goto error;
+       }
 
        /* Init RSS configuration */
        if (iwl_mvm_has_new_rx_api(mvm)) {
@@ -1348,4 +1402,9 @@ void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
                       le32_to_cpu(mfuart_notif->external_ver),
                       le32_to_cpu(mfuart_notif->status),
                       le32_to_cpu(mfuart_notif->duration));
+
+       if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
+               IWL_DEBUG_INFO(mvm,
+                              "MFUART: image size: 0x%08x\n",
+                              le32_to_cpu(mfuart_notif->image_size));
 }
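The payload-length check above is how the v2 field is gated: image_size is only read when the notification is v2-sized, so older firmware keeps working unchanged. A standalone sketch of the same pattern with simplified structs; the field sets are illustrative:

#include <stdint.h>
#include <stdio.h>

struct mfuart_v1 { uint32_t installed_ver, external_ver, status, duration; };
struct mfuart_v2 { struct mfuart_v1 v1; uint32_t image_size; };

static void handle_mfuart(const void *payload, size_t len)
{
        const struct mfuart_v1 *n = payload;

        printf("status %u\n", (unsigned)n->status);

        /* Only trust the trailing field when the payload is v2-sized. */
        if (len == sizeof(struct mfuart_v2)) {
                const struct mfuart_v2 *n2 = payload;

                printf("image size 0x%08x\n", (unsigned)n2->image_size);
        }
}

int main(void)
{
        struct mfuart_v2 n = { .image_size = 0x1000 };

        handle_mfuart(&n, sizeof(n));
        return 0;
}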
index 4a0874e4073169632e45b8e09e9a3ff98343594e..99132ea16ede08e0e7ebd5f8734eeb0ab204e0fa 100644 (file)
@@ -531,38 +531,26 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        lockdep_assert_held(&mvm->mutex);
 
+       /*
+        * If DQA is supported - queues were already disabled, since in
+        * DQA-mode the queues are a property of the STA and not of the
+        * vif, and at this point the STA was already deleted
+        */
+       if (iwl_mvm_is_dqa_supported(mvm))
+               return;
+
        switch (vif->type) {
        case NL80211_IFTYPE_P2P_DEVICE:
-               if (!iwl_mvm_is_dqa_supported(mvm))
-                       iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
-                                           IWL_MVM_OFFCHANNEL_QUEUE,
-                                           IWL_MAX_TID_COUNT, 0);
-               else
-                       iwl_mvm_disable_txq(mvm,
-                                           IWL_MVM_DQA_P2P_DEVICE_QUEUE,
-                                           vif->hw_queue[0], IWL_MAX_TID_COUNT,
-                                           0);
+               iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
+                                   IWL_MVM_OFFCHANNEL_QUEUE,
+                                   IWL_MAX_TID_COUNT, 0);
 
                break;
        case NL80211_IFTYPE_AP:
                iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
                                    IWL_MAX_TID_COUNT, 0);
-
-               if (iwl_mvm_is_dqa_supported(mvm))
-                       iwl_mvm_disable_txq(mvm,
-                                           IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
-                                           vif->hw_queue[0], IWL_MAX_TID_COUNT,
-                                           0);
                /* fall through */
        default:
-               /*
-                * If DQA is supported - queues were already disabled, since in
-                * DQA-mode the queues are a property of the STA and not of the
-                * vif, and at this point the STA was already deleted
-                */
-               if (iwl_mvm_is_dqa_supported(mvm))
-                       break;
-
                for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
                        iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
                                            vif->hw_queue[ac],
@@ -991,7 +979,7 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
 }
 
 static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
-                                    struct iwl_mac_beacon_cmd_v6 *beacon_cmd,
+                                    __le32 *tim_index, __le32 *tim_size,
                                     u8 *beacon, u32 frame_size)
 {
        u32 tim_idx;
@@ -1008,8 +996,8 @@ static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
 
        /* If TIM field was found, set variables */
        if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
-               beacon_cmd->tim_idx = cpu_to_le32(tim_idx);
-               beacon_cmd->tim_size = cpu_to_le32((u32)beacon[tim_idx+1]);
+               *tim_index = cpu_to_le32(tim_idx);
+               *tim_size = cpu_to_le32((u32)beacon[tim_idx + 1]);
        } else {
                IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
        }
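The helper now reports the TIM position through out-parameters instead of writing into one specific command struct, so both the v6 layout and the newer beacon commands can reuse it. A standalone sketch of the underlying IE walk ({id, len, payload} triplets until the TIM element), using a toy buffer; the constant value for WLAN_EID_TIM is the standard 802.11 one:

#include <stdint.h>
#include <stdio.h>

#define EID_TIM 5       /* WLAN_EID_TIM */

static int find_tim(const uint8_t *ies, uint32_t len,
                    uint32_t *tim_index, uint32_t *tim_size)
{
        uint32_t i = 0;

        /* Each element is {id, len, payload}; hop element to element. */
        while (i + 1 < len && ies[i] != EID_TIM)
                i += 2 + ies[i + 1];

        if (i + 1 >= len || ies[i] != EID_TIM)
                return -1;
        *tim_index = i;
        *tim_size = ies[i + 1];
        return 0;
}

int main(void)
{
        const uint8_t ies[] = { 0, 3, 'f', 'o', 'o',    /* SSID */
                                5, 4, 0, 1, 0, 0 };     /* TIM */
        uint32_t idx, size;

        if (!find_tim(ies, sizeof(ies), &idx, &size))
                printf("TIM at %u, size %u\n", idx, size);
        return 0;
}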
@@ -1043,8 +1031,9 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
        };
        union {
                struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6;
-               struct iwl_mac_beacon_cmd beacon_cmd;
+               struct iwl_mac_beacon_cmd_v7 beacon_cmd;
        } u = {};
+       struct iwl_mac_beacon_cmd beacon_cmd;
        struct ieee80211_tx_info *info;
        u32 beacon_skb_len;
        u32 rate, tx_flags;
@@ -1054,6 +1043,46 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
 
        beacon_skb_len = beacon->len;
 
+       if (fw_has_capa(&mvm->fw->ucode_capa,
+                       IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) {
+               u32 csa_offset, ecsa_offset;
+
+               csa_offset = iwl_mvm_find_ie_offset(beacon->data,
+                                                   WLAN_EID_CHANNEL_SWITCH,
+                                                   beacon_skb_len);
+               ecsa_offset =
+                       iwl_mvm_find_ie_offset(beacon->data,
+                                              WLAN_EID_EXT_CHANSWITCH_ANN,
+                                              beacon_skb_len);
+
+               if (iwl_mvm_has_new_tx_api(mvm)) {
+                       beacon_cmd.data.template_id =
+                               cpu_to_le32((u32)mvmvif->id);
+                       beacon_cmd.data.ecsa_offset = cpu_to_le32(ecsa_offset);
+                       beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset);
+                       beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon_skb_len);
+                       if (vif->type == NL80211_IFTYPE_AP)
+                               iwl_mvm_mac_ctxt_set_tim(mvm,
+                                                        &beacon_cmd.data.tim_idx,
+                                                        &beacon_cmd.data.tim_size,
+                                                        beacon->data,
+                                                        beacon_skb_len);
+                       cmd.len[0] = sizeof(beacon_cmd);
+                       cmd.data[0] = &beacon_cmd;
+                       goto send;
+
+               } else {
+                       u.beacon_cmd.data.ecsa_offset =
+                               cpu_to_le32(ecsa_offset);
+                       u.beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset);
+                       cmd.len[0] = sizeof(u.beacon_cmd);
+                       cmd.data[0] = &u;
+               }
+       } else {
+               cmd.len[0] = sizeof(u.beacon_cmd_v6);
+               cmd.data[0] = &u;
+       }
+
        /* TODO: for now the beacon template id is set to be the mac context id.
         * Might be better to handle it as another resource ... */
        u.beacon_cmd_v6.template_id = cpu_to_le32((u32)mvmvif->id);
@@ -1092,29 +1121,13 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
 
        /* Set up TX beacon command fields */
        if (vif->type == NL80211_IFTYPE_AP)
-               iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6,
+               iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6.tim_idx,
+                                        &u.beacon_cmd_v6.tim_size,
                                         beacon->data,
                                         beacon_skb_len);
 
+send:
        /* Submit command */
-
-       if (fw_has_capa(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) {
-               u.beacon_cmd.csa_offset =
-                       cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
-                                                   WLAN_EID_CHANNEL_SWITCH,
-                                                   beacon_skb_len));
-               u.beacon_cmd.ecsa_offset =
-                       cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
-                                                   WLAN_EID_EXT_CHANSWITCH_ANN,
-                                                   beacon_skb_len));
-
-               cmd.len[0] = sizeof(u.beacon_cmd);
-       } else {
-               cmd.len[0] = sizeof(u.beacon_cmd_v6);
-       }
-
-       cmd.data[0] = &u;
        cmd.dataflags[0] = 0;
        cmd.len[1] = beacon_skb_len;
        cmd.data[1] = beacon->data;
@@ -1565,7 +1578,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
        rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
        rx_status.device_timestamp = le32_to_cpu(sb->system_time);
        rx_status.band =
-               (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
+               (sb->band & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
                                NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
        rx_status.freq =
                ieee80211_channel_to_frequency(le16_to_cpu(sb->channel),
index 45122dafe9226278abed80ebcba6cd5b49b19f4c..d37b1695c64eac9096cfe7c11b3ef06764d4626a 100644 (file)
@@ -463,6 +463,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                                    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
        hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
                IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
+
+       hw->radiotap_timestamp.units_pos =
+               IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
+               IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
+       /* 22 is the accuracy for CCK frames; OFDM is better (only 8) */
+       hw->radiotap_timestamp.accuracy = 22;
+
        hw->rate_control_algorithm = "iwl-mvm-rs";
        hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
        hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
@@ -670,7 +677,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
                hw->wiphy->wowlan = &mvm->wowlan;
        }
 
-       if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
+       if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec &&
            mvm->trans->ops->d3_suspend &&
            mvm->trans->ops->d3_resume &&
            device_can_wakeup(mvm->trans->dev)) {
@@ -1203,8 +1210,6 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
        /* the fw is stopped, the aux sta is dead: clean up driver state */
        iwl_mvm_del_aux_sta(mvm);
 
-       iwl_free_fw_paging(mvm);
-
        /*
         * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
         * won't be called in this case).
@@ -2003,16 +2008,16 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
                if (fw_has_capa(&mvm->fw->ucode_capa,
                                IWL_UCODE_TLV_CAPA_UMAC_SCAN))
                        iwl_mvm_config_scan(mvm);
-       } else if (changes & BSS_CHANGED_BEACON_INFO) {
+       }
+
+       if (changes & BSS_CHANGED_BEACON_INFO) {
                /*
-                * We received a beacon _after_ association so
+                * We received a beacon from the associated AP so
                 * remove the session protection.
                 */
                iwl_mvm_remove_time_event(mvm, mvmvif,
                                          &mvmvif->time_event_data);
-       }
 
-       if (changes & BSS_CHANGED_BEACON_INFO) {
                iwl_mvm_sf_update(mvm, vif, false);
                WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
        }
@@ -2099,22 +2104,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
        if (ret)
                goto out_unbind;
 
-       /* enable the multicast queue, now that we have a station for it */
-       if (iwl_mvm_is_dqa_supported(mvm)) {
-               unsigned int wdg_timeout =
-                       iwl_mvm_get_wd_timeout(mvm, vif, false, false);
-               struct iwl_trans_txq_scd_cfg cfg = {
-                       .fifo = IWL_MVM_TX_FIFO_MCAST,
-                       .sta_id = mvmvif->bcast_sta.sta_id,
-                       .tid = IWL_MAX_TID_COUNT,
-                       .aggregate = false,
-                       .frame_limit = IWL_FRAME_LIMIT,
-               };
-
-               iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
-                                  &cfg, wdg_timeout);
-       }
-
        /* must be set before quota calculations */
        mvmvif->ap_ibss_active = true;
 
@@ -2547,6 +2536,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        int ret;
 
        IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
@@ -2575,8 +2565,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
        if (old_state == IEEE80211_STA_NONE &&
            new_state == IEEE80211_STA_NOTEXIST &&
            iwl_mvm_is_dqa_supported(mvm)) {
-               struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-
                iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
                flush_work(&mvm->add_stream_wk);
 
@@ -2587,6 +2575,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
        }
 
        mutex_lock(&mvm->mutex);
+       /* track whether or not the station is associated */
+       mvm_sta->associated = new_state >= IEEE80211_STA_ASSOC;
+
        if (old_state == IEEE80211_STA_NOTEXIST &&
            new_state == IEEE80211_STA_NONE) {
                /*
@@ -2636,11 +2627,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
                        mvmvif->ap_assoc_sta_count++;
                        iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
                }
+
+               iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+                                    true);
                ret = iwl_mvm_update_sta(mvm, vif, sta);
-               if (ret == 0)
-                       iwl_mvm_rs_rate_init(mvm, sta,
-                                            mvmvif->phy_ctxt->channel->band,
-                                            true);
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTHORIZED) {
 
index 4a9cb76b7611d29fd1dd8dcfd8c00e24a9158c4f..73a216524af2984a07aa321f152df12013314e9c 100644 (file)
@@ -739,8 +739,9 @@ struct iwl_mvm {
 
        enum iwl_ucode_type cur_ucode;
        bool ucode_loaded;
+       bool hw_registered;
        bool calibrating;
-       u32 error_event_table;
+       u32 error_event_table[2];
        u32 log_event_table;
        u32 umac_error_event_table;
        bool support_umac_log;
@@ -1217,6 +1218,19 @@ static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
        return mvm->trans->cfg->use_tfh;
 }
 
+static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
+{
+       /*
+        * TODO:
+        * The issue of how to determine CDB support is still not well defined.
+        * It may be that it will be for all next HW devices and it may be per
+        * FW compilation and it may also differ between different devices.
+        * For now take a ride on the new TX API and get back to it when
+        * it is well defined.
+        */
+       return iwl_mvm_has_new_tx_api(mvm);
+}
+
 static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
 {
 #ifdef CONFIG_THERMAL
@@ -1257,6 +1271,7 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
  ******************/
 /* uCode */
 int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
+int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
 
 /* Utils */
 int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
@@ -1657,8 +1672,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
  * Disable a TXQ.
  * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
  */
-void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
-                        u8 tid, u8 flags);
+int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+                       u8 tid, u8 flags);
 int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
 
 /* Return a bitmask with all the hw supported queues, except for the
@@ -1686,6 +1701,7 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 
 static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
 {
+       iwl_free_fw_paging(mvm);
        mvm->ucode_loaded = false;
        iwl_trans_stop_device(mvm->trans);
 }
index f14aada390c53ff8613793a3c6e6a31e84bbe994..4cd72d4cdc47c4de8fac948d22c59a6848e381d0 100644 (file)
@@ -466,6 +466,13 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
        HCMD_NAME(STORED_BEACON_NTF),
 };
 
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search.
+ */
+static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
+       HCMD_NAME(NVM_ACCESS_COMPLETE),
+};
+
 static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
        [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
        [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
@@ -474,6 +481,8 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
        [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
        [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
        [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
+       [REGULATORY_AND_NVM_GROUP] =
+               HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
 };
 
 /* this forward declaration can avoid to export the function */
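The "keep this array *SORTED*" comment is load-bearing: name lookup binary-searches the table, so an out-of-order entry would silently never be found. A standalone toy of such a lookup using bsearch(3); the table contents are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct hcmd_name { unsigned char cmd_id; const char *name; };

static int cmp(const void *key, const void *elt)
{
        unsigned char id = *(const unsigned char *)key;
        const struct hcmd_name *e = elt;

        return (int)id - (int)e->cmd_id;
}

static const struct hcmd_name names[] = {       /* must stay sorted */
        { 0x00, "NVM_ACCESS_COMPLETE" },
};

int main(void)
{
        unsigned char key = 0x00;
        const struct hcmd_name *e =
                bsearch(&key, names, sizeof(names) / sizeof(names[0]),
                        sizeof(names[0]), cmp);

        printf("%s\n", e ? e->name : "UNKNOWN");
        return 0;
}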
@@ -597,7 +606,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
                mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
        }
        mvm->sf_state = SF_UNINIT;
-       mvm->cur_ucode = IWL_UCODE_INIT;
+       if (iwl_mvm_has_new_tx_api(mvm))
+               mvm->cur_ucode = IWL_UCODE_REGULAR;
+       else
+               mvm->cur_ucode = IWL_UCODE_INIT;
        mvm->drop_bcn_ap_mode = true;
 
        mutex_init(&mvm->mutex);
@@ -720,7 +732,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
        mutex_lock(&mvm->mutex);
        iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
-       err = iwl_run_init_mvm_ucode(mvm, true);
+       if (iwl_mvm_has_new_tx_api(mvm))
+               err = iwl_run_unified_mvm_ucode(mvm, true);
+       else
+               err = iwl_run_init_mvm_ucode(mvm, true);
        if (!err || !iwlmvm_mod_params.init_dbg)
                iwl_mvm_stop_device(mvm);
        iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
@@ -743,6 +758,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        err = iwl_mvm_mac_setup_register(mvm);
        if (err)
                goto out_free;
+       mvm->hw_registered = true;
 
        min_backoff = calc_min_backoff(trans, cfg);
        iwl_mvm_thermal_initialize(mvm, min_backoff);
@@ -764,6 +780,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
  out_unregister:
        ieee80211_unregister_hw(mvm->hw);
+       mvm->hw_registered = false;
        iwl_mvm_leds_exit(mvm);
        iwl_mvm_thermal_exit(mvm);
  out_free:
@@ -1192,7 +1209,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
                reprobe->dev = mvm->trans->dev;
                INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
                schedule_work(&reprobe->work);
-       } else if (mvm->cur_ucode == IWL_UCODE_REGULAR) {
+       } else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
+                  mvm->hw_registered) {
                /* don't let the transport/FW power down */
                iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
 
index af6d10c23e5aa1d8264d609376373dadeec29693..e684811f8e8b3156b4f392d1fc5624a299d22465 100644 (file)
@@ -174,6 +174,14 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
        enum ieee80211_ac_numbers ac;
        bool tid_found = false;
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       /* set advanced pm flag with no uapsd ACs to enable ps-poll */
+       if (mvmvif->dbgfs_pm.use_ps_poll) {
+               cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+               return;
+       }
+#endif
+
        for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
                if (!mvmvif->queue_params[ac].uapsd)
                        continue;
@@ -204,16 +212,6 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
                }
        }
 
-       if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-               /* set advanced pm flag with no uapsd ACs to enable ps-poll */
-               if (mvmvif->dbgfs_pm.use_ps_poll)
-                       cmd->flags |=
-                               cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
-#endif
-               return;
-       }
-
        cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);
 
        if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
@@ -601,9 +599,8 @@ static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8* mac,
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        bool *disable_ps = _data;
 
-       if (mvmvif->phy_ctxt)
-               if (mvmvif->phy_ctxt->id < MAX_PHYS)
-                       *disable_ps |= mvmvif->ps_disabled;
+       if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX)
+               *disable_ps |= mvmvif->ps_disabled;
 }
 
 static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
@@ -611,6 +608,7 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_power_vifs *power_iterator = _data;
+       bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX;
 
        switch (ieee80211_vif_type_p2p(vif)) {
        case NL80211_IFTYPE_P2P_DEVICE:
@@ -621,34 +619,30 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
                /* only a single MAC of the same type */
                WARN_ON(power_iterator->ap_vif);
                power_iterator->ap_vif = vif;
-               if (mvmvif->phy_ctxt)
-                       if (mvmvif->phy_ctxt->id < MAX_PHYS)
-                               power_iterator->ap_active = true;
+               if (active)
+                       power_iterator->ap_active = true;
                break;
 
        case NL80211_IFTYPE_MONITOR:
                /* only a single MAC of the same type */
                WARN_ON(power_iterator->monitor_vif);
                power_iterator->monitor_vif = vif;
-               if (mvmvif->phy_ctxt)
-                       if (mvmvif->phy_ctxt->id < MAX_PHYS)
-                               power_iterator->monitor_active = true;
+               if (active)
+                       power_iterator->monitor_active = true;
                break;
 
        case NL80211_IFTYPE_P2P_CLIENT:
                /* only a single MAC of the same type */
                WARN_ON(power_iterator->p2p_vif);
                power_iterator->p2p_vif = vif;
-               if (mvmvif->phy_ctxt)
-                       if (mvmvif->phy_ctxt->id < MAX_PHYS)
-                               power_iterator->p2p_active = true;
+               if (active)
+                       power_iterator->p2p_active = true;
                break;
 
        case NL80211_IFTYPE_STATION:
                power_iterator->bss_vif = vif;
-               if (mvmvif->phy_ctxt)
-                       if (mvmvif->phy_ctxt->id < MAX_PHYS)
-                               power_iterator->bss_active = true;
+               if (active)
+                       power_iterator->bss_active = true;
                break;
 
        default:
index 227c5ed9cbe6366611682c717d81336df83318d3..ce907c58ebf6d83b632313f5af9c134a5ea2e109 100644 (file)
@@ -161,9 +161,6 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                          struct rs_rate *rate,
                          const struct rs_tx_column *next_col)
 {
-       struct iwl_mvm_sta *mvmsta;
-       struct iwl_mvm_vif *mvmvif;
-
        if (!sta->ht_cap.ht_supported)
                return false;
 
@@ -176,9 +173,6 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
                return false;
 
-       mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
-
        if (mvm->nvm_data->sku_cap_mimo_disabled)
                return false;
 
@@ -978,7 +972,9 @@ static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
 
                /* Find the previous rate that is in the rate mask */
                i = index - 1;
-               for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+               if (i >= 0)
+                       mask = BIT(i);
+               for (; i >= 0; i--, mask >>= 1) {
                        if (rate_mask & mask) {
                                low = i;
                                break;
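
The fix above addresses a classic C pitfall: when index is 0, the old loop evaluated (1 << -1) in the for-initializer, which is undefined behaviour even though the loop body never executes. A minimal stand-alone sketch of the guarded pattern, using a hypothetical helper rather than the driver's rate tables:

#include <stdint.h>
#include <stdio.h>

/* Return the highest set bit strictly below 'index', or -1 if none. */
static int find_prev_set_bit(uint16_t bits, int index)
{
	int i = index - 1;
	uint16_t mask = 0;

	if (i >= 0)			/* guard: 1 << -1 is undefined */
		mask = 1u << i;
	for (; i >= 0; i--, mask >>= 1)
		if (bits & mask)
			return i;
	return -1;
}

int main(void)
{
	printf("%d\n", find_prev_set_bit(0x0d, 3));	/* prints 2 */
	printf("%d\n", find_prev_set_bit(0x0d, 0));	/* prints -1, no UB */
	return 0;
}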
@@ -3071,7 +3067,7 @@ static void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
 
 void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
 {
-       u8 nss = 0, mcs = 0;
+       u8 nss = 0;
 
        spin_lock(&mvm->drv_stats_lock);
 
@@ -3099,11 +3095,9 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
 
        if (rate & RATE_MCS_HT_MSK) {
                mvm->drv_rx_stats.ht_frames++;
-               mcs = rate & RATE_HT_MCS_RATE_CODE_MSK;
                nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1;
        } else if (rate & RATE_MCS_VHT_MSK) {
                mvm->drv_rx_stats.vht_frames++;
-               mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
                nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
                       RATE_VHT_MCS_NSS_POS) + 1;
        } else {
@@ -3624,6 +3618,8 @@ int rs_pretty_print_rate(char *buf, const u32 rate)
        } else if (rate & RATE_MCS_HT_MSK) {
                type = "HT";
                mcs = rate & RATE_HT_MCS_INDEX_MSK;
+               nss = ((rate & RATE_HT_MCS_NSS_MSK)
+                      >> RATE_HT_MCS_NSS_POS) + 1;
        } else {
                type = "Unknown"; /* shouldn't happen */
        }
index 0e60e38b2acf058aef19954ae6e9ccacb2ed0f3e..20473df79c945f8f48024785d9cac5c00508f9cc 100644 (file)
@@ -497,8 +497,7 @@ struct iwl_mvm_stat_data {
        struct iwl_mvm *mvm;
        __le32 mac_id;
        u8 beacon_filter_average_energy;
-       struct mvm_statistics_general_v8 *general;
-       struct mvm_statistics_load *load;
+       void *general;
 };
 
 static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
@@ -518,10 +517,26 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
         * the notification directly.
         */
        if (data->general) {
-               mvmvif->beacon_stats.num_beacons =
-                       le32_to_cpu(data->general->beacon_counter[mvmvif->id]);
-               mvmvif->beacon_stats.avg_signal =
-                       -data->general->beacon_average_energy[mvmvif->id];
+               u16 vif_id = mvmvif->id;
+
+               if (iwl_mvm_is_cdb_supported(mvm)) {
+                       struct mvm_statistics_general_cdb *general =
+                               data->general;
+
+                       mvmvif->beacon_stats.num_beacons =
+                               le32_to_cpu(general->beacon_counter[vif_id]);
+                       mvmvif->beacon_stats.avg_signal =
+                               -general->beacon_average_energy[vif_id];
+               } else {
+                       struct mvm_statistics_general_v8 *general =
+                               data->general;
+
+                       mvmvif->beacon_stats.num_beacons =
+                               le32_to_cpu(general->beacon_counter[vif_id]);
+                       mvmvif->beacon_stats.avg_signal =
+                               -general->beacon_average_energy[vif_id];
+               }
+
        }
 
        if (mvmvif->id != id)
@@ -571,6 +586,7 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
                ieee80211_cqm_rssi_notify(
                        vif,
                        NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+                       sig,
                        GFP_KERNEL);
        } else if (sig > thold &&
                   (last_event == 0 || sig > last_event + hyst)) {
@@ -580,6 +596,7 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
                ieee80211_cqm_rssi_notify(
                        vif,
                        NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+                       sig,
                        GFP_KERNEL);
        }
 }
@@ -615,48 +632,65 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
 void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
                                  struct iwl_rx_packet *pkt)
 {
-       struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data;
+       struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
        struct iwl_mvm_stat_data data = {
                .mvm = mvm,
        };
-       int expected_size = iwl_mvm_has_new_rx_api(mvm) ? sizeof(*stats) :
-                           sizeof(struct iwl_notif_statistics_v10);
-       u32 temperature;
+       int expected_size;
+
+       if (iwl_mvm_is_cdb_supported(mvm))
+               expected_size = sizeof(*stats);
+       else if (iwl_mvm_has_new_rx_api(mvm))
+               expected_size = sizeof(struct iwl_notif_statistics_v11);
+       else
+               expected_size = sizeof(struct iwl_notif_statistics_v10);
 
        if (iwl_rx_packet_payload_len(pkt) != expected_size)
                goto invalid;
 
-       temperature = le32_to_cpu(stats->general.radio_temperature);
        data.mac_id = stats->rx.general.mac_id;
        data.beacon_filter_average_energy =
-               stats->general.beacon_filter_average_energy;
+               stats->general.common.beacon_filter_average_energy;
 
        iwl_mvm_update_rx_statistics(mvm, &stats->rx);
 
-       mvm->radio_stats.rx_time = le64_to_cpu(stats->general.rx_time);
-       mvm->radio_stats.tx_time = le64_to_cpu(stats->general.tx_time);
+       mvm->radio_stats.rx_time = le64_to_cpu(stats->general.common.rx_time);
+       mvm->radio_stats.tx_time = le64_to_cpu(stats->general.common.tx_time);
        mvm->radio_stats.on_time_rf =
-               le64_to_cpu(stats->general.on_time_rf);
+               le64_to_cpu(stats->general.common.on_time_rf);
        mvm->radio_stats.on_time_scan =
-               le64_to_cpu(stats->general.on_time_scan);
+               le64_to_cpu(stats->general.common.on_time_scan);
 
        data.general = &stats->general;
        if (iwl_mvm_has_new_rx_api(mvm)) {
                int i;
-
-               data.load = &stats->load_stats;
+               u8 *energy;
+               __le32 *bytes, *air_time;
+
+               if (!iwl_mvm_is_cdb_supported(mvm)) {
+                       struct iwl_notif_statistics_v11 *v11 =
+                               (void *)&pkt->data;
+
+                       energy = (void *)&v11->load_stats.avg_energy;
+                       bytes = (void *)&v11->load_stats.byte_count;
+                       air_time = (void *)&v11->load_stats.air_time;
+               } else {
+                       energy = (void *)&stats->load_stats.avg_energy;
+                       bytes = (void *)&stats->load_stats.byte_count;
+                       air_time = (void *)&stats->load_stats.air_time;
+               }
 
                rcu_read_lock();
                for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
                        struct iwl_mvm_sta *sta;
 
-                       if (!data.load->avg_energy[i])
+                       if (!energy[i])
                                continue;
 
                        sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
                        if (!sta)
                                continue;
-                       sta->avg_energy = data.load->avg_energy[i];
+                       sta->avg_energy = energy[i];
                }
                rcu_read_unlock();
        }
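
The hunk above now copes with three statistics layouts (v10, v11 and CDB) that share a common prefix: the handler first picks the expected payload size for the firmware in use, then reads the per-version fields through typed views of the same buffer. A simplified stand-alone illustration of that dispatch, with made-up struct layouts rather than the real firmware API:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stats_v1 { uint32_t common; uint8_t avg_energy[4]; };
struct stats_v2 { uint32_t common; uint32_t extra; uint8_t avg_energy[4]; };

static size_t expected_size(int is_v2)
{
	return is_v2 ? sizeof(struct stats_v2) : sizeof(struct stats_v1);
}

/* Both variants share 'common'; pick the right view for the rest. */
static const uint8_t *avg_energy(const void *payload, int is_v2)
{
	if (is_v2)
		return ((const struct stats_v2 *)payload)->avg_energy;
	return ((const struct stats_v1 *)payload)->avg_energy;
}

int main(void)
{
	struct stats_v2 s = { 0, 0, { 1, 2, 3, 4 } };

	printf("%zu %u\n", expected_size(1), avg_energy(&s, 1)[2]);
	return 0;
}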
index 6c802cee900c925a7734e6a8f461a24f71c5ede6..d79e9c2a2654aed22fd8c359cb27a923cdcc5e50 100644 (file)
@@ -149,8 +149,17 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
        unsigned int headlen, fraglen, pad_len = 0;
        unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
-       if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD)
+       if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
                pad_len = 2;
+
+               /*
+                * If the device inserted padding it means that (it thought)
+                * the 802.11 header wasn't a multiple of 4 bytes long. In
+                * this case, reserve two bytes at the start of the SKB to
+                * align the payload properly in case we end up copying it.
+                */
+               skb_reserve(skb, pad_len);
+       }
        len -= pad_len;
 
        /* If frame is small enough to fit in skb->head, pull it completely.
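
The comment added above is easiest to check with concrete numbers: an 802.11 QoS data header is 26 bytes, two short of a 4-byte multiple, so the device inserts 2 bytes of padding; reserving the same 2 bytes of headroom makes headroom plus header land on a 4-byte boundary, keeping the copied payload (e.g. the IP header) aligned. A small sanity-check sketch of that arithmetic, with illustrative constants:

#include <assert.h>

int main(void)
{
	unsigned int hdrlen = 26;	/* e.g. an 802.11 QoS data header */
	unsigned int pad_len = (hdrlen & 3) ? 2 : 0;	/* device pad */

	/* Reserving pad_len of headroom re-aligns the payload start. */
	assert((pad_len + hdrlen) % 4 == 0);
	return 0;
}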
@@ -409,7 +418,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
 
        /* ignore nssn smaller than head sn - this can happen due to timeout */
        if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
-               return;
+               goto set_timer;
 
        while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
                int index = ssn % reorder_buf->buf_size;
@@ -432,6 +441,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
        }
        reorder_buf->head_sn = nssn;
 
+set_timer:
        if (reorder_buf->num_stored && !reorder_buf->removed) {
                u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
 
index fa97432054912b53493d6e0f75000c6def479d93..0a64efa844b7565bba9ba83666fca2316096aedd 100644 (file)
@@ -197,7 +197,7 @@ static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
        int *global_cnt = data;
 
        if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
-           mvmvif->phy_ctxt->id < MAX_PHYS)
+           mvmvif->phy_ctxt->id < NUM_PHY_CTX)
                *global_cnt += 1;
 }
 
@@ -943,18 +943,92 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
        return cpu_to_le32(rates);
 }
 
-int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
+                                   struct iwl_scan_dwell *dwell,
+                                   struct iwl_mvm_scan_timing_params *timing)
+{
+       dwell->active = timing->dwell_active;
+       dwell->passive = timing->dwell_passive;
+       dwell->fragmented = timing->dwell_fragmented;
+       dwell->extended = timing->dwell_extended;
+}
+
+static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
 {
-       struct iwl_scan_config *scan_config;
        struct ieee80211_supported_band *band;
-       int num_channels =
-               mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
-               mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
-       int ret, i, j = 0, cmd_size;
+       int i, j = 0;
+
+       band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
+       for (i = 0; i < band->n_channels; i++, j++)
+               channels[j] = band->channels[i].hw_value;
+       band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
+       for (i = 0; i < band->n_channels; i++, j++)
+               channels[j] = band->channels[i].hw_value;
+}
+
+static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
+                                    u32 flags, u8 channel_flags)
+{
+       enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+       struct iwl_scan_config *cfg = config;
+
+       cfg->flags = cpu_to_le32(flags);
+       cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+       cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+       cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
+       cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time);
+       cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
+
+       iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
+
+       memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+
+       cfg->bcast_sta_id = mvm->aux_sta.sta_id;
+       cfg->channel_flags = channel_flags;
+
+       iwl_mvm_fill_channels(mvm, cfg->channel_array);
+}
+
+static void iwl_mvm_fill_scan_config_cdb(struct iwl_mvm *mvm, void *config,
+                                        u32 flags, u8 channel_flags)
+{
+       enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+       struct iwl_scan_config_cdb *cfg = config;
+
+       cfg->flags = cpu_to_le32(flags);
+       cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+       cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+       cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
+       cfg->out_of_channel_time[0] =
+               cpu_to_le32(scan_timing[type].max_out_time);
+       cfg->out_of_channel_time[1] =
+               cpu_to_le32(scan_timing[type].max_out_time);
+       cfg->suspend_time[0] = cpu_to_le32(scan_timing[type].suspend_time);
+       cfg->suspend_time[1] = cpu_to_le32(scan_timing[type].suspend_time);
+
+       iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell, &scan_timing[type]);
+
+       memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+
+       cfg->bcast_sta_id = mvm->aux_sta.sta_id;
+       cfg->channel_flags = channel_flags;
+
+       iwl_mvm_fill_channels(mvm, cfg->channel_array);
+}
+
+int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+{
+       void *cfg;
+       int ret, cmd_size;
        struct iwl_host_cmd cmd = {
                .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
        };
        enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+       int num_channels =
+               mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
+               mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
+       u32 flags;
+       u8 channel_flags;
 
        if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
                return -ENOBUFS;
@@ -965,52 +1039,45 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
                return 0;
        }
 
-       cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;
+       if (iwl_mvm_is_cdb_supported(mvm))
+               cmd_size = sizeof(struct iwl_scan_config_cdb);
+       else
+               cmd_size = sizeof(struct iwl_scan_config);
+       cmd_size += mvm->fw->ucode_capa.n_scan_channels;
 
-       scan_config = kzalloc(cmd_size, GFP_KERNEL);
-       if (!scan_config)
+       cfg = kzalloc(cmd_size, GFP_KERNEL);
+       if (!cfg)
                return -ENOMEM;
 
-       scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
-                                        SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
-                                        SCAN_CONFIG_FLAG_SET_TX_CHAINS |
-                                        SCAN_CONFIG_FLAG_SET_RX_CHAINS |
-                                        SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
-                                        SCAN_CONFIG_FLAG_SET_ALL_TIMES |
-                                        SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
-                                        SCAN_CONFIG_FLAG_SET_MAC_ADDR |
-                                        SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
-                                        SCAN_CONFIG_N_CHANNELS(num_channels) |
-                                        (type == IWL_SCAN_TYPE_FRAGMENTED ?
-                                         SCAN_CONFIG_FLAG_SET_FRAGMENTED :
-                                         SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED));
-       scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
-       scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
-       scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
-       scan_config->out_of_channel_time =
-               cpu_to_le32(scan_timing[type].max_out_time);
-       scan_config->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
-       scan_config->dwell_active = scan_timing[type].dwell_active;
-       scan_config->dwell_passive = scan_timing[type].dwell_passive;
-       scan_config->dwell_fragmented = scan_timing[type].dwell_fragmented;
-       scan_config->dwell_extended = scan_timing[type].dwell_extended;
-
-       memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
-
-       scan_config->bcast_sta_id = mvm->aux_sta.sta_id;
-       scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS |
-                                    IWL_CHANNEL_FLAG_ACCURATE_EBS |
-                                    IWL_CHANNEL_FLAG_EBS_ADD |
-                                    IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
-
-       band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
-       for (i = 0; i < band->n_channels; i++, j++)
-               scan_config->channel_array[j] = band->channels[i].hw_value;
-       band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
-       for (i = 0; i < band->n_channels; i++, j++)
-               scan_config->channel_array[j] = band->channels[i].hw_value;
+       flags = SCAN_CONFIG_FLAG_ACTIVATE |
+                SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
+                SCAN_CONFIG_FLAG_SET_TX_CHAINS |
+                SCAN_CONFIG_FLAG_SET_RX_CHAINS |
+                SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
+                SCAN_CONFIG_FLAG_SET_ALL_TIMES |
+                SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
+                SCAN_CONFIG_FLAG_SET_MAC_ADDR |
+                SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
+                SCAN_CONFIG_N_CHANNELS(num_channels) |
+                (type == IWL_SCAN_TYPE_FRAGMENTED ?
+                 SCAN_CONFIG_FLAG_SET_FRAGMENTED :
+                 SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
+
+       channel_flags = IWL_CHANNEL_FLAG_EBS |
+                       IWL_CHANNEL_FLAG_ACCURATE_EBS |
+                       IWL_CHANNEL_FLAG_EBS_ADD |
+                       IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
+
+       if (iwl_mvm_is_cdb_supported(mvm)) {
+               flags |= (type == IWL_SCAN_TYPE_FRAGMENTED) ?
+                        SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
+                        SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
+               iwl_mvm_fill_scan_config_cdb(mvm, cfg, flags, channel_flags);
+       } else {
+               iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
+       }
 
-       cmd.data[0] = scan_config;
+       cmd.data[0] = cfg;
        cmd.len[0] = cmd_size;
        cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
 
@@ -1020,7 +1087,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        if (!ret)
                mvm->scan_type = type;
 
-       kfree(scan_config);
+       kfree(cfg);
        return ret;
 }
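
For readers following the CDB rework in this hunk, the allocation pattern is: size the host command by the variant-specific header, append the shared per-channel byte array, and let a per-variant helper fill the layout. A stand-alone sketch of that pattern, using hypothetical struct names rather than the driver's real types:

#include <stdio.h>
#include <stdlib.h>

struct cfg_v1 { unsigned int flags; unsigned char channel_array[]; };
struct cfg_v2 { unsigned int flags; unsigned int extra[2]; unsigned char channel_array[]; };

/* Size = variant header + one byte per scan channel, zero-initialised. */
static void *alloc_scan_cfg(int cdb, int n_channels, size_t *size)
{
	*size = (cdb ? sizeof(struct cfg_v2) : sizeof(struct cfg_v1))
		+ (size_t)n_channels;
	return calloc(1, *size);
}

int main(void)
{
	size_t size = 0;
	void *cfg = alloc_scan_cfg(1, 39, &size);
	int ok = cfg && size > 39;

	printf("%zu\n", size);
	free(cfg);
	return ok ? 0 : 1;
}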
 
@@ -1039,19 +1106,31 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
                                    struct iwl_scan_req_umac *cmd,
                                    struct iwl_mvm_scan_params *params)
 {
+       struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type];
+
        if (params->measurement_dwell) {
                cmd->active_dwell = params->measurement_dwell;
                cmd->passive_dwell = params->measurement_dwell;
                cmd->extended_dwell = params->measurement_dwell;
        } else {
-               cmd->active_dwell = scan_timing[params->type].dwell_active;
-               cmd->passive_dwell = scan_timing[params->type].dwell_passive;
-               cmd->extended_dwell = scan_timing[params->type].dwell_extended;
+               cmd->active_dwell = timing->dwell_active;
+               cmd->passive_dwell = timing->dwell_passive;
+               cmd->extended_dwell = timing->dwell_extended;
+       }
+       cmd->fragmented_dwell = timing->dwell_fragmented;
+
+       if (iwl_mvm_is_cdb_supported(mvm)) {
+               cmd->cdb.max_out_time[0] = cpu_to_le32(timing->max_out_time);
+               cmd->cdb.suspend_time[0] = cpu_to_le32(timing->suspend_time);
+               cmd->cdb.max_out_time[1] = cpu_to_le32(timing->max_out_time);
+               cmd->cdb.suspend_time[1] = cpu_to_le32(timing->suspend_time);
+               cmd->cdb.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+       } else {
+               cmd->no_cdb.max_out_time = cpu_to_le32(timing->max_out_time);
+               cmd->no_cdb.suspend_time = cpu_to_le32(timing->suspend_time);
+               cmd->no_cdb.scan_priority =
+                       cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
        }
-       cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
-       cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
-       cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
-       cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
 
        if (iwl_mvm_is_regular_scan(params))
                cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
@@ -1063,9 +1142,8 @@ static void
 iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
                               struct ieee80211_channel **channels,
                               int n_channels, u32 ssid_bitmap,
-                              struct iwl_scan_req_umac *cmd)
+                              struct iwl_scan_channel_cfg_umac *channel_cfg)
 {
-       struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data;
        int i;
 
        for (i = 0; i < n_channels; i++) {
@@ -1088,8 +1166,11 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
        if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
                flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
 
-       if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+       if (params->type == IWL_SCAN_TYPE_FRAGMENTED) {
                flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
+               if (iwl_mvm_is_cdb_supported(mvm))
+                       flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
+       }
 
        if (iwl_mvm_rrm_scan_needed(mvm))
                flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
@@ -1126,11 +1207,14 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                             int type)
 {
        struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
-       struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
+       void *cmd_data = iwl_mvm_is_cdb_supported(mvm) ?
+                        (void *)&cmd->cdb.data : (void *)&cmd->no_cdb.data;
+       struct iwl_scan_req_umac_tail *sec_part = cmd_data +
                sizeof(struct iwl_scan_channel_cfg_umac) *
                        mvm->fw->ucode_capa.n_scan_channels;
        int uid, i;
        u32 ssid_bitmap = 0;
+       u8 channel_flags = 0;
        struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
 
        lockdep_assert_held(&mvm->mutex);
@@ -1157,16 +1241,23 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
 
        if (iwl_mvm_scan_use_ebs(mvm, vif))
-               cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
-                                    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
-                                    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+               channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
+                               IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+                               IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
 
-       cmd->n_channels = params->n_channels;
+       if (iwl_mvm_is_cdb_supported(mvm)) {
+               cmd->cdb.channel_flags = channel_flags;
+               cmd->cdb.n_channels = params->n_channels;
+       } else {
+               cmd->no_cdb.channel_flags = channel_flags;
+               cmd->no_cdb.n_channels = params->n_channels;
+       }
 
        iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
 
        iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
-                                      params->n_channels, ssid_bitmap, cmd);
+                                      params->n_channels, ssid_bitmap,
+                                      cmd_data);
 
        for (i = 0; i < params->n_scan_plans; i++) {
                struct cfg80211_sched_scan_plan *scan_plan =
@@ -1601,8 +1692,13 @@ static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
 
 int iwl_mvm_scan_size(struct iwl_mvm *mvm)
 {
+       int base_size = IWL_SCAN_REQ_UMAC_SIZE;
+
+       if (iwl_mvm_is_cdb_supported(mvm))
+               base_size = IWL_SCAN_REQ_UMAC_SIZE_CDB;
+
        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
-               return sizeof(struct iwl_scan_req_umac) +
+               return base_size +
                        sizeof(struct iwl_scan_channel_cfg_umac) *
                                mvm->fw->ucode_capa.n_scan_channels +
                        sizeof(struct iwl_scan_req_umac_tail);
index 09e9e2e3ed040202f0cb40c1e326584b0fa7465a..bd1dcc863d8f338df994a9b177d498df2eef49cb 100644 (file)
@@ -202,7 +202,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
        add_sta_cmd.station_flags |=
                cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
-       add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
+       if (mvm_sta->associated)
+               add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
 
        if (sta->wme) {
                add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
@@ -454,14 +455,53 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
 
        rcu_read_unlock();
 
+       return disable_agg_tids;
+}
+
+static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
+                                      bool same_sta)
+{
+       struct iwl_mvm_sta *mvmsta;
+       u8 txq_curr_ac, sta_id, tid;
+       unsigned long disable_agg_tids = 0;
+       int ret;
+
+       lockdep_assert_held(&mvm->mutex);
+
        spin_lock_bh(&mvm->queue_info_lock);
-       /* Unmap MAC queues and TIDs from this queue */
-       mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
-       mvm->queue_info[queue].hw_queue_refcount = 0;
-       mvm->queue_info[queue].tid_bitmap = 0;
+       txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
+       sta_id = mvm->queue_info[queue].ra_sta_id;
+       tid = mvm->queue_info[queue].txq_tid;
        spin_unlock_bh(&mvm->queue_info_lock);
 
-       return disable_agg_tids;
+       mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+
+       disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
+       /* Disable the queue */
+       if (disable_agg_tids)
+               iwl_mvm_invalidate_sta_queue(mvm, queue,
+                                            disable_agg_tids, false);
+
+       ret = iwl_mvm_disable_txq(mvm, queue,
+                                 mvmsta->vif->hw_queue[txq_curr_ac],
+                                 tid, 0);
+       if (ret) {
+               /* Re-mark the inactive queue as inactive */
+               spin_lock_bh(&mvm->queue_info_lock);
+               mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
+               spin_unlock_bh(&mvm->queue_info_lock);
+               IWL_ERR(mvm,
+                       "Failed to free inactive queue %d (ret=%d)\n",
+                       queue, ret);
+
+               return ret;
+       }
+
+       /* If TXQ is allocated to another STA, update removal in FW */
+       if (!same_sta)
+               iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
+
+       return 0;
 }
 
 static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
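
The new iwl_mvm_free_inactive_queue() helper above also illustrates a common locking shape: snapshot the queue bookkeeping under queue_info_lock, drop the lock, then run the slow teardown on the local copies. A stand-alone sketch of that pattern in pthread flavour, with hypothetical fields:

#include <pthread.h>

struct queue_info {
	pthread_mutex_t lock;
	int ac, sta_id, tid;
};

static void teardown(struct queue_info *q)
{
	int ac, sta_id, tid;

	pthread_mutex_lock(&q->lock);
	ac = q->ac;		/* snapshot under the lock */
	sta_id = q->sta_id;
	tid = q->tid;
	pthread_mutex_unlock(&q->lock);

	/* the actual (slow) teardown would use the snapshots here */
	(void)ac; (void)sta_id; (void)tid;
}

int main(void)
{
	struct queue_info q = { PTHREAD_MUTEX_INITIALIZER, 2, 7, 0 };

	teardown(&q);
	return 0;
}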
@@ -652,7 +692,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
                iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
        u8 mac_queue = mvmsta->vif->hw_queue[ac];
        int queue = -1;
-       bool using_inactive_queue = false;
+       bool using_inactive_queue = false, same_sta = false;
        unsigned long disable_agg_tids = 0;
        enum iwl_mvm_agg_state queue_state;
        bool shared_queue = false;
@@ -709,6 +749,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
            mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
                mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
                using_inactive_queue = true;
+               same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
                IWL_DEBUG_TX_QUEUES(mvm,
                                    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
                                    queue, mvmsta->sta_id, tid);
@@ -755,44 +796,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
         * first
         */
        if (using_inactive_queue) {
-               struct iwl_scd_txq_cfg_cmd cmd = {
-                       .scd_queue = queue,
-                       .action = SCD_CFG_DISABLE_QUEUE,
-               };
-               u8 txq_curr_ac;
-
-               disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
-
-               spin_lock_bh(&mvm->queue_info_lock);
-               txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
-               cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
-               cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[txq_curr_ac];
-               cmd.tid = mvm->queue_info[queue].txq_tid;
-               spin_unlock_bh(&mvm->queue_info_lock);
-
-               /* Disable the queue */
-               if (disable_agg_tids)
-                       iwl_mvm_invalidate_sta_queue(mvm, queue,
-                                                    disable_agg_tids, false);
-               iwl_trans_txq_disable(mvm->trans, queue, false);
-               ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
-                                          &cmd);
-               if (ret) {
-                       IWL_ERR(mvm,
-                               "Failed to free inactive queue %d (ret=%d)\n",
-                               queue, ret);
-
-                       /* Re-mark the inactive queue as inactive */
-                       spin_lock_bh(&mvm->queue_info_lock);
-                       mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
-                       spin_unlock_bh(&mvm->queue_info_lock);
-
+               ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
+               if (ret)
                        return ret;
-               }
-
-               /* If TXQ is allocated to another STA, update removal in FW */
-               if (cmd.sta_id != mvmsta->sta_id)
-                       iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
        }
 
        IWL_DEBUG_TX_QUEUES(mvm,
@@ -868,7 +874,6 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
                .scd_queue = queue,
                .action = SCD_CFG_UPDATE_QUEUE_TID,
        };
-       s8 sta_id;
        int tid;
        unsigned long tid_bitmap;
        int ret;
@@ -876,7 +881,6 @@ static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
        lockdep_assert_held(&mvm->mutex);
 
        spin_lock_bh(&mvm->queue_info_lock);
-       sta_id = mvm->queue_info[queue].ra_sta_id;
        tid_bitmap = mvm->queue_info[queue].tid_bitmap;
        spin_unlock_bh(&mvm->queue_info_lock);
 
@@ -1110,6 +1114,7 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        int queue;
+       bool using_inactive_queue = false, same_sta = false;
 
        /*
         * Check for inactive queues, so we don't reach a situation where we
@@ -1133,6 +1138,14 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
                spin_unlock_bh(&mvm->queue_info_lock);
                IWL_ERR(mvm, "No available queues for new station\n");
                return -ENOSPC;
+       } else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
+               /*
+                * If this queue is already allocated but inactive, we'll need
+                * to free it first before enabling it again; mark it as
+                * reserved to make sure no new traffic arrives on it.
+                */
+               using_inactive_queue = true;
+               same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
        }
        mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
 
@@ -1140,6 +1153,9 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
 
        mvmsta->reserved_queue = queue;
 
+       if (using_inactive_queue)
+               iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
+
        IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
                            queue, mvmsta->sta_id);
 
@@ -1486,6 +1502,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+       u8 sta_id = mvm_sta->sta_id;
        int ret;
 
        lockdep_assert_held(&mvm->mutex);
@@ -1494,7 +1511,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
                kfree(mvm_sta->dup_data);
 
        if ((vif->type == NL80211_IFTYPE_STATION &&
-            mvmvif->ap_sta_id == mvm_sta->sta_id) ||
+            mvmvif->ap_sta_id == sta_id) ||
            iwl_mvm_is_dqa_supported(mvm)){
                ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
                if (ret)
@@ -1510,8 +1527,17 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
                ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
 
                /* If DQA is supported - the queues can be disabled now */
-               if (iwl_mvm_is_dqa_supported(mvm))
+               if (iwl_mvm_is_dqa_supported(mvm)) {
                        iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+                       /*
+                        * If pending_frames is set at this point - it must be
+                        * a driver internal logic error, since the queues are
+                        * empty and were removed successfully.
+                        * Warn on it, but set it to 0 anyway to avoid the
+                        * station not being removed later in the function.
+                        */
+                       WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0));
+               }
 
                /* If there is a TXQ still marked as reserved - free it */
                if (iwl_mvm_is_dqa_supported(mvm) &&
@@ -1529,7 +1555,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
                        if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
                                 (*status != IWL_MVM_QUEUE_FREE),
                                 "sta_id %d reserved txq %d status %d",
-                                mvm_sta->sta_id, reserved_txq, *status)) {
+                                sta_id, reserved_txq, *status)) {
                                spin_unlock_bh(&mvm->queue_info_lock);
                                return -EINVAL;
                        }
@@ -1539,7 +1565,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
                }
 
                if (vif->type == NL80211_IFTYPE_STATION &&
-                   mvmvif->ap_sta_id == mvm_sta->sta_id) {
+                   mvmvif->ap_sta_id == sta_id) {
                        /* if associated - we can't remove the AP STA now */
                        if (vif->bss_conf.assoc)
                                return ret;
@@ -1548,7 +1574,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
                        mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
 
                        /* clear d0i3_ap_sta_id if no longer relevant */
-                       if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
+                       if (mvm->d0i3_ap_sta_id == sta_id)
                                mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
                }
        }
@@ -1557,7 +1583,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
         * This shouldn't happen - the TDLS channel switch should be canceled
         * before the STA is removed.
         */
-       if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) {
+       if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
                mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
                cancel_delayed_work(&mvm->tdls_cs.dwork);
        }
@@ -1567,21 +1593,20 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
         * calls the drain worker.
         */
        spin_lock_bh(&mvm_sta->lock);
+
        /*
         * There are frames pending on the AC queues for this station.
         * We need to wait until all the frames are drained...
         */
-       if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
-               rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
+       if (atomic_read(&mvm->pending_frames[sta_id])) {
+               rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id],
                                   ERR_PTR(-EBUSY));
                spin_unlock_bh(&mvm_sta->lock);
 
                /* disable TDLS sta queues on drain complete */
                if (sta->tdls) {
-                       mvm->tfd_drained[mvm_sta->sta_id] =
-                                                       mvm_sta->tfd_queue_msk;
-                       IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n",
-                                      mvm_sta->sta_id);
+                       mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk;
+                       IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id);
                }
 
                ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
@@ -1765,6 +1790,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
        static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
        const u8 *baddr = _baddr;
+       int ret;
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -1780,19 +1806,16 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                        iwl_mvm_get_wd_timeout(mvm, vif, false, false);
                int queue;
 
-               if ((vif->type == NL80211_IFTYPE_AP) &&
-                   (mvmvif->bcast_sta.tfd_queue_msk &
-                    BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
+               if (vif->type == NL80211_IFTYPE_AP)
                        queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
-               else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
-                        (mvmvif->bcast_sta.tfd_queue_msk &
-                         BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
+               else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
                        queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
-               else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
+               else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
                        return -EINVAL;
 
                iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
                                   wdg_timeout);
+               bsta->tfd_queue_msk |= BIT(queue);
        }
 
        if (vif->type == NL80211_IFTYPE_ADHOC)
@@ -1801,8 +1824,67 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT))
                return -ENOSPC;
 
-       return iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
-                                         mvmvif->id, mvmvif->color);
+       ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
+                                        mvmvif->id, mvmvif->color);
+       if (ret)
+               return ret;
+
+       /*
+        * In AP vif type, we also need to enable the cab_queue. However, we
+        * have to enable it after the ADD_STA command is sent, otherwise the
+        * FW will throw an assert once we send the ADD_STA command (it'll
+        * detect a mismatch in the tfd_queue_msk, as we can't add the
+        * already-enabled cab_queue to the mask)
+        */
+       if (iwl_mvm_is_dqa_supported(mvm) &&
+           vif->type == NL80211_IFTYPE_AP) {
+               struct iwl_trans_txq_scd_cfg cfg = {
+                       .fifo = IWL_MVM_TX_FIFO_MCAST,
+                       .sta_id = mvmvif->bcast_sta.sta_id,
+                       .tid = IWL_MAX_TID_COUNT,
+                       .aggregate = false,
+                       .frame_limit = IWL_FRAME_LIMIT,
+               };
+               unsigned int wdg_timeout =
+                       iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+
+               iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue,
+                                  0, &cfg, wdg_timeout);
+       }
+
+       return 0;
+}
+
+static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
+                                         struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+       lockdep_assert_held(&mvm->mutex);
+
+       if (vif->type == NL80211_IFTYPE_AP)
+               iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
+                                   IWL_MAX_TID_COUNT, 0);
+
+       if (mvmvif->bcast_sta.tfd_queue_msk &
+           BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)) {
+               iwl_mvm_disable_txq(mvm,
+                                   IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
+                                   vif->hw_queue[0], IWL_MAX_TID_COUNT,
+                                   0);
+               mvmvif->bcast_sta.tfd_queue_msk &=
+                       ~BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
+       }
+
+       if (mvmvif->bcast_sta.tfd_queue_msk &
+           BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)) {
+               iwl_mvm_disable_txq(mvm,
+                                   IWL_MVM_DQA_P2P_DEVICE_QUEUE,
+                                   vif->hw_queue[0], IWL_MAX_TID_COUNT,
+                                   0);
+               mvmvif->bcast_sta.tfd_queue_msk &=
+                       ~BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
+       }
 }
 
 /* Send the FW a request to remove the station from its internal data
@@ -1814,6 +1896,9 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (iwl_mvm_is_dqa_supported(mvm))
+               iwl_mvm_free_bcast_sta_queues(mvm, vif);
+
        ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
        if (ret)
                IWL_WARN(mvm, "Failed sending remove station\n");
@@ -1827,22 +1912,16 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (!iwl_mvm_is_dqa_supported(mvm))
+       if (!iwl_mvm_is_dqa_supported(mvm)) {
                qmask = iwl_mvm_mac_get_queues_mask(vif);
 
-       if (vif->type == NL80211_IFTYPE_AP) {
                /*
                 * The firmware defines the TFD queue mask to only be relevant
                 * for *unicast* queues, so the multicast (CAB) queue shouldn't
-                * be included.
+                * be included. This only happens with the NL80211_IFTYPE_AP vif
+                * type, so the next line only has an effect there.
                 */
                qmask &= ~BIT(vif->cab_queue);
-
-               if (iwl_mvm_is_dqa_supported(mvm))
-                       qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
-       } else if (iwl_mvm_is_dqa_supported(mvm) &&
-                  vif->type == NL80211_IFTYPE_P2P_DEVICE) {
-               qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
        }
 
        return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
@@ -2247,6 +2326,13 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                        IWL_ERR(mvm, "Failed to allocate agg queue\n");
                        goto release_locks;
                }
+               /*
+                * TXQ shouldn't be in inactive mode for non-DQA, so getting
+                * an inactive queue from iwl_mvm_find_free_queue() is
+                * certainly a bug
+                */
+               WARN_ON(mvm->queue_info[txq_id].status ==
+                       IWL_MVM_QUEUE_INACTIVE);
 
                /* TXQ hasn't yet been enabled, so mark it only as reserved */
                mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
@@ -2962,6 +3048,11 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
 
        /* Get the station from the mvm local station table */
        mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
+       if (!mvm_sta) {
+               IWL_ERR(mvm, "Failed to find station\n");
+               return -EINVAL;
+       }
+       sta_id = mvm_sta->sta_id;
 
        IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
                      keyconf->keyidx, sta_id);
@@ -2989,8 +3080,6 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
                return 0;
        }
 
-       sta_id = mvm_sta->sta_id;
-
        ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
        if (ret)
                return ret;
index b45c7b9937c82dca15df67d0cb6e4c0f77301c1a..4be34f902278c8bb36521346dfcd7980976e8786 100644 (file)
@@ -437,6 +437,7 @@ struct iwl_mvm_sta {
        bool disable_tx;
        bool tlc_amsdu;
        bool sleeping;
+       bool associated;
        u8 agg_tids;
        u8 sleep_tx_count;
        u8 avg_energy;
index 66957ac12ca400d00e9f5b5dcd05b09af02fd4d9..dd2b4a30081993823634e18752187c576a656b79 100644 (file)
@@ -102,14 +102,13 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
 #define OPT_HDR(type, skb, off) \
        (type *)(skb_network_header(skb) + (off))
 
-static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
-                           struct ieee80211_hdr *hdr,
-                           struct ieee80211_tx_info *info,
-                           struct iwl_tx_cmd *tx_cmd)
+static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
+                          struct ieee80211_hdr *hdr,
+                          struct ieee80211_tx_info *info)
 {
+       u16 offload_assist = 0;
 #if IS_ENABLED(CONFIG_INET)
        u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
-       u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist);
        u8 protocol = 0;
 
        /*
@@ -117,7 +116,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
         * compute it
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
-               return;
+               goto out;
 
        /* We do not expect to be requested to csum stuff we do not support */
        if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
@@ -125,7 +124,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
                       skb->protocol != htons(ETH_P_IPV6)),
                      "No support for requested checksum\n")) {
                skb_checksum_help(skb);
-               return;
+               goto out;
        }
 
        if (skb->protocol == htons(ETH_P_IP)) {
@@ -145,7 +144,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
                            protocol != NEXTHDR_HOP &&
                            protocol != NEXTHDR_DEST) {
                                skb_checksum_help(skb);
-                               return;
+                               goto out;
                        }
 
                        hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
@@ -159,7 +158,7 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
                WARN_ON_ONCE(1);
                skb_checksum_help(skb);
-               return;
+               goto out;
        }
 
        /* enable L4 csum */
@@ -191,8 +190,9 @@ static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
        mh_len /= 2;
        offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
 
-       tx_cmd->offload_assist = cpu_to_le16(offload_assist);
+out:
 #endif
+       return offload_assist;
 }
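
One detail worth noting in the checksum refactor above: the early exits inside the CONFIG_INET block now jump to an out: label placed just before #endif, so the single return after #endif serves both configurations. A minimal sketch of that conditional-compilation shape, with illustrative names:

#include <stdint.h>

#define HAVE_CSUM 1	/* stand-in for IS_ENABLED(CONFIG_INET) */

static uint16_t compute_assist(int can_csum)
{
	uint16_t assist = 0;
#if HAVE_CSUM
	if (!can_csum)
		goto out;	/* nothing to offload */
	assist |= 1u << 0;	/* pretend: enable L4 checksum */
out:
#endif
	return assist;
}

int main(void)
{
	return compute_assist(1) == 1 ? 0 : 1;
}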
 
 /*
@@ -202,7 +202,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                        struct iwl_tx_cmd *tx_cmd,
                        struct ieee80211_tx_info *info, u8 sta_id)
 {
-       struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (void *)skb->data;
        __le16 fc = hdr->frame_control;
        u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
@@ -284,9 +283,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
 
        tx_cmd->tx_flags = cpu_to_le32(tx_flags);
-       /* Total # bytes to be transmitted */
-       tx_cmd->len = cpu_to_le16((u16)skb->len +
-               (uintptr_t)skb_info->driver_data[0]);
+       /* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
+       tx_cmd->len = cpu_to_le16((u16)skb->len);
        tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
        tx_cmd->sta_id = sta_id;
 
@@ -295,7 +293,52 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
            !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
                tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));
 
-       iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd);
+       tx_cmd->offload_assist |=
+               cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info));
+}
+
+static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
+                              struct ieee80211_tx_info *info,
+                              struct ieee80211_sta *sta)
+{
+       int rate_idx;
+       u8 rate_plcp;
+       u32 rate_flags;
+
+       /* HT rate doesn't make sense for a non data frame */
+       WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
+                 "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n",
+                 info->control.rates[0].flags,
+                 info->control.rates[0].idx);
+
+       rate_idx = info->control.rates[0].idx;
+       /* if the rate isn't a well known legacy rate, take the lowest one */
+       if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
+               rate_idx = rate_lowest_index(
+                               &mvm->nvm_data->bands[info->band], sta);
+
+       /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+       if (info->band == NL80211_BAND_5GHZ)
+               rate_idx += IWL_FIRST_OFDM_RATE;
+
+       /* For 2.4 GHZ band, check that there is no need to remap */
+       BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+       /* Get PLCP rate for tx_cmd->rate_n_flags */
+       rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
+
+       if (info->band == NL80211_BAND_2GHZ &&
+           !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
+               rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
+       else
+               rate_flags =
+                       BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
+
+       /* Set CCK flag as needed */
+       if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+               rate_flags |= RATE_MCS_CCK_MSK;
+
+       return (u32)rate_plcp | rate_flags;
 }
 
 /*
@@ -305,10 +348,6 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
                            struct ieee80211_tx_info *info,
                            struct ieee80211_sta *sta, __le16 fc)
 {
-       u32 rate_flags;
-       int rate_idx;
-       u8 rate_plcp;
-
        /* Set retry limit on RTS packets */
        tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
 
@@ -337,46 +376,12 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
                        cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
        }
 
-       /* HT rate doesn't make sense for a non data frame */
-       WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
-                 "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame (fc:0x%x)\n",
-                 info->control.rates[0].flags,
-                 info->control.rates[0].idx,
-                 le16_to_cpu(fc));
-
-       rate_idx = info->control.rates[0].idx;
-       /* if the rate isn't a well known legacy rate, take the lowest one */
-       if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
-               rate_idx = rate_lowest_index(
-                               &mvm->nvm_data->bands[info->band], sta);
-
-       /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
-       if (info->band == NL80211_BAND_5GHZ)
-               rate_idx += IWL_FIRST_OFDM_RATE;
-
-       /* For 2.4 GHZ band, check that there is no need to remap */
-       BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
-
-       /* Get PLCP rate for tx_cmd->rate_n_flags */
-       rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
-
        mvm->mgmt_last_antenna_idx =
                iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
                                     mvm->mgmt_last_antenna_idx);
 
-       if (info->band == NL80211_BAND_2GHZ &&
-           !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
-               rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
-       else
-               rate_flags =
-                       BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
-
-       /* Set CCK flag as needed */
-       if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
-               rate_flags |= RATE_MCS_CCK_MSK;
-
        /* Set the rate in the TX cmd */
-       tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags);
+       tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
 }
 
 static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
@@ -459,7 +464,6 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
                      struct ieee80211_sta *sta, u8 sta_id)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
        struct iwl_device_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
 
@@ -479,12 +483,18 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
 
+       return dev_cmd;
+}
+
+static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
+                                      struct iwl_device_cmd *cmd)
+{
+       struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+
        memset(&skb_info->status, 0, sizeof(skb_info->status));
        memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
 
-       skb_info->driver_data[1] = dev_cmd;
-
-       return dev_cmd;
+       skb_info->driver_data[1] = cmd;
 }
 
 static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
@@ -496,15 +506,17 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
        switch (info->control.vif->type) {
        case NL80211_IFTYPE_AP:
                /*
-                * handle legacy hostapd as well, where station may be added
-                * only after assoc.
+                * Handle legacy hostapd as well, where a station may be
+                * added only after assoc. Also take care of the case where
+                * we send a deauth to a station that we don't have.
                 */
-               if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc))
+               if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) ||
+                   ieee80211_is_deauth(fc))
                        return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
                if (info->hw_queue == info->control.vif->cab_queue)
                        return info->hw_queue;
 
-               WARN_ON_ONCE(1);
+               WARN_ONCE(1, "fc=0x%02x", le16_to_cpu(fc));
                return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
        case NL80211_IFTYPE_P2P_DEVICE:
                if (ieee80211_is_mgmt(fc))
@@ -536,9 +548,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
         * queue. STATION (HS2.0) uses the auxiliary context of the FW,
         * and hence needs to be sent on the aux queue
         */
-       if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+       if (skb_info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
            skb_info->control.vif->type == NL80211_IFTYPE_STATION)
-               IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
+               skb_info->hw_queue = mvm->aux_queue;
 
        memcpy(&info, skb->cb, sizeof(info));
 
@@ -550,9 +562,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                          info.hw_queue != info.control.vif->cab_queue)))
                return -1;
 
-       /* This holds the amsdu headers length */
-       skb_info->driver_data[0] = (void *)(uintptr_t)0;
-
        queue = info.hw_queue;
 
        /*
@@ -563,9 +572,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
         * (this is not possible for unicast packets as a TDLS discovery
         * response is sent without a station entry); otherwise use the
         * AUX station.
-        * In DQA mode, if vif is of type STATION and frames are not multicast,
-        * they should be sent from the BSS queue. For example, TDLS setup
-        * frames should be sent on this queue, as they go through the AP.
+        * In DQA mode, if vif is of type STATION and frames are not multicast
+        * or offchannel, they should be sent from the BSS queue.
+        * For example, TDLS setup frames should be sent on this queue,
+        * as they go through the AP.
         */
        sta_id = mvm->aux_sta.sta_id;
        if (info.control.vif) {
@@ -587,7 +597,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                        if (ap_sta_id != IWL_MVM_STATION_COUNT)
                                sta_id = ap_sta_id;
                } else if (iwl_mvm_is_dqa_supported(mvm) &&
-                          info.control.vif->type == NL80211_IFTYPE_STATION) {
+                          info.control.vif->type == NL80211_IFTYPE_STATION &&
+                          queue != mvm->aux_queue) {
                        queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
                }
        }
@@ -598,6 +609,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
        if (!dev_cmd)
                return -1;
 
+       /* From now on, we cannot access info->control */
+       iwl_mvm_skb_prepare_status(skb, dev_cmd);
+
        tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
        /* Copy MAC header from skb into command buffer */
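
The two hunks above fix a strict ordering in this path; sketched below with the names as they appear in this diff (not a verbatim excerpt; the argument list of iwl_mvm_set_tx_params() is truncated in the hunk header and is elided here):

/*
 * 1) iwl_mvm_set_tx_params() builds the device command while the local
 *    copy of the tx info (and info.control) is still valid;
 * 2) iwl_mvm_skb_prepare_status() then scrubs skb->cb, so info->control
 *    must not be touched afterwards;
 * 3) only then is the frame handed to the transport.
 */
dev_cmd = iwl_mvm_set_tx_params(/* args elided, as in the driver */);
if (!dev_cmd)
        return -1;

/* From now on, we cannot access info->control */
iwl_mvm_skb_prepare_status(skb, dev_cmd);

if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue))
        return -1;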
@@ -634,7 +648,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
        unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
        bool ipv4 = (skb->protocol == htons(ETH_P_IP));
        u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
-       u16 amsdu_add, snap_ip_tcp, pad, i = 0;
+       u16 snap_ip_tcp, pad, i = 0;
        unsigned int dbg_max_amsdu_len;
        netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
        u8 *qc, tid, txf;
@@ -736,21 +750,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        /* This skb fits in one single A-MSDU */
        if (num_subframes * mss >= tcp_payload_len) {
-               struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
-
-               /*
-                * Compute the length of all the data added for the A-MSDU.
-                * This will be used to compute the length to write in the TX
-                * command. We have: SNAP + IP + TCP for n -1 subframes and
-                * ETH header for n subframes. Note that the original skb
-                * already had one set of SNAP / IP / TCP headers.
-                */
-               num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
-               amsdu_add = num_subframes * sizeof(struct ethhdr) +
-                       (num_subframes - 1) * (snap_ip_tcp + pad);
-               /* This holds the amsdu headers length */
-               skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
-
                __skb_queue_tail(mpdus_skb, skb);
                return 0;
        }
@@ -789,14 +788,6 @@ segment:
                        ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
 
                if (tcp_payload_len > mss) {
-                       struct ieee80211_tx_info *skb_info =
-                               IEEE80211_SKB_CB(tmp);
-
-                       num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
-                       amsdu_add = num_subframes * sizeof(struct ethhdr) +
-                               (num_subframes - 1) * (snap_ip_tcp + pad);
-                       skb_info->driver_data[0] =
-                               (void *)(uintptr_t)amsdu_add;
                        skb_shinfo(tmp)->gso_size = mss;
                } else {
                        qc = ieee80211_get_qos_ctl((void *)tmp->data);
@@ -908,7 +899,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
                goto drop;
 
        tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
-       /* From now on, we cannot access info->control */
 
        /*
         * we handle that entirely ourselves -- for uAPSD the firmware
@@ -919,6 +909,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        spin_lock(&mvmsta->lock);
 
+       /* Nullfunc frames should go to the MGMT queue regardless of QoS;
+        * the !ieee80211_is_qos_nullfunc(fc) condition keeps the default
+        * MGMT TID assignment.
+        */
        if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
                u8 *qc = NULL;
                qc = ieee80211_get_qos_ctl(hdr);
@@ -931,27 +925,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
                hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
                hdr->seq_ctrl |= cpu_to_le16(seq_number);
                is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
-       } else if (iwl_mvm_is_dqa_supported(mvm) &&
-                  (ieee80211_is_qos_nullfunc(fc) ||
-                   ieee80211_is_nullfunc(fc))) {
-               /*
-                * nullfunc frames should go to the MGMT queue regardless of QOS
-                */
-               tid = IWL_MAX_TID_COUNT;
+               if (WARN_ON_ONCE(is_ampdu &&
+                                mvmsta->tid_data[tid].state != IWL_AGG_ON))
+                       goto drop_unlock_sta;
        }
 
-       if (iwl_mvm_is_dqa_supported(mvm)) {
+       if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu)
                txq_id = mvmsta->tid_data[tid].txq_id;
-
-               if (ieee80211_is_mgmt(fc))
-                       tx_cmd->tid_tspec = IWL_TID_NON_QOS;
-       }
-
-       /* Copy MAC header from skb into command buffer */
-       memcpy(tx_cmd->hdr, hdr, hdrlen);
-
-       WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
-
        if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
                /* default to TID 0 for non-QoS packets */
                u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
@@ -959,11 +939,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
                txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]];
        }
 
-       if (is_ampdu) {
-               if (WARN_ON_ONCE(mvmsta->tid_data[tid].state != IWL_AGG_ON))
-                       goto drop_unlock_sta;
-               txq_id = mvmsta->tid_data[tid].txq_id;
-       }
+       /* Copy MAC header from skb into command buffer */
+       memcpy(tx_cmd->hdr, hdr, hdrlen);
+
+       WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
 
        /* Check if TXQ needs to be allocated or re-activated */
        if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE ||
@@ -1015,6 +994,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
        IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
                     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
 
+       /* From now on, we cannot access info->control */
+       iwl_mvm_skb_prepare_status(skb, dev_cmd);
+
        if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
                goto drop_unlock_sta;
 
@@ -1024,7 +1006,10 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
        spin_unlock(&mvmsta->lock);
 
        /* Increase pending frames count if this isn't AMPDU */
-       if (!is_ampdu)
+       if ((iwl_mvm_is_dqa_supported(mvm) &&
+            mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON &&
+            mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) ||
+           (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu))
                atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
 
        return 0;
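
To restate the new pending-frames accounting in one place, a hypothetical helper (not part of the patch) mirroring the condition added above:

/* Hypothetical: a frame counts against pending_frames unless its TID is
 * in (or entering) aggregation in DQA mode, or it is an A-MPDU frame in
 * non-DQA mode. */
static bool iwl_mvm_counts_as_pending(struct iwl_mvm *mvm,
                                      struct iwl_mvm_sta *mvmsta,
                                      u8 tid, bool is_ampdu)
{
        if (iwl_mvm_is_dqa_supported(mvm))
                return mvmsta->tid_data[tid].state != IWL_AGG_ON &&
                       mvmsta->tid_data[tid].state != IWL_AGG_STARTING;
        return !is_ampdu;
}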
@@ -1040,7 +1025,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
                   struct ieee80211_sta *sta)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_info info;
        struct sk_buff_head mpdus_skbs;
        unsigned int payload_len;
@@ -1054,9 +1038,6 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        memcpy(&info, skb->cb, sizeof(info));
 
-       /* This holds the amsdu headers length */
-       skb_info->driver_data[0] = (void *)(uintptr_t)0;
-
        if (!skb_is_gso(skb))
                return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
 
@@ -1295,8 +1276,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 
                memset(&info->status, 0, sizeof(info->status));
 
-               info->flags &= ~IEEE80211_TX_CTL_AMPDU;
-
                /* inform mac80211 about what happened with the frame */
                switch (status & TX_STATUS_MSK) {
                case TX_STATUS_SUCCESS:
@@ -1319,10 +1298,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                        (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
 
                /* Single frame failure in an AMPDU queue => send BAR */
-               if (txq_id >= mvm->first_agg_queue &&
+               if (info->flags & IEEE80211_TX_CTL_AMPDU &&
                    !(info->flags & IEEE80211_TX_STAT_ACK) &&
                    !(info->flags & IEEE80211_TX_STAT_TX_FILTERED))
                        info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+               info->flags &= ~IEEE80211_TX_CTL_AMPDU;
 
                /* W/A FW bug: seq_ctl is wrong when the status isn't success */
                if (status != TX_STATUS_SUCCESS) {
@@ -1357,7 +1337,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                ieee80211_tx_status(mvm->hw, skb);
        }
 
-       if (txq_id >= mvm->first_agg_queue) {
+       if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) {
                /* If this is an aggregation queue, we use the ssn since:
                 * ssn = wifi seq_num % 256.
                 * The seq_ctl is the sequence control of the packet to which
index d04babd99b53803c2a0248d038bcad3fbfab36f0..dedea96a8e0ff7cd3c609d9e5a45976705b54451 100644 (file)
@@ -497,13 +497,11 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
        IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
 }
 
-void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
 {
        struct iwl_trans *trans = mvm->trans;
        struct iwl_error_event_table table;
-       u32 base;
 
-       base = mvm->error_event_table;
        if (mvm->cur_ucode == IWL_UCODE_INIT) {
                if (!base)
                        base = mvm->fw->init_errlog_ptr;
@@ -574,6 +572,14 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
        IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
        IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
        IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+}
+
+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+{
+       iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);
+
+       if (mvm->error_event_table[1])
+               iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]);
 
        if (mvm->support_umac_log)
                iwl_mvm_dump_umac_error_log(mvm);
@@ -649,8 +655,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
        /* Make sure this TID isn't already enabled */
        if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) {
                spin_unlock_bh(&mvm->queue_info_lock);
-               IWL_ERR(mvm, "Trying to enable TXQ with existing TID %d\n",
-                       cfg->tid);
+               IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
+                       queue, cfg->tid);
                return;
        }
 
@@ -693,10 +699,6 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
                        .tid = cfg->tid,
                };
 
-               /* Set sta_id in the command, if it exists */
-               if (iwl_mvm_is_dqa_supported(mvm))
-                       cmd.sta_id = cfg->sta_id;
-
                iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL,
                                         wdg_timeout);
                WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
@@ -706,8 +708,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
        }
 }
 
-void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
-                        u8 tid, u8 flags)
+int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+                       u8 tid, u8 flags)
 {
        struct iwl_scd_txq_cfg_cmd cmd = {
                .scd_queue = queue,
@@ -720,7 +722,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 
        if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
                spin_unlock_bh(&mvm->queue_info_lock);
-               return;
+               return 0;
        }
 
        mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
@@ -760,7 +762,7 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
        /* If the queue is still enabled - nothing left to do in this func */
        if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
                spin_unlock_bh(&mvm->queue_info_lock);
-               return;
+               return 0;
        }
 
        cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
@@ -791,6 +793,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
        if (ret)
                IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
                        queue, ret);
+
+       return ret;
 }
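
With iwl_mvm_disable_txq() now returning an int, callers can propagate an SCD_QUEUE_CFG failure instead of silently dropping it; a hypothetical call site:

int ret = iwl_mvm_disable_txq(mvm, queue, mac80211_queue, tid, 0);
if (ret)
        IWL_ERR(mvm, "failed to disable TXQ %d (ret=%d)\n", queue, ret);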
 
 /**
index 2f8134b2a504223856b02692eb5ff8f6a7f4acd8..ba8a81cb0e2b7118ae761f9be68e4157477c26e5 100644 (file)
@@ -533,7 +533,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
 
 /* a000 Series */
-       {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr)},
 #endif /* CONFIG_IWLMVM */
 
        {0}
@@ -673,11 +673,17 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        cfg = &iwl9000lc_2ac_cfg;
                        iwl_trans->cfg = cfg;
                }
+
+               if (cfg == &iwla000_2ac_cfg_hr &&
+                   iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) {
+                       cfg = &iwla000_2ac_cfg_jf;
+                       iwl_trans->cfg = cfg;
+               }
        }
 #endif
 
        pci_set_drvdata(pdev, iwl_trans);
-       iwl_trans->drv = iwl_drv_start(iwl_trans, cfg);
+       iwl_trans->drv = iwl_drv_start(iwl_trans);
 
        if (IS_ERR(iwl_trans->drv)) {
                ret = PTR_ERR(iwl_trans->drv);
@@ -778,13 +784,14 @@ static int iwl_pci_resume(struct device *device)
 
        /*
         * Enable rfkill interrupt (in order to keep track of
-        * the rfkill status)
+        * the rfkill status). Must be locked to avoid processing
+        * a possible rfkill interrupt between reading the state
+        * and calling iwl_trans_pcie_rf_kill() with it.
         */
+       mutex_lock(&trans_pcie->mutex);
        iwl_enable_rfkill_int(trans);
 
        hw_rfkill = iwl_is_rfkill_set(trans);
-
-       mutex_lock(&trans_pcie->mutex);
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
        mutex_unlock(&trans_pcie->mutex);
 
index cac6d99012b308803b2a0ce08cd21718a06de864..10937309641a5f097e5232acffca07665adf1316 100644 (file)
@@ -279,7 +279,7 @@ struct iwl_txq {
        bool frozen;
        u8 active;
        bool ampdu;
-       bool block;
+       int block;
        unsigned long wd_timeout;
        struct sk_buff_head overflow_q;
 
@@ -670,6 +670,8 @@ static inline u8 get_cmd_index(struct iwl_txq *q, u32 index)
 
 static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
 {
+       lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->mutex);
+
        return !(iwl_read32(trans, CSR_GP_CNTRL) &
                CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
 }
index 6fe5546dc7730f2f305321eb866379cf80478515..de94dfdf2ec9972ee7c78e695b26f04f3a022154 100644 (file)
@@ -1607,17 +1607,19 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
        if (inta & CSR_INT_BIT_RF_KILL) {
                bool hw_rfkill;
 
+               mutex_lock(&trans_pcie->mutex);
                hw_rfkill = iwl_is_rfkill_set(trans);
+               if (hw_rfkill)
+                       set_bit(STATUS_RFKILL, &trans->status);
+
                IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
                         hw_rfkill ? "disable radio" : "enable radio");
 
                isr_stats->rfkill++;
 
-               mutex_lock(&trans_pcie->mutex);
                iwl_trans_pcie_rf_kill(trans, hw_rfkill);
                mutex_unlock(&trans_pcie->mutex);
                if (hw_rfkill) {
-                       set_bit(STATUS_RFKILL, &trans->status);
                        if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
                                               &trans->status))
                                IWL_DEBUG_RF_KILL(trans,
@@ -1952,17 +1954,19 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
        if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) {
                bool hw_rfkill;
 
+               mutex_lock(&trans_pcie->mutex);
                hw_rfkill = iwl_is_rfkill_set(trans);
+               if (hw_rfkill)
+                       set_bit(STATUS_RFKILL, &trans->status);
+
                IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
                         hw_rfkill ? "disable radio" : "enable radio");
 
                isr_stats->rfkill++;
 
-               mutex_lock(&trans_pcie->mutex);
                iwl_trans_pcie_rf_kill(trans, hw_rfkill);
                mutex_unlock(&trans_pcie->mutex);
                if (hw_rfkill) {
-                       set_bit(STATUS_RFKILL, &trans->status);
                        if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
                                               &trans->status))
                                IWL_DEBUG_RF_KILL(trans,
index b10e3633df1a91ac67061502a332af0bef3c8014..7f05fc56587add6336fc4b453b2282e6ff8c4730 100644 (file)
@@ -805,7 +805,7 @@ static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
                (*first_ucode_section)++;
        }
 
-       for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+       for (i = *first_ucode_section; i < image->num_sec; i++) {
                last_read_idx = i;
 
                /*
@@ -868,19 +868,15 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
                                      int cpu,
                                      int *first_ucode_section)
 {
-       int shift_param;
        int i, ret = 0;
        u32 last_read_idx = 0;
 
-       if (cpu == 1) {
-               shift_param = 0;
+       if (cpu == 1)
                *first_ucode_section = 0;
-       } else {
-               shift_param = 16;
+       else
                (*first_ucode_section)++;
-       }
 
-       for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+       for (i = *first_ucode_section; i < image->num_sec; i++) {
                last_read_idx = i;
 
                /*
@@ -1066,6 +1062,137 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
                                               &first_ucode_section);
 }
 
+static bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans)
+{
+       bool hw_rfkill = iwl_is_rfkill_set(trans);
+
+       if (hw_rfkill)
+               set_bit(STATUS_RFKILL, &trans->status);
+       else
+               clear_bit(STATUS_RFKILL, &trans->status);
+
+       iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+
+       return hw_rfkill;
+}
+
+struct iwl_causes_list {
+       u32 cause_num;
+       u32 mask_reg;
+       u8 addr;
+};
+
+static struct iwl_causes_list causes_list[] = {
+       {MSIX_FH_INT_CAUSES_D2S_CH0_NUM,        CSR_MSIX_FH_INT_MASK_AD, 0},
+       {MSIX_FH_INT_CAUSES_D2S_CH1_NUM,        CSR_MSIX_FH_INT_MASK_AD, 0x1},
+       {MSIX_FH_INT_CAUSES_S2D,                CSR_MSIX_FH_INT_MASK_AD, 0x3},
+       {MSIX_FH_INT_CAUSES_FH_ERR,             CSR_MSIX_FH_INT_MASK_AD, 0x5},
+       {MSIX_HW_INT_CAUSES_REG_ALIVE,          CSR_MSIX_HW_INT_MASK_AD, 0x10},
+       {MSIX_HW_INT_CAUSES_REG_WAKEUP,         CSR_MSIX_HW_INT_MASK_AD, 0x11},
+       {MSIX_HW_INT_CAUSES_REG_CT_KILL,        CSR_MSIX_HW_INT_MASK_AD, 0x16},
+       {MSIX_HW_INT_CAUSES_REG_RF_KILL,        CSR_MSIX_HW_INT_MASK_AD, 0x17},
+       {MSIX_HW_INT_CAUSES_REG_PERIODIC,       CSR_MSIX_HW_INT_MASK_AD, 0x18},
+       {MSIX_HW_INT_CAUSES_REG_SW_ERR,         CSR_MSIX_HW_INT_MASK_AD, 0x29},
+       {MSIX_HW_INT_CAUSES_REG_SCD,            CSR_MSIX_HW_INT_MASK_AD, 0x2A},
+       {MSIX_HW_INT_CAUSES_REG_FH_TX,          CSR_MSIX_HW_INT_MASK_AD, 0x2B},
+       {MSIX_HW_INT_CAUSES_REG_HW_ERR,         CSR_MSIX_HW_INT_MASK_AD, 0x2D},
+       {MSIX_HW_INT_CAUSES_REG_HAP,            CSR_MSIX_HW_INT_MASK_AD, 0x2E},
+};
+
+static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie =  IWL_TRANS_GET_PCIE_TRANS(trans);
+       int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
+       int i;
+
+       /*
+        * Access all non RX causes and map them to the default irq.
+        * In case we are missing at least one interrupt vector,
+        * the first interrupt vector will serve non-RX and FBQ causes.
+        */
+       for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
+               iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
+               iwl_clear_bit(trans, causes_list[i].mask_reg,
+                             causes_list[i].cause_num);
+       }
+}
+
+static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       u32 offset =
+               trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+       u32 val, idx;
+
+       /*
+        * The first RX queue - the fallback queue, designated for
+        * management frames, command responses, etc. - is always mapped to the
+        * first interrupt vector. The other RX queues are mapped to
+        * the other (N - 2) interrupt vectors.
+        */
+       val = BIT(MSIX_FH_INT_CAUSES_Q(0));
+       for (idx = 1; idx < trans->num_rx_queues; idx++) {
+               iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
+                          MSIX_FH_INT_CAUSES_Q(idx - offset));
+               val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
+       }
+       iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
+
+       val = MSIX_FH_INT_CAUSES_Q(0);
+       if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
+               val |= MSIX_NON_AUTO_CLEAR_CAUSE;
+       iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
+
+       if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
+               iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
+}
+
+static void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
+{
+       struct iwl_trans *trans = trans_pcie->trans;
+
+       if (!trans_pcie->msix_enabled) {
+               if (trans->cfg->mq_rx_supported &&
+                   test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+                       iwl_write_prph(trans, UREG_CHICK,
+                                      UREG_CHICK_MSI_ENABLE);
+               return;
+       }
+       /*
+        * The IVAR table needs to be configured again after reset,
+        * but if the device is disabled, we can't write to
+        * prph.
+        */
+       if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+               iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
+
+       /*
+        * Each cause from the causes list above and the RX causes is
+        * represented as a byte in the IVAR table. The first nibble
+        * represents the bound interrupt vector of the cause, the second
+        * represents no auto clear for this cause. This will be set if its
+        * interrupt vector is bound to serve other causes.
+        */
+       iwl_pcie_map_rx_causes(trans);
+
+       iwl_pcie_map_non_rx_causes(trans);
+}
+
+static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
+{
+       struct iwl_trans *trans = trans_pcie->trans;
+
+       iwl_pcie_conf_msix_hw(trans_pcie);
+
+       if (!trans_pcie->msix_enabled)
+               return;
+
+       trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
+       trans_pcie->fh_mask = trans_pcie->fh_init_mask;
+       trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
+       trans_pcie->hw_mask = trans_pcie->hw_init_mask;
+}
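
The split between iwl_pcie_conf_msix_hw() and iwl_pcie_init_msix() matches the call sites visible in the hunks below; summarized from this diff:

/*
 * _iwl_trans_pcie_start_hw()    -> iwl_pcie_init_msix()
 *                                  (map causes + cache fh/hw init masks)
 * _iwl_trans_pcie_stop_device() -> iwl_pcie_conf_msix_hw()
 *                                  (re-map the IVAR table after SW reset)
 * iwl_trans_pcie_d3_resume()    -> iwl_pcie_conf_msix_hw()
 *                                  (re-map the IVAR table after wake)
 */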
+
 static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1118,6 +1245,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
        iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
        usleep_range(1000, 2000);
 
+       /*
+        * Upon stop, the IVAR table gets erased, so msi-x won't
+        * work. This causes a bug in RF-KILL flows, since the interrupt
+        * that enables radio won't fire on the correct irq, and the
+        * driver won't be able to handle the interrupt.
+        * Configure the IVAR table again after reset.
+        */
+       iwl_pcie_conf_msix_hw(trans_pcie);
+
        /*
         * Upon stop, the APM issues an interrupt if HW RF kill is set.
         * This is a bug in certain versions of the hardware.
@@ -1208,12 +1344,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
        mutex_lock(&trans_pcie->mutex);
 
        /* If platform's RF_KILL switch is NOT set to KILL */
-       hw_rfkill = iwl_is_rfkill_set(trans);
-       if (hw_rfkill)
-               set_bit(STATUS_RFKILL, &trans->status);
-       else
-               clear_bit(STATUS_RFKILL, &trans->status);
-       iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+       hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
        if (hw_rfkill && !run_in_rfkill) {
                ret = -ERFKILL;
                goto out;
@@ -1261,13 +1392,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
                ret = iwl_pcie_load_given_ucode(trans, fw);
 
        /* re-check RF-Kill state since we may have missed the interrupt */
-       hw_rfkill = iwl_is_rfkill_set(trans);
-       if (hw_rfkill)
-               set_bit(STATUS_RFKILL, &trans->status);
-       else
-               clear_bit(STATUS_RFKILL, &trans->status);
-
-       iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+       hw_rfkill = iwl_trans_check_hw_rf_kill(trans);
        if (hw_rfkill && !run_in_rfkill)
                ret = -ERFKILL;
 
@@ -1347,6 +1472,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
                                    enum iwl_d3_status *status,
                                    bool test,  bool reset)
 {
+       struct iwl_trans_pcie *trans_pcie =  IWL_TRANS_GET_PCIE_TRANS(trans);
        u32 val;
        int ret;
 
@@ -1359,11 +1485,15 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
        iwl_pcie_enable_rx_wake(trans, true);
 
        /*
-        * Also enables interrupts - none will happen as the device doesn't
-        * know we're waking it up, only when the opmode actually tells it
-        * after this call.
+        * Reconfigure the IVAR table in MSI-X mode, or reset the ICT table
+        * in MSI mode, since the HW reset erased it.
+        * Also enables interrupts - none will happen as
+        * the device doesn't know we're waking it up, only when
+        * the opmode actually tells it after this call.
         */
-       iwl_pcie_reset_ict(trans);
+       iwl_pcie_conf_msix_hw(trans_pcie);
+       if (!trans_pcie->msix_enabled)
+               iwl_pcie_reset_ict(trans);
        iwl_enable_interrupts(trans);
 
        iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1406,109 +1536,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
        return 0;
 }
 
-struct iwl_causes_list {
-       u32 cause_num;
-       u32 mask_reg;
-       u8 addr;
-};
-
-static struct iwl_causes_list causes_list[] = {
-       {MSIX_FH_INT_CAUSES_D2S_CH0_NUM,        CSR_MSIX_FH_INT_MASK_AD, 0},
-       {MSIX_FH_INT_CAUSES_D2S_CH1_NUM,        CSR_MSIX_FH_INT_MASK_AD, 0x1},
-       {MSIX_FH_INT_CAUSES_S2D,                CSR_MSIX_FH_INT_MASK_AD, 0x3},
-       {MSIX_FH_INT_CAUSES_FH_ERR,             CSR_MSIX_FH_INT_MASK_AD, 0x5},
-       {MSIX_HW_INT_CAUSES_REG_ALIVE,          CSR_MSIX_HW_INT_MASK_AD, 0x10},
-       {MSIX_HW_INT_CAUSES_REG_WAKEUP,         CSR_MSIX_HW_INT_MASK_AD, 0x11},
-       {MSIX_HW_INT_CAUSES_REG_CT_KILL,        CSR_MSIX_HW_INT_MASK_AD, 0x16},
-       {MSIX_HW_INT_CAUSES_REG_RF_KILL,        CSR_MSIX_HW_INT_MASK_AD, 0x17},
-       {MSIX_HW_INT_CAUSES_REG_PERIODIC,       CSR_MSIX_HW_INT_MASK_AD, 0x18},
-       {MSIX_HW_INT_CAUSES_REG_SW_ERR,         CSR_MSIX_HW_INT_MASK_AD, 0x29},
-       {MSIX_HW_INT_CAUSES_REG_SCD,            CSR_MSIX_HW_INT_MASK_AD, 0x2A},
-       {MSIX_HW_INT_CAUSES_REG_FH_TX,          CSR_MSIX_HW_INT_MASK_AD, 0x2B},
-       {MSIX_HW_INT_CAUSES_REG_HW_ERR,         CSR_MSIX_HW_INT_MASK_AD, 0x2D},
-       {MSIX_HW_INT_CAUSES_REG_HAP,            CSR_MSIX_HW_INT_MASK_AD, 0x2E},
-};
-
-static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
-{
-       struct iwl_trans_pcie *trans_pcie =  IWL_TRANS_GET_PCIE_TRANS(trans);
-       int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
-       int i;
-
-       /*
-        * Access all non RX causes and map them to the default irq.
-        * In case we are missing at least one interrupt vector,
-        * the first interrupt vector will serve non-RX and FBQ causes.
-        */
-       for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
-               iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
-               iwl_clear_bit(trans, causes_list[i].mask_reg,
-                             causes_list[i].cause_num);
-       }
-}
-
-static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
-{
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       u32 offset =
-               trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
-       u32 val, idx;
-
-       /*
-        * The first RX queue - fallback queue, which is designated for
-        * management frame, command responses etc, is always mapped to the
-        * first interrupt vector. The other RX queues are mapped to
-        * the other (N - 2) interrupt vectors.
-        */
-       val = BIT(MSIX_FH_INT_CAUSES_Q(0));
-       for (idx = 1; idx < trans->num_rx_queues; idx++) {
-               iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
-                          MSIX_FH_INT_CAUSES_Q(idx - offset));
-               val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
-       }
-       iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
-
-       val = MSIX_FH_INT_CAUSES_Q(0);
-       if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
-               val |= MSIX_NON_AUTO_CLEAR_CAUSE;
-       iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
-
-       if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
-               iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
-}
-
-static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
-{
-       struct iwl_trans *trans = trans_pcie->trans;
-
-       if (!trans_pcie->msix_enabled) {
-               if (trans->cfg->mq_rx_supported)
-                       iwl_write_prph(trans, UREG_CHICK,
-                                      UREG_CHICK_MSI_ENABLE);
-               return;
-       }
-
-       iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
-
-       /*
-        * Each cause from the causes list above and the RX causes is
-        * represented as a byte in the IVAR table. The first nibble
-        * represents the bound interrupt vector of the cause, the second
-        * represents no auto clear for this cause. This will be set if its
-        * interrupt vector is bound to serve other causes.
-        */
-       iwl_pcie_map_rx_causes(trans);
-
-       iwl_pcie_map_non_rx_causes(trans);
-
-       trans_pcie->fh_init_mask =
-               ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
-       trans_pcie->fh_mask = trans_pcie->fh_init_mask;
-       trans_pcie->hw_init_mask =
-               ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
-       trans_pcie->hw_mask = trans_pcie->hw_init_mask;
-}
-
 static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
                                        struct iwl_trans *trans)
 {
@@ -1659,7 +1686,6 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
 static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       bool hw_rfkill;
        int err;
 
        lockdep_assert_held(&trans_pcie->mutex);
@@ -1677,19 +1703,15 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
        iwl_pcie_apm_init(trans);
 
        iwl_pcie_init_msix(trans_pcie);
+
        /* From now on, the op_mode will be kept updated about RF kill state */
        iwl_enable_rfkill_int(trans);
 
        /* Set is_down to false here so that...*/
        trans_pcie->is_down = false;
 
-       hw_rfkill = iwl_is_rfkill_set(trans);
-       if (hw_rfkill)
-               set_bit(STATUS_RFKILL, &trans->status);
-       else
-               clear_bit(STATUS_RFKILL, &trans->status);
-       /* ... rfkill can call stop_device and set it false if needed */
-       iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+       /* ...rfkill can call stop_device and set it false if needed */
+       iwl_trans_check_hw_rf_kill(trans);
 
        /* Make sure we sync here, because we'll need full access later */
        if (low_power)
@@ -2960,16 +2982,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                                       PCIE_LINK_STATE_CLKPM);
        }
 
-       if (cfg->mq_rx_supported)
-               addr_size = 64;
-       else
-               addr_size = 36;
-
        if (cfg->use_tfh) {
+               addr_size = 64;
                trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
                trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
-
        } else {
+               addr_size = 36;
                trans_pcie->max_tbs = IWL_NUM_OF_TBS;
                trans_pcie->tfd_size = sizeof(struct iwl_tfd);
        }
index e44e5adc2b95871ed300df60ae71b7d98a1c9f4c..911cf98681074725b449f2b281379994269312f7 100644 (file)
@@ -2096,6 +2096,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                                   struct iwl_cmd_meta *out_meta,
                                   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
 {
+       struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
        struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
        struct ieee80211_hdr *hdr = (void *)skb->data;
        unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
@@ -2145,6 +2146,13 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
         */
        skb_pull(skb, hdr_len + iv_len);
 
+       /*
+        * Remove the length of all the headers that we don't actually
+        * have in the MPDU by themselves, but that we duplicate into
+        * all the different MSDUs inside the A-MSDU.
+        */
+       le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
+
        tso_start(skb, &tso);
 
        while (total_len) {
@@ -2155,7 +2163,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                unsigned int hdr_tb_len;
                dma_addr_t hdr_tb_phys;
                struct tcphdr *tcph;
-               u8 *iph;
+               u8 *iph, *subf_hdrs_start = hdr_page->pos;
 
                total_len -= data_left;
 
@@ -2216,6 +2224,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
                                       hdr_tb_len, false);
                trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
                                               hdr_tb_len);
+               /* add this subframe's headers' length to the tx_cmd */
+               le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
 
                /* prepare the start_hdr for the next subframe */
                start_hdr = hdr_page->pos;
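
A worked example of the new length bookkeeping (illustrative numbers only, not taken from the patch):

/*
 * Suppose tx_cmd->len starts at 3000, snap_ip_tcp_hdrlen is 48, and the
 * A-MSDU is built as two subframes whose prepended per-subframe headers
 * (hdr_page->pos - subf_hdrs_start) come to 62 bytes each:
 *
 *   len  = 3000 - 48 = 2952   (drop the one original SNAP/IP/TCP set)
 *   len += 62        = 3014   (subframe 1 headers)
 *   len += 62        = 3076   (subframe 2 headers)
 *
 * so the byte count fed to the scheduler matches what actually hits DMA.
 */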
@@ -2408,9 +2418,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                tb1_len = len;
        }
 
-       /* The first TB points to bi-directional DMA data */
-       memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
-              IWL_FIRST_TB_SIZE);
+       /*
+        * The first TB points to bi-directional DMA data; we'll
+        * memcpy the data into it later.
+        */
        iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
                               IWL_FIRST_TB_SIZE, true);
 
@@ -2434,6 +2445,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
                goto out_err;
        }
 
+       /* building the A-MSDU might have changed this data, so memcpy it now */
+       memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
+              IWL_FIRST_TB_SIZE);
+
        tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
        /* Set up entry for this TFD in Tx byte-count array */
        iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
index 9d96b7c928f7655e4a79e4b13717ce0a23357259..28cf9748900108959df907190dc34d9da3057ceb 100644 (file)
@@ -294,14 +294,6 @@ int orinoco_stop(struct net_device *dev)
 }
 EXPORT_SYMBOL(orinoco_stop);
 
-struct net_device_stats *orinoco_get_stats(struct net_device *dev)
-{
-       struct orinoco_private *priv = ndev_priv(dev);
-
-       return &priv->stats;
-}
-EXPORT_SYMBOL(orinoco_get_stats);
-
 void orinoco_set_multicast_list(struct net_device *dev)
 {
        struct orinoco_private *priv = ndev_priv(dev);
@@ -433,7 +425,7 @@ EXPORT_SYMBOL(orinoco_process_xmit_skb);
 static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct orinoco_private *priv = ndev_priv(dev);
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device_stats *stats = &dev->stats;
        struct hermes *hw = &priv->hw;
        int err = 0;
        u16 txfid = priv->txfid;
@@ -593,10 +585,7 @@ static void __orinoco_ev_alloc(struct net_device *dev, struct hermes *hw)
 
 static void __orinoco_ev_tx(struct net_device *dev, struct hermes *hw)
 {
-       struct orinoco_private *priv = ndev_priv(dev);
-       struct net_device_stats *stats = &priv->stats;
-
-       stats->tx_packets++;
+       dev->stats.tx_packets++;
 
        netif_wake_queue(dev);
 
@@ -605,8 +594,7 @@ static void __orinoco_ev_tx(struct net_device *dev, struct hermes *hw)
 
 static void __orinoco_ev_txexc(struct net_device *dev, struct hermes *hw)
 {
-       struct orinoco_private *priv = ndev_priv(dev);
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device_stats *stats = &dev->stats;
        u16 fid = hermes_read_regn(hw, TXCOMPLFID);
        u16 status;
        struct hermes_txexc_data hdr;
@@ -662,7 +650,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, struct hermes *hw)
 void orinoco_tx_timeout(struct net_device *dev)
 {
        struct orinoco_private *priv = ndev_priv(dev);
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device_stats *stats = &dev->stats;
        struct hermes *hw = &priv->hw;
 
        printk(KERN_WARNING "%s: Tx timeout! "
@@ -749,7 +737,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
        int len;
        struct sk_buff *skb;
        struct orinoco_private *priv = ndev_priv(dev);
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device_stats *stats = &dev->stats;
        struct hermes *hw = &priv->hw;
 
        len = le16_to_cpu(desc->data_len);
@@ -840,7 +828,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
 void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw)
 {
        struct orinoco_private *priv = ndev_priv(dev);
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device_stats *stats = &dev->stats;
        struct iw_statistics *wstats = &priv->wstats;
        struct sk_buff *skb = NULL;
        u16 rxfid, status;
@@ -959,7 +947,7 @@ static void orinoco_rx(struct net_device *dev,
                       struct sk_buff *skb)
 {
        struct orinoco_private *priv = ndev_priv(dev);
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device_stats *stats = &dev->stats;
        u16 status, fc;
        int length;
        struct ethhdr *hdr;
@@ -2137,7 +2125,6 @@ static const struct net_device_ops orinoco_netdev_ops = {
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_tx_timeout         = orinoco_tx_timeout,
-       .ndo_get_stats          = orinoco_get_stats,
 };
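
Dropping .ndo_get_stats here is safe because dev_get_stats() in the core falls back to the counters embedded in struct net_device when no callback is set; the resulting driver pattern, sketched:

/* no .ndo_get_stats / .ndo_get_stats64 callback needed: */
dev->stats.tx_packets++;        /* e.g. in the TX-complete handler */
dev->stats.rx_dropped++;        /* e.g. on an undecryptable RX frame */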
 
 /* Allocate private data.
index 5fa1c3e3713f835387353ba781cbb24ce95df6dc..430862a6a24bd97a7663e6c79175fa2b067f4c2b 100644 (file)
@@ -84,7 +84,6 @@ struct orinoco_private {
 
        /* Net device stuff */
        struct net_device *ndev;
-       struct net_device_stats stats;
        struct iw_statistics wstats;
 
        /* Hardware control variables */
@@ -206,7 +205,6 @@ int orinoco_process_xmit_skb(struct sk_buff *skb,
 /* Common ndo functions exported for reuse by orinoco_usb */
 int orinoco_open(struct net_device *dev);
 int orinoco_stop(struct net_device *dev);
-struct net_device_stats *orinoco_get_stats(struct net_device *dev);
 void orinoco_set_multicast_list(struct net_device *dev);
 int orinoco_change_mtu(struct net_device *dev, int new_mtu);
 void orinoco_tx_timeout(struct net_device *dev);
index bca6935a94db9a4bd767bfa0b7d2d7c82cd6b60c..98e1380b9917f1e4889b140784d270904eec418e 100644 (file)
@@ -403,8 +403,7 @@ static void ezusb_ctx_complete(struct request_context *ctx)
 
                if ((ctx->out_rid == EZUSB_RID_TX) && upriv->dev) {
                        struct net_device *dev = upriv->dev;
-                       struct orinoco_private *priv = ndev_priv(dev);
-                       struct net_device_stats *stats = &priv->stats;
+                       struct net_device_stats *stats = &dev->stats;
 
                        if (ctx->state != EZUSB_CTX_COMPLETE)
                                stats->tx_errors++;
@@ -1183,7 +1182,7 @@ static int ezusb_program(struct hermes *hw, const char *buf,
 static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct orinoco_private *priv = ndev_priv(dev);
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device_stats *stats = &dev->stats;
        struct ezusb_priv *upriv = priv->card;
        u8 mic[MICHAEL_MIC_LEN + 1];
        int err = 0;
@@ -1556,7 +1555,6 @@ static const struct net_device_ops ezusb_netdev_ops = {
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_tx_timeout         = orinoco_tx_timeout,
-       .ndo_get_stats          = orinoco_get_stats,
 };
 
 static int ezusb_probe(struct usb_interface *interface,
index 7ff2efadcecadc2001f9aad585be526635cdcaaa..3f97acb57e66f1d20fa73a13edbf78382ee3f38f 100644 (file)
@@ -2086,7 +2086,7 @@ static int lbs_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
  * Initialization
  */
 
-static struct cfg80211_ops lbs_cfg80211_ops = {
+static const struct cfg80211_ops lbs_cfg80211_ops = {
        .set_monitor_channel = lbs_cfg_set_monitor_channel,
        .libertas_set_mesh_channel = lbs_cfg_set_mesh_channel,
        .scan = lbs_cfg_scan,
index 301170cccfff7494b95dfa195ac03c3a11fb271e..033ff881c7518109cf0278552af223ea24307f79 100644 (file)
@@ -305,7 +305,7 @@ int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
        }
 
        lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
-       return 0;
+       return ret;
 }
 
 static int lbs_wait_for_ds_awake(struct lbs_private *priv)
index c47d6366875d06c5c0c3ceb2af29f31bd55e5158..a75013ac84d7ccdd147f68bc9b10cba667cfeb7f 100644 (file)
@@ -101,13 +101,6 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
 {
        struct txpd *local_tx_pd;
        struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
-       unsigned int pad;
-       int headroom = (priv->adapter->iface_type ==
-                       MWIFIEX_USB) ? 0 : INTF_HEADER_LEN;
-
-       pad = ((void *)skb->data - sizeof(*local_tx_pd) -
-               headroom - NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1);
-       skb_push(skb, pad);
 
        skb_push(skb, sizeof(*local_tx_pd));
 
@@ -121,12 +114,10 @@ mwifiex_11n_form_amsdu_txpd(struct mwifiex_private *priv,
        local_tx_pd->bss_num = priv->bss_num;
        local_tx_pd->bss_type = priv->bss_type;
        /* Always zero as the data is followed by struct txpd */
-       local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) +
-                                                pad);
+       local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
        local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU);
        local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
-                                                sizeof(*local_tx_pd) -
-                                                pad);
+                                                sizeof(*local_tx_pd));
 
        if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
                local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
@@ -190,7 +181,11 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
                                       ra_list_flags);
                return -1;
        }
-       skb_reserve(skb_aggr, MWIFIEX_MIN_DATA_HEADER_LEN);
+
+       /* skb_aggr->data is already 64-byte aligned; just reserve the bus
+        * interface header and txpd.
+        */
+       skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
        tx_info_aggr =  MWIFIEX_SKB_TXCB(skb_aggr);
 
        memset(tx_info_aggr, 0, sizeof(*tx_info_aggr));
index 145cc4b5103b80bc2a0b0f5c94987a2d1349b39d..1e3bd435a694534f4a450b32e8b0bd69702e5269 100644 (file)
@@ -2078,7 +2078,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
        ie_len = ie_buf[1] + sizeof(struct ieee_types_header);
 
        band = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
-       chan = __ieee80211_get_channel(priv->wdev.wiphy,
+       chan = ieee80211_get_channel(priv->wdev.wiphy,
                        ieee80211_channel_to_frequency(bss_info.bss_chan,
                                                       band));
 
index b9284b5332946baa5c23b1a5c71f5b2668851efe..ae2b69db59940b44e3cf7f6dae2075ce28c390ae 100644 (file)
@@ -114,7 +114,8 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
        if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
                p += sprintf(p, "multicast_count=\"%d\"\n",
                             netdev_mc_count(netdev));
-               p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid);
+               p += sprintf(p, "essid=\"%.*s\"\n", info.ssid.ssid_len,
+                            info.ssid.ssid);
                p += sprintf(p, "bssid=\"%pM\"\n", info.bssid);
                p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
                p += sprintf(p, "country_code = \"%s\"\n", info.country_code);
index bec300b9c2ea51bf478d6060b19b952360a36f31..188e4c3708363ea29915c478796c046d1ac9d15a 100644 (file)
@@ -27,7 +27,7 @@
 #include <linux/timer.h>
 #include <linux/ieee80211.h>
 #include <uapi/linux/if_arp.h>
-#include <net/mac80211.h>
+#include <net/cfg80211.h>
 
 #define MWIFIEX_BSS_COEX_COUNT      2
 #define MWIFIEX_MAX_BSS_NUM         (3)
index ea455948a68aaddc69a96a9454b697e66605cb12..cb6a1a81d44e213c7315c4e64a7e2051b4919a62 100644 (file)
@@ -434,14 +434,14 @@ enum mwifiex_channel_flags {
 #define HostCmd_ACT_BITWISE_SET               0x0002
 #define HostCmd_ACT_BITWISE_CLR               0x0003
 #define HostCmd_RESULT_OK                     0x0000
-
-#define HostCmd_ACT_MAC_RX_ON                 0x0001
-#define HostCmd_ACT_MAC_TX_ON                 0x0002
-#define HostCmd_ACT_MAC_WEP_ENABLE            0x0008
-#define HostCmd_ACT_MAC_ETHERNETII_ENABLE     0x0010
-#define HostCmd_ACT_MAC_PROMISCUOUS_ENABLE    0x0080
-#define HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE  0x0100
-#define HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON     0x2000
+#define HostCmd_ACT_MAC_RX_ON                 BIT(0)
+#define HostCmd_ACT_MAC_TX_ON                 BIT(1)
+#define HostCmd_ACT_MAC_WEP_ENABLE            BIT(3)
+#define HostCmd_ACT_MAC_ETHERNETII_ENABLE     BIT(4)
+#define HostCmd_ACT_MAC_PROMISCUOUS_ENABLE    BIT(7)
+#define HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE  BIT(8)
+#define HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON BIT(13)
+#define HostCmd_ACT_MAC_DYNAMIC_BW_ENABLE     BIT(16)
 
 #define HostCmd_BSS_MODE_IBSS               0x0002
 #define HostCmd_BSS_MODE_ANY                0x0003
@@ -550,6 +550,7 @@ enum mwifiex_channel_flags {
 #define EVENT_TX_DATA_PAUSE             0x00000055
 #define EVENT_EXT_SCAN_REPORT           0x00000058
 #define EVENT_RXBA_SYNC                 0x00000059
+#define EVENT_UNKNOWN_DEBUG             0x00000063
 #define EVENT_BG_SCAN_STOPPED           0x00000065
 #define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
 #define EVENT_MULTI_CHAN_INFO           0x0000006a
@@ -1084,8 +1085,7 @@ struct host_cmd_ds_802_11_mac_address {
 };
 
 struct host_cmd_ds_mac_control {
-       __le16 action;
-       __le16 reserved;
+       __le32 action;
 };
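
The field widening follows from the new flag: HostCmd_ACT_MAC_DYNAMIC_BW_ENABLE is BIT(16), which no longer fits the old 16-bit action, so the __le16 action + __le16 reserved pair collapses into one __le32. A sketch of filling the widened command (assumed usage, not shown in this diff):

/* curr_pkt_filter is host-order; the command field is little-endian */
cmd->action = cpu_to_le32(priv->curr_pkt_filter);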
 
 struct host_cmd_ds_mac_multicast_adr {
index b36cb3fef35881112e7ff1246f9fb085e393bdb1..756948385b60166cc02f9cd66aeb0584513e67ee 100644 (file)
@@ -92,7 +92,8 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
        for (i = 0; i < ARRAY_SIZE(priv->wep_key); i++)
                memset(&priv->wep_key[i], 0, sizeof(struct mwifiex_wep_key));
        priv->wep_key_curr_index = 0;
-       priv->curr_pkt_filter = HostCmd_ACT_MAC_RX_ON | HostCmd_ACT_MAC_TX_ON |
+       priv->curr_pkt_filter = HostCmd_ACT_MAC_DYNAMIC_BW_ENABLE |
+                               HostCmd_ACT_MAC_RX_ON | HostCmd_ACT_MAC_TX_ON |
                                HostCmd_ACT_MAC_ETHERNETII_ENABLE;
 
        priv->beacon_period = 100; /* beacon interval */
@@ -408,8 +409,6 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
 static void
 mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
 {
-       int idx;
-
        if (!adapter) {
                pr_err("%s: adapter is NULL\n", __func__);
                return;
@@ -427,23 +426,6 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
        mwifiex_dbg(adapter, INFO, "info: free cmd buffer\n");
        mwifiex_free_cmd_buffer(adapter);
 
-       for (idx = 0; idx < adapter->num_mem_types; idx++) {
-               struct memory_type_mapping *entry =
-                               &adapter->mem_type_mapping_tbl[idx];
-
-               if (entry->mem_ptr) {
-                       vfree(entry->mem_ptr);
-                       entry->mem_ptr = NULL;
-               }
-               entry->mem_size = 0;
-       }
-
-       if (adapter->drv_info_dump) {
-               vfree(adapter->drv_info_dump);
-               adapter->drv_info_dump = NULL;
-               adapter->drv_info_size = 0;
-       }
-
        if (adapter->sleep_cfm)
                dev_kfree_skb_any(adapter->sleep_cfm);
 }
@@ -656,10 +638,9 @@ void mwifiex_free_priv(struct mwifiex_private *priv)
  *      - Free the adapter
  *      - Notify completion
  */
-int
+void
 mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
 {
-       int ret = -EINPROGRESS;
        struct mwifiex_private *priv;
        s32 i;
        unsigned long flags;
@@ -667,15 +648,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
 
        /* mwifiex already shutdown */
        if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY)
-               return 0;
-
-       adapter->hw_status = MWIFIEX_HW_STATUS_CLOSING;
-       /* wait for mwifiex_process to complete */
-       if (adapter->mwifiex_processing) {
-               mwifiex_dbg(adapter, WARN,
-                           "main process is still running\n");
-               return ret;
-       }
+               return;
 
        /* cancel current command */
        if (adapter->curr_cmd) {
@@ -726,11 +699,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
        mwifiex_adapter_cleanup(adapter);
 
        spin_unlock(&adapter->mwifiex_lock);
-
-       /* Notify completion */
-       ret = mwifiex_shutdown_fw_complete(adapter);
-
-       return ret;
+       adapter->hw_status = MWIFIEX_HW_STATUS_NOT_READY;
 }
 
 /*
index e5c3a8aa3929ac8e53f228f0e26671e872ac1cff..5ebca1d0cfc750969793c26ac5f37e858e897fc4 100644 (file)
@@ -248,15 +248,14 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
        if (adapter->mwifiex_processing || adapter->main_locked) {
                adapter->more_task_flag = true;
                spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
-               goto exit_main_proc;
+               return 0;
        } else {
                adapter->mwifiex_processing = true;
                spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
        }
 process_start:
        do {
-               if ((adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING) ||
-                   (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY))
+               if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY)
                        break;
 
                /* For non-USB interfaces, If we process interrupts first, it
@@ -464,9 +463,6 @@ process_start:
        adapter->mwifiex_processing = false;
        spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
 
-exit_main_proc:
-       if (adapter->hw_status == MWIFIEX_HW_STATUS_CLOSING)
-               mwifiex_shutdown_drv(adapter);
        return ret;
 }
 EXPORT_SYMBOL_GPL(mwifiex_main_process);
@@ -645,16 +641,14 @@ err_dnld_fw:
        if (adapter->if_ops.unregister_dev)
                adapter->if_ops.unregister_dev(adapter);
 
+       adapter->surprise_removed = true;
+       mwifiex_terminate_workqueue(adapter);
+
        if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
                pr_debug("info: %s: shutdown mwifiex\n", __func__);
-               adapter->init_wait_q_woken = false;
-
-               if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
-                       wait_event_interruptible(adapter->init_wait_q,
-                                                adapter->init_wait_q_woken);
+               mwifiex_shutdown_drv(adapter);
        }
-       adapter->surprise_removed = true;
-       mwifiex_terminate_workqueue(adapter);
+
        init_failed = true;
 done:
        if (adapter->cal_data) {
@@ -1032,7 +1026,7 @@ void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter)
 }
 EXPORT_SYMBOL_GPL(mwifiex_multi_chan_resync);
 
-void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
+int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
 {
        void *p;
        char drv_version[64];
@@ -1042,21 +1036,17 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
        int i, idx;
        struct netdev_queue *txq;
        struct mwifiex_debug_info *debug_info;
-
-       if (adapter->drv_info_dump) {
-               vfree(adapter->drv_info_dump);
-               adapter->drv_info_dump = NULL;
-               adapter->drv_info_size = 0;
-       }
+       void *drv_info_dump;
 
        mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n");
 
-       adapter->drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
+       /* Memory allocated here must be freed in mwifiex_upload_device_dump() */
+       drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
 
-       if (!adapter->drv_info_dump)
-               return;
+       if (!drv_info_dump)
+               return 0;
 
-       p = (char *)(adapter->drv_info_dump);
+       p = (char *)(drv_info_dump);
        p += sprintf(p, "driver_name = " "\"mwifiex\"\n");
 
        mwifiex_drv_get_driver_version(adapter, drv_version,
@@ -1140,18 +1130,20 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
                kfree(debug_info);
        }
 
-       adapter->drv_info_size = p - adapter->drv_info_dump;
        mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n");
+       *drv_info = drv_info_dump;
+       return p - drv_info_dump;
 }
 EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump);
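
mwifiex_drv_info_dump() no longer stashes the buffer in the adapter; it returns the dump length and hands the vzalloc()ed buffer back through an out-parameter, making ownership explicit. The interface-specific *_device_dump_work() helpers later in this patch follow exactly this shape; a sketch of the expected calling pattern (note that on allocation failure the function returns 0 without touching *drv_info, so initializing the pointer to NULL is a prudent caller-side precaution):

    void *drv_info = NULL;
    int drv_info_size;

    drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info);
    /* ... collect the firmware dump ... */
    /* consumes drv_info and vfree()s it when done */
    mwifiex_upload_device_dump(adapter, drv_info, drv_info_size);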
 
-void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter)
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
+                               int drv_info_size)
 {
        u8 idx, *dump_data, *fw_dump_ptr;
        u32 dump_len;
 
        dump_len = (strlen("========Start dump driverinfo========\n") +
-                      adapter->drv_info_size +
+                      drv_info_size +
                       strlen("\n========End dump========\n"));
 
        for (idx = 0; idx < adapter->num_mem_types; idx++) {
@@ -1181,8 +1173,8 @@ void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter)
 
        strcpy(fw_dump_ptr, "========Start dump driverinfo========\n");
        fw_dump_ptr += strlen("========Start dump driverinfo========\n");
-       memcpy(fw_dump_ptr, adapter->drv_info_dump, adapter->drv_info_size);
-       fw_dump_ptr += adapter->drv_info_size;
+       memcpy(fw_dump_ptr, drv_info, drv_info_size);
+       fw_dump_ptr += drv_info_size;
        strcpy(fw_dump_ptr, "\n========End dump========\n");
        fw_dump_ptr += strlen("\n========End dump========\n");
 
@@ -1220,18 +1212,12 @@ done:
                struct memory_type_mapping *entry =
                        &adapter->mem_type_mapping_tbl[idx];
 
-               if (entry->mem_ptr) {
-                       vfree(entry->mem_ptr);
-                       entry->mem_ptr = NULL;
-               }
+               vfree(entry->mem_ptr);
+               entry->mem_ptr = NULL;
                entry->mem_size = 0;
        }
 
-       if (adapter->drv_info_dump) {
-               vfree(adapter->drv_info_dump);
-               adapter->drv_info_dump = NULL;
-               adapter->drv_info_size = 0;
-       }
+       vfree(drv_info);
 }
 EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump);
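
The if (entry->mem_ptr) guards can be dropped because vfree(), like kfree(), is defined to be a no-op on a NULL pointer; the unconditional form is the idiomatic kernel pattern:

    vfree(entry->mem_ptr);  /* safe even when mem_ptr is NULL */
    entry->mem_ptr = NULL;  /* avoid a double-free on a later pass */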
 
@@ -1362,7 +1348,7 @@ static void mwifiex_main_work_queue(struct work_struct *work)
  * This function gets called during PCIe function level reset. Required
  * code is extracted from mwifiex_remove_card()
  */
-static int
+int
 mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
 {
        struct mwifiex_private *priv;
@@ -1399,11 +1385,8 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
        }
 
        mwifiex_dbg(adapter, CMD, "cmd: calling mwifiex_shutdown_drv...\n");
-       adapter->init_wait_q_woken = false;
 
-       if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
-               wait_event_interruptible(adapter->init_wait_q,
-                                        adapter->init_wait_q_woken);
+       mwifiex_shutdown_drv(adapter);
        if (adapter->if_ops.down_dev)
                adapter->if_ops.down_dev(adapter);
 
@@ -1434,24 +1417,18 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
 exit_return:
        return 0;
 }
+EXPORT_SYMBOL_GPL(mwifiex_shutdown_sw);
 
 /* This function gets called during PCIe function level reset. Required
  * code is extracted from mwifiex_add_card()
  */
-static int
-mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct completion *fw_done,
-                 struct mwifiex_if_ops *if_ops, u8 iface_type)
+int
+mwifiex_reinit_sw(struct mwifiex_adapter *adapter)
 {
-       char fw_name[32];
-       struct pcie_service_card *card = adapter->card;
-
        mwifiex_init_lock_list(adapter);
        if (adapter->if_ops.up_dev)
                adapter->if_ops.up_dev(adapter);
 
-       adapter->iface_type = iface_type;
-       adapter->fw_done = fw_done;
-
        adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
        adapter->surprise_removed = false;
        init_waitqueue_head(&adapter->init_wait_q);
@@ -1488,18 +1465,12 @@ mwifiex_reinit_sw(struct mwifiex_adapter *adapter, struct completion *fw_done,
         * mwifiex_register_dev()
         */
        mwifiex_dbg(adapter, INFO, "%s, mwifiex_init_hw_fw()...\n", __func__);
-       strcpy(fw_name, adapter->fw_name);
-       strcpy(adapter->fw_name, PCIE8997_DEFAULT_WIFIFW_NAME);
 
-       adapter->tx_buf_size = card->pcie.tx_buf_size;
-       adapter->ext_scan = card->pcie.can_ext_scan;
        if (mwifiex_init_hw_fw(adapter, false)) {
-               strcpy(adapter->fw_name, fw_name);
                mwifiex_dbg(adapter, ERROR,
                            "%s: firmware init failed\n", __func__);
                goto err_init_fw;
        }
-       strcpy(adapter->fw_name, fw_name);
        mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
 
        complete_all(adapter->fw_done);
@@ -1509,43 +1480,22 @@ err_init_fw:
        mwifiex_dbg(adapter, ERROR, "info: %s: unregister device\n", __func__);
        if (adapter->if_ops.unregister_dev)
                adapter->if_ops.unregister_dev(adapter);
+
+err_kmalloc:
+       adapter->surprise_removed = true;
+       mwifiex_terminate_workqueue(adapter);
        if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
                mwifiex_dbg(adapter, ERROR,
                            "info: %s: shutdown mwifiex\n", __func__);
-               adapter->init_wait_q_woken = false;
-
-               if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
-                       wait_event_interruptible(adapter->init_wait_q,
-                                                adapter->init_wait_q_woken);
+               mwifiex_shutdown_drv(adapter);
        }
 
-err_kmalloc:
-       mwifiex_terminate_workqueue(adapter);
-       adapter->surprise_removed = true;
        complete_all(adapter->fw_done);
        mwifiex_dbg(adapter, INFO, "%s, error\n", __func__);
 
        return -1;
 }
-
-/* This function processes pre and post PCIe function level resets.
- * It performs software cleanup without touching PCIe specific code.
- * Also, during initialization PCIe stuff is skipped.
- */
-void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare)
-{
-       struct mwifiex_if_ops if_ops;
-
-       if (!prepare) {
-               mwifiex_reinit_sw(adapter, adapter->fw_done, &if_ops,
-                                 adapter->iface_type);
-       } else {
-               memcpy(&if_ops, &adapter->if_ops,
-                      sizeof(struct mwifiex_if_ops));
-               mwifiex_shutdown_sw(adapter);
-       }
-}
-EXPORT_SYMBOL_GPL(mwifiex_do_flr);
+EXPORT_SYMBOL_GPL(mwifiex_reinit_sw);
 
 static irqreturn_t mwifiex_irq_wakeup_handler(int irq, void *priv)
 {
@@ -1569,13 +1519,13 @@ static void mwifiex_probe_of(struct mwifiex_adapter *adapter)
        struct device *dev = adapter->dev;
 
        if (!dev->of_node)
-               return;
+               goto err_exit;
 
        adapter->dt_node = dev->of_node;
        adapter->irq_wakeup = irq_of_parse_and_map(adapter->dt_node, 0);
        if (!adapter->irq_wakeup) {
-               dev_info(dev, "fail to parse irq_wakeup from device tree\n");
-               return;
+               dev_dbg(dev, "fail to parse irq_wakeup from device tree\n");
+               goto err_exit;
        }
 
        ret = devm_request_irq(dev, adapter->irq_wakeup,
@@ -1595,7 +1545,7 @@ static void mwifiex_probe_of(struct mwifiex_adapter *adapter)
        return;
 
 err_exit:
-       adapter->irq_wakeup = 0;
+       adapter->irq_wakeup = -1;
 }
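
Using -1 rather than 0 as the "no wakeup IRQ" sentinel lets consumers distinguish an unconfigured IRQ from a (theoretically valid) IRQ number 0. A hedged sketch of the assumed consumer-side check, which is not shown in this hunk:

    /* only touch the wakeup IRQ if one was actually mapped */
    if (adapter->irq_wakeup >= 0)
            disable_irq(adapter->irq_wakeup);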
 
 /*
@@ -1681,17 +1631,13 @@ err_init_fw:
        pr_debug("info: %s: unregister device\n", __func__);
        if (adapter->if_ops.unregister_dev)
                adapter->if_ops.unregister_dev(adapter);
-       if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
-               pr_debug("info: %s: shutdown mwifiex\n", __func__);
-               adapter->init_wait_q_woken = false;
-
-               if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
-                       wait_event_interruptible(adapter->init_wait_q,
-                                                adapter->init_wait_q_woken);
-       }
 err_registerdev:
        adapter->surprise_removed = true;
        mwifiex_terminate_workqueue(adapter);
+       if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) {
+               pr_debug("info: %s: shutdown mwifiex\n", __func__);
+               mwifiex_shutdown_drv(adapter);
+       }
 err_kmalloc:
        mwifiex_free_adapter(adapter);
 
@@ -1741,11 +1687,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter)
 
        mwifiex_dbg(adapter, CMD,
                    "cmd: calling mwifiex_shutdown_drv...\n");
-       adapter->init_wait_q_woken = false;
 
-       if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
-               wait_event_interruptible(adapter->init_wait_q,
-                                        adapter->init_wait_q_woken);
+       mwifiex_shutdown_drv(adapter);
        mwifiex_dbg(adapter, CMD,
                    "cmd: mwifiex_shutdown_drv done\n");
        if (atomic_read(&adapter->rx_pending) ||
index 5c9bd944b6ea9a6c476aed143f306d4e17744c86..5c8297207f339559461f43cee1dc9b0585a23042 100644 (file)
@@ -248,7 +248,6 @@ enum MWIFIEX_HARDWARE_STATUS {
        MWIFIEX_HW_STATUS_INITIALIZING,
        MWIFIEX_HW_STATUS_INIT_DONE,
        MWIFIEX_HW_STATUS_RESET,
-       MWIFIEX_HW_STATUS_CLOSING,
        MWIFIEX_HW_STATUS_NOT_READY
 };
 
@@ -530,7 +529,7 @@ struct mwifiex_private {
        u8 tx_timeout_cnt;
        struct net_device *netdev;
        struct net_device_stats stats;
-       u16 curr_pkt_filter;
+       u32 curr_pkt_filter;
        u32 bss_mode;
        u32 pkt_tx_ctrl;
        u16 tx_power_level;
@@ -995,8 +994,6 @@ struct mwifiex_adapter {
        u8 key_api_major_ver, key_api_minor_ver;
        struct memory_type_mapping *mem_type_mapping_tbl;
        u8 num_mem_types;
-       void *drv_info_dump;
-       u32 drv_info_size;
        bool scan_chan_gap_enabled;
        struct sk_buff_head rx_data_q;
        bool mfg_mode;
@@ -1041,9 +1038,7 @@ int mwifiex_init_fw(struct mwifiex_adapter *adapter);
 
 int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
 
-int mwifiex_shutdown_drv(struct mwifiex_adapter *adapter);
-
-int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter);
+void mwifiex_shutdown_drv(struct mwifiex_adapter *adapter);
 
 int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
 
@@ -1644,8 +1639,9 @@ void mwifiex_hist_data_add(struct mwifiex_private *priv,
 u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
                            u8 rx_rate, u8 ht_info);
 
-void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter);
-void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter);
+int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info);
+void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
+                               int drv_info_size);
 void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
 void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
 int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action,
@@ -1670,5 +1666,6 @@ void mwifiex_debugfs_remove(void);
 void mwifiex_dev_debugfs_init(struct mwifiex_private *priv);
 void mwifiex_dev_debugfs_remove(struct mwifiex_private *priv);
 #endif
-void mwifiex_do_flr(struct mwifiex_adapter *adapter, bool prepare);
+int mwifiex_reinit_sw(struct mwifiex_adapter *adapter);
+int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter);
 #endif /* !_MWIFIEX_MAIN_H_ */
index 4db07da81d8daaa862a64f660d710cfd323c3fd0..a0d918094889df6cd9de14046b773d6112b2006b 100644 (file)
@@ -31,8 +31,6 @@
 #define PCIE_VERSION   "1.0"
 #define DRV_NAME        "Marvell mwifiex PCIe"
 
-static u8 user_rmmod;
-
 static struct mwifiex_if_ops pcie_ops;
 
 static const struct of_device_id mwifiex_pcie_of_match_table[] = {
@@ -51,6 +49,8 @@ static int mwifiex_pcie_probe_of(struct device *dev)
        return 0;
 }
 
+static void mwifiex_pcie_work(struct work_struct *work);
+
 static int
 mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
                       size_t size, int flags)
@@ -78,6 +78,42 @@ static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
        pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
 }
 
+/*
+ * This function writes data into PCIE card register.
+ */
+static int mwifiex_write_reg(struct mwifiex_adapter *adapter, int reg, u32 data)
+{
+       struct pcie_service_card *card = adapter->card;
+
+       iowrite32(data, card->pci_mmap1 + reg);
+
+       return 0;
+}
+
+/* This function reads data from PCIE card register.
+ */
+static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data)
+{
+       struct pcie_service_card *card = adapter->card;
+
+       *data = ioread32(card->pci_mmap1 + reg);
+       if (*data == 0xffffffff)
+               return 0xffffffff;
+
+       return 0;
+}
+
+/* This function reads u8 data from PCIE card register. */
+static int mwifiex_read_reg_byte(struct mwifiex_adapter *adapter,
+                                int reg, u8 *data)
+{
+       struct pcie_service_card *card = adapter->card;
+
+       *data = ioread8(card->pci_mmap1 + reg);
+
+       return 0;
+}
+
 /*
  * This function reads sleep cookie and checks if FW is ready
  */
@@ -219,6 +255,7 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
                card->pcie.mem_type_mapping_tbl = data->mem_type_mapping_tbl;
                card->pcie.num_mem_types = data->num_mem_types;
                card->pcie.can_ext_scan = data->can_ext_scan;
+               INIT_WORK(&card->work, mwifiex_pcie_work);
        }
 
        /* Device tree node parsing and platform-specific configuration */
@@ -245,6 +282,9 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
        struct pcie_service_card *card;
        struct mwifiex_adapter *adapter;
        struct mwifiex_private *priv;
+       const struct mwifiex_pcie_card_reg *reg;
+       u32 fw_status;
+       int ret;
 
        card = pci_get_drvdata(pdev);
 
@@ -254,7 +294,15 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
        if (!adapter || !adapter->priv_num)
                return;
 
-       if (user_rmmod && !adapter->mfg_mode) {
+       cancel_work_sync(&card->work);
+
+       reg = card->pcie.reg;
+       if (reg)
+               ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
+       else
+               fw_status = -1;
+
+       if (fw_status == FIRMWARE_READY_PCIE && !adapter->mfg_mode) {
                mwifiex_deauthenticate_all(adapter);
 
                priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
@@ -269,7 +317,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
 
 static void mwifiex_pcie_shutdown(struct pci_dev *pdev)
 {
-       user_rmmod = 1;
        mwifiex_pcie_remove(pdev);
 
        return;
@@ -330,7 +377,7 @@ static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
                 * Cleanup all software without cleaning anything related to
                 * PCIe and HW.
                 */
-               mwifiex_do_flr(adapter, prepare);
+               mwifiex_shutdown_sw(adapter);
                adapter->surprise_removed = true;
        } else {
                /* Kernel stores and restores PCIe function context before and
@@ -338,7 +385,7 @@ static void mwifiex_pcie_reset_notify(struct pci_dev *pdev, bool prepare)
                 * and firmware including firmware redownload
                 */
                adapter->surprise_removed = false;
-               mwifiex_do_flr(adapter, prepare);
+               mwifiex_reinit_sw(adapter);
        }
        mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__);
 }
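
With mwifiex_do_flr() removed, the reset notifier calls the split-out helpers directly, making the prepare/complete pairing of the function-level reset explicit. In outline:

    /* PCIe function-level reset, as wired up above (sketch) */
    if (prepare) {
            mwifiex_shutdown_sw(adapter);   /* software teardown, HW untouched */
            adapter->surprise_removed = true;
    } else {
            adapter->surprise_removed = false;
            mwifiex_reinit_sw(adapter);     /* re-init, firmware redownload */
    }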
@@ -368,43 +415,6 @@ static struct pci_driver __refdata mwifiex_pcie = {
        .err_handler = mwifiex_pcie_err_handler,
 };
 
-/*
- * This function writes data into PCIE card register.
- */
-static int mwifiex_write_reg(struct mwifiex_adapter *adapter, int reg, u32 data)
-{
-       struct pcie_service_card *card = adapter->card;
-
-       iowrite32(data, card->pci_mmap1 + reg);
-
-       return 0;
-}
-
-/*
- * This function reads data from PCIE card register.
- */
-static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data)
-{
-       struct pcie_service_card *card = adapter->card;
-
-       *data = ioread32(card->pci_mmap1 + reg);
-       if (*data == 0xffffffff)
-               return 0xffffffff;
-
-       return 0;
-}
-
-/* This function reads u8 data from PCIE card register. */
-static int mwifiex_read_reg_byte(struct mwifiex_adapter *adapter,
-                                int reg, u8 *data)
-{
-       struct pcie_service_card *card = adapter->card;
-
-       *data = ioread8(card->pci_mmap1 + reg);
-
-       return 0;
-}
-
 /*
  * This function adds a delay loop to ensure the FW is awake before proceeding.
  */
@@ -429,16 +439,25 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
        struct pcie_service_card *card = adapter->card;
        u8 *buffer;
        u32 sleep_cookie, count;
+       struct sk_buff *cmdrsp = card->cmdrsp_buf;
 
        for (count = 0; count < max_delay_loop_cnt; count++) {
-               buffer = card->cmdrsp_buf->data - INTF_HEADER_LEN;
-               sleep_cookie = *(u32 *)buffer;
+               pci_dma_sync_single_for_cpu(card->dev,
+                                           MWIFIEX_SKB_DMA_ADDR(cmdrsp),
+                                           sizeof(sleep_cookie),
+                                           PCI_DMA_FROMDEVICE);
+               buffer = cmdrsp->data;
+               sleep_cookie = READ_ONCE(*(u32 *)buffer);
 
                if (sleep_cookie == MWIFIEX_DEF_SLEEP_COOKIE) {
                        mwifiex_dbg(adapter, INFO,
                                    "sleep cookie found at count %d\n", count);
                        break;
                }
+               pci_dma_sync_single_for_device(card->dev,
+                                              MWIFIEX_SKB_DMA_ADDR(cmdrsp),
+                                              sizeof(sleep_cookie),
+                                              PCI_DMA_FROMDEVICE);
                usleep_range(20, 30);
        }
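
The sleep-cookie poll now brackets each read with the streaming-DMA sync calls. Because the command-response buffer stays DMA-mapped while the device may still be writing to it, the CPU must claim the mapping before looking at the cookie and hand it back afterwards. The general pattern, assuming a streaming PCI_DMA_FROMDEVICE mapping:

    /* peek at a device-written word in a live streaming mapping (sketch) */
    pci_dma_sync_single_for_cpu(pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
    val = READ_ONCE(*(u32 *)buf);   /* CPU now sees the device's writes */
    pci_dma_sync_single_for_device(pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
                                    /* hand the buffer back to the device */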
 
@@ -450,7 +469,6 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
 /* This function wakes up the card by reading fw_status register. */
 static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
 {
-       u32 fw_status;
        struct pcie_service_card *card = adapter->card;
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
@@ -460,10 +478,10 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
        if (reg->sleep_cookie)
                mwifiex_pcie_dev_wakeup_delay(adapter);
 
-       /* Reading fw_status register will wakeup device */
-       if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) {
+       /* Accessing fw_status register will wakeup device */
+       if (mwifiex_write_reg(adapter, reg->fw_status, FIRMWARE_READY_PCIE)) {
                mwifiex_dbg(adapter, ERROR,
-                           "Reading fw_status register failed\n");
+                           "Writing fw_status register failed\n");
                return -1;
        }
 
@@ -1681,7 +1699,13 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
        mwifiex_dbg(adapter, CMD,
                    "info: Rx CMD Response\n");
 
-       mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
+       if (adapter->curr_cmd)
+               mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
+       else
+               pci_dma_sync_single_for_cpu(card->dev,
+                                           MWIFIEX_SKB_DMA_ADDR(skb),
+                                           MWIFIEX_UPLD_SIZE,
+                                           PCI_DMA_FROMDEVICE);
 
        /* Unmap the command as a response has been received. */
        if (card->cmd_buf) {
@@ -1694,10 +1718,13 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
        rx_len = le16_to_cpu(pkt_len);
        skb_put(skb, MWIFIEX_UPLD_SIZE - skb->len);
        skb_trim(skb, rx_len);
-       skb_pull(skb, INTF_HEADER_LEN);
 
        if (!adapter->curr_cmd) {
                if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
+                       pci_dma_sync_single_for_device(card->dev,
+                                               MWIFIEX_SKB_DMA_ADDR(skb),
+                                               MWIFIEX_SLEEP_COOKIE_SIZE,
+                                               PCI_DMA_FROMDEVICE);
                        if (mwifiex_write_reg(adapter,
                                              PCIE_CPU_INT_EVENT,
                                              CPU_INTR_SLEEP_CFM_DONE)) {
@@ -1707,6 +1734,9 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
                        }
                        mwifiex_delay_for_sleep_cookie(adapter,
                                                       MWIFIEX_MAX_DELAY_COUNT);
+                       mwifiex_unmap_pci_memory(adapter, skb,
+                                                PCI_DMA_FROMDEVICE);
+                       skb_pull(skb, INTF_HEADER_LEN);
                        while (reg->sleep_cookie && (count++ < 10) &&
                               mwifiex_pcie_ok_to_access_hw(adapter))
                                usleep_range(50, 60);
@@ -1724,6 +1754,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
                                           PCI_DMA_FROMDEVICE))
                        return -1;
        } else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
+               skb_pull(skb, INTF_HEADER_LEN);
                adapter->curr_cmd->resp_skb = skb;
                adapter->cmd_resp_received = true;
                /* Take the pointer and set it to CMD node and will
@@ -2325,79 +2356,41 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter)
                        }
                }
        }
-       while (pcie_ireg & HOST_INTR_MASK) {
-               if (pcie_ireg & HOST_INTR_DNLD_DONE) {
-                       pcie_ireg &= ~HOST_INTR_DNLD_DONE;
-                       mwifiex_dbg(adapter, INTR,
-                                   "info: TX DNLD Done\n");
-                       ret = mwifiex_pcie_send_data_complete(adapter);
-                       if (ret)
-                               return ret;
-               }
-               if (pcie_ireg & HOST_INTR_UPLD_RDY) {
-                       pcie_ireg &= ~HOST_INTR_UPLD_RDY;
-                       mwifiex_dbg(adapter, INTR,
-                                   "info: Rx DATA\n");
-                       ret = mwifiex_pcie_process_recv_data(adapter);
-                       if (ret)
-                               return ret;
-               }
-               if (pcie_ireg & HOST_INTR_EVENT_RDY) {
-                       pcie_ireg &= ~HOST_INTR_EVENT_RDY;
-                       mwifiex_dbg(adapter, INTR,
-                                   "info: Rx EVENT\n");
-                       ret = mwifiex_pcie_process_event_ready(adapter);
-                       if (ret)
-                               return ret;
-               }
-
-               if (pcie_ireg & HOST_INTR_CMD_DONE) {
-                       pcie_ireg &= ~HOST_INTR_CMD_DONE;
-                       if (adapter->cmd_sent) {
-                               mwifiex_dbg(adapter, INTR,
-                                           "info: CMD sent Interrupt\n");
-                               adapter->cmd_sent = false;
-                       }
-                       /* Handle command response */
-                       ret = mwifiex_pcie_process_cmd_complete(adapter);
-                       if (ret)
-                               return ret;
-                       if (adapter->hs_activated)
-                               return ret;
-               }
-
-               if (card->msi_enable) {
-                       spin_lock_irqsave(&adapter->int_lock, flags);
-                       adapter->int_status = 0;
-                       spin_unlock_irqrestore(&adapter->int_lock, flags);
-               }
-
-               if (mwifiex_pcie_ok_to_access_hw(adapter)) {
-                       if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS,
-                                            &pcie_ireg)) {
-                               mwifiex_dbg(adapter, ERROR,
-                                           "Read register failed\n");
-                               return -1;
-                       }
-
-                       if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) {
-                               if (mwifiex_write_reg(adapter,
-                                                     PCIE_HOST_INT_STATUS,
-                                                     ~pcie_ireg)) {
-                                       mwifiex_dbg(adapter, ERROR,
-                                                   "Write register failed\n");
-                                       return -1;
-                               }
-                       }
 
+       if (pcie_ireg & HOST_INTR_DNLD_DONE) {
+               pcie_ireg &= ~HOST_INTR_DNLD_DONE;
+               mwifiex_dbg(adapter, INTR, "info: TX DNLD Done\n");
+               ret = mwifiex_pcie_send_data_complete(adapter);
+               if (ret)
+                       return ret;
+       }
+       if (pcie_ireg & HOST_INTR_UPLD_RDY) {
+               pcie_ireg &= ~HOST_INTR_UPLD_RDY;
+               mwifiex_dbg(adapter, INTR, "info: Rx DATA\n");
+               ret = mwifiex_pcie_process_recv_data(adapter);
+               if (ret)
+                       return ret;
+       }
+       if (pcie_ireg & HOST_INTR_EVENT_RDY) {
+               pcie_ireg &= ~HOST_INTR_EVENT_RDY;
+               mwifiex_dbg(adapter, INTR, "info: Rx EVENT\n");
+               ret = mwifiex_pcie_process_event_ready(adapter);
+               if (ret)
+                       return ret;
+       }
+       if (pcie_ireg & HOST_INTR_CMD_DONE) {
+               pcie_ireg &= ~HOST_INTR_CMD_DONE;
+               if (adapter->cmd_sent) {
+                       mwifiex_dbg(adapter, INTR,
+                                   "info: CMD sent Interrupt\n");
+                       adapter->cmd_sent = false;
                }
-               if (!card->msi_enable) {
-                       spin_lock_irqsave(&adapter->int_lock, flags);
-                       pcie_ireg |= adapter->int_status;
-                       adapter->int_status = 0;
-                       spin_unlock_irqrestore(&adapter->int_lock, flags);
-               }
+               /* Handle command response */
+               ret = mwifiex_pcie_process_cmd_complete(adapter);
+               if (ret)
+                       return ret;
        }
+
        mwifiex_dbg(adapter, INTR,
                    "info: cmd_sent=%d data_sent=%d\n",
                    adapter->cmd_sent, adapter->data_sent);
@@ -2715,31 +2708,35 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
 
 static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter)
 {
-       mwifiex_drv_info_dump(adapter);
+       int drv_info_size;
+       void *drv_info;
+
+       drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info);
        mwifiex_pcie_fw_dump(adapter);
-       mwifiex_upload_device_dump(adapter);
+       mwifiex_upload_device_dump(adapter, drv_info, drv_info_size);
 }
 
-static unsigned long iface_work_flags;
-static struct mwifiex_adapter *save_adapter;
 static void mwifiex_pcie_work(struct work_struct *work)
 {
+       struct pcie_service_card *card =
+               container_of(work, struct pcie_service_card, work);
+
        if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
-                              &iface_work_flags))
-               mwifiex_pcie_device_dump_work(save_adapter);
+                              &card->work_flags))
+               mwifiex_pcie_device_dump_work(card->adapter);
 }
 
-static DECLARE_WORK(pcie_work, mwifiex_pcie_work);
 /* This function dumps FW information */
 static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter)
 {
-       save_adapter = adapter;
-       if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags))
+       struct pcie_service_card *card = adapter->card;
+
+       if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags))
                return;
 
-       set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags);
+       set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
 
-       schedule_work(&pcie_work);
+       schedule_work(&card->work);
 }
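
Embedding the work_struct in pcie_service_card removes both file-scoped globals: the handler recovers its owning card with container_of(), and the per-card work_flags bits de-duplicate requests, so multiple cards no longer race over shared state. The two halves of the idiom, as used above:

    /* requester: queue at most one dump at a time */
    if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags))
            return;                         /* already pending */
    set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
    schedule_work(&card->work);

    /* handler: map the work item back to its card, atomically claim it */
    struct pcie_service_card *card =
            container_of(work, struct pcie_service_card, work);
    if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags))
            mwifiex_pcie_device_dump_work(card->adapter);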
 
 /*
@@ -2752,7 +2749,7 @@ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter)
  *      - Allocate command response ring buffer
  *      - Allocate sleep cookie buffer
  */
-static int mwifiex_pcie_init(struct mwifiex_adapter *adapter)
+static int mwifiex_init_pcie(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
        int ret;
@@ -2861,13 +2858,16 @@ err_enable_dev:
  *      - Command response ring buffer
  *      - Sleep cookie buffer
  */
-static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
+static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
        struct pci_dev *pdev = card->dev;
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+       int ret;
+       u32 fw_status;
 
-       if (user_rmmod) {
+       ret = mwifiex_read_reg(adapter, reg->fw_status, &fw_status);
+       if (fw_status == FIRMWARE_READY_PCIE) {
                mwifiex_dbg(adapter, INFO,
                            "Clearing driver ready signature\n");
                if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000))
@@ -3058,7 +3058,7 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
  *      - Allocate event BD ring buffers
  *      - Allocate command response ring buffer
  *      - Allocate sleep cookie buffer
- * Part of mwifiex_pcie_init(), not reset the PCIE registers
+ * Part of mwifiex_init_pcie(); does not reset the PCIe registers
  */
 static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter)
 {
@@ -3067,6 +3067,17 @@ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter)
        struct pci_dev *pdev = card->dev;
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
+       /* Bluetooth is not on the PCIe interface. Download the WiFi-only
+        * firmware during PCIe FLR, so that the Bluetooth part of the
+        * firmware, which is already running, is not affected.
+        */
+       strcpy(adapter->fw_name, PCIE8997_DEFAULT_WIFIFW_NAME);
+
+       /* tx_buf_size might be changed to 3584 by the firmware during
+        * data transfer; reset it to the default size here.
+        */
+       adapter->tx_buf_size = card->pcie.tx_buf_size;
+
        card->cmdrsp_buf = NULL;
        ret = mwifiex_pcie_create_txbd_ring(adapter);
        if (ret) {
@@ -3128,7 +3139,6 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
                mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n");
 
        adapter->seq_num = 0;
-       adapter->tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K;
 
        if (reg->sleep_cookie)
                mwifiex_pcie_delete_sleep_cookie_buf(adapter);
@@ -3141,8 +3151,8 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter)
 }
 
 static struct mwifiex_if_ops pcie_ops = {
-       .init_if =                      mwifiex_pcie_init,
-       .cleanup_if =                   mwifiex_pcie_cleanup,
+       .init_if =                      mwifiex_init_pcie,
+       .cleanup_if =                   mwifiex_cleanup_pcie,
        .check_fw_status =              mwifiex_check_fw_status,
        .check_winner_status =          mwifiex_check_winner_status,
        .prog_fw =                      mwifiex_prog_fw_w_helper,
@@ -3168,49 +3178,7 @@ static struct mwifiex_if_ops pcie_ops = {
        .up_dev =                       mwifiex_pcie_up_dev,
 };
 
-/*
- * This function initializes the PCIE driver module.
- *
- * This registers the device with PCIE bus.
- */
-static int mwifiex_pcie_init_module(void)
-{
-       int ret;
-
-       pr_debug("Marvell PCIe Driver\n");
-
-       /* Clear the flag in case user removes the card. */
-       user_rmmod = 0;
-
-       ret = pci_register_driver(&mwifiex_pcie);
-       if (ret)
-               pr_err("Driver register failed!\n");
-       else
-               pr_debug("info: Driver registered successfully!\n");
-
-       return ret;
-}
-
-/*
- * This function cleans up the PCIE driver.
- *
- * The following major steps are followed for cleanup -
- *      - Resume the device if its suspended
- *      - Disconnect the device if connected
- *      - Shutdown the firmware
- *      - Unregister the device from PCIE bus.
- */
-static void mwifiex_pcie_cleanup_module(void)
-{
-       /* Set the flag as user is removing this module. */
-       user_rmmod = 1;
-
-       cancel_work_sync(&pcie_work);
-       pci_unregister_driver(&mwifiex_pcie);
-}
-
-module_init(mwifiex_pcie_init_module);
-module_exit(mwifiex_pcie_cleanup_module);
+module_pci_driver(mwifiex_pcie);
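
module_pci_driver() generates the registration boilerplate that was just deleted. Via module_driver() it expands, in essence, to:

    static int __init mwifiex_pcie_init(void)
    {
            return pci_register_driver(&mwifiex_pcie);
    }
    module_init(mwifiex_pcie_init);

    static void __exit mwifiex_pcie_exit(void)
    {
            pci_unregister_driver(&mwifiex_pcie);
    }
    module_exit(mwifiex_pcie_exit);

The generated name mwifiex_pcie_init is also why the init_if handler was renamed to mwifiex_init_pcie earlier in this patch: the old name would collide with the macro's output (mwifiex_cleanup_pcie follows for symmetry). Nothing is left for a custom module-exit hook to do, since cancel_work_sync() moved into mwifiex_pcie_remove() and the user_rmmod flag is replaced by reading the firmware status register.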
 
 MODULE_AUTHOR("Marvell International Ltd.");
 MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION);
index ae3365d1c34e8acc8d882c56b8972c9c17dd5a85..00e8ee5ad4a834ed1905c5b98acee85be3ca1a32 100644 (file)
 /* FW awake cookie after FW ready */
 #define FW_AWAKE_COOKIE                                                (0xAA55AA55)
 #define MWIFIEX_DEF_SLEEP_COOKIE                       0xBEEFBEEF
+#define MWIFIEX_SLEEP_COOKIE_SIZE                      4
 #define MWIFIEX_MAX_DELAY_COUNT                                100
 
 struct mwifiex_pcie_card_reg {
@@ -386,6 +387,8 @@ struct pcie_service_card {
 #endif
        struct mwifiex_msix_context msix_ctx[MWIFIEX_NUM_MSIX_VECTORS];
        struct mwifiex_msix_context share_irq_ctx;
+       struct work_struct work;
+       unsigned long work_flags;
 };
 
 static inline int
index 740d79cd91fa1722f74530f0ae3c0a9271d3b1bc..a4b356d267f982b2646dc90f6eb1a2c3deac1425 100644 (file)
 
 #define SDIO_VERSION   "1.0"
 
-/* The mwifiex_sdio_remove() callback function is called when
- * user removes this module from kernel space or ejects
- * the card from the slot. The driver handles these 2 cases
- * differently.
- * If the user is removing the module, the few commands (FUNC_SHUTDOWN,
- * HS_CANCEL etc.) are sent to the firmware.
- * If the card is removed, there is no need to send these command.
- *
- * The variable 'user_rmmod' is used to distinguish these two
- * scenarios. This flag is initialized as FALSE in case the card
- * is removed, and will be set to TRUE for module removal when
- * module_exit function is called.
- */
-static u8 user_rmmod;
+static void mwifiex_sdio_work(struct work_struct *work);
 
 static struct mwifiex_if_ops sdio_ops;
-static unsigned long iface_work_flags;
 
 static struct memory_type_mapping generic_mem_type_map[] = {
        {"DUMP", NULL, 0, 0xDD},
@@ -116,7 +102,6 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
        init_completion(&card->fw_done);
 
        card->func = func;
-       card->device_id = id;
 
        func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
 
@@ -136,6 +121,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
                card->fw_dump_enh = data->fw_dump_enh;
                card->can_auto_tdls = data->can_auto_tdls;
                card->can_ext_scan = data->can_ext_scan;
+               INIT_WORK(&card->work, mwifiex_sdio_work);
        }
 
        sdio_claim_host(func);
@@ -212,6 +198,171 @@ static int mwifiex_sdio_resume(struct device *dev)
        return 0;
 }
 
+/* Write data into SDIO card register. Caller claims SDIO device. */
+static int
+mwifiex_write_reg_locked(struct sdio_func *func, u32 reg, u8 data)
+{
+       int ret = -1;
+
+       sdio_writeb(func, data, reg, &ret);
+       return ret;
+}
+
+/* This function writes data into SDIO card register.
+ */
+static int
+mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data)
+{
+       struct sdio_mmc_card *card = adapter->card;
+       int ret;
+
+       sdio_claim_host(card->func);
+       ret = mwifiex_write_reg_locked(card->func, reg, data);
+       sdio_release_host(card->func);
+
+       return ret;
+}
+
+/* This function reads data from SDIO card register.
+ */
+static int
+mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u8 *data)
+{
+       struct sdio_mmc_card *card = adapter->card;
+       int ret = -1;
+       u8 val;
+
+       sdio_claim_host(card->func);
+       val = sdio_readb(card->func, reg, &ret);
+       sdio_release_host(card->func);
+
+       *data = val;
+
+       return ret;
+}
+
+/* This function writes a block of data into SDIO card memory.
+ *
+ * This does not work in suspended mode.
+ */
+static int
+mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
+                       u8 *buffer, u32 pkt_len, u32 port)
+{
+       struct sdio_mmc_card *card = adapter->card;
+       int ret;
+       u8 blk_mode =
+               (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE;
+       u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
+       u32 blk_cnt =
+               (blk_mode ==
+                BLOCK_MODE) ? (pkt_len /
+                               MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len;
+       u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
+
+       if (adapter->is_suspended) {
+               mwifiex_dbg(adapter, ERROR,
+                           "%s: not allowed while suspended\n", __func__);
+               return -1;
+       }
+
+       sdio_claim_host(card->func);
+
+       ret = sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size);
+
+       sdio_release_host(card->func);
+
+       return ret;
+}
+
+/* This function reads a block of data from SDIO card memory.
+ */
+static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
+                                 u32 len, u32 port, u8 claim)
+{
+       struct sdio_mmc_card *card = adapter->card;
+       int ret;
+       u8 blk_mode = (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE
+                      : BLOCK_MODE;
+       u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
+       u32 blk_cnt = (blk_mode == BLOCK_MODE) ? (len / MWIFIEX_SDIO_BLOCK_SIZE)
+                       : len;
+       u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
+
+       if (claim)
+               sdio_claim_host(card->func);
+
+       ret = sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size);
+
+       if (claim)
+               sdio_release_host(card->func);
+
+       return ret;
+}
+
+/* This function reads the firmware status.
+ */
+static int
+mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
+{
+       struct sdio_mmc_card *card = adapter->card;
+       const struct mwifiex_sdio_card_reg *reg = card->reg;
+       u8 fws0, fws1;
+
+       if (mwifiex_read_reg(adapter, reg->status_reg_0, &fws0))
+               return -1;
+
+       if (mwifiex_read_reg(adapter, reg->status_reg_1, &fws1))
+               return -1;
+
+       *dat = (u16)((fws1 << 8) | fws0);
+       return 0;
+}
+
+/* This function checks the firmware status in the card.
+ */
+static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
+                                  u32 poll_num)
+{
+       int ret = 0;
+       u16 firmware_stat;
+       u32 tries;
+
+       for (tries = 0; tries < poll_num; tries++) {
+               ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
+               if (ret)
+                       continue;
+               if (firmware_stat == FIRMWARE_READY_SDIO) {
+                       ret = 0;
+                       break;
+               }
+
+               msleep(100);
+               ret = -1;
+       }
+
+       return ret;
+}
+
+/* This function checks if WLAN is the winner.
+ */
+static int mwifiex_check_winner_status(struct mwifiex_adapter *adapter)
+{
+       int ret = 0;
+       u8 winner = 0;
+       struct sdio_mmc_card *card = adapter->card;
+
+       if (mwifiex_read_reg(adapter, card->reg->status_reg_0, &winner))
+               return -1;
+
+       if (winner)
+               adapter->winner = 0;
+       else
+               adapter->winner = 1;
+
+       return ret;
+}
+
 /*
  * SDIO remove.
  *
@@ -223,6 +374,8 @@ mwifiex_sdio_remove(struct sdio_func *func)
        struct sdio_mmc_card *card;
        struct mwifiex_adapter *adapter;
        struct mwifiex_private *priv;
+       int ret = 0;
+       u16 firmware_stat;
 
        card = sdio_get_drvdata(func);
        if (!card)
@@ -234,9 +387,12 @@ mwifiex_sdio_remove(struct sdio_func *func)
        if (!adapter || !adapter->priv_num)
                return;
 
+       cancel_work_sync(&card->work);
+
        mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num);
 
-       if (user_rmmod && !adapter->mfg_mode) {
+       ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
+       if (firmware_stat == FIRMWARE_READY_SDIO && !adapter->mfg_mode) {
                mwifiex_deauthenticate_all(adapter);
 
                priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
@@ -364,111 +520,6 @@ static struct sdio_driver mwifiex_sdio = {
        }
 };
 
-/* Write data into SDIO card register. Caller claims SDIO device. */
-static int
-mwifiex_write_reg_locked(struct sdio_func *func, u32 reg, u8 data)
-{
-       int ret = -1;
-       sdio_writeb(func, data, reg, &ret);
-       return ret;
-}
-
-/*
- * This function writes data into SDIO card register.
- */
-static int
-mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data)
-{
-       struct sdio_mmc_card *card = adapter->card;
-       int ret;
-
-       sdio_claim_host(card->func);
-       ret = mwifiex_write_reg_locked(card->func, reg, data);
-       sdio_release_host(card->func);
-
-       return ret;
-}
-
-/*
- * This function reads data from SDIO card register.
- */
-static int
-mwifiex_read_reg(struct mwifiex_adapter *adapter, u32 reg, u8 *data)
-{
-       struct sdio_mmc_card *card = adapter->card;
-       int ret = -1;
-       u8 val;
-
-       sdio_claim_host(card->func);
-       val = sdio_readb(card->func, reg, &ret);
-       sdio_release_host(card->func);
-
-       *data = val;
-
-       return ret;
-}
-
-/*
- * This function writes multiple data into SDIO card memory.
- *
- * This does not work in suspended mode.
- */
-static int
-mwifiex_write_data_sync(struct mwifiex_adapter *adapter,
-                       u8 *buffer, u32 pkt_len, u32 port)
-{
-       struct sdio_mmc_card *card = adapter->card;
-       int ret;
-       u8 blk_mode =
-               (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE : BLOCK_MODE;
-       u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
-       u32 blk_cnt =
-               (blk_mode ==
-                BLOCK_MODE) ? (pkt_len /
-                               MWIFIEX_SDIO_BLOCK_SIZE) : pkt_len;
-       u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
-
-       if (adapter->is_suspended) {
-               mwifiex_dbg(adapter, ERROR,
-                           "%s: not allowed while suspended\n", __func__);
-               return -1;
-       }
-
-       sdio_claim_host(card->func);
-
-       ret = sdio_writesb(card->func, ioport, buffer, blk_cnt * blk_size);
-
-       sdio_release_host(card->func);
-
-       return ret;
-}
-
-/*
- * This function reads multiple data from SDIO card memory.
- */
-static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *buffer,
-                                 u32 len, u32 port, u8 claim)
-{
-       struct sdio_mmc_card *card = adapter->card;
-       int ret;
-       u8 blk_mode = (port & MWIFIEX_SDIO_BYTE_MODE_MASK) ? BYTE_MODE
-                      : BLOCK_MODE;
-       u32 blk_size = (blk_mode == BLOCK_MODE) ? MWIFIEX_SDIO_BLOCK_SIZE : 1;
-       u32 blk_cnt = (blk_mode == BLOCK_MODE) ? (len / MWIFIEX_SDIO_BLOCK_SIZE)
-                       : len;
-       u32 ioport = (port & MWIFIEX_SDIO_IO_PORT_MASK);
-
-       if (claim)
-               sdio_claim_host(card->func);
-
-       ret = sdio_readsb(card->func, buffer, ioport, blk_cnt * blk_size);
-
-       if (claim)
-               sdio_release_host(card->func);
-
-       return ret;
-}
-
 /*
  * This function wakes up the card.
  *
@@ -754,27 +805,6 @@ mwifiex_sdio_poll_card_status(struct mwifiex_adapter *adapter, u8 bits)
        return -1;
 }
 
-/*
- * This function reads the firmware status.
- */
-static int
-mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
-{
-       struct sdio_mmc_card *card = adapter->card;
-       const struct mwifiex_sdio_card_reg *reg = card->reg;
-       u8 fws0, fws1;
-
-       if (mwifiex_read_reg(adapter, reg->status_reg_0, &fws0))
-               return -1;
-
-       if (mwifiex_read_reg(adapter, reg->status_reg_1, &fws1))
-               return -1;
-
-       *dat = (u16) ((fws1 << 8) | fws0);
-
-       return 0;
-}
-
 /*
  * This function disables the host interrupt.
  *
@@ -1079,51 +1109,6 @@ done:
        return ret;
 }
 
-/*
- * This function checks the firmware status in card.
- */
-static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
-                                  u32 poll_num)
-{
-       int ret = 0;
-       u16 firmware_stat;
-       u32 tries;
-
-       for (tries = 0; tries < poll_num; tries++) {
-               ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
-               if (ret)
-                       continue;
-               if (firmware_stat == FIRMWARE_READY_SDIO) {
-                       ret = 0;
-                       break;
-               } else {
-                       msleep(100);
-                       ret = -1;
-               }
-       }
-
-       return ret;
-}
-
-/* This function checks if WLAN is the winner.
- */
-static int mwifiex_check_winner_status(struct mwifiex_adapter *adapter)
-{
-       int ret = 0;
-       u8 winner = 0;
-       struct sdio_mmc_card *card = adapter->card;
-
-       if (mwifiex_read_reg(adapter, card->reg->status_reg_0, &winner))
-               return -1;
-
-       if (winner)
-               adapter->winner = 0;
-       else
-               adapter->winner = 1;
-
-       return ret;
-}
-
 /*
  * This function decodes an SDIO aggregation packet.
  *
@@ -2204,54 +2189,25 @@ mwifiex_update_mp_end_port(struct mwifiex_adapter *adapter, u16 port)
                    port, card->mp_data_port_mask);
 }
 
-static void mwifiex_recreate_adapter(struct sdio_mmc_card *card)
+static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
 {
+       struct sdio_mmc_card *card = adapter->card;
        struct sdio_func *func = card->func;
-       const struct sdio_device_id *device_id = card->device_id;
-
-       /* TODO mmc_hw_reset does not require destroying and re-probing the
-        * whole adapter. Hence there was no need to for this rube-goldberg
-        * design to reload the fw from an external workqueue. If we don't
-        * destroy the adapter we could reload the fw from
-        * mwifiex_main_work_queue directly.
-        * The real difficulty with fw reset is to restore all the user
-        * settings applied through ioctl. By destroying and recreating the
-        * adapter, we take the easy way out, since we rely on user space to
-        * restore them. We assume that user space will treat the new
-        * incarnation of the adapter(interfaces) as if they had been just
-        * discovered and initializes them from scratch.
-        */
 
-       mwifiex_sdio_remove(func);
-
-       /*
-        * Normally, we would let the driver core take care of releasing these.
-        * But we're not letting the driver core handle this one. See above
-        * TODO.
-        */
-       sdio_set_drvdata(func, NULL);
-       devm_kfree(&func->dev, card);
+       mwifiex_shutdown_sw(adapter);
 
        /* power cycle the adapter */
        sdio_claim_host(func);
        mmc_hw_reset(func->card->host);
        sdio_release_host(func);
 
-       mwifiex_sdio_probe(func, device_id);
-}
-
-static struct mwifiex_adapter *save_adapter;
-static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
-{
-       struct sdio_mmc_card *card = adapter->card;
-
-       /* TODO card pointer is unprotected. If the adapter is removed
-        * physically, sdio core might trigger mwifiex_sdio_remove, before this
-        * workqueue is run, which will destroy the adapter struct. When this
-        * workqueue eventually exceutes it will dereference an invalid adapter
-        * pointer
+       /* Previous save_adapter won't be valid after this. We will cancel
+        * pending work requests.
         */
-       mwifiex_recreate_adapter(card);
+       clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+       clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
+
+       mwifiex_reinit_sw(adapter);
 }
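
Card reset no longer destroys and re-probes the whole adapter (the old remove/probe round-trip with its unprotected-card-pointer TODO); it now quiesces the software state, power-cycles the card, and re-initializes in place. The sequence wired up above, in outline:

    mwifiex_shutdown_sw(adapter);           /* quiesce software state */

    sdio_claim_host(func);
    mmc_hw_reset(func->card->host);         /* power-cycle the card */
    sdio_release_host(func);

    /* drop any requests queued against the pre-reset state */
    clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
    clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);

    mwifiex_reinit_sw(adapter);             /* bring the firmware back up */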
 
 /* This function read/write firmware */
@@ -2542,47 +2498,53 @@ done:
 static void mwifiex_sdio_device_dump_work(struct mwifiex_adapter *adapter)
 {
        struct sdio_mmc_card *card = adapter->card;
+       int drv_info_size;
+       void *drv_info;
 
-       mwifiex_drv_info_dump(adapter);
+       drv_info_size = mwifiex_drv_info_dump(adapter, &drv_info);
        if (card->fw_dump_enh)
                mwifiex_sdio_generic_fw_dump(adapter);
        else
                mwifiex_sdio_fw_dump(adapter);
-       mwifiex_upload_device_dump(adapter);
+       mwifiex_upload_device_dump(adapter, drv_info, drv_info_size);
 }
 
 static void mwifiex_sdio_work(struct work_struct *work)
 {
+       struct sdio_mmc_card *card =
+               container_of(work, struct sdio_mmc_card, work);
+
        if (test_and_clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP,
-                              &iface_work_flags))
-               mwifiex_sdio_device_dump_work(save_adapter);
+                              &card->work_flags))
+               mwifiex_sdio_device_dump_work(card->adapter);
        if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET,
-                              &iface_work_flags))
-               mwifiex_sdio_card_reset_work(save_adapter);
+                              &card->work_flags))
+               mwifiex_sdio_card_reset_work(card->adapter);
 }
 
-static DECLARE_WORK(sdio_work, mwifiex_sdio_work);
 /* This function resets the card */
 static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter)
 {
-       save_adapter = adapter;
-       if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags))
+       struct sdio_mmc_card *card = adapter->card;
+
+       if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags))
                return;
 
-       set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags);
+       set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
 
-       schedule_work(&sdio_work);
+       schedule_work(&card->work);
 }
 
 /* This function dumps FW information */
 static void mwifiex_sdio_device_dump(struct mwifiex_adapter *adapter)
 {
-       save_adapter = adapter;
-       if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags))
+       struct sdio_mmc_card *card = adapter->card;
+
+       if (test_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags))
                return;
 
-       set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags);
-       schedule_work(&sdio_work);
+       set_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+       schedule_work(&card->work);
 }
 
 /* Function to dump SDIO function registers and SDIO scratch registers in case
@@ -2678,6 +2640,33 @@ mwifiex_sdio_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
        return p - drv_buf;
 }
 
+/* SDIO device/function initialization; code is extracted
+ * from the init_if and register_dev handlers.
+ */
+static void mwifiex_sdio_up_dev(struct mwifiex_adapter *adapter)
+{
+       struct sdio_mmc_card *card = adapter->card;
+       u8 sdio_ireg;
+
+       sdio_claim_host(card->func);
+       sdio_enable_func(card->func);
+       sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE);
+       sdio_release_host(card->func);
+
+       /* tx_buf_size might be changed to 3584 by the firmware during
+        * data transfer; reset it to the default size here.
+        */
+       adapter->tx_buf_size = card->tx_buf_size;
+
+       /* Read host_int_status_reg to ACK the first interrupt received
+        * from the bootloader. If we don't do this, we get an interrupt
+        * as soon as we register the IRQ.
+        */
+       mwifiex_read_reg(adapter, card->reg->host_int_status_reg, &sdio_ireg);
+
+       mwifiex_init_sdio_ioport(adapter);
+}
+
 static struct mwifiex_if_ops sdio_ops = {
        .init_if = mwifiex_init_sdio,
        .cleanup_if = mwifiex_cleanup_sdio,
@@ -2703,43 +2692,10 @@ static struct mwifiex_if_ops sdio_ops = {
        .reg_dump = mwifiex_sdio_reg_dump,
        .device_dump = mwifiex_sdio_device_dump,
        .deaggr_pkt = mwifiex_deaggr_sdio_pkt,
+       .up_dev = mwifiex_sdio_up_dev,
 };
 
-/*
- * This function initializes the SDIO driver.
- *
- * This registers the device with SDIO bus.
- */
-static int
-mwifiex_sdio_init_module(void)
-{
-       /* Clear the flag in case user removes the card. */
-       user_rmmod = 0;
-
-       return sdio_register_driver(&mwifiex_sdio);
-}
-
-/*
- * This function cleans up the SDIO driver.
- *
- * The following major steps are followed for cleanup -
- *      - Resume the device if its suspended
- *      - Disconnect the device if connected
- *      - Shutdown the firmware
- *      - Unregister the device from SDIO bus.
- */
-static void
-mwifiex_sdio_cleanup_module(void)
-{
-       /* Set the flag as user is removing this module. */
-       user_rmmod = 1;
-       cancel_work_sync(&sdio_work);
-
-       sdio_unregister_driver(&mwifiex_sdio);
-}
-
-module_init(mwifiex_sdio_init_module);
-module_exit(mwifiex_sdio_cleanup_module);
+module_driver(mwifiex_sdio, sdio_register_driver, sdio_unregister_driver);
 
 MODULE_AUTHOR("Marvell International Ltd.");
 MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
index cdbf3a3ac7f994fbb56ced7df60b501199976202..dccf7fd1aef32cb89235723639fbc2fe4816427d 100644 (file)
@@ -268,8 +268,8 @@ struct sdio_mmc_card {
        struct mwifiex_sdio_mpa_tx mpa_tx;
        struct mwifiex_sdio_mpa_rx mpa_rx;
 
-       /* needed for card reset */
-       const struct sdio_device_id *device_id;
+       struct work_struct work;
+       unsigned long work_flags;
 };
 
 struct mwifiex_sdio_device {
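
Moving the work item and its flag word into struct sdio_mmc_card replaces the old file-scope globals (save_adapter, iface_work_flags, sdio_work) and lets multiple cards schedule work independently. A minimal sketch of the flag-guarded deferred-work pattern used above, with hypothetical names (mydev, MYDEV_WORK_RESET):

    #include <linux/workqueue.h>
    #include <linux/bitops.h>

    #define MYDEV_WORK_RESET        0       /* bit index into work_flags */

    struct mydev {
            struct work_struct work;
            unsigned long work_flags;
    };

    static void mydev_schedule_reset(struct mydev *dev)
    {
            /* Don't queue again while a reset is still pending. */
            if (test_bit(MYDEV_WORK_RESET, &dev->work_flags))
                    return;
            set_bit(MYDEV_WORK_RESET, &dev->work_flags);
            schedule_work(&dev->work);
    }

    static void mydev_work_fn(struct work_struct *work)
    {
            struct mydev *dev = container_of(work, struct mydev, work);

            if (test_and_clear_bit(MYDEV_WORK_RESET, &dev->work_flags)) {
                    /* ...reset just this card... */
            }
    }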
index 125e448712ddb5aadd43c5d16c4eda87856485a8..2f1f4d190b28429c43dc61770f53f81a09353616 100644 (file)
@@ -76,7 +76,7 @@ mwifiex_cmd_802_11_rssi_info(struct mwifiex_private *priv,
  */
 static int mwifiex_cmd_mac_control(struct mwifiex_private *priv,
                                   struct host_cmd_ds_command *cmd,
-                                  u16 cmd_action, u16 *action)
+                                  u16 cmd_action, u32 *action)
 {
        struct host_cmd_ds_mac_control *mac_ctrl = &cmd->params.mac_ctrl;
 
@@ -89,7 +89,7 @@ static int mwifiex_cmd_mac_control(struct mwifiex_private *priv,
        cmd->command = cpu_to_le16(HostCmd_CMD_MAC_CONTROL);
        cmd->size =
                cpu_to_le16(sizeof(struct host_cmd_ds_mac_control) + S_DS_GEN);
-       mac_ctrl->action = cpu_to_le16(*action);
+       mac_ctrl->action = cpu_to_le32(*action);
 
        return 0;
 }
@@ -1935,8 +1935,8 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                mwifiex_dbg(priv->adapter, ERROR,
                            "0x%x command not supported by firmware\n",
                            cmd_no);
-                       return -EOPNOTSUPP;
-               }
+               return -EOPNOTSUPP;
+       }
 
        /* Prepare command */
        switch (cmd_no) {
index 9df0c4dc06ed9bc95b739f1fc11a79fe0c19a937..d63d163eb1ecaaa441d4da1861fc9038f7cd326e 100644 (file)
@@ -824,7 +824,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
        case EVENT_RSSI_LOW:
                cfg80211_cqm_rssi_notify(priv->netdev,
                                         NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
-                                        GFP_KERNEL);
+                                        0, GFP_KERNEL);
                mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
                                 HostCmd_ACT_GEN_GET, 0, NULL, false);
                priv->subsc_evt_rssi_state = RSSI_LOW_RECVD;
@@ -839,7 +839,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
        case EVENT_RSSI_HIGH:
                cfg80211_cqm_rssi_notify(priv->netdev,
                                         NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
-                                        GFP_KERNEL);
+                                        0, GFP_KERNEL);
                mwifiex_send_cmd(priv, HostCmd_CMD_RSSI_INFO,
                                 HostCmd_ACT_GEN_GET, 0, NULL, false);
                priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD;
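
The extra 0 argument tracks a cfg80211_cqm_rssi_notify() signature change that added the current RSSI level to CQM notifications. As best recalled from the cfg80211 API of this era (treat the exact prototype as an assumption):

    /* rssi_level: current RSSI in dBm, or 0 when the driver cannot report it */
    void cfg80211_cqm_rssi_notify(struct net_device *dev,
                                  enum nl80211_cqm_rssi_threshold_event rssi_event,
                                  s32 rssi_level, gfp_t gfp);

mwifiex passes 0 because the firmware event only says which threshold was crossed; the measured level is fetched separately via the HostCmd_CMD_RSSI_INFO command issued just below.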
@@ -1009,6 +1009,10 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                            adapter->event_skb->len -
                                            sizeof(eventcause));
                break;
+       /* Debugging event; not used, but let's not print an ERROR for it. */
+       case EVENT_UNKNOWN_DEBUG:
+               mwifiex_dbg(adapter, EVENT, "event: debug\n");
+               break;
        default:
                mwifiex_dbg(adapter, ERROR, "event: unknown event id: %#x\n",
                            eventcause);
index 644f3a2487414e40945f5af89094009f9bc733a2..1532ac9cee0b2f0a4bffda01073db5fa181286ca 100644 (file)
@@ -1159,8 +1159,6 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
                        encrypt_key.is_rx_seq_valid = true;
                }
        } else {
-               if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
-                       return 0;
                encrypt_key.key_disable = true;
                if (mac_addr)
                        memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
index c563160b3b6be93f8dd3c96f317f289fbe1d9d7b..9cf3334adf4d56e2b3dcbc77bf847c028001a443 100644 (file)
@@ -22,7 +22,6 @@
 
 #define USB_VERSION    "1.0"
 
-static u8 user_rmmod;
 static struct mwifiex_if_ops usb_ops;
 
 static struct usb_device_id mwifiex_usb_table[] = {
@@ -618,7 +617,7 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
        if (!adapter || !adapter->priv_num)
                return;
 
-       if (user_rmmod && !adapter->mfg_mode) {
+       if (card->udev->state != USB_STATE_NOTATTACHED && !adapter->mfg_mode) {
                mwifiex_deauthenticate_all(adapter);
 
                mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
@@ -1201,43 +1200,7 @@ static struct mwifiex_if_ops usb_ops = {
        .is_port_ready =        mwifiex_usb_is_port_ready,
 };
 
-/* This function initializes the USB driver module.
- *
- * This registers the device with USB bus.
- */
-static int mwifiex_usb_init_module(void)
-{
-       int ret;
-
-       pr_debug("Marvell USB8797 Driver\n");
-
-       ret = usb_register(&mwifiex_usb_driver);
-       if (ret)
-               pr_err("Driver register failed!\n");
-       else
-               pr_debug("info: Driver registered successfully!\n");
-
-       return ret;
-}
-
-/* This function cleans up the USB driver.
- *
- * The following major steps are followed in .disconnect for cleanup:
- *      - Resume the device if its suspended
- *      - Disconnect the device if connected
- *      - Shutdown the firmware
- *      - Unregister the device from USB bus.
- */
-static void mwifiex_usb_cleanup_module(void)
-{
-       /* set the flag as user is removing this module */
-       user_rmmod = 1;
-
-       usb_deregister(&mwifiex_usb_driver);
-}
-
-module_init(mwifiex_usb_init_module);
-module_exit(mwifiex_usb_cleanup_module);
+module_usb_driver(mwifiex_usb_driver);
 
 MODULE_AUTHOR("Marvell International Ltd.");
 MODULE_DESCRIPTION("Marvell WiFi-Ex USB Driver version" USB_VERSION);
index 18fbb96a46e94f290dbf173e1e9db1ceaffc5487..b1ab8da121dd8a9e015fb0a82bf7e0c5ab031749 100644 (file)
@@ -145,21 +145,6 @@ int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter)
        return 0;
 }
 
-/*
- * Firmware shutdown complete callback handler.
- *
- * This function sets the hardware status to not ready and wakes up
- * the function waiting on the init wait queue for the firmware
- * shutdown to complete.
- */
-int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter)
-{
-       adapter->hw_status = MWIFIEX_HW_STATUS_NOT_READY;
-       adapter->init_wait_q_woken = true;
-       wake_up_interruptible(&adapter->init_wait_q);
-       return 0;
-}
-
 /*
  * This function sends init/shutdown command
  * to firmware.
index 085c5b423bdfa8e4992be6b12047b3b86191a375..19874439ac40fc6419ccee96e062846a972067c8 100644 (file)
@@ -1200,7 +1200,7 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
        /*
         * Dump beacon to userspace through debugfs.
         */
-       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
+       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
 out:
        /*
         * Enable beaconing again.
index 9832fd50c7935e39c27653b1ad935ab6dd0176fc..791434de8052e7ecba344c9a5f379a7b78b1f35e 100644 (file)
@@ -1349,7 +1349,7 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
        /*
         * Dump beacon to userspace through debugfs.
         */
-       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
+       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
 out:
        /*
         * Enable beaconing again.
index cd3ab5a9e98da6d25d8d978b28d593afe0033818..0d2670a56c4c5c116ac0c9108ace68bb1bfe261e 100644 (file)
@@ -55,7 +55,7 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
  * If the csr_mutex is already held then the _lock variants must
  * be used instead.
  */
-static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
+static void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
                                           const unsigned int offset,
                                           u16 *value)
 {
@@ -66,7 +66,7 @@ static inline void rt2500usb_register_read(struct rt2x00_dev *rt2x00dev,
        *value = le16_to_cpu(reg);
 }
 
-static inline void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
+static void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
                                                const unsigned int offset,
                                                u16 *value)
 {
@@ -77,16 +77,7 @@ static inline void rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev,
        *value = le16_to_cpu(reg);
 }
 
-static inline void rt2500usb_register_multiread(struct rt2x00_dev *rt2x00dev,
-                                               const unsigned int offset,
-                                               void *value, const u16 length)
-{
-       rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
-                                     USB_VENDOR_REQUEST_IN, offset,
-                                     value, length);
-}
-
-static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
+static void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
                                            const unsigned int offset,
                                            u16 value)
 {
@@ -96,7 +87,7 @@ static inline void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev,
                                      &reg, sizeof(reg));
 }
 
-static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
+static void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
                                                 const unsigned int offset,
                                                 u16 value)
 {
@@ -106,7 +97,7 @@ static inline void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
                                       &reg, sizeof(reg), REGISTER_TIMEOUT);
 }
 
-static inline void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
+static void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
                                                 const unsigned int offset,
                                                 void *value, const u16 length)
 {
@@ -1170,7 +1161,7 @@ static void rt2500usb_write_beacon(struct queue_entry *entry,
        /*
         * Dump beacon to userspace through debugfs.
         */
-       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
+       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
 
        /*
         * USB devices cannot blindly pass the skb->len as the
index 95c1d7c0a2f388803aedda326087b11e03e438cb..256496bfbafb2ef29b4560c9d93753f731035656 100644 (file)
@@ -72,6 +72,7 @@
 #define RF5592                         0x000f
 #define RF3070                         0x3070
 #define RF3290                         0x3290
+#define RF5350                         0x5350
 #define RF5360                         0x5360
 #define RF5362                         0x5362
 #define RF5370                         0x5370
@@ -2286,6 +2287,8 @@ struct mac_iveiv_entry {
 #define RFCSR30_RX_H20M                        FIELD8(0x04)
 #define RFCSR30_RX_VCM                 FIELD8(0x18)
 #define RFCSR30_RF_CALIBRATION         FIELD8(0x80)
+#define RF3322_RFCSR30_TX_H20M         FIELD8(0x01)
+#define RF3322_RFCSR30_RX_H20M         FIELD8(0x02)
 
 /*
  * RFCSR 31:
@@ -2300,6 +2303,12 @@ struct mac_iveiv_entry {
 /* RFCSR 36 bits for RF3053 */
 #define RFCSR36_RF_BS                  FIELD8(0x80)
 
+/*
+ * RFCSR 34:
+ */
+#define RFCSR34_TX0_EXT_PA             FIELD8(0x04)
+#define RFCSR34_TX1_EXT_PA             FIELD8(0x08)
+
 /*
  * RFCSR 38:
  */
@@ -2311,6 +2320,18 @@ struct mac_iveiv_entry {
 #define RFCSR39_RX_DIV                 FIELD8(0x40)
 #define RFCSR39_RX_LO2_EN              FIELD8(0x80)
 
+/*
+ * RFCSR 41:
+ */
+#define RFCSR41_BIT1                   FIELD8(0x01)
+#define RFCSR41_BIT4                   FIELD8(0x08)
+
+/*
+ * RFCSR 42:
+ */
+#define RFCSR42_BIT1                   FIELD8(0x01)
+#define RFCSR42_BIT4                   FIELD8(0x08)
+
 /*
  * RFCSR 49:
  */
@@ -2324,6 +2345,8 @@ struct mac_iveiv_entry {
  * RFCSR 50:
  */
 #define RFCSR50_TX                     FIELD8(0x3f)
+#define RFCSR50_TX0_EXT_PA             FIELD8(0x02)
+#define RFCSR50_TX1_EXT_PA             FIELD8(0x10)
 #define RFCSR50_EP                     FIELD8(0xc0)
 /* bits for RT3593 */
 #define RFCSR50_TX_LO1_EN              FIELD8(0x20)
@@ -2471,6 +2494,8 @@ enum rt2800_eeprom_word {
  * INTERNAL_TX_ALC: 0: disable, 1: enable
  * BT_COEXIST: 0: disable, 1: enable
  * DAC_TEST: 0: disable, 1: enable
+ * EXTERNAL_TX0_PA: 0: disable, 1: enable (only on RT3352)
+ * EXTERNAL_TX1_PA: 0: disable, 1: enable (only on RT3352)
  */
 #define EEPROM_NIC_CONF1_HW_RADIO              FIELD16(0x0001)
 #define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC       FIELD16(0x0002)
@@ -2487,6 +2512,8 @@ enum rt2800_eeprom_word {
 #define EEPROM_NIC_CONF1_INTERNAL_TX_ALC       FIELD16(0x2000)
 #define EEPROM_NIC_CONF1_BT_COEXIST            FIELD16(0x4000)
 #define EEPROM_NIC_CONF1_DAC_TEST              FIELD16(0x8000)
+#define EEPROM_NIC_CONF1_EXTERNAL_TX0_PA_3352  FIELD16(0x4000)
+#define EEPROM_NIC_CONF1_EXTERNAL_TX1_PA_3352  FIELD16(0x8000)
 
 /*
  * EEPROM frequency
@@ -2979,7 +3006,9 @@ struct rt2800_drv_data {
        u8 bbp26;
        u8 txmixer_gain_24g;
        u8 txmixer_gain_5g;
+       u8 max_psdu;
        unsigned int tbtt_tick;
+       unsigned int ampdu_factor_cnt[4];
        DECLARE_BITMAP(sta_ids, STA_IDS_SIZE);
 };
 
index 4fb79e05078ffe87f2d64ca480dc1ff3a6a5ae0f..8223a15203165d32f09c03fff245162d04968b60 100644 (file)
@@ -373,9 +373,6 @@ static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
        int i, count;
 
        rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
-       if (rt2x00_get_field32(reg, WLAN_EN))
-               return 0;
-
        rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
        rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
        rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
@@ -967,8 +964,6 @@ static void rt2800_update_beacons_setup(struct rt2x00_dev *rt2x00dev)
                bcn_num++;
        }
 
-       WARN_ON_ONCE(bcn_num != rt2x00dev->intf_beaconing);
-
        rt2800_register_write(rt2x00dev, BCN_OFFSET0, (u32) reg);
        rt2800_register_write(rt2x00dev, BCN_OFFSET1, (u32) (reg >> 32));
 
@@ -1019,7 +1014,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
        /*
         * Dump beacon to userspace through debugfs.
         */
-       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
+       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
 
        /*
         * Write entire beacon with TXWI and padding to register.
@@ -1418,6 +1413,23 @@ int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
 }
 EXPORT_SYMBOL_GPL(rt2800_config_pairwise_key);
 
+static void rt2800_set_max_psdu_len(struct rt2x00_dev *rt2x00dev)
+{
+       u8 i, max_psdu;
+       u32 reg;
+       struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+
+       for (i = 0; i < 3; i++)
+               if (drv_data->ampdu_factor_cnt[i] > 0)
+                       break;
+
+       max_psdu = min(drv_data->max_psdu, i);
+
+       rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
+       rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, max_psdu);
+       rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
+}
+
 int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
                   struct ieee80211_sta *sta)
 {
@@ -1425,6 +1437,17 @@ int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
        struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
        struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
 
+       /*
+        * Limit the global maximum TX AMPDU length to the smallest value
+        * among all connected stations. In AP mode this can be suboptimal,
+        * but we have no choice if some connected STA cannot receive as
+        * much data as the others.
+        */
+       if (sta->ht_cap.ht_supported) {
+               drv_data->ampdu_factor_cnt[sta->ht_cap.ampdu_factor & 3]++;
+               rt2800_set_max_psdu_len(rt2x00dev);
+       }
+
        /*
         * Search for the first free WCID entry and return the corresponding
         * index.
@@ -1457,9 +1480,16 @@ int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
 }
 EXPORT_SYMBOL_GPL(rt2800_sta_add);
 
-int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, int wcid)
+int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta)
 {
        struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+       struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
+       int wcid = sta_priv->wcid;
+
+       if (sta->ht_cap.ht_supported) {
+               drv_data->ampdu_factor_cnt[sta->ht_cap.ampdu_factor & 3]--;
+               rt2800_set_max_psdu_len(rt2x00dev);
+       }
 
        if (wcid > WCID_END)
                return 0;
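
A worked example of the counting scheme: if three HT stations connect advertising ampdu_factor values 3, 3 and 1, then ampdu_factor_cnt[] holds {0, 1, 0, 2} and the scan in rt2800_set_max_psdu_len() stops at the first non-empty slot, i = 1, clamping MAX_LEN_CFG_MAX_PSDU to min(drv_data->max_psdu, 1). When the factor-1 station is removed, its counter drops back to zero, the scan runs through to i = 3, and the limit relaxes to the per-chip default set in rt2800_init_registers().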
@@ -1902,9 +1932,14 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
        rt2x00dev->lna_gain = lna_gain;
 }
 
+static inline bool rt2800_clk_is_20mhz(struct rt2x00_dev *rt2x00dev)
+{
+       return clk_get_rate(rt2x00dev->clk) == 20000000;
+}
+
 #define FREQ_OFFSET_BOUND      0x5f
 
-static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev)
+static void rt2800_freq_cal_mode1(struct rt2x00_dev *rt2x00dev)
 {
        u8 freq_offset, prev_freq_offset;
        u8 rfcsr, prev_rfcsr;
@@ -2075,7 +2110,9 @@ static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
        rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
        rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
        rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
-       msleep(1);
+
+       usleep_range(1000, 1500);
+
        rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
        rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
 }
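
The msleep(1) conversions in this and later hunks follow the kernel's timers-howto guidance: msleep() works at jiffy granularity and rounds up, so msleep(1) can oversleep by an order of magnitude on low-HZ configs, while usleep_range() is hrtimer-backed and gives the scheduler an explicit coalescing window. Roughly (the ranges are the documented rules of thumb, not hard limits):

    udelay(10);                /* atomic context or < ~10 us: busy-wait   */
    usleep_range(1000, 1500);  /* ~10 us to ~20 ms: precise, coalescable  */
    msleep(50);                /* above ~20 ms: jiffy granularity is fine */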
@@ -2380,7 +2417,7 @@ static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev,
        }
        rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
 
-       rt2800_adjust_freq_offset(rt2x00dev);
+       rt2800_freq_cal_mode1(rt2x00dev);
 
        if (conf_is_ht40(conf)) {
                txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw40,
@@ -2570,7 +2607,7 @@ static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
                rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
        rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
 
-       rt2800_adjust_freq_offset(rt2x00dev);
+       rt2800_freq_cal_mode1(rt2x00dev);
 
        if (rf->channel <= 14) {
                if (rf->channel == 6)
@@ -2611,7 +2648,7 @@ static void rt2800_config_channel_rf3322(struct rt2x00_dev *rt2x00dev,
        else
                rt2800_rfcsr_write(rt2x00dev, 48, info->default_power2);
 
-       rt2800_adjust_freq_offset(rt2x00dev);
+       rt2800_freq_cal_mode1(rt2x00dev);
 
        rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
        rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
@@ -2676,7 +2713,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
        rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
        rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
 
-       rt2800_adjust_freq_offset(rt2x00dev);
+       rt2800_freq_cal_mode1(rt2x00dev);
 
        if (rf->channel <= 14) {
                int idx = rf->channel-1;
@@ -2721,6 +2758,13 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
                                        0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d,
                                        0x8a, 0x88, 0x88, 0x87, 0x87, 0x86};
 
+                               rt2800_rfcsr_write(rt2x00dev, 59,
+                                                  r59_non_bt[idx]);
+                       } else if (rt2x00_rt(rt2x00dev, RT5350)) {
+                               static const char r59_non_bt[] = {0x0b, 0x0b,
+                                       0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0a,
+                                       0x0a, 0x09, 0x08, 0x07, 0x07, 0x06};
+
                                rt2800_rfcsr_write(rt2x00dev, 59,
                                                   r59_non_bt[idx]);
                        }
@@ -2971,7 +3015,7 @@ static void rt2800_config_channel_rf55xx(struct rt2x00_dev *rt2x00dev,
        }
 
        /* TODO proper frequency adjustment */
-       rt2800_adjust_freq_offset(rt2x00dev);
+       rt2800_freq_cal_mode1(rt2x00dev);
 
        /* TODO merge with others */
        rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
@@ -3160,6 +3204,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
                rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
                break;
        case RF3070:
+       case RF5350:
        case RF5360:
        case RF5362:
        case RF5370:
@@ -3178,6 +3223,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        if (rt2x00_rf(rt2x00dev, RF3070) ||
            rt2x00_rf(rt2x00dev, RF3290) ||
            rt2x00_rf(rt2x00dev, RF3322) ||
+           rt2x00_rf(rt2x00dev, RF5350) ||
            rt2x00_rf(rt2x00dev, RF5360) ||
            rt2x00_rf(rt2x00dev, RF5362) ||
            rt2x00_rf(rt2x00dev, RF5370) ||
@@ -3185,8 +3231,17 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
            rt2x00_rf(rt2x00dev, RF5390) ||
            rt2x00_rf(rt2x00dev, RF5392)) {
                rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
-               rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
-               rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
+               if (rt2x00_rf(rt2x00dev, RF3322)) {
+                       rt2x00_set_field8(&rfcsr, RF3322_RFCSR30_TX_H20M,
+                                         conf_is_ht40(conf));
+                       rt2x00_set_field8(&rfcsr, RF3322_RFCSR30_RX_H20M,
+                                         conf_is_ht40(conf));
+               } else {
+                       rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M,
+                                         conf_is_ht40(conf));
+                       rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M,
+                                         conf_is_ht40(conf));
+               }
                rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
 
                rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
@@ -3197,11 +3252,18 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        /*
         * Change BBP settings
         */
+
        if (rt2x00_rt(rt2x00dev, RT3352)) {
+               rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
+               rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
+               rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
+
                rt2800_bbp_write(rt2x00dev, 27, 0x0);
                rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
                rt2800_bbp_write(rt2x00dev, 27, 0x20);
                rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
+               rt2800_bbp_write(rt2x00dev, 86, 0x38);
+               rt2800_bbp_write(rt2x00dev, 83, 0x6a);
        } else if (rt2x00_rt(rt2x00dev, RT3593)) {
                if (rf->channel > 14) {
                        /* Disable CCK Packet detection on 5GHz */
@@ -3407,7 +3469,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
                }
        }
 
-       msleep(1);
+       usleep_range(1000, 1500);
 
        /*
         * Clear channel statistic counters
@@ -3419,7 +3481,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
        /*
         * Clear update flag
         */
-       if (rt2x00_rt(rt2x00dev, RT3352)) {
+       if (rt2x00_rt(rt2x00dev, RT3352) ||
+           rt2x00_rt(rt2x00dev, RT5350)) {
                rt2800_bbp_read(rt2x00dev, 49, &bbp);
                rt2x00_set_field8(&bbp, BBP49_UPDATE_FLAG, 0);
                rt2800_bbp_write(rt2x00dev, 49, bbp);
@@ -4300,21 +4363,25 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
        case RF3053:
        case RF3070:
        case RF3290:
+       case RF5350:
        case RF5360:
        case RF5362:
        case RF5370:
        case RF5372:
        case RF5390:
        case RF5392:
+       case RF5592:
                rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
                rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
                rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
                break;
        default:
+               WARN_ONCE(1, "Not supported RF chipet %x for VCO recalibration",
+                         rt2x00dev->chip.rf);
                return;
        }
 
-       mdelay(1);
+       usleep_range(1000, 1500);
 
        rt2800_register_read(rt2x00dev, TX_PIN_CFG, &tx_pin);
        if (rt2x00dev->rf_channel <= 14) {
@@ -4536,6 +4603,7 @@ EXPORT_SYMBOL_GPL(rt2800_link_tuner);
  */
 static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
 {
+       struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
        u32 reg;
        u16 eeprom;
        unsigned int i;
@@ -4678,6 +4746,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
                rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
+       } else if (rt2x00_rt(rt2x00dev, RT5350)) {
+               rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
        } else {
                rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
                rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -4702,14 +4772,18 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
 
        rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
        rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
-       if (rt2x00_rt_rev_gte(rt2x00dev, RT2872, REV_RT2872E) ||
-           rt2x00_rt(rt2x00dev, RT2883) ||
-           rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E))
-               rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
-       else
-               rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
-       rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 0);
-       rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
+       if (rt2x00_is_usb(rt2x00dev)) {
+               drv_data->max_psdu = 3;
+       } else if (rt2x00_rt_rev_gte(rt2x00dev, RT2872, REV_RT2872E) ||
+                  rt2x00_rt(rt2x00dev, RT2883) ||
+                  rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E)) {
+               drv_data->max_psdu = 2;
+       } else {
+               drv_data->max_psdu = 1;
+       }
+       rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, drv_data->max_psdu);
+       rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 10);
+       rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 10);
        rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
 
        rt2800_register_read(rt2x00dev, LED_CFG, &reg);
@@ -4725,8 +4799,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);
 
        rt2800_register_read(rt2x00dev, TX_RTY_CFG, &reg);
-       rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT, 15);
-       rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 31);
+       rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT, 2);
+       rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 2);
        rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
        rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
        rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
@@ -4858,10 +4932,10 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
        rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, reg);
 
        rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
-       rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32);
+       rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 7);
        rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES,
                           IEEE80211_MAX_RTS_THRESHOLD);
-       rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 0);
+       rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 1);
        rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg);
 
        rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
@@ -5319,9 +5393,13 @@ static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev)
 
        rt2800_bbp_write(rt2x00dev, 82, 0x62);
 
-       rt2800_bbp_write(rt2x00dev, 83, 0x6a);
-
-       rt2800_bbp_write(rt2x00dev, 84, 0x99);
+       if (rt2x00_rt(rt2x00dev, RT5350)) {
+               rt2800_bbp_write(rt2x00dev, 83, 0x7a);
+               rt2800_bbp_write(rt2x00dev, 84, 0x9a);
+       } else {
+               rt2800_bbp_write(rt2x00dev, 83, 0x6a);
+               rt2800_bbp_write(rt2x00dev, 84, 0x99);
+       }
 
        rt2800_bbp_write(rt2x00dev, 86, 0x38);
 
@@ -5335,9 +5413,13 @@ static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev)
 
        rt2800_bbp_write(rt2x00dev, 104, 0x92);
 
-       rt2800_bbp_write(rt2x00dev, 105, 0x34);
-
-       rt2800_bbp_write(rt2x00dev, 106, 0x05);
+       if (rt2x00_rt(rt2x00dev, RT5350)) {
+               rt2800_bbp_write(rt2x00dev, 105, 0x3c);
+               rt2800_bbp_write(rt2x00dev, 106, 0x03);
+       } else {
+               rt2800_bbp_write(rt2x00dev, 105, 0x34);
+               rt2800_bbp_write(rt2x00dev, 106, 0x05);
+       }
 
        rt2800_bbp_write(rt2x00dev, 120, 0x50);
 
@@ -5362,6 +5444,16 @@ static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev)
        rt2800_bbp_write(rt2x00dev, 143, 0xa2);
 
        rt2800_bbp_write(rt2x00dev, 148, 0xc8);
+
+       if (rt2x00_rt(rt2x00dev, RT5350)) {
+               /* Antenna Software OFDM */
+               rt2800_bbp_write(rt2x00dev, 150, 0x40);
+               /* Antenna Software CCK */
+               rt2800_bbp_write(rt2x00dev, 151, 0x30);
+               rt2800_bbp_write(rt2x00dev, 152, 0xa3);
+               /* Clear previously selected antenna */
+               rt2800_bbp_write(rt2x00dev, 154, 0);
+       }
 }
 
 static void rt2800_init_bbp_3390(struct rt2x00_dev *rt2x00dev)
@@ -5662,6 +5754,7 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
                rt2800_init_bbp_3290(rt2x00dev);
                break;
        case RT3352:
+       case RT5350:
                rt2800_init_bbp_3352(rt2x00dev);
                break;
        case RT3390:
@@ -6135,6 +6228,12 @@ static void rt2800_init_rfcsr_3290(struct rt2x00_dev *rt2x00dev)
 
 static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
 {
+       int tx0_int_pa = test_bit(CAPABILITY_INTERNAL_PA_TX0,
+                                 &rt2x00dev->cap_flags);
+       int tx1_int_pa = test_bit(CAPABILITY_INTERNAL_PA_TX1,
+                                 &rt2x00dev->cap_flags);
+       u8 rfcsr;
+
        rt2800_rf_init_calibration(rt2x00dev, 30);
 
        rt2800_rfcsr_write(rt2x00dev, 0, 0xf0);
@@ -6170,15 +6269,30 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
        rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
        rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
        rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
-       rt2800_rfcsr_write(rt2x00dev, 34, 0x01);
+       rfcsr = 0x01;
+       if (!tx0_int_pa)
+               rt2x00_set_field8(&rfcsr, RFCSR34_TX0_EXT_PA, 1);
+       if (!tx1_int_pa)
+               rt2x00_set_field8(&rfcsr, RFCSR34_TX1_EXT_PA, 1);
+       rt2800_rfcsr_write(rt2x00dev, 34, rfcsr);
        rt2800_rfcsr_write(rt2x00dev, 35, 0x03);
        rt2800_rfcsr_write(rt2x00dev, 36, 0xbd);
        rt2800_rfcsr_write(rt2x00dev, 37, 0x3c);
        rt2800_rfcsr_write(rt2x00dev, 38, 0x5f);
        rt2800_rfcsr_write(rt2x00dev, 39, 0xc5);
        rt2800_rfcsr_write(rt2x00dev, 40, 0x33);
-       rt2800_rfcsr_write(rt2x00dev, 41, 0x5b);
-       rt2800_rfcsr_write(rt2x00dev, 42, 0x5b);
+       rfcsr = 0x52;
+       if (tx0_int_pa) {
+               rt2x00_set_field8(&rfcsr, RFCSR41_BIT1, 1);
+               rt2x00_set_field8(&rfcsr, RFCSR41_BIT4, 1);
+       }
+       rt2800_rfcsr_write(rt2x00dev, 41, rfcsr);
+       rfcsr = 0x52;
+       if (tx1_int_pa) {
+               rt2x00_set_field8(&rfcsr, RFCSR42_BIT1, 1);
+               rt2x00_set_field8(&rfcsr, RFCSR42_BIT4, 1);
+       }
+       rt2800_rfcsr_write(rt2x00dev, 42, rfcsr);
        rt2800_rfcsr_write(rt2x00dev, 43, 0xdb);
        rt2800_rfcsr_write(rt2x00dev, 44, 0xdb);
        rt2800_rfcsr_write(rt2x00dev, 45, 0xdb);
@@ -6186,15 +6300,20 @@ static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev)
        rt2800_rfcsr_write(rt2x00dev, 47, 0x0d);
        rt2800_rfcsr_write(rt2x00dev, 48, 0x14);
        rt2800_rfcsr_write(rt2x00dev, 49, 0x00);
-       rt2800_rfcsr_write(rt2x00dev, 50, 0x2d);
-       rt2800_rfcsr_write(rt2x00dev, 51, 0x7f);
-       rt2800_rfcsr_write(rt2x00dev, 52, 0x00);
-       rt2800_rfcsr_write(rt2x00dev, 53, 0x52);
-       rt2800_rfcsr_write(rt2x00dev, 54, 0x1b);
-       rt2800_rfcsr_write(rt2x00dev, 55, 0x7f);
-       rt2800_rfcsr_write(rt2x00dev, 56, 0x00);
-       rt2800_rfcsr_write(rt2x00dev, 57, 0x52);
-       rt2800_rfcsr_write(rt2x00dev, 58, 0x1b);
+       rfcsr = 0x2d;
+       if (!tx0_int_pa)
+               rt2x00_set_field8(&rfcsr, RFCSR50_TX0_EXT_PA, 1);
+       if (!tx1_int_pa)
+               rt2x00_set_field8(&rfcsr, RFCSR50_TX1_EXT_PA, 1);
+       rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
+       rt2800_rfcsr_write(rt2x00dev, 51, (tx0_int_pa ? 0x7f : 0x52));
+       rt2800_rfcsr_write(rt2x00dev, 52, (tx0_int_pa ? 0x00 : 0xc0));
+       rt2800_rfcsr_write(rt2x00dev, 53, (tx0_int_pa ? 0x52 : 0xd2));
+       rt2800_rfcsr_write(rt2x00dev, 54, (tx0_int_pa ? 0x1b : 0xc0));
+       rt2800_rfcsr_write(rt2x00dev, 55, (tx1_int_pa ? 0x7f : 0x52));
+       rt2800_rfcsr_write(rt2x00dev, 56, (tx1_int_pa ? 0x00 : 0xc0));
+       rt2800_rfcsr_write(rt2x00dev, 57, (tx0_int_pa ? 0x52 : 0x49));
+       rt2800_rfcsr_write(rt2x00dev, 58, (tx1_int_pa ? 0x1b : 0xc0));
        rt2800_rfcsr_write(rt2x00dev, 59, 0x00);
        rt2800_rfcsr_write(rt2x00dev, 60, 0x00);
        rt2800_rfcsr_write(rt2x00dev, 61, 0x00);
@@ -6415,7 +6534,7 @@ static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev)
        rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
        rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
 
-       rt2800_adjust_freq_offset(rt2x00dev);
+       rt2800_freq_cal_mode1(rt2x00dev);
 
        rt2800_rfcsr_read(rt2x00dev, 18, &rfcsr);
        rt2x00_set_field8(&rfcsr, RFCSR18_XO_TUNE_BYPASS, 1);
@@ -6446,6 +6565,76 @@ static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev)
        /* TODO: enable stream mode support */
 }
 
+static void rt2800_init_rfcsr_5350(struct rt2x00_dev *rt2x00dev)
+{
+       rt2800_rfcsr_write(rt2x00dev, 0, 0xf0);
+       rt2800_rfcsr_write(rt2x00dev, 1, 0x23);
+       rt2800_rfcsr_write(rt2x00dev, 2, 0x50);
+       rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
+       rt2800_rfcsr_write(rt2x00dev, 4, 0x49);
+       rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
+       rt2800_rfcsr_write(rt2x00dev, 6, 0xe0);
+       rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 8, 0xf1);
+       rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
+       rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
+       rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
+       rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
+       if (rt2800_clk_is_20mhz(rt2x00dev))
+               rt2800_rfcsr_write(rt2x00dev, 13, 0x1f);
+       else
+               rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
+       rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 16, 0xc0);
+       rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
+       rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+       rt2800_rfcsr_write(rt2x00dev, 23, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
+       rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
+       rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 29, 0xd0);
+       rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+       rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+       rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+       rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
+       rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
+       rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
+       rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
+       rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
+       rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
+       rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
+       rt2800_rfcsr_write(rt2x00dev, 42, 0xd5);
+       rt2800_rfcsr_write(rt2x00dev, 43, 0x9b);
+       rt2800_rfcsr_write(rt2x00dev, 44, 0x0c);
+       rt2800_rfcsr_write(rt2x00dev, 45, 0xa6);
+       rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
+       rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
+       rt2800_rfcsr_write(rt2x00dev, 49, 0x80);
+       rt2800_rfcsr_write(rt2x00dev, 50, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 51, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
+       rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 54, 0x38);
+       rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
+       rt2800_rfcsr_write(rt2x00dev, 56, 0x82);
+       rt2800_rfcsr_write(rt2x00dev, 57, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 58, 0x39);
+       rt2800_rfcsr_write(rt2x00dev, 59, 0x0b);
+       rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
+       rt2800_rfcsr_write(rt2x00dev, 61, 0xd1);
+       rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
+       rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
+}
+
 static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
 {
        rt2800_rf_init_calibration(rt2x00dev, 2);
@@ -6641,7 +6830,7 @@ static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev)
        rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
        msleep(1);
 
-       rt2800_adjust_freq_offset(rt2x00dev);
+       rt2800_freq_cal_mode1(rt2x00dev);
 
        /* Enable DC filter */
        if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C))
@@ -6683,6 +6872,9 @@ static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
        case RT3593:
                rt2800_init_rfcsr_3593(rt2x00dev);
                break;
+       case RT5350:
+               rt2800_init_rfcsr_5350(rt2x00dev);
+               break;
        case RT5390:
                rt2800_init_rfcsr_5390(rt2x00dev);
                break;
@@ -7060,6 +7252,10 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
            rt2x00_rt(rt2x00dev, RT5390) ||
            rt2x00_rt(rt2x00dev, RT5392))
                rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf);
+       else if (rt2x00_rt(rt2x00dev, RT3352))
+               rf = RF3322;
+       else if (rt2x00_rt(rt2x00dev, RT5350))
+               rf = RF5350;
        else
                rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
 
@@ -7078,6 +7274,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        case RF3290:
        case RF3320:
        case RF3322:
+       case RF5350:
        case RF5360:
        case RF5362:
        case RF5370:
@@ -7149,7 +7346,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
        /*
         * Detect if this device has Bluetooth co-existence.
         */
-       if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST))
+       if (!rt2x00_rt(rt2x00dev, RT3352) &&
+           rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST))
                __set_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags);
 
        /*
@@ -7178,6 +7376,22 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
                                        EIRP_MAX_TX_POWER_LIMIT)
                __set_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags);
 
+       /*
+        * Detect if device uses internal or external PA
+        */
+       rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+
+       if (rt2x00_rt(rt2x00dev, RT3352)) {
+               if (!rt2x00_get_field16(eeprom,
+                   EEPROM_NIC_CONF1_EXTERNAL_TX0_PA_3352))
+                   __set_bit(CAPABILITY_INTERNAL_PA_TX0,
+                             &rt2x00dev->cap_flags);
+               if (!rt2x00_get_field16(eeprom,
+                   EEPROM_NIC_CONF1_EXTERNAL_TX1_PA_3352))
+                   __set_bit(CAPABILITY_INTERNAL_PA_TX1,
+                             &rt2x00dev->cap_flags);
+       }
+
        return 0;
 }
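
These EEPROM-derived CAPABILITY_INTERNAL_PA_TX0/TX1 flags are what rt2800_init_rfcsr_3352() consults above: RT3352 boards wired for an external PA get different values programmed into RFCSR 34, 41/42 and 50-58, while internal-PA boards keep the historical defaults. This is also why the BT_COEXIST EEPROM bit is ignored on RT3352; per the register-header hunk, the 0x4000/0x8000 bit positions are reused there as the external-PA flags.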
 
@@ -7322,6 +7536,27 @@ static const struct rf_channel rf_vals_3x[] = {
        {173, 0x61, 0, 9},
 };
 
+/*
+ * RF value list for rt3xxx SoCs with a 20 MHz crystal (Xtal20)
+ * Supports: 2.4 GHz (all) (RF3322)
+ */
+static const struct rf_channel rf_vals_3x_xtal20[] = {
+       {1,    0xE2,     2,  0x14},
+       {2,    0xE3,     2,  0x14},
+       {3,    0xE4,     2,  0x14},
+       {4,    0xE5,     2,  0x14},
+       {5,    0xE6,     2,  0x14},
+       {6,    0xE7,     2,  0x14},
+       {7,    0xE8,     2,  0x14},
+       {8,    0xE9,     2,  0x14},
+       {9,    0xEA,     2,  0x14},
+       {10,   0xEB,     2,  0x14},
+       {11,   0xEC,     2,  0x14},
+       {12,   0xED,     2,  0x14},
+       {13,   0xEE,     2,  0x14},
+       {14,   0xF0,     2,  0x18},
+};
+
 static const struct rf_channel rf_vals_5592_xtal20[] = {
        /* Channel, N, K, mod, R */
        {1, 482, 4, 10, 3},
@@ -7469,6 +7704,13 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
         */
        rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
+       /*
+        * Change the default retry settings to values corresponding more
+        * closely to minstrel's rate[0].count setting.
+        */
+       rt2x00dev->hw->wiphy->retry_short = 2;
+       rt2x00dev->hw->wiphy->retry_long = 2;
+
        /*
         * Initialize all hw fields.
         */
@@ -7536,6 +7778,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
        case RF3290:
        case RF3320:
        case RF3322:
+       case RF5350:
        case RF5360:
        case RF5362:
        case RF5370:
@@ -7543,7 +7786,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
        case RF5390:
        case RF5392:
                spec->num_channels = 14;
-               spec->channels = rf_vals_3x;
+               if (rt2800_clk_is_20mhz(rt2x00dev))
+                       spec->channels = rf_vals_3x_xtal20;
+               else
+                       spec->channels = rf_vals_3x;
                break;
 
        case RF3052:
@@ -7593,7 +7839,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 
        spec->ht.cap |= rx_chains << IEEE80211_HT_CAP_RX_STBC_SHIFT;
 
-       spec->ht.ampdu_factor = 3;
+       spec->ht.ampdu_factor = (rx_chains > 1) ? 3 : 2;
        spec->ht.ampdu_density = 4;
        spec->ht.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
        if (tx_chains != rx_chains) {
@@ -7669,12 +7915,14 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
        case RF3053:
        case RF3070:
        case RF3290:
+       case RF5350:
        case RF5360:
        case RF5362:
        case RF5370:
        case RF5372:
        case RF5390:
        case RF5392:
+       case RF5592:
                __set_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags);
                break;
        }
@@ -7708,6 +7956,7 @@ static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev)
        case RT3390:
        case RT3572:
        case RT3593:
+       case RT5350:
        case RT5390:
        case RT5392:
        case RT5592:
index 83f1a44fb9b481cb5f2a8dda9e9914b8113e91bb..0a8b4df665fe36bde6be9fd1e20fd232797c8d19 100644 (file)
@@ -183,7 +183,7 @@ int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
                               struct ieee80211_key_conf *key);
 int rt2800_sta_add(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif,
                   struct ieee80211_sta *sta);
-int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, int wcid);
+int rt2800_sta_remove(struct rt2x00_dev *rt2x00dev, struct ieee80211_sta *sta);
 void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
                          const unsigned int filter_flags);
 void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
index f38c44061b5ba664f832d62b4a899b8a80568b1f..205a7b8ac8a7a252845f8e8c5866f2ed4cd1e14d 100644 (file)
@@ -123,7 +123,7 @@ static inline bool rt2800usb_entry_txstatus_timeout(struct queue_entry *entry)
        if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
                return false;
 
-       tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
+       tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(500));
        if (unlikely(tout))
                rt2x00_dbg(entry->queue->rt2x00dev,
                           "TX status timeout for entry %d in queue %d\n",
@@ -435,47 +435,6 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
        return retval;
 }
 
-/*
- * Watchdog handlers
- */
-static void rt2800usb_watchdog(struct rt2x00_dev *rt2x00dev)
-{
-       unsigned int i;
-       u32 reg;
-
-       rt2x00usb_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
-       if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q)) {
-               rt2x00_warn(rt2x00dev, "TX HW queue 0 timed out, invoke forced kick\n");
-
-               rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40012);
-
-               for (i = 0; i < 10; i++) {
-                       udelay(10);
-                       if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q))
-                               break;
-               }
-
-               rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40006);
-       }
-
-       rt2x00usb_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
-       if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q)) {
-               rt2x00_warn(rt2x00dev, "TX HW queue 1 timed out, invoke forced kick\n");
-
-               rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf4000a);
-
-               for (i = 0; i < 10; i++) {
-                       udelay(10);
-                       if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q))
-                               break;
-               }
-
-               rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40006);
-       }
-
-       rt2x00usb_watchdog(rt2x00dev);
-}
-
 /*
  * TX descriptor initialization
  */
@@ -643,10 +602,9 @@ static void rt2800usb_txdone_nostatus(struct rt2x00_dev *rt2x00dev)
                            !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
                                break;
 
-                       if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
+                       if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags) ||
+                           rt2800usb_entry_txstatus_timeout(entry))
                                rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
-                       else if (rt2800usb_entry_txstatus_timeout(entry))
-                               rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
                        else
                                break;
                }
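
Together with the removal of rt2800usb_watchdog() and the 100 ms to 500 ms bump in rt2800usb_entry_txstatus_timeout(), this means an entry whose TX status never arrives is now completed as TXDONE_FAILURE after half a second rather than being flushed as TXDONE_UNKNOWN, giving the status path one unambiguous outcome instead of two.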
@@ -877,7 +835,6 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
        .link_tuner             = rt2800_link_tuner,
        .gain_calibration       = rt2800_gain_calibration,
        .vco_calibration        = rt2800_vco_calibration,
-       .watchdog               = rt2800usb_watchdog,
        .start_queue            = rt2800usb_start_queue,
        .kick_queue             = rt2x00usb_kick_queue,
        .stop_queue             = rt2800usb_stop_queue,
index aa3d4ceef4adf57bc1dde39099c90cca01c39067..26869b3bef45ff9d8ed1e7b4701773c8e7b04251 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/hrtimer.h>
 #include <linux/average.h>
 #include <linux/usb.h>
+#include <linux/clk.h>
 
 #include <net/mac80211.h>
 
@@ -169,6 +170,7 @@ struct rt2x00_chip {
 #define RT3572         0x3572
 #define RT3593         0x3593
 #define RT3883         0x3883  /* WSOC */
+#define RT5350         0x5350  /* WSOC 2.4GHz */
 #define RT5390         0x5390  /* 2.4GHz */
 #define RT5392         0x5392  /* 2.4GHz */
 #define RT5592         0x5592
@@ -627,7 +629,7 @@ struct rt2x00lib_ops {
                        struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta);
        int (*sta_remove) (struct rt2x00_dev *rt2x00dev,
-                          int wcid);
+                          struct ieee80211_sta *sta);
 };
 
 /*
@@ -716,6 +718,8 @@ enum rt2x00_capability_flags {
        CAPABILITY_DOUBLE_ANTENNA,
        CAPABILITY_BT_COEXIST,
        CAPABILITY_VCO_RECALIBRATION,
+       CAPABILITY_INTERNAL_PA_TX0,
+       CAPABILITY_INTERNAL_PA_TX1,
 };
 
 /*
@@ -833,6 +837,10 @@ struct rt2x00_dev {
         */
        struct mutex csr_mutex;
 
+       /*
+        * Mutex to synchronize config and link tuner.
+        */
+       struct mutex conf_mutex;
        /*
         * Current packet filter configuration for the device.
         * This contains all currently active FIF_* flags send
@@ -1005,6 +1013,9 @@ struct rt2x00_dev {
        unsigned int extra_tx_headroom;
 
        struct usb_anchor *anchor;
+
+       /* Clock for System On Chip devices. */
+       struct clk *clk;
 };
 
 struct rt2x00_bar_list_entry {
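
The new clk pointer feeds rt2800_clk_is_20mhz() in the rt2800lib hunks above; how it is populated is not shown in this diff. A plausible probe-time hookup in the SoC glue code, purely as a sketch (the clk_get() call and its location are assumptions):

    /* hypothetical: in the platform (SoC) probe path */
    rt2x00dev->clk = clk_get(&pdev->dev, NULL);
    if (IS_ERR(rt2x00dev->clk))
            rt2x00dev->clk = NULL; /* clk_get_rate(NULL) returns 0, so the
                                    * 20 MHz check simply evaluates false */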
@@ -1389,11 +1400,11 @@ void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop);
  */
 #ifdef CONFIG_RT2X00_LIB_DEBUGFS
 void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
-                           enum rt2x00_dump_type type, struct sk_buff *skb);
+                           enum rt2x00_dump_type type, struct queue_entry *entry);
 #else
 static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
                                          enum rt2x00_dump_type type,
-                                         struct sk_buff *skb)
+                                         struct queue_entry *entry)
 {
 }
 #endif /* CONFIG_RT2X00_LIB_DEBUGFS */
index 6a1f508d472f0e6ac3be534d780d209e90a0278d..350507458ddc4e0bdf77fe0d99e8f9e55c182113 100644 (file)
@@ -249,6 +249,22 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
         */
        rt2x00dev->ops->lib->config(rt2x00dev, &libconf, ieee80211_flags);
 
+       if (conf->flags & IEEE80211_CONF_PS)
+               set_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
+       else
+               clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
+
+       if (conf->flags & IEEE80211_CONF_MONITOR)
+               set_bit(CONFIG_MONITORING, &rt2x00dev->flags);
+       else
+               clear_bit(CONFIG_MONITORING, &rt2x00dev->flags);
+
+       rt2x00dev->curr_band = conf->chandef.chan->band;
+       rt2x00dev->curr_freq = conf->chandef.chan->center_freq;
+       rt2x00dev->tx_power = conf->power_level;
+       rt2x00dev->short_retry = conf->short_frame_max_tx_count;
+       rt2x00dev->long_retry = conf->long_frame_max_tx_count;
+
        /*
         * Some configuration changes affect the link quality
         * which means we need to reset the link tuner.
@@ -271,20 +287,4 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
                                   &rt2x00dev->autowakeup_work,
                                   autowake_timeout - 15);
        }
-
-       if (conf->flags & IEEE80211_CONF_PS)
-               set_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
-       else
-               clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
-
-       if (conf->flags & IEEE80211_CONF_MONITOR)
-               set_bit(CONFIG_MONITORING, &rt2x00dev->flags);
-       else
-               clear_bit(CONFIG_MONITORING, &rt2x00dev->flags);
-
-       rt2x00dev->curr_band = conf->chandef.chan->band;
-       rt2x00dev->curr_freq = conf->chandef.chan->center_freq;
-       rt2x00dev->tx_power = conf->power_level;
-       rt2x00dev->short_retry = conf->short_frame_max_tx_count;
-       rt2x00dev->long_retry = conf->long_frame_max_tx_count;
 }
index 72ae530e4a3bdc499f73ef72e3ce8c85351b8033..964aefdc11f004593d4f171a87895687dde57108 100644 (file)
@@ -157,9 +157,10 @@ void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev,
 }
 
 void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
-                           enum rt2x00_dump_type type, struct sk_buff *skb)
+                           enum rt2x00_dump_type type, struct queue_entry *entry)
 {
        struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf;
+       struct sk_buff *skb = entry->skb;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
        struct sk_buff *skbcopy;
        struct rt2x00dump_hdr *dump_hdr;
@@ -196,8 +197,8 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
        dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf);
        dump_hdr->chip_rev = cpu_to_le16(rt2x00dev->chip.rev);
        dump_hdr->type = cpu_to_le16(type);
-       dump_hdr->queue_index = skbdesc->entry->queue->qid;
-       dump_hdr->entry_index = skbdesc->entry->entry_idx;
+       dump_hdr->queue_index = entry->queue->qid;
+       dump_hdr->entry_index = entry->entry_idx;
        dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec);
        dump_hdr->timestamp_usec = cpu_to_le32(timestamp.tv_usec);
 
index eb7b714436577b1abc64f096608d3c3b80acba24..dd6678109b7e4609533c296f774dea5864df5370 100644 (file)
@@ -87,9 +87,6 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
         */
        rt2x00queue_start_queues(rt2x00dev);
        rt2x00link_start_tuner(rt2x00dev);
-       rt2x00link_start_agc(rt2x00dev);
-       if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
-               rt2x00link_start_vcocal(rt2x00dev);
 
        /*
         * Start watchdog monitoring.
@@ -112,9 +109,6 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
        /*
         * Stop all queues
         */
-       rt2x00link_stop_agc(rt2x00dev);
-       if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
-               rt2x00link_stop_vcocal(rt2x00dev);
        rt2x00link_stop_tuner(rt2x00dev);
        rt2x00queue_stop_queues(rt2x00dev);
        rt2x00queue_flush_queues(rt2x00dev, true);
@@ -369,7 +363,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
         * Send frame to debugfs immediately, after this call is completed
         * we are going to overwrite the skb->cb array.
         */
-       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb);
+       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry);
 
        /*
         * Determine if the frame has been successfully transmitted and
@@ -778,7 +772,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
         */
        rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc);
        rt2x00debug_update_crypto(rt2x00dev, &rxdesc);
-       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry->skb);
+       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry);
 
        /*
         * Initialize RX status information, and send frame
@@ -1319,6 +1313,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
 
        spin_lock_init(&rt2x00dev->irqmask_lock);
        mutex_init(&rt2x00dev->csr_mutex);
+       mutex_init(&rt2x00dev->conf_mutex);
        INIT_LIST_HEAD(&rt2x00dev->bar_list);
        spin_lock_init(&rt2x00dev->bar_list_lock);
 
@@ -1441,21 +1436,6 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
        cancel_work_sync(&rt2x00dev->intf_work);
        cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
        cancel_work_sync(&rt2x00dev->sleep_work);
-#if IS_ENABLED(CONFIG_RT2X00_LIB_USB)
-       if (rt2x00_is_usb(rt2x00dev)) {
-               usb_kill_anchored_urbs(rt2x00dev->anchor);
-               hrtimer_cancel(&rt2x00dev->txstatus_timer);
-               cancel_work_sync(&rt2x00dev->rxdone_work);
-               cancel_work_sync(&rt2x00dev->txdone_work);
-       }
-#endif
-       if (rt2x00dev->workqueue)
-               destroy_workqueue(rt2x00dev->workqueue);
-
-       /*
-        * Free the tx status fifo.
-        */
-       kfifo_free(&rt2x00dev->txstatus_fifo);
 
        /*
         * Kill the tx status tasklet.
@@ -1471,6 +1451,14 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
         */
        rt2x00lib_uninitialize(rt2x00dev);
 
+       if (rt2x00dev->workqueue)
+               destroy_workqueue(rt2x00dev->workqueue);
+
+       /*
+        * Free the tx status fifo.
+        */
+       kfifo_free(&rt2x00dev->txstatus_fifo);
+
        /*
         * Free extra components
         */
index fb7c349ccc9c04374c5e6a362fb93de94fa77cb1..9ddc1681b86ae7f5e785e81da1de4896c133bfa2 100644 (file)
  * Interval defines
  */
 #define WATCHDOG_INTERVAL      round_jiffies_relative(HZ)
-#define LINK_TUNE_INTERVAL     round_jiffies_relative(HZ)
-#define AGC_INTERVAL           round_jiffies_relative(4 * HZ)
-#define VCO_INTERVAL           round_jiffies_relative(10 * HZ) /* 10 sec */
+#define LINK_TUNE_SECONDS      1
+#define LINK_TUNE_INTERVAL     round_jiffies_relative(LINK_TUNE_SECONDS * HZ)
+#define AGC_SECONDS            4
+#define VCO_SECONDS            10
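+
+These new *_SECONDS defines fold the old AGC and VCO delayed-work items into
+the one-second link tuner. A minimal userspace sketch (plain C, not kernel
+code; assumes the tuner counter increments once per LINK_TUNE_INTERVAL) of
+the modulo scheduling used further down:
+
+#include <stdio.h>
+
+#define LINK_TUNE_SECONDS 1
+#define AGC_SECONDS       4
+#define VCO_SECONDS       10
+
+int main(void)
+{
+	for (unsigned int count = 0; count < 20; count++) {
+		printf("tick %2u: tuner", count);
+		/* gain calibration every 4th tuner run */
+		if ((count % (AGC_SECONDS / LINK_TUNE_SECONDS)) == 0)
+			printf(" + AGC");
+		/* VCO recalibration every 10th tuner run */
+		if ((count % (VCO_SECONDS / LINK_TUNE_SECONDS)) == 0)
+			printf(" + VCO");
+		printf("\n");
+	}
+	return 0;
+}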
 
 /*
  * rt2x00_rate: Per rate device information
@@ -270,30 +271,6 @@ void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev);
  */
 void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev);
 
-/**
- * rt2x00link_start_agc - Start periodic gain calibration
- * @rt2x00dev: Pointer to &struct rt2x00_dev.
- */
-void rt2x00link_start_agc(struct rt2x00_dev *rt2x00dev);
-
-/**
- * rt2x00link_start_vcocal - Start periodic VCO calibration
- * @rt2x00dev: Pointer to &struct rt2x00_dev.
- */
-void rt2x00link_start_vcocal(struct rt2x00_dev *rt2x00dev);
-
-/**
- * rt2x00link_stop_agc - Stop periodic gain calibration
- * @rt2x00dev: Pointer to &struct rt2x00_dev.
- */
-void rt2x00link_stop_agc(struct rt2x00_dev *rt2x00dev);
-
-/**
- * rt2x00link_stop_vcocal - Stop periodic VCO calibration
- * @rt2x00dev: Pointer to &struct rt2x00_dev.
- */
-void rt2x00link_stop_vcocal(struct rt2x00_dev *rt2x00dev);
-
 /**
  * rt2x00link_register - Initialize link tuning & watchdog functionality
  * @rt2x00dev: Pointer to &struct rt2x00_dev.
index 017188e5a73628b2c0454fbb1916646da9ab650d..2010a7715f2115a913182358d825b0d2492818cc 100644 (file)
@@ -233,15 +233,13 @@ void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
        struct link *link = &rt2x00dev->link;
 
        /*
-        * Link tuning should only be performed when
-        * an active sta interface exists. AP interfaces
-        * don't need link tuning and monitor mode interfaces
-        * should never have to work with link tuners.
+        * Single monitor mode interfaces should never have
+        * to work with link tuners.
         */
-       if (!rt2x00dev->intf_sta_count)
+       if (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count)
                return;
 
-       /**
+       /*
         * While scanning, link tuning is disabled. By default
         * the most sensitive settings will be used to make sure
         * that all beacons and probe responses will be received
@@ -308,21 +306,10 @@ static void rt2x00link_reset_qual(struct rt2x00_dev *rt2x00dev)
        qual->tx_failed = 0;
 }
 
-static void rt2x00link_tuner(struct work_struct *work)
+static void rt2x00link_tuner_sta(struct rt2x00_dev *rt2x00dev, struct link *link)
 {
-       struct rt2x00_dev *rt2x00dev =
-           container_of(work, struct rt2x00_dev, link.work.work);
-       struct link *link = &rt2x00dev->link;
        struct link_qual *qual = &rt2x00dev->link.qual;
 
-       /*
-        * When the radio is shutting down we should
-        * immediately cease all link tuning.
-        */
-       if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
-           test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags))
-               return;
-
        /*
         * Update statistics.
         */
@@ -360,6 +347,38 @@ static void rt2x00link_tuner(struct work_struct *work)
         */
        if (rt2x00lib_antenna_diversity(rt2x00dev))
                rt2x00link_reset_qual(rt2x00dev);
+}
+
+static void rt2x00link_tuner(struct work_struct *work)
+{
+       struct rt2x00_dev *rt2x00dev =
+           container_of(work, struct rt2x00_dev, link.work.work);
+       struct link *link = &rt2x00dev->link;
+
+       /*
+        * When the radio is shutting down we should
+        * immediately cease all link tuning.
+        */
+       if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
+           test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags))
+               return;
+
+       /* Do not race with rt2x00mac_config(). */
+       mutex_lock(&rt2x00dev->conf_mutex);
+
+       if (rt2x00dev->intf_sta_count)
+               rt2x00link_tuner_sta(rt2x00dev, link);
+
+       if (rt2x00dev->ops->lib->gain_calibration &&
+           (link->count % (AGC_SECONDS / LINK_TUNE_SECONDS)) == 0)
+               rt2x00dev->ops->lib->gain_calibration(rt2x00dev);
+
+       if (rt2x00dev->ops->lib->vco_calibration &&
+           rt2x00_has_cap_vco_recalibration(rt2x00dev) &&
+           (link->count % (VCO_SECONDS / LINK_TUNE_SECONDS)) == 0)
+               rt2x00dev->ops->lib->vco_calibration(rt2x00dev);
+
+       mutex_unlock(&rt2x00dev->conf_mutex);
 
        /*
         * Increase tuner counter, and reschedule the next link tuner run.
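
For reference, a self-contained pthreads analogue (userspace C, not the
kernel mutex API; everything except the conf_mutex name is invented) of the
serialization the new conf_mutex establishes between the tuner work item
above and rt2x00mac_config() further down:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *tuner_work(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&conf_mutex);	/* do not race with config */
	puts("tuner: sta tuning + gain/VCO calibration");
	pthread_mutex_unlock(&conf_mutex);
	return NULL;
}

static void *mac_config(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&conf_mutex);	/* do not race with tuner */
	puts("config: reprogram channel, txpower and antenna");
	pthread_mutex_unlock(&conf_mutex);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, tuner_work, NULL);
	pthread_create(&b, NULL, mac_config, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Whichever thread takes conf_mutex first runs to completion before the other
starts, which is the guarantee the driver needs while the tuner touches the
same registers as a configuration change.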
@@ -408,85 +427,8 @@ static void rt2x00link_watchdog(struct work_struct *work)
                                             WATCHDOG_INTERVAL);
 }
 
-void rt2x00link_start_agc(struct rt2x00_dev *rt2x00dev)
-{
-       struct link *link = &rt2x00dev->link;
-
-       if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
-           rt2x00dev->ops->lib->gain_calibration)
-               ieee80211_queue_delayed_work(rt2x00dev->hw,
-                                            &link->agc_work,
-                                            AGC_INTERVAL);
-}
-
-void rt2x00link_start_vcocal(struct rt2x00_dev *rt2x00dev)
-{
-       struct link *link = &rt2x00dev->link;
-
-       if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
-           rt2x00dev->ops->lib->vco_calibration)
-               ieee80211_queue_delayed_work(rt2x00dev->hw,
-                                            &link->vco_work,
-                                            VCO_INTERVAL);
-}
-
-void rt2x00link_stop_agc(struct rt2x00_dev *rt2x00dev)
-{
-       cancel_delayed_work_sync(&rt2x00dev->link.agc_work);
-}
-
-void rt2x00link_stop_vcocal(struct rt2x00_dev *rt2x00dev)
-{
-       cancel_delayed_work_sync(&rt2x00dev->link.vco_work);
-}
-
-static void rt2x00link_agc(struct work_struct *work)
-{
-       struct rt2x00_dev *rt2x00dev =
-           container_of(work, struct rt2x00_dev, link.agc_work.work);
-       struct link *link = &rt2x00dev->link;
-
-       /*
-        * When the radio is shutting down we should
-        * immediately cease the watchdog monitoring.
-        */
-       if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
-               return;
-
-       rt2x00dev->ops->lib->gain_calibration(rt2x00dev);
-
-       if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
-               ieee80211_queue_delayed_work(rt2x00dev->hw,
-                                            &link->agc_work,
-                                            AGC_INTERVAL);
-}
-
-static void rt2x00link_vcocal(struct work_struct *work)
-{
-       struct rt2x00_dev *rt2x00dev =
-           container_of(work, struct rt2x00_dev, link.vco_work.work);
-       struct link *link = &rt2x00dev->link;
-
-       /*
-        * When the radio is shutting down we should
-        * immediately cease the VCO calibration.
-        */
-       if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
-               return;
-
-       rt2x00dev->ops->lib->vco_calibration(rt2x00dev);
-
-       if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
-               ieee80211_queue_delayed_work(rt2x00dev->hw,
-                                            &link->vco_work,
-                                            VCO_INTERVAL);
-}
-
 void rt2x00link_register(struct rt2x00_dev *rt2x00dev)
 {
-       INIT_DELAYED_WORK(&rt2x00dev->link.agc_work, rt2x00link_agc);
-       if (rt2x00_has_cap_vco_recalibration(rt2x00dev))
-               INIT_DELAYED_WORK(&rt2x00dev->link.vco_work, rt2x00link_vcocal);
        INIT_DELAYED_WORK(&rt2x00dev->link.watchdog_work, rt2x00link_watchdog);
        INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00link_tuner);
 }
index 13da95a24cf77bb366481252226f3c4566352d67..ecc96312a370377b1d29bc6f4dd450062a6538b4 100644 (file)
@@ -320,6 +320,9 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
         */
        rt2x00queue_stop_queue(rt2x00dev->rx);
 
+       /* Do not race with the link tuner. */
+       mutex_lock(&rt2x00dev->conf_mutex);
+
        /*
         * When we've just turned on the radio, we want to reprogram
         * everything to ensure a consistent state
@@ -335,6 +338,8 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
         */
        rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant);
 
+       mutex_unlock(&rt2x00dev->conf_mutex);
+
        /* Turn RX back on */
        rt2x00queue_start_queue(rt2x00dev->rx);
 
@@ -539,9 +544,8 @@ int rt2x00mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                         struct ieee80211_sta *sta)
 {
        struct rt2x00_dev *rt2x00dev = hw->priv;
-       struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta);
 
-       return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta_priv->wcid);
+       return rt2x00dev->ops->lib->sta_remove(rt2x00dev, sta);
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_sta_remove);
 
@@ -739,7 +743,8 @@ void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                return;
 
        tx_queue_for_each(rt2x00dev, queue)
-               rt2x00queue_flush_queue(queue, drop);
+               if (!rt2x00queue_empty(queue))
+                       rt2x00queue_flush_queue(queue, drop);
 }
 EXPORT_SYMBOL_GPL(rt2x00mac_flush);
 
index f0178fd4fe5ff8c078749e6df8b82d1ea7a26a0c..da38d254c26f042be2beadb1a0bed76660a65bc6 100644 (file)
@@ -101,7 +101,7 @@ void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop)
        unsigned int i;
 
        for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
-               msleep(10);
+               msleep(50);
 }
 EXPORT_SYMBOL_GPL(rt2x00mmio_flush_queue);
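
With the sleep raised from 10 ms to 50 ms, the unchanged 10-iteration bound
gives the hardware up to 10 * 50 ms = 500 ms to drain a queue instead of the
previous ~100 ms; the USB flush loop below gets the same budget increase.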
 
index 68b620b2462f1d149387feb4f33cde8359e929c2..e1660b92b20c7793c88ea5d470e1d793f495bbf6 100644 (file)
@@ -83,7 +83,6 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
         */
        skbdesc = get_skb_frame_desc(skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
-       skbdesc->entry = entry;
 
        if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) {
                dma_addr_t skb_dma;
@@ -306,13 +305,12 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
        struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct rt2x00_sta *sta_priv = NULL;
+       u8 density = 0;
 
        if (sta) {
-               txdesc->u.ht.mpdu_density =
-                   sta->ht_cap.ampdu_density;
-
                sta_priv = sta_to_rt2x00_sta(sta);
                txdesc->u.ht.wcid = sta_priv->wcid;
+               density = sta->ht_cap.ampdu_density;
        }
 
        /*
@@ -345,8 +343,6 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
                return;
        }
 
-       txdesc->u.ht.ba_size = 7;       /* FIXME: What value is needed? */
-
        /*
         * Only one STBC stream is supported for now.
         */
@@ -358,8 +354,11 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
         * frames that are intended to probe a specific tx rate.
         */
        if (tx_info->flags & IEEE80211_TX_CTL_AMPDU &&
-           !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
+           !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
                __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags);
+               txdesc->u.ht.mpdu_density = density;
+               txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
+       }
 
        /*
         * Set 40Mhz mode if necessary (for legacy rates this will
@@ -544,7 +543,7 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
         * All processing on the frame has been completed, this means
         * it is now ready to be dumped to userspace through debugfs.
         */
-       rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
+       rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry);
 }
 
 static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
@@ -689,7 +688,6 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
                goto out;
        }
 
-       skbdesc->entry = entry;
        entry->skb = skb;
 
        /*
@@ -774,7 +772,6 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
         */
        skbdesc = get_skb_frame_desc(intf->beacon->skb);
        memset(skbdesc, 0, sizeof(*skbdesc));
-       skbdesc->entry = intf->beacon;
 
        /*
         * Send beacon to hardware.
index 2233b911a1d7d7c5992db5681dcc1907d9719769..22d18818e85004261bbc6c8e6c63d6c8208058ac 100644 (file)
@@ -116,8 +116,6 @@ struct skb_frame_desc {
        __le32 iv[2];
 
        dma_addr_t skb_dma;
-
-       struct queue_entry *entry;
 };
 
 /**
index 69a0cdadb07f3d46a23fb8530c62d631233de743..29250f79c4a4bffc0996222eb2c6381877f47c6f 100644 (file)
@@ -93,6 +93,10 @@ int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops)
        rt2x00dev->irq = platform_get_irq(pdev, 0);
        rt2x00dev->name = pdev->dev.driver->name;
 
+       rt2x00dev->clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(rt2x00dev->clk))
+               rt2x00dev->clk = NULL;
+
        rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
 
        retval = rt2x00soc_alloc_reg(rt2x00dev);
index 6005e14213ca982c12cef1b1297c564b974aded0..c696f0ad6a68382c67af596516ba2b0b45270382 100644 (file)
@@ -319,10 +319,8 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
                          entry->skb->data, length,
                          rt2x00usb_interrupt_txdone, entry);
 
-       usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
        status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
        if (status) {
-               usb_unanchor_urb(entry_priv->urb);
                if (status == -ENODEV)
                        clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
                set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -410,10 +408,8 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
                          entry->skb->data, entry->skb->len,
                          rt2x00usb_interrupt_rxdone, entry);
 
-       usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
        status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
        if (status) {
-               usb_unanchor_urb(entry_priv->urb);
                if (status == -ENODEV)
                        clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
                set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -517,7 +513,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
                 * Wait for a little while to give the driver
                 * the opportunity to recover itself.
                 */
-               msleep(10);
+               msleep(50);
        }
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue);
@@ -744,6 +740,11 @@ void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
 {
        struct data_queue *queue;
 
+       usb_kill_anchored_urbs(rt2x00dev->anchor);
+       hrtimer_cancel(&rt2x00dev->txstatus_timer);
+       cancel_work_sync(&rt2x00dev->rxdone_work);
+       cancel_work_sync(&rt2x00dev->txdone_work);
+
        queue_for_each(rt2x00dev, queue)
                rt2x00usb_free_entries(queue);
 }
@@ -824,10 +825,6 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
        if (retval)
                goto exit_free_device;
 
-       retval = rt2x00lib_probe_dev(rt2x00dev);
-       if (retval)
-               goto exit_free_reg;
-
        rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
                                        sizeof(struct usb_anchor),
                                        GFP_KERNEL);
@@ -835,10 +832,17 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
                retval = -ENOMEM;
                goto exit_free_reg;
        }
-
        init_usb_anchor(rt2x00dev->anchor);
+
+       retval = rt2x00lib_probe_dev(rt2x00dev);
+       if (retval)
+               goto exit_free_anchor;
+
        return 0;
 
+exit_free_anchor:
+       usb_kill_anchored_urbs(rt2x00dev->anchor);
+
 exit_free_reg:
        rt2x00usb_free_reg(rt2x00dev);
 
index 5306a3b2622d0233b2114ab1b1c14217cd2e11fd..973d418b81130321a95d0f4e1dacb11c4a225e20 100644 (file)
@@ -1903,8 +1903,7 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
 
        rt2x00_desc_read(txd, 5, &word);
        rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->queue->qid);
-       rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE,
-                          skbdesc->entry->entry_idx);
+       rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, entry->entry_idx);
        rt2x00_set_field32(&word, TXD_W5_TX_POWER,
                           TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power));
        rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
@@ -1989,7 +1988,7 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
        /*
         * Dump beacon to userspace through debugfs.
         */
-       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
+       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
 
        /*
         * Write entire beacon with descriptor and padding to register.
index 1a29c4d205a5ec6272371940185efe07848eb7d2..bb8d307a789ff77a619dc05fc045e58f79ee624b 100644 (file)
@@ -1557,7 +1557,7 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
        /*
         * Dump beacon to userspace through debugfs.
         */
-       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
+       rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);
 
        /*
         * Write entire beacon with descriptor and padding to register.
index df551b2b56ebc9ca0cea244e2dcac22153cf7129..95e3993d8a3319f5761cd3670dc7b7d15e25c5ed 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
index f9e2050812ab356babc702cd3dac151b395cc889..a41a29612582fdec94e14caa23b1afa0369fc4e1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * RTL8XXXU mac80211 USB driver - 8188c/8188r/8192c specific subdriver
  *
- * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com>
  *
  * Portions, notably calibration code:
  * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
index a1178c5d6ad8d4a55d0642b73f94396bb2e69a13..80fee699f58a0914440689481ee13e44eb113ed2 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * RTL8XXXU mac80211 USB driver - 8192e specific subdriver
  *
- * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com>
  *
  * Portions, notably calibration code:
  * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
index aef373028155fb8061d8fdd6eb7fd32ccb46b59b..174631132b96e4ca2d25689cec0e8b80d512bfbd 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * RTL8XXXU mac80211 USB driver - 8723a specific subdriver
  *
- * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com>
  *
  * Portions, notably calibration code:
  * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
index 02b8ddd98a95d207323dcac13d514c0b987c5afe..c4b86a84a721d06df7cb93604b5907739ad85a13 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * RTL8XXXU mac80211 USB driver - 8723b specific subdriver
  *
- * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com>
  *
  * Portions, notably calibration code:
  * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
index 3a86675020a210f4bd325b2d8376822e19203d59..e544dd1d618c3cea36d1a5d11068295e008c13c5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * RTL8XXXU mac80211 USB driver
  *
- * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com>
  *
  * Portions, notably calibration code:
  * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
@@ -48,7 +48,7 @@ static bool rtl8xxxu_dma_aggregation;
 static int rtl8xxxu_dma_agg_timeout = -1;
 static int rtl8xxxu_dma_agg_pages = -1;
 
-MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
+MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@gmail.com>");
 MODULE_DESCRIPTION("RTL8XXXu USB mac80211 Wireless LAN Driver");
 MODULE_LICENSE("GPL");
 MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin");
@@ -6000,6 +6000,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
                case 0x8176:
                case 0x8178:
                case 0x817f:
+               case 0x818b:
                        untested = 0;
                        break;
                }
@@ -6196,6 +6197,12 @@ static struct usb_device_id dev_table[] = {
        .driver_info = (unsigned long)&rtl8723au_fops},
 {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818b, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8192eu_fops},
+/* TP-Link TL-WN822N v4 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0108, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192eu_fops},
+/* D-Link DWA-131 rev E1, tested by David Patiño */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3319, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192eu_fops},
 /* Tested by Myckel Habets */
 {USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0109, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8192eu_fops},
@@ -6347,6 +6354,13 @@ static struct usb_device_id dev_table[] = {
        .driver_info = (unsigned long)&rtl8192cu_fops},
 {USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7822, 0xff, 0xff, 0xff),
        .driver_info = (unsigned long)&rtl8192cu_fops},
+/* found in rtl8192eu vendor driver */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0107, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192eu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(0x2019, 0xab33, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192eu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818c, 0xff, 0xff, 0xff),
+       .driver_info = (unsigned long)&rtl8192eu_fops},
 #endif
 { }
 };
index 315ccfb2dff5fa5d52e6304b809323f0cf780e2a..3d3e2e1ada6face1783d9e0e38e3c1f1c6664c6f 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2017 Jes Sorensen <Jes.Sorensen@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
index 4ac928bf1f8e6c77f68386c353c20f26168c77e4..caea350f05aac7b2e3dc7137b0b4363abcd8c2d4 100644 (file)
@@ -207,8 +207,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
         *highest supported RX rate
         */
        if (rtlpriv->dm.supp_phymode_switch) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
-                        "Support phy mode switch\n");
+               pr_info("Support phy mode switch\n");
 
                ht_cap->mcs.rx_mask[0] = 0xFF;
                ht_cap->mcs.rx_mask[1] = 0xFF;
@@ -389,8 +388,8 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
                        /* <4> set mac->sband to wiphy->sband */
                        hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
                } else {
-                       RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Err BAND %d\n",
-                                rtlhal->current_bandtype);
+                       pr_err("Err BAND %d\n",
+                              rtlhal->current_bandtype);
                }
        }
        /* <5> set hw caps */
@@ -476,6 +475,8 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
                          (void *)rtl_swlps_rfon_wq_callback);
        INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq,
                          (void *)rtl_fwevt_wq_callback);
+       INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq,
+                         (void *)rtl_c2hcmd_wq_callback);
 
 }
 
@@ -490,6 +491,7 @@ void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
        cancel_delayed_work(&rtlpriv->works.ps_work);
        cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
        cancel_delayed_work(&rtlpriv->works.fwevt_wq);
+       cancel_delayed_work(&rtlpriv->works.c2hcmd_wq);
 }
 EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
 
@@ -544,7 +546,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
         * mac80211 hw  in _rtl_init_mac80211.
         */
        if (rtl_regd_init(hw, rtl_reg_notifier)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "REGD init failed\n");
+               pr_err("REGD init failed\n");
                return 1;
        }
 
@@ -557,6 +559,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
        spin_lock_init(&rtlpriv->locks.rf_lock);
        spin_lock_init(&rtlpriv->locks.waitq_lock);
        spin_lock_init(&rtlpriv->locks.entry_list_lock);
+       spin_lock_init(&rtlpriv->locks.c2hcmd_lock);
        spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
        spin_lock_init(&rtlpriv->locks.check_sendpkt_lock);
        spin_lock_init(&rtlpriv->locks.fw_ps_lock);
@@ -564,6 +567,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
        spin_lock_init(&rtlpriv->locks.iqk_lock);
        /* <5> init list */
        INIT_LIST_HEAD(&rtlpriv->entry_list);
+       INIT_LIST_HEAD(&rtlpriv->c2hcmd_list);
 
        rtlmac->link_state = MAC80211_NOLINK;
 
@@ -576,6 +580,7 @@ EXPORT_SYMBOL_GPL(rtl_init_core);
 
 void rtl_deinit_core(struct ieee80211_hw *hw)
 {
+       rtl_c2hcmd_launcher(hw, 0);
 }
 EXPORT_SYMBOL_GPL(rtl_deinit_core);
 
@@ -1694,8 +1699,7 @@ void rtl_watchdog_wq_callback(void *data)
                         * we should reconnect this AP
                         */
                        if (rtlpriv->link_info.roam_times >= 5) {
-                               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                        "AP off, try to reconnect now\n");
+                               pr_err("AP off, try to reconnect now\n");
                                rtlpriv->link_info.roam_times = 0;
                                ieee80211_connection_loss(
                                        rtlpriv->mac80211.vif);
@@ -1731,6 +1735,93 @@ void rtl_fwevt_wq_callback(void *data)
 
        rtlpriv->cfg->ops->c2h_command_handle(hw);
 }
+
+void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       unsigned long flags;
+       struct rtl_c2hcmd *c2hcmd;
+
+       c2hcmd = kmalloc(sizeof(*c2hcmd), GFP_KERNEL);
+
+       if (!c2hcmd)
+               goto label_err;
+
+       c2hcmd->val = kmalloc(len, GFP_KERNEL);
+
+       if (!c2hcmd->val)
+               goto label_err2;
+
+       /* fill data */
+       c2hcmd->tag = tag;
+       c2hcmd->len = len;
+       memcpy(c2hcmd->val, val, len);
+
+       /* enqueue */
+       spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags);
+
+       list_add_tail(&c2hcmd->list, &rtlpriv->c2hcmd_list);
+
+       spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags);
+
+       /* wake up wq */
+       queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.c2hcmd_wq, 0);
+
+       return;
+
+label_err2:
+       kfree(c2hcmd);
+
+label_err:
+       RT_TRACE(rtlpriv, COMP_CMD, DBG_WARNING,
+                "C2H cmd enqueue fail.\n");
+}
+EXPORT_SYMBOL(rtl_c2hcmd_enqueue);
+
+void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       unsigned long flags;
+       struct rtl_c2hcmd *c2hcmd;
+       int i;
+
+       for (i = 0; i < 200; i++) {
+               /* dequeue a task */
+               spin_lock_irqsave(&rtlpriv->locks.c2hcmd_lock, flags);
+
+               c2hcmd = list_first_entry_or_null(&rtlpriv->c2hcmd_list,
+                                                 struct rtl_c2hcmd, list);
+
+               if (c2hcmd)
+                       list_del(&c2hcmd->list);
+
+               spin_unlock_irqrestore(&rtlpriv->locks.c2hcmd_lock, flags);
+
+               /* do it */
+               if (!c2hcmd)
+                       break;
+
+               if (rtlpriv->cfg->ops->c2h_content_parsing && exec)
+                       rtlpriv->cfg->ops->c2h_content_parsing(hw,
+                                       c2hcmd->tag, c2hcmd->len, c2hcmd->val);
+
+               /* free */
+               kfree(c2hcmd->val);
+
+               kfree(c2hcmd);
+       }
+}
+
+void rtl_c2hcmd_wq_callback(void *data)
+{
+       struct rtl_works *rtlworks = container_of_dwork_rtl(data,
+                                                           struct rtl_works,
+                                                           c2hcmd_wq);
+       struct ieee80211_hw *hw = rtlworks->hw;
+
+       rtl_c2hcmd_launcher(hw, 1);
+}
+
 void rtl_easy_concurrent_retrytimer_callback(unsigned long data)
 {
        struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
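
The rtl_c2hcmd_* helpers added above form a classic deferred-work queue:
rtl_c2hcmd_enqueue() copies the payload and appends it under c2hcmd_lock, and
rtl_c2hcmd_launcher() drains at most 200 entries per run, parsing them when
exec is non-zero or merely freeing them when rtl_deinit_core() drains with
exec = 0. A rough single-threaded userspace sketch of the same pattern (plain
C, no locking, all names invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct c2hcmd {
	struct c2hcmd *next;
	unsigned char tag, len;
	unsigned char *val;
};

static struct c2hcmd *head, **tail = &head;

/* producer: copy the payload and append it (the kernel version holds
 * c2hcmd_lock around the list update) */
static void enqueue(unsigned char tag, unsigned char len, const void *val)
{
	struct c2hcmd *c = malloc(sizeof(*c));

	if (!c)
		return;
	c->val = malloc(len);
	if (!c->val) {
		free(c);
		return;
	}
	c->tag = tag;
	c->len = len;
	memcpy(c->val, val, len);
	c->next = NULL;
	*tail = c;
	tail = &c->next;
}

/* consumer: pop at most 200 entries; parse only when exec is set */
static void launcher(int exec)
{
	for (int i = 0; i < 200 && head; i++) {
		struct c2hcmd *c = head;

		head = c->next;
		if (!head)
			tail = &head;
		if (exec)
			printf("parse tag %u len %u\n",
			       (unsigned)c->tag, (unsigned)c->len);
		free(c->val);
		free(c);
	}
}

int main(void)
{
	enqueue(1, 4, "abcd");
	enqueue(2, 2, "xy");
	launcher(1);	/* normal worker run: parse and free */
	enqueue(3, 1, "z");
	launcher(0);	/* teardown drain: free without parsing */
	return 0;
}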
@@ -1886,8 +1977,7 @@ void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
                                                      (u8 *)&iotype);
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Unknown Scan Backup operation.\n");
+                       pr_err("Unknown Scan Backup operation.\n");
                        break;
                }
        }
@@ -2086,65 +2176,6 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
 }
 EXPORT_SYMBOL_GPL(rtl_recognize_peer);
 
-/*********************************************************
- *
- * sysfs functions
- *
- *********************************************************/
-static ssize_t rtl_show_debug_level(struct device *d,
-                                   struct device_attribute *attr, char *buf)
-{
-       struct ieee80211_hw *hw = dev_get_drvdata(d);
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
-       return sprintf(buf, "0x%08X\n", rtlpriv->dbg.global_debuglevel);
-}
-
-static ssize_t rtl_store_debug_level(struct device *d,
-                                    struct device_attribute *attr,
-                                    const char *buf, size_t count)
-{
-       struct ieee80211_hw *hw = dev_get_drvdata(d);
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       unsigned long val;
-       int ret;
-
-       ret = kstrtoul(buf, 0, &val);
-       if (ret) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
-                        "%s is not in hex or decimal form.\n", buf);
-       } else {
-               rtlpriv->dbg.global_debuglevel = val;
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
-                        "debuglevel:%x\n",
-                        rtlpriv->dbg.global_debuglevel);
-       }
-
-       return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
-                  rtl_show_debug_level, rtl_store_debug_level);
-
-static struct attribute *rtl_sysfs_entries[] = {
-
-       &dev_attr_debug_level.attr,
-
-       NULL
-};
-
-/*
- * "name" is folder name witch will be
- * put in device directory like :
- * sys/devices/pci0000:00/0000:00:1c.4/
- * 0000:06:00.0/rtl_sysfs
- */
-struct attribute_group rtl_attribute_group = {
-       .name = "rtlsysfs",
-       .attrs = rtl_sysfs_entries,
-};
-EXPORT_SYMBOL_GPL(rtl_attribute_group);
-
 MODULE_AUTHOR("lizhaoming      <chaoming_li@realsil.com.cn>");
 MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
 MODULE_AUTHOR("Larry Finger    <Larry.FInger@lwfinger.net>");
index 74233d601a909b12b6eb33dcd1793db28efca3da..02ff0c5624a773fa0119a616b3bb3e6be42bf0e5 100644 (file)
@@ -136,6 +136,9 @@ int rtl_rx_agg_stop(struct ieee80211_hw *hw,
                    struct ieee80211_sta *sta, u16 tid);
 void rtl_watchdog_wq_callback(void *data);
 void rtl_fwevt_wq_callback(void *data);
+void rtl_c2hcmd_wq_callback(void *data);
+void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec);
+void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, u8 tag, u8 len, u8 *val);
 
 void rtl_get_tcb_desc(struct ieee80211_hw *hw,
                      struct ieee80211_tx_info *info,
@@ -148,7 +151,6 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
 u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
 void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
 u8 rtl_tid_to_ac(u8 tid);
-extern struct attribute_group rtl_attribute_group;
 void rtl_easy_concurrent_retrytimer_callback(unsigned long data);
 extern struct rtl_global_var rtl_global_var;
 void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation);
index d1454d4f08a556998e5e7ea96f886ae7aaedfe4c..20582df0465cf7689e54b12a3aa50018a8071d01 100644 (file)
@@ -1,4 +1,8 @@
-btcoexist-objs :=      halbtc8723b2ant.o       \
+btcoexist-objs :=      halbtc8192e2ant.o       \
+                       halbtc8723b1ant.o       \
+                       halbtc8723b2ant.o       \
+                       halbtc8821a1ant.o       \
+                       halbtc8821a2ant.o       \
                        halbtcoutsrc.o          \
                        rtl_btc.o
 
index a30af6cc21f3c9a60d242a97d33a34794c296d25..ffa1f438424d44c67a9f848b1e43b2969be71737 100644 (file)
@@ -59,9 +59,11 @@ static u32 glcoex_ver_8192e_2ant = 0x34;
 /**************************************************************
  *   local function start with halbtc8192e2ant_
  **************************************************************/
-static u8 halbtc8192e2ant_btrssi_state(u8 level_num, u8 rssi_thresh,
+static u8 halbtc8192e2ant_btrssi_state(struct btc_coexist *btcoexist,
+                                       u8 level_num, u8 rssi_thresh,
                                       u8 rssi_thresh1)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        int btrssi = 0;
        u8 btrssi_state = coex_sta->pre_bt_rssi_state;
 
@@ -70,84 +72,46 @@ static u8 halbtc8192e2ant_btrssi_state(u8 level_num, u8 rssi_thresh,
        if (level_num == 2) {
                if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
                    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                   "BT Rssi pre state = LOW\n");
-                       if (btrssi >= (rssi_thresh +
-                                      BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+                       if (btrssi >=
+                           (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
                                btrssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "BT Rssi state switch to High\n");
-                       } else {
+                       else
                                btrssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "BT Rssi state stay at Low\n");
-                       }
                } else {
-                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                   "BT Rssi pre state = HIGH\n");
-                       if (btrssi < rssi_thresh) {
+                       if (btrssi < rssi_thresh)
                                btrssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "BT Rssi state switch to Low\n");
-                       } else {
+                       else
                                btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "BT Rssi state stay at High\n");
-                       }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                   "BT Rssi thresh error!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], BT Rssi thresh error!!\n");
                        return coex_sta->pre_bt_rssi_state;
                }
-
                if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
                    (coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                   "BT Rssi pre state = LOW\n");
-                       if (btrssi >= (rssi_thresh +
-                                     BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+                       if (btrssi >=
+                           (rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
                                btrssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "BT Rssi state switch to Medium\n");
-                       } else {
+                       else
                                btrssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "BT Rssi state stay at Low\n");
-                       }
                } else if ((coex_sta->pre_bt_rssi_state ==
                            BTC_RSSI_STATE_MEDIUM) ||
                           (coex_sta->pre_bt_rssi_state ==
                            BTC_RSSI_STATE_STAY_MEDIUM)) {
-                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                   "[BTCoex], BT Rssi pre state = MEDIUM\n");
                        if (btrssi >= (rssi_thresh1 +
-                                      BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+                                       BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
                                btrssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "BT Rssi state switch to High\n");
-                       } else if (btrssi < rssi_thresh) {
+                       else if (btrssi < rssi_thresh)
                                btrssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "BT Rssi state switch to Low\n");
-                       } else {
+                       else
                                btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "BT Rssi state stay at Medium\n");
-                       }
                } else {
-                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                   "BT Rssi pre state = HIGH\n");
-                       if (btrssi < rssi_thresh1) {
+                       if (btrssi < rssi_thresh1)
                                btrssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "BT Rssi state switch to Medium\n");
-                       } else {
+                       else
                                btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "BT Rssi state stay at High\n");
-                       }
                }
        }
 
@@ -160,6 +124,7 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
                                         u8 index, u8 level_num, u8 rssi_thresh,
                                         u8 rssi_thresh1)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        int wifirssi = 0;
        u8 wifirssi_state = coex_sta->pre_wifi_rssi_state[index];
 
@@ -171,30 +136,20 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
                    (coex_sta->pre_wifi_rssi_state[index] ==
                     BTC_RSSI_STATE_STAY_LOW)) {
                        if (wifirssi >= (rssi_thresh +
-                                        BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+                                        BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
                                wifirssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "wifi RSSI state switch to High\n");
-                       } else {
+                       else
                                wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "wifi RSSI state stay at Low\n");
-                       }
                } else {
-                       if (wifirssi < rssi_thresh) {
+                       if (wifirssi < rssi_thresh)
                                wifirssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "wifi RSSI state switch to Low\n");
-                       } else {
+                       else
                                wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "wifi RSSI state stay at High\n");
-                       }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                   "wifi RSSI thresh error!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], wifi RSSI thresh error!!\n");
                        return coex_sta->pre_wifi_rssi_state[index];
                }
 
@@ -203,43 +158,26 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
                    (coex_sta->pre_wifi_rssi_state[index] ==
                     BTC_RSSI_STATE_STAY_LOW)) {
                        if (wifirssi >= (rssi_thresh +
-                                        BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+                                        BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
                                wifirssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "wifi RSSI state switch to Medium\n");
-                       } else {
+                       else
                                wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "wifi RSSI state stay at Low\n");
-                       }
                } else if ((coex_sta->pre_wifi_rssi_state[index] ==
                            BTC_RSSI_STATE_MEDIUM) ||
                           (coex_sta->pre_wifi_rssi_state[index] ==
                            BTC_RSSI_STATE_STAY_MEDIUM)) {
                        if (wifirssi >= (rssi_thresh1 +
-                                        BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
+                                        BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT))
                                wifirssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "wifi RSSI state switch to High\n");
-                       } else if (wifirssi < rssi_thresh) {
+                       else if (wifirssi < rssi_thresh)
                                wifirssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "wifi RSSI state switch to Low\n");
-                       } else {
+                       else
                                wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "wifi RSSI state stay at Medium\n");
-                       }
                } else {
-                       if (wifirssi < rssi_thresh1) {
+                       if (wifirssi < rssi_thresh1)
                                wifirssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "wifi RSSI state switch to Medium\n");
-                       } else {
+                       else
                                wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "wifi RSSI state stay at High\n");
-                       }
                }
        }
 
@@ -250,6 +188,7 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
 
 static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        static bool pre_bt_disabled;
        static u32 bt_disable_cnt;
        bool bt_active = true, bt_disabled = false;
@@ -273,26 +212,26 @@ static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist)
                bt_disabled = false;
                btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
                                   &bt_disabled);
-               btc_alg_dbg(ALGO_BT_MONITOR,
-                           "[BTCoex], BT is enabled !!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BT is enabled !!\n");
        } else {
                bt_disable_cnt++;
-               btc_alg_dbg(ALGO_BT_MONITOR,
-                           "[BTCoex], bt all counters = 0, %d times!!\n",
-                           bt_disable_cnt);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bt all counters = 0, %d times!!\n",
+                        bt_disable_cnt);
                if (bt_disable_cnt >= 2) {
                        bt_disabled = true;
                        btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
                                           &bt_disabled);
-                       btc_alg_dbg(ALGO_BT_MONITOR,
-                                   "[BTCoex], BT is disabled !!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], BT is disabled !!\n");
                }
        }
        if (pre_bt_disabled != bt_disabled) {
-               btc_alg_dbg(ALGO_BT_MONITOR,
-                           "[BTCoex], BT is from %s to %s!!\n",
-                           (pre_bt_disabled ? "disabled" : "enabled"),
-                           (bt_disabled ? "disabled" : "enabled"));
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BT is from %s to %s!!\n",
+                        (pre_bt_disabled ? "disabled" : "enabled"),
+                        (bt_disabled ? "disabled" : "enabled"));
                pre_bt_disabled = bt_disabled;
        }
 }
@@ -469,6 +408,7 @@ static void halbtc8192e2ant_limited_rx(struct btc_coexist *btcoexist,
 
 static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
        u32 reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
 
@@ -488,12 +428,12 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
        coex_sta->low_priority_tx = reg_lp_tx;
        coex_sta->low_priority_rx = reg_lp_rx;
 
-       btc_alg_dbg(ALGO_BT_MONITOR,
-                   "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
-                   reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
-       btc_alg_dbg(ALGO_BT_MONITOR,
-                   "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
-                   reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+                reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+                reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
 
        /* reset counter */
        btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -501,15 +441,16 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
 
 static void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        coex_sta->c2h_bt_info_req_sent = true;
 
        h2c_parameter[0] |= BIT0;       /* trigger */
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
-                   h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+                h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
@@ -572,6 +513,7 @@ static void halbtc8192e2ant_update_btlink_info(struct btc_coexist *btcoexist)
 
 static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
        bool bt_hson = false;
@@ -581,8 +523,8 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
 
        if (!bt_link_info->bt_link_exist) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "No BT link exists!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "No BT link exists!!!\n");
                return algorithm;
        }
 
@@ -597,27 +539,29 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
 
        if (numdiffprofile == 1) {
                if (bt_link_info->sco_exist) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "SCO only\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "SCO only\n");
                        algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
                } else {
                        if (bt_link_info->hid_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "HID only\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "HID only\n");
                                algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "A2DP only\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "A2DP only\n");
                                algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP;
                        } else if (bt_link_info->pan_exist) {
                                if (bt_hson) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "PAN(HS) only\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "PAN(HS) only\n");
                                        algorithm =
                                                BT_8192E_2ANT_COEX_ALGO_PANHS;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "PAN(EDR) only\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "PAN(EDR) only\n");
                                        algorithm =
                                                BT_8192E_2ANT_COEX_ALGO_PANEDR;
                                }
@@ -626,21 +570,23 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
        } else if (numdiffprofile == 2) {
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "SCO + HID\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "SCO + HID\n");
                                algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
                        } else if (bt_link_info->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "SCO + A2DP ==> SCO\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "SCO + A2DP ==> SCO\n");
                                algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (bt_link_info->pan_exist) {
                                if (bt_hson) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "SCO + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "SCO + PAN(HS)\n");
                                        algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "SCO + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "SCO + PAN(EDR)\n");
                                        algorithm =
                                                BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
                                }
@@ -649,38 +595,44 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
                                if (stack_info->num_of_hid >= 2) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "HID*2 + A2DP\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "HID*2 + A2DP\n");
                                        algorithm =
                                        BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "HID + A2DP\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "HID + A2DP\n");
                                        algorithm =
                                            BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
                                }
                        } else if (bt_link_info->hid_exist &&
                                   bt_link_info->pan_exist) {
                                if (bt_hson) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "HID + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "HID + PAN(HS)\n");
                                        algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "HID + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "HID + PAN(EDR)\n");
                                        algorithm =
                                            BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (bt_link_info->pan_exist &&
                                   bt_link_info->a2dp_exist) {
                                if (bt_hson) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "A2DP + PAN(HS)\n");
                                        algorithm =
                                            BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "A2DP + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "A2DP + PAN(EDR)\n");
                                        algorithm =
                                            BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP;
                                }
@@ -690,30 +642,34 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "SCO + HID + A2DP ==> HID\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "SCO + HID + A2DP ==> HID\n");
                                algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (bt_link_info->hid_exist &&
                                   bt_link_info->pan_exist) {
                                if (bt_hson) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "SCO + HID + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "SCO + HID + PAN(HS)\n");
                                        algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "SCO + HID + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "SCO + HID + PAN(EDR)\n");
                                        algorithm =
                                                BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
                                }
                        } else if (bt_link_info->pan_exist &&
                                   bt_link_info->a2dp_exist) {
                                if (bt_hson) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "SCO + A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "SCO + A2DP + PAN(HS)\n");
                                        algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "SCO + A2DP + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "SCO + A2DP + PAN(EDR)\n");
                                        algorithm =
                                            BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
                                }
@@ -723,13 +679,15 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
                                if (bt_hson) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "HID + A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "HID + A2DP + PAN(HS)\n");
                                        algorithm =
                                            BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "HID + A2DP + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "HID + A2DP + PAN(EDR)\n");
                                        algorithm =
                                        BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
                                }
@@ -741,12 +699,14 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
                                if (bt_hson) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "ErrorSCO+HID+A2DP+PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "ErrorSCO+HID+A2DP+PAN(HS)\n");
 
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "SCO+HID+A2DP+PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "SCO+HID+A2DP+PAN(EDR)\n");
                                        algorithm =
                                            BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
                                }
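
The ladder above maps the set of concurrently active BT profiles (SCO/HID/A2DP/PAN, with an extra HS-vs-EDR split) onto one coexistence algorithm enum. A minimal user-space sketch of that selection idea, with made-up names and only a few of the combinations covered, might look like:

	#include <stdbool.h>
	#include <stdio.h>

	enum coex_algo { ALGO_UNDEFINED, ALGO_SCO, ALGO_HID_A2DP, ALGO_PANEDR_HID };

	struct link_info { bool sco, hid, a2dp, pan; };

	static enum coex_algo pick_algo(const struct link_info *li, bool hs_on)
	{
		int n = li->sco + li->hid + li->a2dp + li->pan;

		if (n == 2 && li->sco && li->hid)
			return ALGO_SCO;		/* "SCO + HID" */
		if (n == 2 && li->hid && li->a2dp)
			return ALGO_HID_A2DP;		/* "HID + A2DP" */
		if (n == 3 && li->sco && li->hid && li->a2dp)
			return ALGO_PANEDR_HID;		/* "SCO + HID + A2DP ==> HID" */
		(void)hs_on;				/* HS vs. EDR splits omitted here */
		return ALGO_UNDEFINED;
	}

	int main(void)
	{
		struct link_info li = { .sco = true, .hid = true };

		printf("algo = %d\n", pick_algo(&li, false));
		return 0;
	}
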
@@ -760,6 +720,7 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
 static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
                                                 u8 dac_swinglvl)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        /* There are several types of dacswing
@@ -767,10 +728,10 @@ static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
         */
        h2c_parameter[0] = dac_swinglvl;
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl);
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
 }
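
This hunk shows the whole point of the patch in one function: every btc_alg_dbg() call becomes an RT_TRACE() call, and because RT_TRACE filters against per-device debug configuration, each converted function gains a `struct rtl_priv *rtlpriv = btcoexist->adapter;` local. Below is a self-contained sketch of that pattern with stub types and a stand-in macro; it assumes the usual component-mask-plus-level filtering and is not the kernel's actual definition:

	#include <stdio.h>

	struct rtl_priv {
		unsigned int dbg_comp;	/* enabled debug components */
		int dbg_level;		/* verbosity threshold */
	};

	#define COMP_BT_COEXIST	(1U << 0)
	#define DBG_LOUD	4

	#define RT_TRACE(priv, comp, level, fmt, ...)			\
		do {							\
			if (((priv)->dbg_comp & (comp)) &&		\
			    (level) <= (priv)->dbg_level)		\
				printf(fmt, ##__VA_ARGS__);		\
		} while (0)

	struct btc_coexist {
		void *adapter;		/* owning rtl_priv, as in the driver */
	};

	static void set_dac_swing_level(struct btc_coexist *btcoexist,
					unsigned char lvl)
	{
		struct rtl_priv *rtlpriv = btcoexist->adapter;

		RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
			 "[BTCoex], Set Dac Swing Level = 0x%x\n", lvl);
	}

	int main(void)
	{
		struct rtl_priv priv = { COMP_BT_COEXIST, DBG_LOUD };
		struct btc_coexist coex = { &priv };

		set_dac_swing_level(&coex, 0x6);
		return 0;
	}
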
@@ -778,13 +739,14 @@ static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
                                            u8 dec_btpwr_lvl)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        h2c_parameter[0] = dec_btpwr_lvl;
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
-                   dec_btpwr_lvl, h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
+                dec_btpwr_lvl, h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
 }
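
The same host-to-card (H2C) pattern recurs throughout this file: pack one or more parameter bytes, trace them, then hand the buffer to btc_fill_h2c() with an element id (0x62 here; 0x64, 0x68, 0x63, and 0x60 elsewhere). A hedged stand-alone sketch, with fill_h2c() as a printf stub standing in for the chip op:

	#include <stdio.h>

	typedef unsigned char u8;

	static void fill_h2c(u8 element_id, u8 len, const u8 *buf)
	{
		printf("H2C 0x%02x: %u byte(s), first = 0x%02x\n",
		       element_id, len, buf[0]);
	}

	static void set_dec_bt_power(u8 dec_btpwr_lvl)
	{
		u8 h2c_parameter[1] = {0};

		h2c_parameter[0] = dec_btpwr_lvl;
		/* element id 0x62 carries the "decrease BT power" level */
		fill_h2c(0x62, 1, h2c_parameter);
	}

	int main(void)
	{
		set_dec_bt_power(2);
		return 0;
	}
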
@@ -792,15 +754,17 @@ static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist,
                                      bool force_exec, u8 dec_btpwr_lvl)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s Dec BT power level = %d\n",
-                   (force_exec ? "force to" : ""), dec_btpwr_lvl);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s Dec BT power level = %d\n",
+                force_exec ? "force to" : "", dec_btpwr_lvl);
        coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
-                           coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
+                        coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
        }
        halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr);
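
All of these NORMAL_EXEC/FORCE_EXEC wrappers share one caching idiom: store the requested value in a cur_* field, compare it against the pre_* copy unless execution is forced, and copy cur_* to pre_* after committing. (This particular wrapper re-issues the write even when nothing changed; most of the others return early.) A simplified sketch of the idiom, not the driver's coex_dm layout:

	#include <stdbool.h>
	#include <stdio.h>

	struct cached_setting {
		int pre, cur;
	};

	static void commit(int v)
	{
		printf("programming level %d\n", v);
	}

	static void set_level(struct cached_setting *s, bool force_exec, int v)
	{
		s->cur = v;
		if (!force_exec && s->pre == s->cur)
			return;		/* nothing changed, skip the write */
		commit(s->cur);
		s->pre = s->cur;	/* remember what the hardware holds */
	}

	int main(void)
	{
		struct cached_setting s = { -1, -1 };

		set_level(&s, false, 2);	/* writes */
		set_level(&s, false, 2);	/* skipped */
		set_level(&s, true, 2);		/* forced write */
		return 0;
	}
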
 
@@ -810,6 +774,7 @@ static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist,
                                              bool enable_autoreport)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        h2c_parameter[0] = 0;
@@ -817,10 +782,10 @@ static void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist,
        if (enable_autoreport)
                h2c_parameter[0] |= BIT0;
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
-                   (enable_autoreport ? "Enabled!!" : "Disabled!!"),
-                   h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+                (enable_autoreport ? "Enabled!!" : "Disabled!!"),
+                h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
 }
@@ -829,17 +794,19 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
                                          bool force_exec,
                                          bool enable_autoreport)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s BT Auto report = %s\n",
-                   (force_exec ? "force to" : ""),
-                   ((enable_autoreport) ? "Enabled" : "Disabled"));
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s BT Auto report = %s\n",
+                (force_exec ? "force to" : ""),
+                ((enable_autoreport) ? "Enabled" : "Disabled"));
        coex_dm->cur_bt_auto_report = enable_autoreport;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
-                           coex_dm->pre_bt_auto_report,
-                           coex_dm->cur_bt_auto_report);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
+                        coex_dm->pre_bt_auto_report,
+                        coex_dm->cur_bt_auto_report);
 
                if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
                        return;
@@ -853,16 +820,18 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist,
                                            bool force_exec, u8 fw_dac_swinglvl)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s set FW Dac Swing level = %d\n",
-                   (force_exec ? "force to" : ""), fw_dac_swinglvl);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s set FW Dac Swing level = %d\n",
+                (force_exec ? "force to" : ""), fw_dac_swinglvl);
        coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
-                           coex_dm->pre_fw_dac_swing_lvl,
-                           coex_dm->cur_fw_dac_swing_lvl);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+                        coex_dm->pre_fw_dac_swing_lvl,
+                        coex_dm->cur_fw_dac_swing_lvl);
 
                if (coex_dm->pre_fw_dac_swing_lvl ==
                    coex_dm->cur_fw_dac_swing_lvl)
@@ -878,10 +847,12 @@ static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist,
 static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
                                                 bool rx_rf_shrink_on)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (rx_rf_shrink_on) {
                /* Shrink RF Rx LPF corner */
-               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                           "[BTCoex], Shrink RF Rx LPF corner!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Shrink RF Rx LPF corner!!\n");
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
                                          0xfffff, 0xffffc);
        } else {
@@ -889,8 +860,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
                 * After initialization, we can use coex_dm->bt_rf0x1e_backup
                 */
                if (btcoexist->initilized) {
-                       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                                   "[BTCoex], Resume RF Rx LPF corner!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Resume RF Rx LPF corner!!\n");
                        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
                                                  0xfffff,
                                                  coex_dm->bt_rf0x1e_backup);
@@ -901,17 +872,19 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
                                      bool force_exec, bool rx_rf_shrink_on)
 {
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "[BTCoex], %s turn Rx RF Shrink = %s\n",
-                   (force_exec ? "force to" : ""),
-                   ((rx_rf_shrink_on) ? "ON" : "OFF"));
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn Rx RF Shrink = %s\n",
+                (force_exec ? "force to" : ""),
+                ((rx_rf_shrink_on) ? "ON" : "OFF"));
        coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
-                           coex_dm->pre_rf_rx_lpf_shrink,
-                           coex_dm->cur_rf_rx_lpf_shrink);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
+                        coex_dm->pre_rf_rx_lpf_shrink,
+                        coex_dm->cur_rf_rx_lpf_shrink);
 
                if (coex_dm->pre_rf_rx_lpf_shrink ==
                    coex_dm->cur_rf_rx_lpf_shrink)
@@ -926,10 +899,11 @@ static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist,
                                             u32 level)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 val = (u8)level;
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Write SwDacSwing = 0x%x\n", level);
        btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
 }
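
btc_write_1byte_bitmask() above updates only the field selected by the mask (bits 1-5 of register 0x883 for the software DAC swing). A sketch of a masked read-modify-write with that assumed shift-into-mask semantics, against a fake register file:

	#include <stdio.h>

	typedef unsigned char u8;

	static u8 reg[0x1000];

	static void write_1byte_bitmask(unsigned int addr, u8 mask, u8 val)
	{
		int shift = 0;
		u8 old = reg[addr];

		while (mask && !((mask >> shift) & 1))
			shift++;	/* find the mask's lowest set bit */
		reg[addr] = (old & ~mask) | ((val << shift) & mask);
	}

	int main(void)
	{
		reg[0x883] = 0xff;
		write_1byte_bitmask(0x883, 0x3e, 0x06);	/* DAC swing field */
		printf("0x883 = 0x%02x\n", reg[0x883]);	/* -> 0xcd */
		return 0;
	}
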
 
@@ -947,22 +921,24 @@ static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist,
                                     bool force_exec, bool dac_swingon,
                                     u32 dac_swinglvl)
 {
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n",
-                   (force_exec ? "force to" : ""),
-                   ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n",
+                (force_exec ? "force to" : ""),
+                ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl);
        coex_dm->cur_dac_swing_on = dac_swingon;
        coex_dm->cur_dac_swing_lvl = dac_swinglvl;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
-                           coex_dm->pre_dac_swing_on,
-                           coex_dm->pre_dac_swing_lvl);
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
-                           coex_dm->cur_dac_swing_on,
-                           coex_dm->cur_dac_swing_lvl);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
+                        coex_dm->pre_dac_swing_on,
+                        coex_dm->pre_dac_swing_lvl);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
+                        coex_dm->cur_dac_swing_on,
+                        coex_dm->cur_dac_swing_lvl);
 
                if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
                    (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -978,10 +954,12 @@ static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
                                          bool agc_table_en)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        /* BB AGC Gain Table */
        if (agc_table_en) {
-               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                           "[BTCoex], BB Agc Table On!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BB Agc Table On!\n");
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001);
@@ -989,8 +967,8 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001);
        } else {
-               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                           "[BTCoex], BB Agc Table Off!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BB Agc Table Off!\n");
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
@@ -1003,17 +981,19 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist,
                                     bool force_exec, bool agc_table_en)
 {
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "[BTCoex], %s %s Agc Table\n",
-                   (force_exec ? "force to" : ""),
-                   ((agc_table_en) ? "Enable" : "Disable"));
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s %s Agc Table\n",
+                (force_exec ? "force to" : ""),
+                ((agc_table_en) ? "Enable" : "Disable"));
        coex_dm->cur_agc_table_en = agc_table_en;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
-                           coex_dm->pre_agc_table_en,
-                           coex_dm->cur_agc_table_en);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+                        coex_dm->pre_agc_table_en,
+                        coex_dm->cur_agc_table_en);
 
                if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
                        return;
@@ -1027,20 +1007,22 @@ static void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
                                           u32 val0x6c0, u32 val0x6c4,
                                           u32 val0x6c8, u8 val0x6cc)
 {
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
        btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
        btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
        btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
        btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
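
The coexistence break table is programmed as a block of four registers so the table stays self-consistent; the wrapper below then adds the usual pre/cur caching on top. A stub sketch of the block write (the values in main() are illustrative, not a preset taken from this file):

	#include <stdio.h>

	typedef unsigned int u32;
	typedef unsigned char u8;

	static void write32(u32 addr, u32 val)
	{
		printf("w32 0x%03x = 0x%08x\n", addr, val);
	}

	static void write8(u32 addr, u8 val)
	{
		printf("w8  0x%03x = 0x%02x\n", addr, val);
	}

	static void set_coex_table(u32 v6c0, u32 v6c4, u32 v6c8, u8 v6cc)
	{
		write32(0x6c0, v6c0);
		write32(0x6c4, v6c4);
		write32(0x6c8, v6c8);
		write8(0x6cc, v6cc);
	}

	int main(void)
	{
		set_coex_table(0x55555555, 0x5a5a5a5a, 0xffffff, 0x03);
		return 0;
	}
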
 
@@ -1049,30 +1031,32 @@ static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist,
                                       u32 val0x6c0, u32 val0x6c4,
                                       u32 val0x6c8, u8 val0x6cc)
 {
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
-                   (force_exec ? "force to" : ""), val0x6c0);
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
-                   val0x6c4, val0x6c8, val0x6cc);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
+                (force_exec ? "force to" : ""), val0x6c0);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+                val0x6c4, val0x6c8, val0x6cc);
        coex_dm->cur_val0x6c0 = val0x6c0;
        coex_dm->cur_val0x6c4 = val0x6c4;
        coex_dm->cur_val0x6c8 = val0x6c8;
        coex_dm->cur_val0x6cc = val0x6cc;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
-                           coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
-                           coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x\n",
-                           coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
-                           coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
+                        coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
+                        coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x\n",
+                        coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
+                        coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
 
                if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
                    (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1121,14 +1105,15 @@ static void btc8192e2ant_coex_tbl_w_type(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
                                                  bool enable)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        if (enable)
                h2c_parameter[0] |= BIT0; /* function enable */
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
-                   h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+                h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
 }
@@ -1136,18 +1121,20 @@ static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist,
                                          bool force_exec, bool enable)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s turn Ignore WlanAct %s\n",
-                   (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn Ignore WlanAct %s\n",
+                (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
        coex_dm->cur_ignore_wlan_act = enable;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], bPreIgnoreWlanAct = %d ",
-                           coex_dm->pre_ignore_wlan_act);
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "bCurIgnoreWlanAct = %d!!\n",
-                           coex_dm->cur_ignore_wlan_act);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bPreIgnoreWlanAct = %d ",
+                        coex_dm->pre_ignore_wlan_act);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "bCurIgnoreWlanAct = %d!!\n",
+                        coex_dm->cur_ignore_wlan_act);
 
                if (coex_dm->pre_ignore_wlan_act ==
                    coex_dm->cur_ignore_wlan_act)
@@ -1161,6 +1148,8 @@ static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1,
                                        u8 byte2, u8 byte3, u8 byte4, u8 byte5)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        u8 h2c_parameter[5] = {0};
 
        h2c_parameter[0] = byte1;
@@ -1175,11 +1164,11 @@ static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1,
        coex_dm->ps_tdma_para[3] = byte4;
        coex_dm->ps_tdma_para[4] = byte5;
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
-                   h2c_parameter[0],
-                   h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
-                   h2c_parameter[3] << 8 | h2c_parameter[4]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+                h2c_parameter[0],
+                h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+                h2c_parameter[3] << 8 | h2c_parameter[4]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
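
The trace in SetFwPstdma() folds bytes 2-5 of the five-byte PS-TDMA command into a single u32 so the log reads as one hex string after byte 1. A tiny runnable check of that packing expression (the parameter bytes are invented for the demo):

	#include <stdio.h>

	typedef unsigned char u8;
	typedef unsigned int u32;

	int main(void)
	{
		u8 h2c[5] = { 0xe3, 0x1a, 0x1a, 0xe1, 0x90 };
		u32 tail = h2c[1] << 24 | h2c[2] << 16 | h2c[3] << 8 | h2c[4];

		printf("[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
		       h2c[0], tail);
		return 0;
	}
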
@@ -1203,20 +1192,22 @@ static void btc8192e2ant_sw_mec2(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
                                    bool force_exec, bool turn_on, u8 type)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s turn %s PS TDMA, type=%d\n",
-                   (force_exec ? "force to" : ""),
-                   (turn_on ? "ON" : "OFF"), type);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+                (force_exec ? "force to" : ""),
+                (turn_on ? "ON" : "OFF"), type);
        coex_dm->cur_ps_tdma_on = turn_on;
        coex_dm->cur_ps_tdma = type;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
-                           coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
-                           coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+                        coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+                        coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
 
                if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
                    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1340,11 +1331,12 @@ static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
                                              u8 sstype)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 mimops = BTC_MIMO_PS_DYNAMIC;
        u32 disra_mask = 0x0;
 
-       btc_alg_dbg(ALGO_TRACE,
-                   "[BTCoex], REAL set SS Type = %d\n", sstype);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], REAL set SS Type = %d\n", sstype);
 
        disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype,
                                                   coex_dm->curra_masktype);
@@ -1376,9 +1368,11 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
 static void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist,
                                          bool force_exec, u8 new_sstype)
 {
-       btc_alg_dbg(ALGO_TRACE,
-                   "[BTCoex], %s Switch SS Type = %d\n",
-                   (force_exec ? "force to" : ""), new_sstype);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s Switch SS Type = %d\n",
+                (force_exec ? "force to" : ""), new_sstype);
        coex_dm->cur_sstype = new_sstype;
 
        if (!force_exec) {
@@ -1440,6 +1434,7 @@ static void halbtc8192e2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
 
 static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
        bool common = false, wifi_connected = false, wifi_busy = false;
        bool bt_hson = false, low_pwr_disable = false;
@@ -1459,8 +1454,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
                btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
                                   &low_pwr_disable);
 
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Wifi non-connected idle!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi non-connected idle!!\n");
 
                if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
                     coex_dm->bt_status) ||
@@ -1496,8 +1491,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
                                           BTC_SET_ACT_DISABLE_LOW_POWER,
                                           &low_pwr_disable);
 
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "Wifi connected + BT non connected-idle!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "Wifi connected + BT non connected-idle!!\n");
 
                        halbtc8192e2ant_switch_sstype(btcoexist,
                                                      NORMAL_EXEC, 2);
@@ -1524,8 +1519,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
 
                        if (bt_hson)
                                return false;
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "Wifi connected + BT connected-idle!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "Wifi connected + BT connected-idle!!\n");
 
                        halbtc8192e2ant_switch_sstype(btcoexist,
                                                      NORMAL_EXEC, 2);
@@ -1550,12 +1545,12 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
                                           &low_pwr_disable);
 
                        if (wifi_busy) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "Wifi Connected-Busy + BT Busy!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "Wifi Connected-Busy + BT Busy!!\n");
                                common = false;
                        } else {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "Wifi Connected-Idle + BT Busy!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "Wifi Connected-Idle + BT Busy!!\n");
 
                                halbtc8192e2ant_switch_sstype(btcoexist,
                                                              NORMAL_EXEC, 1);
@@ -1581,9 +1576,11 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
 static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause,
                          int result)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (tx_pause) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 1\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 1\n");
 
                if (coex_dm->cur_ps_tdma == 71) {
                        halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1678,8 +1675,8 @@ static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 0\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 71);
@@ -1782,9 +1779,11 @@ static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause,
 static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause,
                          int result)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (tx_pause) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 1\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 1\n");
                if (coex_dm->cur_ps_tdma == 1) {
                        halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 6);
@@ -1873,8 +1872,8 @@ static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 0\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 2);
@@ -1968,9 +1967,11 @@ static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause,
 static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause,
                          int result)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (tx_pause) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 1\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 1\n");
                if (coex_dm->cur_ps_tdma == 1) {
                        halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 7);
@@ -2059,8 +2060,8 @@ static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 0\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 3);
@@ -2155,6 +2156,7 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                                 bool sco_hid, bool tx_pause,
                                                 u8 max_interval)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        static int up, dn, m, n, wait_cnt;
        /* 0: no change, +1: increase WiFi duration,
         * -1: decrease WiFi duration
@@ -2162,13 +2164,13 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
        int result;
        u8 retry_cnt = 0;
 
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], TdmaDurationAdjust()\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], TdmaDurationAdjust()\n");
 
        if (!coex_dm->auto_tdma_adjust) {
                coex_dm->auto_tdma_adjust = true;
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], first run TdmaDurationAdjust()!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], first run TdmaDurationAdjust()!!\n");
                if (sco_hid) {
                        if (tx_pause) {
                                if (max_interval == 1) {
@@ -2181,11 +2183,6 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                                                NORMAL_EXEC,
                                                                true, 14);
                                        coex_dm->tdma_adj_type = 14;
-                               } else if (max_interval == 3) {
-                                       halbtc8192e2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 15);
-                                       coex_dm->tdma_adj_type = 15;
                                } else {
                                        halbtc8192e2ant_ps_tdma(btcoexist,
                                                                NORMAL_EXEC,
@@ -2203,11 +2200,6 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                                                NORMAL_EXEC,
                                                                true, 10);
                                        coex_dm->tdma_adj_type = 10;
-                               } else if (max_interval == 3) {
-                                       halbtc8192e2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 11);
-                                       coex_dm->tdma_adj_type = 11;
                                } else {
                                        halbtc8192e2ant_ps_tdma(btcoexist,
                                                                NORMAL_EXEC,
@@ -2227,11 +2219,6 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                                                NORMAL_EXEC,
                                                                true, 6);
                                        coex_dm->tdma_adj_type = 6;
-                               } else if (max_interval == 3) {
-                                       halbtc8192e2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 7);
-                                       coex_dm->tdma_adj_type = 7;
                                } else {
                                        halbtc8192e2ant_ps_tdma(btcoexist,
                                                                NORMAL_EXEC,
@@ -2249,11 +2236,6 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                                                NORMAL_EXEC,
                                                                true, 2);
                                        coex_dm->tdma_adj_type = 2;
-                               } else if (max_interval == 3) {
-                                       halbtc8192e2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 3);
-                                       coex_dm->tdma_adj_type = 3;
                                } else {
                                        halbtc8192e2ant_ps_tdma(btcoexist,
                                                                NORMAL_EXEC,
@@ -2272,11 +2254,11 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
        } else {
                /* acquire the BT TRx retry count from BT_Info byte2 */
                retry_cnt = coex_sta->bt_retry_cnt;
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], retry_cnt = %d\n", retry_cnt);
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
-                           up, dn, m, n, wait_cnt);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], retry_cnt = %d\n", retry_cnt);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
+                        up, dn, m, n, wait_cnt);
                result = 0;
                wait_cnt++;
                /* no retry in the last 2-second duration */
@@ -2293,8 +2275,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                up = 0;
                                dn = 0;
                                result = 1;
-                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                           "[BTCoex]Increase wifi duration!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex]Increase wifi duration!!\n");
                        }
                } else if (retry_cnt <= 3) {
                        up--;
@@ -2317,8 +2299,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                dn = 0;
                                wait_cnt = 0;
                                result = -1;
-                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                           "Reduce wifi duration for retry<3\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "Reduce wifi duration for retry<3\n");
                        }
                } else {
                        if (wait_cnt == 1)
@@ -2334,12 +2316,12 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                        dn = 0;
                        wait_cnt = 0;
                        result = -1;
-                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                   "Decrease wifi duration for retryCounter>3!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "Decrease wifi duration for retryCounter>3!!\n");
                }
 
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], max Interval = %d\n", max_interval);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], max Interval = %d\n", max_interval);
                if (max_interval == 1)
                        btc8192e_int1(btcoexist, tx_pause, result);
                else if (max_interval == 2)
@@ -2355,11 +2337,11 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
        if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
                bool scan = false, link = false, roam = false;
 
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], PsTdma type dismatch!!!, ");
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "curPsTdma=%d, recordPsTdma=%d\n",
-                           coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], PsTdma type mismatch!!!, ");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "curPsTdma=%d, recordPsTdma=%d\n",
+                        coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
 
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2370,8 +2352,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                                true,
                                                coex_dm->tdma_adj_type);
                else
-                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                   "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
        }
 }
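
TdmaDurationAdjust() is a hysteresis controller: the BT retry count reported in BT_Info byte2 drives up/dn counters, and only a sustained run of clean (or consistently failing) 2-second periods changes the WiFi slot duration, after which the per-interval helpers (btc8192e_int1/2/3) pick the concrete PS-TDMA type. A heavily simplified sketch of that control loop, with thresholds that only approximate the driver's:

	#include <stdio.h>

	static int up, dn, wait_cnt;

	static int adjust(int retry_cnt)
	{
		int result = 0;	/* +1 widen WiFi slot, -1 shrink it, 0 hold */

		wait_cnt++;
		if (retry_cnt == 0) {
			up++;
			if (--dn < 0)
				dn = 0;
			if (up >= 4) {	/* ~4 clean periods in a row */
				up = dn = 0;
				result = 1;
			}
		} else {
			up = 0;
			dn++;
			if (dn >= 2 || retry_cnt > 3) {	/* retries persist */
				up = dn = wait_cnt = 0;
				result = -1;
			}
		}
		return result;
	}

	int main(void)
	{
		int pattern[] = { 0, 0, 0, 0, 5, 0 }, i;

		for (i = 0; i < 6; i++)
			printf("retry=%d -> %d\n", pattern[i],
			       adjust(pattern[i]));
		return 0;
	}
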
 
@@ -2390,7 +2372,7 @@ static void halbtc8192e2ant_action_sco(struct btc_coexist *btcoexist)
 
        btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 4);
 
-       btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
 
        if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
            (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
@@ -2452,7 +2434,7 @@ static void halbtc8192e2ant_action_sco_pan(struct btc_coexist *btcoexist)
 
        btc8192e2ant_coex_tbl_w_type(btcoexist, NORMAL_EXEC, 4);
 
-       btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
 
        if ((btrssi_state == BTC_RSSI_STATE_LOW) ||
            (btrssi_state == BTC_RSSI_STATE_STAY_LOW)) {
@@ -2506,7 +2488,7 @@ static void halbtc8192e2ant_action_hid(struct btc_coexist *btcoexist)
        u32 wifi_bw;
 
        wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
 
        halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
        halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
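
The repeated one-line change above, where halbtc8192e2ant_btrssi_state(3, 34, 42) grows a btcoexist first argument, follows directly from the RT_TRACE conversion: the helper now logs, so it needs a path to the per-device rtl_priv. A minimal sketch of threading that context pointer (everything here is a stub):

	#include <stdio.h>

	struct rtl_priv { int dbg_level; };
	struct btc_coexist { struct rtl_priv *adapter; };

	/* before: btrssi_state(levels, thresh0, thresh1) had no way to trace */
	static int btrssi_state(struct btc_coexist *btcoexist,
				int level_num, int thresh0, int thresh1)
	{
		struct rtl_priv *rtlpriv = btcoexist->adapter;
		int rssi = 38;	/* pretend measurement */
		int state = rssi >= thresh1 ? 2 : rssi >= thresh0 ? 1 : 0;

		if (rtlpriv->dbg_level >= 4)
			printf("bt rssi state = %d (%d levels)\n",
			       state, level_num);
		return state;
	}

	int main(void)
	{
		struct rtl_priv priv = { 4 };
		struct btc_coexist coex = { &priv };

		printf("-> %d\n", btrssi_state(&coex, 3, 34, 42));
		return 0;
	}
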
@@ -2564,19 +2546,20 @@ static void halbtc8192e2ant_action_hid(struct btc_coexist *btcoexist)
 /* A2DP only / PAN(EDR) only/ A2DP+PAN(HS) */
 static void halbtc8192e2ant_action_a2dp(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 wifirssi_state, btrssi_state = BTC_RSSI_STATE_HIGH;
        u32 wifi_bw;
        bool long_dist = false;
 
        wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
 
        if ((btrssi_state == BTC_RSSI_STATE_LOW ||
             btrssi_state == BTC_RSSI_STATE_STAY_LOW) &&
            (wifirssi_state == BTC_RSSI_STATE_LOW ||
             wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
                long_dist = true;
        }
        if (long_dist) {
@@ -2656,7 +2639,7 @@ static void halbtc8192e2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
        u32 wifi_bw;
 
        wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
 
        halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
        halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
@@ -2717,7 +2700,7 @@ static void halbtc8192e2ant_action_pan_edr(struct btc_coexist *btcoexist)
        u32 wifi_bw;
 
        wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
 
        halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
        halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
@@ -2778,7 +2761,7 @@ static void halbtc8192e2ant_action_pan_hs(struct btc_coexist *btcoexist)
        u32 wifi_bw;
 
        wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
 
        halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
        halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
@@ -2836,7 +2819,7 @@ static void halbtc8192e2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
        u32 wifi_bw;
 
        wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
 
        halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
        halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
@@ -2899,7 +2882,7 @@ static void halbtc8192e2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
        u32 wifi_bw;
 
        wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
@@ -2963,7 +2946,7 @@ static void btc8192e2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
        u32 wifi_bw;
 
        wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
 
        halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
        halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
@@ -3024,7 +3007,7 @@ static void halbtc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
        u32 wifi_bw;
 
        wifirssi_state = halbtc8192e2ant_wifirssi_state(btcoexist, 0, 2, 15, 0);
-       btrssi_state = halbtc8192e2ant_btrssi_state(3, 34, 42);
+       btrssi_state = halbtc8192e2ant_btrssi_state(btcoexist, 3, 34, 42);
 
        halbtc8192e2ant_switch_sstype(btcoexist, NORMAL_EXEC, 1);
        halbtc8192e2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false, 0x8);
@@ -3079,107 +3062,108 @@ static void halbtc8192e2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
 
 static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 algorithm = 0;
 
-       btc_alg_dbg(ALGO_TRACE,
-                   "[BTCoex], RunCoexistMechanism()===>\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], RunCoexistMechanism()===>\n");
 
        if (btcoexist->manual_control) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], return for Manual CTRL <===\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], return for Manual CTRL <===\n");
                return;
        }
 
        if (coex_sta->under_ips) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], wifi is under IPS !!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], wifi is under IPS !!!\n");
                return;
        }
 
        algorithm = halbtc8192e2ant_action_algorithm(btcoexist);
        if (coex_sta->c2h_bt_inquiry_page &&
            (BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BT is under inquiry/page scan !!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BT is under inquiry/page scan !!\n");
                halbtc8192e2ant_action_bt_inquiry(btcoexist);
                return;
        }
 
        coex_dm->cur_algorithm = algorithm;
-       btc_alg_dbg(ALGO_TRACE,
-                   "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
 
        if (halbtc8192e2ant_is_common_action(btcoexist)) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Action 2-Ant common\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Action 2-Ant common\n");
                coex_dm->auto_tdma_adjust = false;
        } else {
                if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
-                                   coex_dm->pre_algorithm,
-                                   coex_dm->cur_algorithm);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
+                                coex_dm->pre_algorithm,
+                                coex_dm->cur_algorithm);
                        coex_dm->auto_tdma_adjust = false;
                }
                switch (coex_dm->cur_algorithm) {
                case BT_8192E_2ANT_COEX_ALGO_SCO:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "Action 2-Ant, algorithm = SCO\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "Action 2-Ant, algorithm = SCO\n");
                        halbtc8192e2ant_action_sco(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_SCO_PAN:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "Action 2-Ant, algorithm = SCO+PAN(EDR)\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "Action 2-Ant, algorithm = SCO+PAN(EDR)\n");
                        halbtc8192e2ant_action_sco_pan(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_HID:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "Action 2-Ant, algorithm = HID\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "Action 2-Ant, algorithm = HID\n");
                        halbtc8192e2ant_action_hid(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_A2DP:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "Action 2-Ant, algorithm = A2DP\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "Action 2-Ant, algorithm = A2DP\n");
                        halbtc8192e2ant_action_a2dp(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
                        halbtc8192e2ant_action_a2dp_pan_hs(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_PANEDR:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "Action 2-Ant, algorithm = PAN(EDR)\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "Action 2-Ant, algorithm = PAN(EDR)\n");
                        halbtc8192e2ant_action_pan_edr(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_PANHS:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "Action 2-Ant, algorithm = HS mode\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "Action 2-Ant, algorithm = HS mode\n");
                        halbtc8192e2ant_action_pan_hs(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "Action 2-Ant, algorithm = PAN+A2DP\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "Action 2-Ant, algorithm = PAN+A2DP\n");
                        halbtc8192e2ant_action_pan_edr_a2dp(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "Action 2-Ant, algorithm = PAN(EDR)+HID\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "Action 2-Ant, algorithm = PAN(EDR)+HID\n");
                        halbtc8192e2ant_action_pan_edr_hid(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "Action 2-Ant, algorithm = HID+A2DP+PAN\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "Action 2-Ant, algorithm = HID+A2DP+PAN\n");
                        btc8192e2ant_action_hid_a2dp_pan_edr(btcoexist);
                        break;
                case BT_8192E_2ANT_COEX_ALGO_HID_A2DP:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "Action 2-Ant, algorithm = HID+A2DP\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "Action 2-Ant, algorithm = HID+A2DP\n");
                        halbtc8192e2ant_action_hid_a2dp(btcoexist);
                        break;
                default:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "Action 2-Ant, algorithm = unknown!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "Action 2-Ant, algorithm = unknown!!\n");
                        /* halbtc8192e2ant_coex_alloff(btcoexist); */
                        break;
                }
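
The hunk above shows the other half of the conversion: every btc_alg_dbg()/btc_iface_dbg() call, which filtered on coexist-private trace submasks such as ALGO_TRACE or INTF_NOTIFY, becomes RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, ...), which filters on per-component debug state kept in rtl_priv. A compilable sketch of that call shape; the component bit and level values below are made up for illustration, only the argument order matches the kernel macro:

/*
 * Sketch of the logging switch these hunks repeat. The mask and level
 * values are illustrative stand-ins, not the rtlwifi definitions.
 */
#include <stdio.h>

#define COMP_BT_COEXIST	(1u << 0)	/* illustrative component bit */
#define DBG_LOUD	4		/* illustrative verbosity level */

struct rtl_priv {
	unsigned int dbg_comp;	/* enabled component bits */
	int dbg_level;		/* verbosity threshold */
};

#define RT_TRACE(priv, comp, level, fmt, ...)				\
	do {								\
		if (((priv)->dbg_comp & (comp)) &&			\
		    (level) <= (priv)->dbg_level)			\
			fprintf(stderr, fmt, ##__VA_ARGS__);		\
	} while (0)

int main(void)
{
	struct rtl_priv priv = { COMP_BT_COEXIST, DBG_LOUD };

	/* same shape as the converted calls above */
	RT_TRACE(&priv, COMP_BT_COEXIST, DBG_LOUD,
		 "[BTCoex], RunCoexistMechanism()===>\n");
	RT_TRACE(&priv, COMP_BT_COEXIST, DBG_LOUD,
		 "[BTCoex], Algorithm = %d\n", 3);
	return 0;
}
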
@@ -3190,11 +3174,12 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
 static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
                                          bool backup)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u16 u16tmp = 0;
        u8 u8tmp = 0;
 
-       btc_iface_dbg(INTF_INIT,
-                     "[BTCoex], 2Ant Init HW Config!!\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], 2Ant Init HW Config!!\n");
 
        if (backup) {
                /* backup rf 0x1e value */
@@ -3277,8 +3262,10 @@ void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist)
 
 void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
-       btc_iface_dbg(INTF_INIT,
-                     "[BTCoex], Coex Mechanism Init!!\n");
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Coex Mechanism Init!!\n");
        halbtc8192e2ant_init_coex_dm(btcoexist);
 }
 
@@ -3298,13 +3285,13 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
        u32 fw_ver = 0, bt_patch_ver = 0;
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n ============[BT Coexist info]============");
+                "\r\n ============[BT Coexist info]============");
 
        if (btcoexist->manual_control) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n ===========[Under Manual Control]===========");
+                        "\r\n ===========[Under Manual Control]===========");
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n ==========================================");
+                        "\r\n ==========================================");
        }
 
        if (!board_info->bt_exist) {
@@ -3313,43 +3300,43 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
        }
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
+                "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
                   board_info->pg_ant_num, board_info->btdm_ant_num);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
-                  "BT stack/ hci ext ver",
+                "BT stack/ hci ext ver",
                   ((stack_info->profile_notified) ? "Yes" : "No"),
                   stack_info->hci_version);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
-                  "CoexVer/ FwVer/ PatchVer",
-                  glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
-                  fw_ver, bt_patch_ver, bt_patch_ver);
+                "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
+                "CoexVer/ FwVer/ PatchVer",
+                glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+                fw_ver, bt_patch_ver, bt_patch_ver);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
                           &wifi_dot11_chnl);
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
-                  "Dot11 channel / HsMode(HsChnl)",
-                  wifi_dot11_chnl, bt_hson, wifi_hs_chnl);
+                "Dot11 channel / HsMode(HsChnl)",
+                wifi_dot11_chnl, bt_hson, wifi_hs_chnl);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
-                  "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
+                "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
 
        btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifirssi);
        btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                  "Wifi rssi/ HS rssi", wifirssi, bt_hs_rssi);
+                "Wifi rssi/ HS rssi", wifirssi, bt_hs_rssi);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
-                  "Wifi link/ roam/ scan", link, roam, scan);
+                "Wifi link/ roam/ scan", link, roam, scan);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
@@ -3357,7 +3344,7 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
                           &wifi_traffic_dir);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
-                  "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+                "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
                   ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
                        (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
                   ((!wifi_busy) ? "idle" :
@@ -3365,7 +3352,7 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
                                "uplink" : "downlink")));
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ",
-                  "BT [status/ rssi/ retryCnt]",
+                "BT [status/ rssi/ retryCnt]",
                   ((btcoexist->bt_info.bt_disabled) ? ("disabled") :
                    ((coex_sta->c2h_bt_inquiry_page) ?
                     ("inquiry/page scan") :
@@ -3376,127 +3363,127 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
                   coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d",
-                  "SCO/HID/PAN/A2DP", stack_info->sco_exist,
+                "SCO/HID/PAN/A2DP", stack_info->sco_exist,
                   stack_info->hid_exist, stack_info->pan_exist,
                   stack_info->a2dp_exist);
        btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
 
        bt_info_ext = coex_sta->bt_info_ext;
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
-                  "BT Info A2DP rate",
+                "BT Info A2DP rate",
                   (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
 
        for (i = 0; i < BT_INFO_SRC_8192E_2ANT_MAX; i++) {
                if (coex_sta->bt_info_c2h_cnt[i]) {
                        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                                  "\r\n %-35s = %7ph(%d)",
-                                  GLBtInfoSrc8192e2Ant[i],
-                                  coex_sta->bt_info_c2h[i],
-                                  coex_sta->bt_info_c2h_cnt[i]);
+                                "\r\n %-35s = %7ph(%d)",
+                                GLBtInfoSrc8192e2Ant[i],
+                                coex_sta->bt_info_c2h[i],
+                                coex_sta->bt_info_c2h_cnt[i]);
                }
        }
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
-                  "PS state, IPS/LPS",
-                  ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
-                  ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+                "PS state, IPS/LPS",
+                ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+                ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
        btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "SS Type",
-                  coex_dm->cur_sstype);
+                coex_dm->cur_sstype);
 
        /* Sw mechanism */
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                  "============[Sw mechanism]============");
+                "============[Sw mechanism]============");
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
-                  "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
-                  coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+                "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+                coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
-                  "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
-                  coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
-                  coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+                "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+                coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+                coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ", "Rate Mask",
-                  btcoexist->bt_info.ra_mask);
+                btcoexist->bt_info.ra_mask);
 
        /* Fw mechanism */
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                  "============[Fw mechanism]============");
+                "============[Fw mechanism]============");
 
        ps_tdma_case = coex_dm->cur_ps_tdma;
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %5ph case-%d (auto:%d)",
-                  "PS TDMA", coex_dm->ps_tdma_para,
-                  ps_tdma_case, coex_dm->auto_tdma_adjust);
+                "\r\n %-35s = %5ph case-%d (auto:%d)",
+                "PS TDMA", coex_dm->ps_tdma_para,
+                ps_tdma_case, coex_dm->auto_tdma_adjust);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
-                  "DecBtPwr/ IgnWlanAct",
-                  coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
+                "DecBtPwr/ IgnWlanAct",
+                coex_dm->cur_dec_bt_pwr, coex_dm->cur_ignore_wlan_act);
 
        /* Hw setting */
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                  "============[Hw setting]============");
+                "============[Hw setting]============");
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
-                  "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+                "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-                  "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
-                  coex_dm->backup_arfr_cnt2, coex_dm->backup_retrylimit,
-                  coex_dm->backup_ampdu_maxtime);
+                "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+                coex_dm->backup_arfr_cnt2, coex_dm->backup_retrylimit,
+                coex_dm->backup_ampdu_maxtime);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
        u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-                  "0x430/0x434/0x42a/0x456",
-                  u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+                "0x430/0x434/0x42a/0x456",
+                u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc04);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xd04);
        u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x90c);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                  "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]);
+                "0xc04/ 0xd04/ 0x90c", u32tmp[0], u32tmp[1], u32tmp[2]);
 
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0x778",
-                  u8tmp[0]);
+                u8tmp[0]);
 
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x92c);
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x930);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                  "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]);
+                "0x92c/ 0x930", (u8tmp[0]), u32tmp[0]);
 
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
        u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x4f);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                  "0x40/ 0x4f", u8tmp[0], u8tmp[1]);
+                "0x40/ 0x4f", u8tmp[0], u8tmp[1]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                  "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+                "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x", "0xc50(dig)",
-                  u32tmp[0]);
+                u32tmp[0]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
        u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
-                  "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
-                  u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+                "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+                "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+                u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                  "0x770(hp rx[31:16]/tx[15:0])",
-                  coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+                "0x770(hp rx[31:16]/tx[15:0])",
+                coex_sta->high_priority_rx, coex_sta->high_priority_tx);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                  "0x774(lp rx[31:16]/tx[15:0])",
-                  coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+                "0x774(lp rx[31:16]/tx[15:0])",
+                coex_sta->low_priority_rx, coex_sta->low_priority_tx);
 #if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 1)
        halbtc8192e2ant_monitor_bt_ctr(btcoexist);
 #endif
@@ -3505,54 +3492,63 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
 
 void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (BTC_IPS_ENTER == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], IPS ENTER notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], IPS ENTER notify\n");
                coex_sta->under_ips = true;
                halbtc8192e2ant_coex_alloff(btcoexist);
        } else if (BTC_IPS_LEAVE == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], IPS LEAVE notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], IPS LEAVE notify\n");
                coex_sta->under_ips = false;
        }
 }
 
 void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (BTC_LPS_ENABLE == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], LPS ENABLE notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], LPS ENABLE notify\n");
                coex_sta->under_lps = true;
        } else if (BTC_LPS_DISABLE == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], LPS DISABLE notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], LPS DISABLE notify\n");
                coex_sta->under_lps = false;
        }
 }
 
 void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (BTC_SCAN_START == type)
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], SCAN START notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], SCAN START notify\n");
        else if (BTC_SCAN_FINISH == type)
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], SCAN FINISH notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], SCAN FINISH notify\n");
 }
 
 void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (BTC_ASSOCIATE_START == type)
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], CONNECT START notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CONNECT START notify\n");
        else if (BTC_ASSOCIATE_FINISH == type)
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], CONNECT FINISH notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CONNECT FINISH notify\n");
 }
 
 void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
                                            u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[3] = {0};
        u32 wifi_bw;
        u8 wifi_center_chnl;
@@ -3563,11 +3559,11 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
                return;
 
        if (BTC_MEDIA_CONNECT == type)
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], MEDIA connect notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], MEDIA connect notify\n");
        else
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], MEDIA disconnect notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], MEDIA disconnect notify\n");
 
        /* only 2.4G we need to inform bt the chnl mask */
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
@@ -3587,10 +3583,10 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
        coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
        coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], FW write 0x66 = 0x%x\n",
-                   h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
-                   h2c_parameter[2]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], FW write 0x66 = 0x%x\n",
+                h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+                h2c_parameter[2]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
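
For reference, the FW write logged above is the 3-byte H2C channel-info payload handed to btc_fill_h2c(..., 0x66, 3, ...). The byte meanings in the sketch below (connect flag, center channel, bandwidth code) are inferred from the surrounding driver code and should be read as assumptions, not a firmware-interface quote:

/*
 * Sketch of the 3-byte H2C payload packed by media_status_notify.
 * Byte semantics here are assumptions inferred from context.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t h2c_parameter[3] = {0};

	h2c_parameter[0] = 0x1;   /* assumed: media connected on 2.4G */
	h2c_parameter[1] = 6;     /* assumed: wifi center channel */
	h2c_parameter[2] = 0x30;  /* assumed: bandwidth code */

	/* same 24-bit view the debug line above prints */
	printf("FW write 0x66 = 0x%x\n",
	       h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
	       h2c_parameter[2]);
	return 0;
}
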
@@ -3598,14 +3594,17 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
 void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
                                              u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (type == BTC_PACKET_DHCP)
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], DHCP Packet notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], DHCP Packet notify\n");
 }
 
 void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
                                       u8 *tmp_buf, u8 length)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 bt_info = 0;
        u8 i, rsp_source = 0;
        bool bt_busy = false, limited_dig = false;
@@ -3618,19 +3617,19 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
                rsp_source = BT_INFO_SRC_8192E_2ANT_WIFI_FW;
        coex_sta->bt_info_c2h_cnt[rsp_source]++;
 
-       btc_iface_dbg(INTF_NOTIFY,
-                     "[BTCoex], Bt info[%d], length=%d, hex data = [",
-                     rsp_source, length);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Bt info[%d], length=%d, hex data = [",
+                rsp_source, length);
        for (i = 0; i < length; i++) {
                coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
                if (i == 1)
                        bt_info = tmp_buf[i];
                if (i == length-1)
-                       btc_iface_dbg(INTF_NOTIFY,
-                                     "0x%02x]\n", tmp_buf[i]);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "0x%02x]\n", tmp_buf[i]);
                else
-                       btc_iface_dbg(INTF_NOTIFY,
-                                     "0x%02x, ", tmp_buf[i]);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "0x%02x, ", tmp_buf[i]);
        }
 
        if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) {
@@ -3647,8 +3646,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
                 * because bt is reset and loss of the info.
                 */
                if ((coex_sta->bt_info_ext & BIT1)) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "bit1, send wifi BW&Chnl to BT!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "bit1, send wifi BW&Chnl to BT!!\n");
                        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                                           &wifi_connected);
                        if (wifi_connected)
@@ -3664,8 +3663,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
                if ((coex_sta->bt_info_ext & BIT3)) {
                        if (!btcoexist->manual_control &&
                            !btcoexist->stop_coex_dm) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "bit3, BT NOT ignore Wlan active!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "bit3, BT NOT ignore Wlan active!\n");
                                halbtc8192e2ant_IgnoreWlanAct(btcoexist,
                                                              FORCE_EXEC,
                                                              false);
@@ -3723,25 +3722,25 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
 
        if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) {
                coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BT Non-Connected idle!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BT Non-Connected idle!!!\n");
        } else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) {
                coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
        } else if ((bt_info&BT_INFO_8192E_2ANT_B_SCO_ESCO) ||
                   (bt_info&BT_INFO_8192E_2ANT_B_SCO_BUSY)) {
                coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
        } else if (bt_info&BT_INFO_8192E_2ANT_B_ACL_BUSY) {
                coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
        } else {
                coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
        }
 
        if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3769,7 +3768,9 @@ void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
 
 void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
 {
-       btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
 
        halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
        ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
@@ -3777,34 +3778,35 @@ void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
 
 void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        static u8 dis_ver_info_cnt;
        u32 fw_ver = 0, bt_patch_ver = 0;
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
 
-       btc_alg_dbg(ALGO_TRACE,
-                   "=======================Periodical=======================\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "=======================Periodical=======================\n");
        if (dis_ver_info_cnt <= 5) {
                dis_ver_info_cnt += 1;
-               btc_iface_dbg(INTF_INIT,
-                             "************************************************\n");
-               btc_iface_dbg(INTF_INIT,
-                             "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
-                             board_info->pg_ant_num, board_info->btdm_ant_num,
-                             board_info->btdm_ant_pos);
-               btc_iface_dbg(INTF_INIT,
-                             "BT stack/ hci ext ver = %s / %d\n",
-                             ((stack_info->profile_notified) ? "Yes" : "No"),
-                             stack_info->hci_version);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "************************************************\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+                        board_info->pg_ant_num, board_info->btdm_ant_num,
+                        board_info->btdm_ant_pos);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "BT stack/ hci ext ver = %s / %d\n",
+                        ((stack_info->profile_notified) ? "Yes" : "No"),
+                        stack_info->hci_version);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
                                   &bt_patch_ver);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-               btc_iface_dbg(INTF_INIT,
-                             "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
-                             glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
-                             fw_ver, bt_patch_ver, bt_patch_ver);
-               btc_iface_dbg(INTF_INIT,
-                             "************************************************\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+                        glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+                        fw_ver, bt_patch_ver, bt_patch_ver);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "************************************************\n");
        }
 
 #if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
index 16add42a62af7e0d824f98a4a4984a63452d41b5..d67bbfb6ad8e61c4e1e0cef2e3a83c04a961f71f 100644 (file)
@@ -60,9 +60,11 @@ static u32 glcoex_ver_8723b_1ant = 0x47;
 /***************************************************************
  * local function start with halbtc8723b1ant_
  ***************************************************************/
-static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
+static u8 halbtc8723b1ant_bt_rssi_state(struct btc_coexist *btcoexist,
+                                       u8 level_num, u8 rssi_thresh,
                                        u8 rssi_thresh1)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        s32 bt_rssi = 0;
        u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
 
@@ -74,28 +76,28 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= rssi_thresh +
                                        BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to High\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at Low\n");
                        }
                } else {
                        if (bt_rssi < rssi_thresh) {
                                bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to Low\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                   "[BTCoex], BT Rssi thresh error!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], BT Rssi thresh error!!\n");
                        return coex_sta->pre_bt_rssi_state;
                }
 
@@ -104,12 +106,12 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= rssi_thresh +
                                        BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to Medium\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at Low\n");
                        }
                } else if ((coex_sta->pre_bt_rssi_state ==
                                        BTC_RSSI_STATE_MEDIUM) ||
@@ -118,26 +120,26 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= rssi_thresh1 +
                                        BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to High\n");
                        } else if (bt_rssi < rssi_thresh) {
                                bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to Low\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at Medium\n");
                        }
                } else {
                        if (bt_rssi < rssi_thresh1) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to Medium\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at High\n");
                        }
                }
        }
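
The rewritten 8723b helper above keeps the original three-level hysteresis: moving up out of a state requires clearing the threshold plus BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT, while moving down uses the bare threshold, so the reported state cannot oscillate on a noisy RSSI near a boundary. A standalone sketch of that state machine, collapsing the BTC_RSSI_STATE_STAY_* variants and using an illustrative tolerance:

/*
 * Sketch of the three-level RSSI hysteresis in the hunks above.
 * THRESH_TOL and the enum are simplified stand-ins, not the
 * BTC_RSSI_* definitions.
 */
#include <stdio.h>

#define THRESH_TOL 2  /* stand-in for BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT */

enum rssi_state { RSSI_LOW, RSSI_MEDIUM, RSSI_HIGH };

static enum rssi_state rssi_step(enum rssi_state prev, int rssi,
				 int thresh, int thresh1)
{
	switch (prev) {
	case RSSI_LOW:
		return (rssi >= thresh + THRESH_TOL) ? RSSI_MEDIUM : RSSI_LOW;
	case RSSI_MEDIUM:
		if (rssi >= thresh1 + THRESH_TOL)
			return RSSI_HIGH;
		return (rssi < thresh) ? RSSI_LOW : RSSI_MEDIUM;
	default: /* RSSI_HIGH */
		return (rssi < thresh1) ? RSSI_MEDIUM : RSSI_HIGH;
	}
}

int main(void)
{
	/* thresholds as in the 2-ant callers: 34/42 */
	enum rssi_state s = RSSI_LOW;
	int samples[] = { 30, 35, 36, 41, 44, 43, 41, 33 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(*samples); i++) {
		s = rssi_step(s, samples[i], 34, 42);
		printf("rssi=%d -> state=%d\n", samples[i], (int)s);
	}
	return 0;
}
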
@@ -151,6 +153,7 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                                          u8 index, u8 level_num,
                                          u8 rssi_thresh, u8 rssi_thresh1)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        s32 wifi_rssi = 0;
        u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
 
@@ -165,28 +168,28 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >= rssi_thresh +
                                        BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to High\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at Low\n");
                        }
                } else {
                        if (wifi_rssi < rssi_thresh) {
                                wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to Low\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                   "[BTCoex], wifi RSSI thresh error!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], wifi RSSI thresh error!!\n");
                        return coex_sta->pre_wifi_rssi_state[index];
                }
 
@@ -197,12 +200,12 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >= rssi_thresh +
                                         BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to Medium\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at Low\n");
                        }
                } else if ((coex_sta->pre_wifi_rssi_state[index] ==
                                                BTC_RSSI_STATE_MEDIUM) ||
@@ -211,26 +214,26 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >= rssi_thresh1 +
                                         BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to High\n");
                        } else if (wifi_rssi < rssi_thresh) {
                                wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to Low\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at Medium\n");
                        }
                } else {
                        if (wifi_rssi < rssi_thresh1) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to Medium\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at High\n");
                        }
                }
        }
@@ -418,15 +421,16 @@ static void halbtc8723b1ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
 
 static void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        coex_sta->c2h_bt_info_req_sent = true;
 
        h2c_parameter[0] |= BIT0;       /* trigger*/
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
-                   h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+                h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
@@ -513,6 +517,7 @@ static void halbtc8723b1ant_update_bt_link_info(struct btc_coexist *btcoexist)
 
 static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
        bool bt_hs_on = false;
        u8 algorithm = BT_8723B_1ANT_COEX_ALGO_UNDEFINED;
@@ -521,8 +526,8 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
        if (!bt_link_info->bt_link_exist) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], No BT link exists!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], No BT link exists!!!\n");
                return algorithm;
        }
 
@@ -537,27 +542,29 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
 
        if (numdiffprofile == 1) {
                if (bt_link_info->sco_exist) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], BT Profile = SCO only\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], BT Profile = SCO only\n");
                        algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
                } else {
                        if (bt_link_info->hid_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], BT Profile = HID only\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Profile = HID only\n");
                                algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], BT Profile = A2DP only\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Profile = A2DP only\n");
                                algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP;
                        } else if (bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = PAN(HS) only\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = PAN(HS) only\n");
                                        algorithm =
                                                BT_8723B_1ANT_COEX_ALGO_PANHS;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = PAN(EDR) only\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = PAN(EDR) only\n");
                                        algorithm =
                                                BT_8723B_1ANT_COEX_ALGO_PANEDR;
                                }
@@ -566,21 +573,23 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
        } else if (numdiffprofile == 2) {
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], BT Profile = SCO + HID\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Profile = SCO + HID\n");
                                algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
                                algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
                        } else if (bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = SCO + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = SCO + PAN(HS)\n");
                                        algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
                                }
@@ -588,32 +597,36 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
                } else {
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], BT Profile = HID + A2DP\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Profile = HID + A2DP\n");
                                algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
                        } else if (bt_link_info->hid_exist &&
                                   bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = HID + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = HID + PAN(HS)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = HID + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = HID + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (bt_link_info->pan_exist &&
                                   bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP;
                                }
@@ -623,31 +636,35 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
                                algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->hid_exist &&
                                   bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (bt_link_info->pan_exist &&
                                   bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
                                        algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
                                }
@@ -657,13 +674,15 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
                                }
@@ -675,11 +694,13 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
                                        algorithm =
                                            BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
                                }
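
The function above walks every combination of the four BT profiles (SCO, HID, A2DP, PAN, with PAN further split by high-speed vs. EDR link) and maps it to one coexistence algorithm; where profiles conflict, the trace strings record the dominant profile chosen (for example "SCO + A2DP ==> SCO"). A condensed view of a few of the mappings visible above (illustrative summary only):

    /* profiles present             -> algorithm chosen             */
    /* A2DP only                    -> ..._COEX_ALGO_A2DP           */
    /* SCO + A2DP                   -> ..._COEX_ALGO_SCO            */
    /* HID + PAN(EDR)               -> ..._COEX_ALGO_PANEDR_HID     */
    /* SCO + HID + A2DP + PAN(EDR)  -> ..._COEX_ALGO_PANEDR_HID     */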
@@ -693,6 +714,7 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
 static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist,
                                                  bool low_penalty_ra)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[6] = {0};
 
        h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty */
@@ -706,9 +728,9 @@ static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist,
                h2c_parameter[5] = 0xf9;  /*MCS5 or OFDM36 */
        }
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], set WiFi Low-Penalty Retry: %s",
-                   (low_penalty_ra ? "ON!!" : "OFF!!"));
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set WiFi Low-Penalty Retry: %s",
+                (low_penalty_ra ? "ON!!" : "OFF!!"));
 
        btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
 }
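
Every hunk in this patch follows the same mechanical conversion: a btc_alg_dbg()/btc_iface_dbg() call becomes RT_TRACE() on the COMP_BT_COEXIST component at DBG_LOUD, and each converted function gains a local rtlpriv fetched from btcoexist->adapter, as in btc8723b1ant_set_sw_pen_tx_rate_adapt() above. RT_TRACE is rtlwifi's component/level-gated trace macro; roughly, as a simplified sketch (field names illustrative, not the exact macro from drivers/net/wireless/realtek/rtlwifi/debug.h):

    /* sketch only: gate on the driver's debug component mask and level */
    #define RT_TRACE(rtlpriv, comp, level, fmt, ...)                        \
            do {                                                            \
                    if (((comp) & (rtlpriv)->dbg.global_debugcomponents) && \
                        (level) <= (rtlpriv)->dbg.global_debuglevel)        \
                            printk(KERN_DEBUG KBUILD_MODNAME ": " fmt,      \
                                   ##__VA_ARGS__);                          \
            } while (0)

The practical effect is that these messages become controllable through the standard rtlwifi debug knobs instead of the btcoexist-private btc_*_dbg switches.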
@@ -732,20 +754,22 @@ static void halbtc8723b1ant_set_coex_table(struct btc_coexist *btcoexist,
                                           u32 val0x6c0, u32 val0x6c4,
                                           u32 val0x6c8, u8 val0x6cc)
 {
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
        btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
        btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
        btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
        btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
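
halbtc8723b1ant_set_coex_table() traces and writes four registers in lock-step. Since three of the four are uniform 4-byte stores, a table-driven loop would express the same behavior more compactly; a sketch under the same assumptions (same btc_write_* accessors as above, purely illustrative, not part of this patch):

    static const u32 regs[] = { 0x6c0, 0x6c4, 0x6c8 };
    const u32 vals[] = { val0x6c0, val0x6c4, val0x6c8 };
    int i;

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
            RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                     "[BTCoex], set coex table, set 0x%x = 0x%x\n",
                     regs[i], vals[i]);
            btcoexist->btc_write_4byte(btcoexist, regs[i], vals[i]);
    }
    /* 0x6cc is a single-byte register and keeps its own write */
    btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);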
 
@@ -754,10 +778,12 @@ static void halbtc8723b1ant_coex_table(struct btc_coexist *btcoexist,
                                       u32 val0x6c4, u32 val0x6c8,
                                       u8 val0x6cc)
 {
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
-                   (force_exec ? "force to" : ""),
-                   val0x6c0, val0x6c4, val0x6cc);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
+                (force_exec ? "force to" : ""),
+                val0x6c0, val0x6c4, val0x6cc);
        coex_dm->cur_val0x6c0 = val0x6c0;
        coex_dm->cur_val0x6c4 = val0x6c4;
        coex_dm->cur_val0x6c8 = val0x6c8;
@@ -823,14 +849,15 @@ static void halbtc8723b1ant_coex_table_with_type(struct btc_coexist *btcoexist,
 static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
                                               bool enable)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        if (enable)
                h2c_parameter[0] |= BIT0;       /* function enable */
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
-                   h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+                h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
 }
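
All firmware-side knobs in this file funnel through btcoexist->btc_fill_h2c(), which queues a host-to-card (H2C) command as an element id plus a short parameter buffer. Condensed from the two setters above (the parameter buffers are the ones each function builds):

    /* retry penalty: opcode byte 0x6 inside a 6-byte payload */
    btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);

    /* ignore WLAN activity: one byte, BIT0 = function enable */
    btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);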
@@ -838,16 +865,18 @@ static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
 static void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
                                            bool force_exec, bool enable)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s turn Ignore WlanAct %s\n",
-                   (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn Ignore WlanAct %s\n",
+                (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
        coex_dm->cur_ignore_wlan_act = enable;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
-                           coex_dm->pre_ignore_wlan_act,
-                           coex_dm->cur_ignore_wlan_act);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+                        coex_dm->pre_ignore_wlan_act,
+                        coex_dm->cur_ignore_wlan_act);
 
                if (coex_dm->pre_ignore_wlan_act ==
                    coex_dm->cur_ignore_wlan_act)
@@ -862,6 +891,7 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
                                           u8 byte1, u8 byte2, u8 byte3,
                                           u8 byte4, u8 byte5)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[5] = {0};
        u8 real_byte1 = byte1, real_byte5 = byte5;
        bool ap_enable = false;
@@ -871,8 +901,8 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
 
        if (ap_enable) {
                if ((byte1 & BIT4) && !(byte1 & BIT5)) {
-                       btc_iface_dbg(INTF_NOTIFY,
-                                     "[BTCoex], FW for 1Ant AP mode\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], FW for 1Ant AP mode\n");
                        real_byte1 &= ~BIT4;
                        real_byte1 |= BIT5;
 
@@ -893,8 +923,8 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
        coex_dm->ps_tdma_para[3] = byte4;
        coex_dm->ps_tdma_para[4] = real_byte5;
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
                    h2c_parameter[0],
                    h2c_parameter[1] << 24 |
                    h2c_parameter[2] << 16 |
@@ -918,22 +948,24 @@ static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist,
                                    bool force_exec,
                                    u8 lps_val, u8 rpwm_val)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
-                   (force_exec ? "force to" : ""), lps_val, rpwm_val);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+                (force_exec ? "force to" : ""), lps_val, rpwm_val);
        coex_dm->cur_lps = lps_val;
        coex_dm->cur_rpwm = rpwm_val;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
-                           coex_dm->cur_lps, coex_dm->cur_rpwm);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
+                        coex_dm->cur_lps, coex_dm->cur_rpwm);
 
                if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
                    (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
-                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                   "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
-                                   coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
+                                coex_dm->pre_rpwm, coex_dm->cur_rpwm);
 
                        return;
                }
@@ -947,8 +979,10 @@ static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist,
 static void halbtc8723b1ant_sw_mechanism(struct btc_coexist *btcoexist,
                                         bool low_penalty_ra)
 {
-       btc_alg_dbg(ALGO_BT_MONITOR,
-                   "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
 
        halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
 }
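
The NORMAL_EXEC/force_exec setters in this file (coex table, ignore-WLAN-act, LPS/RPWM, PS-TDMA) share one caching discipline: store the requested value in coex_dm->cur_*, and unless the caller forces execution, skip the hardware or firmware write whenever it matches the cached coex_dm->pre_* value; the pre_* copy is refreshed once a write actually happens (in code outside these hunks). A minimal sketch of the pattern (set_knob, do_hw_write, and the knob field are hypothetical names for illustration):

    static void set_knob(struct btc_coexist *btcoexist,
                         bool force_exec, u32 val)
    {
            coex_dm->cur_knob = val;

            /* NORMAL_EXEC: suppress redundant writes */
            if (!force_exec && coex_dm->pre_knob == coex_dm->cur_knob)
                    return;

            do_hw_write(btcoexist, val);
            coex_dm->pre_knob = coex_dm->cur_knob;
    }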
@@ -1153,6 +1187,7 @@ static void halbtc8723b1ant_SetAntPath(struct btc_coexist *btcoexist,
 static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
                                    bool force_exec, bool turn_on, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool wifi_busy = false;
        u8 rssi_adjust_val = 0;
 
@@ -1163,13 +1198,13 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
 
        if (!force_exec) {
                if (coex_dm->cur_ps_tdma_on)
-                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                   "[BTCoex], ******** TDMA(on, %d) *********\n",
-                                   coex_dm->cur_ps_tdma);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], ******** TDMA(on, %d) *********\n",
+                                coex_dm->cur_ps_tdma);
                else
-                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                   "[BTCoex], ******** TDMA(off, %d) ********\n",
-                                   coex_dm->cur_ps_tdma);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], ******** TDMA(off, %d) ********\n",
+                                coex_dm->cur_ps_tdma);
 
                if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
                    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1374,6 +1409,7 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
 
 static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool commom = false, wifi_connected = false;
        bool wifi_busy = false;
 
@@ -1383,45 +1419,45 @@ static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist)
 
        if (!wifi_connected &&
            BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
                halbtc8723b1ant_sw_mechanism(btcoexist, false);
                commom = true;
        } else if (wifi_connected &&
                   (BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
                    coex_dm->bt_status)) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi connected + BT non connected-idle!!\n");
                halbtc8723b1ant_sw_mechanism(btcoexist, false);
                commom = true;
        } else if (!wifi_connected &&
                   (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
                    coex_dm->bt_status)) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
                halbtc8723b1ant_sw_mechanism(btcoexist, false);
                commom = true;
        } else if (wifi_connected &&
                   (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
                    coex_dm->bt_status)) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Wifi connected + BT connected-idle!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi connected + BT connected-idle!!\n");
                halbtc8723b1ant_sw_mechanism(btcoexist, false);
                commom = true;
        } else if (!wifi_connected &&
                   (BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE !=
                    coex_dm->bt_status)) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
                halbtc8723b1ant_sw_mechanism(btcoexist, false);
                commom = true;
        } else {
                if (wifi_busy)
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
                else
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
 
                commom = false;
        }
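
halbtc8723b1ant_is_common_action() reduces to a two-by-three decision table over WiFi connectivity and coex_dm->bt_status: every combination except "WiFi connected + BT busy" turns the software mechanism off and reports that the common path handled it. Summarized as a comment table derived from the branches above:

    /*
     * WiFi \ BT       non-conn idle | connected idle | busy
     * not connected   common        | common         | common
     * connected       common        | common         | full algorithm
     */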
@@ -1432,6 +1468,7 @@ static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist)
 static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
                                              u8 wifi_status)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        static s32 up, dn, m, n, wait_count;
        /* 0: no change, +1: increase WiFi duration,
         * -1: decrease WiFi duration
@@ -1440,8 +1477,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
        u8 retry_count = 0, bt_info_ext;
        bool wifi_busy = false;
 
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], TdmaDurationAdjustForAcl()\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], TdmaDurationAdjustForAcl()\n");
 
        if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status)
                wifi_busy = true;
@@ -1470,8 +1507,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
 
        if (!coex_dm->auto_tdma_adjust) {
                coex_dm->auto_tdma_adjust = true;
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], first run TdmaDurationAdjust()!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], first run TdmaDurationAdjust()!!\n");
 
                halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
                coex_dm->tdma_adj_type = 2;
@@ -1502,8 +1539,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
                                up = 0;
                                dn = 0;
                                result = 1;
-                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                           "[BTCoex], Increase wifi duration!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], Increase wifi duration!!\n");
                        }
                } else if (retry_count <= 3) {
                        up--;
@@ -1526,8 +1563,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
                                dn = 0;
                                wait_count = 0;
                                result = -1;
-                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                           "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
                        }
                } else {
                        if (wait_count == 1)
@@ -1543,8 +1580,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
                        dn = 0;
                        wait_count = 0;
                        result = -1;
-                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                   "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
                }
 
                if (result == -1) {
@@ -1589,9 +1626,9 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
                        }
                } else {          /*no change */
                        /*if busy / idle change */
-                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                   "[BTCoex],********* TDMA(on, %d) ********\n",
-                                   coex_dm->cur_ps_tdma);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex],********* TDMA(on, %d) ********\n",
+                                coex_dm->cur_ps_tdma);
                }
 
                if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
@@ -1807,7 +1844,7 @@ static void halbtc8723b1ant_action_wifi_connected_bt_acl_busy(
 
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
 
-       bt_rssi_state = halbtc8723b1ant_bt_rssi_state(2, 28, 0);
+       bt_rssi_state = halbtc8723b1ant_bt_rssi_state(btcoexist, 2, 28, 0);
 
        if (bt_link_info->hid_only) {  /*HID */
                btc8723b1ant_act_bt_sco_hid_only_busy(btcoexist, wifi_status);
@@ -1835,16 +1872,8 @@ static void halbtc8723b1ant_action_wifi_connected_bt_acl_busy(
                }
        } else if (bt_link_info->hid_exist &&
                        bt_link_info->a2dp_exist) { /*HID+A2DP */
-               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 14);
-                       coex_dm->auto_tdma_adjust = false;
-               } else { /*for low BT RSSI*/
-                       halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               true, 14);
-                       coex_dm->auto_tdma_adjust = false;
-               }
+               halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 14);
+               coex_dm->auto_tdma_adjust = false;
 
                halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 6);
         /*PAN(OPP,FTP), HID+PAN(OPP,FTP) */
@@ -1993,19 +2022,20 @@ static void halbtc8723b1ant_action_wifi_connected_special_packet(
 
 static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool wifi_busy = false;
        bool scan = false, link = false, roam = false;
        bool under_4way = false, ap_enable = false;
 
-       btc_alg_dbg(ALGO_TRACE,
-                   "[BTCoex], CoexForWifiConnect()===>\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], CoexForWifiConnect()===>\n");
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
                           &under_4way);
        if (under_4way) {
                halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
                return;
        }
 
@@ -2019,8 +2049,8 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
                else
                        halbtc8723b1ant_action_wifi_connected_special_packet(
                                                                     btcoexist);
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
                return;
        }
 
@@ -2081,6 +2111,7 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
 
 static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 algorithm = 0;
 
        algorithm = halbtc8723b1ant_action_algorithm(btcoexist);
@@ -2089,58 +2120,58 @@ static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
        if (!halbtc8723b1ant_is_common_action(btcoexist)) {
                switch (coex_dm->cur_algorithm) {
                case BT_8723B_1ANT_COEX_ALGO_SCO:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = SCO\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = SCO\n");
                        halbtc8723b1ant_action_sco(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_HID:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = HID\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = HID\n");
                        halbtc8723b1ant_action_hid(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_A2DP:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = A2DP\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = A2DP\n");
                        halbtc8723b1ant_action_a2dp(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
                        halbtc8723b1ant_action_a2dp_pan_hs(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_PANEDR:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = PAN(EDR)\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = PAN(EDR)\n");
                        halbtc8723b1ant_action_pan_edr(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_PANHS:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = HS mode\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = HS mode\n");
                        halbtc8723b1ant_action_pan_hs(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = PAN+A2DP\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = PAN+A2DP\n");
                        halbtc8723b1ant_action_pan_edr_a2dp(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
                        halbtc8723b1ant_action_pan_edr_hid(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
                        btc8723b1ant_action_hid_a2dp_pan_edr(btcoexist);
                        break;
                case BT_8723B_1ANT_COEX_ALGO_HID_A2DP:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = HID+A2DP\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = HID+A2DP\n");
                        halbtc8723b1ant_action_hid_a2dp(btcoexist);
                        break;
                default:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = coexist All Off!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = coexist All Off!!\n");
                        break;
                }
                coex_dm->pre_algorithm = coex_dm->cur_algorithm;
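
btc8723b1ant_run_sw_coex_mech() is a straight dispatch: when the common path does not apply, the algorithm id derived from the active profiles selects one action handler, and pre_algorithm mirrors cur_algorithm in the same pre/cur caching style used elsewhere. The switch could equally be a function-pointer table; an illustrative equivalent using the handlers named above (sketch only, not part of this patch):

    static void (* const actions[])(struct btc_coexist *) = {
            [BT_8723B_1ANT_COEX_ALGO_SCO]    = halbtc8723b1ant_action_sco,
            [BT_8723B_1ANT_COEX_ALGO_HID]    = halbtc8723b1ant_action_hid,
            [BT_8723B_1ANT_COEX_ALGO_A2DP]   = halbtc8723b1ant_action_a2dp,
            [BT_8723B_1ANT_COEX_ALGO_PANEDR] = halbtc8723b1ant_action_pan_edr,
            /* ... remaining algorithm ids as in the switch above ... */
    };

    if (coex_dm->cur_algorithm < ARRAY_SIZE(actions) &&
        actions[coex_dm->cur_algorithm])
            actions[coex_dm->cur_algorithm](btcoexist);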
@@ -2149,6 +2180,7 @@ static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
 
 static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
        bool wifi_connected = false, bt_hs_on = false;
        bool increase_scan_dev_num = false;
@@ -2158,24 +2190,24 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        u32 wifi_link_status = 0;
        u32 num_of_wifi_link = 0;
 
-       btc_alg_dbg(ALGO_TRACE,
-                   "[BTCoex], RunCoexistMechanism()===>\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], RunCoexistMechanism()===>\n");
 
        if (btcoexist->manual_control) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
                return;
        }
 
        if (btcoexist->stop_coex_dm) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
                return;
        }
 
        if (coex_sta->under_ips) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], wifi is under IPS !!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], wifi is under IPS !!!\n");
                return;
        }
 
@@ -2210,16 +2242,8 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
                        wifi_rssi_state =
                                halbtc8723b1ant_wifi_rssi_state(btcoexist,
                                                                1, 2, 30, 0);
-                       if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                           (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                               halbtc8723b1ant_limited_tx(btcoexist,
-                                                          NORMAL_EXEC,
-                                                          1, 1, 1, 1);
-                       } else {
-                               halbtc8723b1ant_limited_tx(btcoexist,
-                                                          NORMAL_EXEC,
-                                                          1, 1, 1, 1);
-                       }
+                       halbtc8723b1ant_limited_tx(btcoexist,
+                                                  NORMAL_EXEC, 1, 1, 1, 1);
                } else {
                        halbtc8723b1ant_limited_tx(btcoexist, NORMAL_EXEC,
                                                   0, 0, 0, 0);
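
Two hunks above are pure dead-code removal: both arms of an RSSI-state conditional issued the identical call (PS-TDMA case 14 in halbtc8723b1ant_action_wifi_connected_bt_acl_busy(), limited_tx 1/1/1/1 here), so each branch collapses to one unconditional call with no behavior change:

    /* before: both arms identical */
    if (rssi_state_is_high)
            do_action(x);
    else    /* low RSSI */
            do_action(x);

    /* after: branch removed, behavior unchanged */
    do_action(x);

The first of those hunks also threads btcoexist into halbtc8723b1ant_bt_rssi_state(), presumably so that helper can reach rtlpriv for its own RT_TRACE output.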
@@ -2254,8 +2278,8 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        if (!wifi_connected) {
                bool scan = false, link = false, roam = false;
 
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], wifi is non connected-idle !!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], wifi is non connected-idle !!!\n");
 
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2288,12 +2312,13 @@ static void halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
 static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
                                           bool backup)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u32 u32tmp = 0;
        u8 u8tmp = 0;
        u32 cnt_bt_cal_chk = 0;
 
-       btc_iface_dbg(INTF_INIT,
-                     "[BTCoex], 1Ant Init HW Config!!\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], 1Ant Init HW Config!!\n");
 
        if (backup) {/* backup rf 0x1e value */
                coex_dm->backup_arfr_cnt1 =
@@ -2320,13 +2345,13 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
                u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x49d);
                cnt_bt_cal_chk++;
                if (u32tmp & BIT0) {
-                       btc_iface_dbg(INTF_INIT,
-                                     "[BTCoex], ########### BT calibration(cnt=%d) ###########\n",
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], ########### BT calibration(cnt=%d) ###########\n",
                                      cnt_bt_cal_chk);
                        mdelay(50);
                } else {
-                       btc_iface_dbg(INTF_INIT,
-                                     "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n",
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n",
                                      cnt_bt_cal_chk);
                        break;
                }
@@ -2370,8 +2395,10 @@ void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist)
 
 void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
-       btc_iface_dbg(INTF_INIT,
-                     "[BTCoex], Coex Mechanism Init!!\n");
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Coex Mechanism Init!!\n");
 
        btcoexist->stop_coex_dm = false;
 
@@ -2398,19 +2425,19 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
        u32 fw_ver = 0, bt_patch_ver = 0;
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n ============[BT Coexist info]============");
+                "\r\n ============[BT Coexist info]============");
 
        if (btcoexist->manual_control) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n ============[Under Manual Control]==========");
+                        "\r\n ============[Under Manual Control]==========");
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n ==========================================");
+                        "\r\n ==========================================");
        }
        if (btcoexist->stop_coex_dm) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n ============[Coex is STOPPED]============");
+                        "\r\n ============[Coex is STOPPED]============");
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n ==========================================");
+                        "\r\n ==========================================");
        }
 
        if (!board_info->bt_exist) {
@@ -2419,45 +2446,45 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
        }
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d",
-                  "Ant PG Num/ Ant Mech/ Ant Pos:",
-                  board_info->pg_ant_num, board_info->btdm_ant_num,
-                  board_info->btdm_ant_pos);
+                "Ant PG Num/ Ant Mech/ Ant Pos:",
+                board_info->pg_ant_num, board_info->btdm_ant_num,
+                board_info->btdm_ant_pos);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
-                  "BT stack/ hci ext ver",
-                  ((stack_info->profile_notified) ? "Yes" : "No"),
-                  stack_info->hci_version);
+                "BT stack/ hci ext ver",
+                ((stack_info->profile_notified) ? "Yes" : "No"),
+                stack_info->hci_version);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
-                  "CoexVer/ FwVer/ PatchVer",
-                  glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant,
-                  fw_ver, bt_patch_ver, bt_patch_ver);
+                "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+                "CoexVer/ FwVer/ PatchVer",
+                glcoex_ver_date_8723b_1ant, glcoex_ver_8723b_1ant,
+                fw_ver, bt_patch_ver, bt_patch_ver);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
                           &wifi_dot11_chnl);
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
-                  "Dot11 channel / HsChnl(HsMode)",
-                  wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+                "Dot11 channel / HsChnl(HsMode)",
+                wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
-                  "H2C Wifi inform bt chnl Info",
-                  coex_dm->wifi_chnl_info);
+                "H2C Wifi inform bt chnl Info",
+                coex_dm->wifi_chnl_info);
 
        btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
        btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                  "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
+                "Wifi rssi/ HS rssi", wifi_rssi, bt_hs_rssi);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
-                  "Wifi link/ roam/ scan", link, roam, scan);
+                "Wifi link/ roam/ scan", link, roam, scan);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G,
                           &wifi_under_5g);
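
A note on the %3ph and %7ph specifiers used in this function: %<N>ph is the kernel printk extension that hex-dumps the first N bytes of a buffer with space separators (see Documentation/printk-formats.txt), which is how wifi_chnl_info and the raw BT-info C2H payloads are printed without a manual loop:

    u8 chnl[3] = { 0x28, 0x01, 0x02 };
    pr_info("chnl info = %3ph\n", chnl);    /* prints "28 01 02" */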
@@ -2467,106 +2494,106 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
                           &wifi_traffic_dir);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
-                  "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
-                  ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
-                       (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
-                  ((!wifi_busy) ? "idle" :
-                       ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
-                               "uplink" : "downlink")));
+                "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+                ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
+                 ((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20")),
+                 ((!wifi_busy) ? "idle" :
+                  ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
+                  "uplink" : "downlink")));
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
                           &wifi_link_status);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d/ %d/ %d",
-                  "sta/vwifi/hs/p2pGo/p2pGc",
-                  ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
-                  ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
-                  ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
-                  ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
-                  ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
+                "sta/vwifi/hs/p2pGo/p2pGc",
+                ((wifi_link_status & WIFI_STA_CONNECTED) ? 1 : 0),
+                ((wifi_link_status & WIFI_AP_CONNECTED) ? 1 : 0),
+                ((wifi_link_status & WIFI_HS_CONNECTED) ? 1 : 0),
+                ((wifi_link_status & WIFI_P2P_GO_CONNECTED) ? 1 : 0),
+                ((wifi_link_status & WIFI_P2P_GC_CONNECTED) ? 1 : 0));
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = [%s/ %d/ %d] ",
-                  "BT [status/ rssi/ retryCnt]",
-                  ((btcoexist->bt_info.bt_disabled) ? ("disabled") :
-                   ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
-                    ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
-                      coex_dm->bt_status) ?
-                     "non-connected idle" :
-                     ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
-                       coex_dm->bt_status) ?
-                      "connected-idle" : "busy")))),
+                "BT [status/ rssi/ retryCnt]",
+                ((btcoexist->bt_info.bt_disabled) ? ("disabled") :
+                 ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
+                  ((BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
+                    coex_dm->bt_status) ?
+                   "non-connected idle" :
+                   ((BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
+                     coex_dm->bt_status) ?
+                    "connected-idle" : "busy")))),
                     coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d / %d / %d / %d",
-                  "SCO/HID/PAN/A2DP", bt_link_info->sco_exist,
-                  bt_link_info->hid_exist, bt_link_info->pan_exist,
-                  bt_link_info->a2dp_exist);
+                "\r\n %-35s = %d / %d / %d / %d",
+                "SCO/HID/PAN/A2DP", bt_link_info->sco_exist,
+                bt_link_info->hid_exist, bt_link_info->pan_exist,
+                bt_link_info->a2dp_exist);
        btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
 
        bt_info_ext = coex_sta->bt_info_ext;
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
-                  "BT Info A2DP rate",
-                  (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
+                "BT Info A2DP rate",
+                (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
 
        for (i = 0; i < BT_INFO_SRC_8723B_1ANT_MAX; i++) {
                if (coex_sta->bt_info_c2h_cnt[i]) {
                        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                                  "\r\n %-35s = %7ph(%d)",
-                                  GLBtInfoSrc8723b1Ant[i],
-                                  coex_sta->bt_info_c2h[i],
-                                  coex_sta->bt_info_c2h_cnt[i]);
+                                "\r\n %-35s = %7ph(%d)",
+                                GLBtInfoSrc8723b1Ant[i],
+                                coex_sta->bt_info_c2h[i],
+                                coex_sta->bt_info_c2h_cnt[i]);
                }
        }
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %s/%s, (0x%x/0x%x)",
-                  "PS state, IPS/LPS, (lps/rpwm)",
-                  ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
-                  ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
-                  btcoexist->bt_info.lps_val,
-                  btcoexist->bt_info.rpwm_val);
+                "\r\n %-35s = %s/%s, (0x%x/0x%x)",
+                "PS state, IPS/LPS, (lps/rpwm)",
+                ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+                ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")),
+                btcoexist->bt_info.lps_val,
+                btcoexist->bt_info.rpwm_val);
        btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
 
        if (!btcoexist->manual_control) {
                /* Sw mechanism */
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                          "============[Sw mechanism]============");
+                        "============[Sw mechanism]============");
 
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/",
-                          "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra);
+                        "SM[LowPenaltyRA]", coex_dm->cur_low_penalty_ra);
 
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/ %s/ %d ",
-                          "DelBA/ BtCtrlAgg/ AggSize",
+                        "DelBA/ BtCtrlAgg/ AggSize",
                           (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
                           (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
                           btcoexist->bt_info.agg_buf_size);
 
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ",
-                          "Rate Mask", btcoexist->bt_info.ra_mask);
+                        "Rate Mask", btcoexist->bt_info.ra_mask);
 
                /* Fw mechanism */
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                          "============[Fw mechanism]============");
+                        "============[Fw mechanism]============");
 
                pstdmacase = coex_dm->cur_ps_tdma;
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n %-35s = %5ph case-%d (auto:%d)",
+                        "\r\n %-35s = %5ph case-%d (auto:%d)",
                           "PS TDMA", coex_dm->ps_tdma_para,
                           pstdmacase, coex_dm->auto_tdma_adjust);
 
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d ",
-                          "IgnWlanAct", coex_dm->cur_ignore_wlan_act);
+                        "IgnWlanAct", coex_dm->cur_ignore_wlan_act);
 
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x ",
-                          "Latest error condition(should be 0)",
+                        "Latest error condition(should be 0)",
                           coex_dm->error_condition);
        }
 
        /* Hw setting */
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                  "============[Hw setting]============");
+                "============[Hw setting]============");
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-                  "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
+                "backup ARFR1/ARFR2/RL/AMaxTime", coex_dm->backup_arfr_cnt1,
                   coex_dm->backup_arfr_cnt2, coex_dm->backup_retry_limit,
                   coex_dm->backup_ampdu_max_time);
 
@@ -2575,49 +2602,49 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
        u16tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-                  "0x430/0x434/0x42a/0x456",
-                  u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
+                "0x430/0x434/0x42a/0x456",
+                u32tmp[0], u32tmp[1], u16tmp[0], u8tmp[0]);
 
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6cc);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x880);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                  "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0],
-                  (u32tmp[1] & 0x3e000000) >> 25);
+                "0x778/0x6cc/0x880[29:25]", u8tmp[0], u32tmp[0],
+                (u32tmp[1] & 0x3e000000) >> 25);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
        u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                  "0x948/ 0x67[5] / 0x765",
-                  u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
+                "0x948/ 0x67[5] / 0x765",
+                u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
        u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                  "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
-                  u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
+                "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+                u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
 
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
        u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
        u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
-                  "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
-                  ((u8tmp[0] & 0x8)>>3), u8tmp[1],
-                  ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
+                "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+                "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+                ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
+                ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                  "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+                "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                  "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
+                "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
@@ -2636,22 +2663,22 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
        fa_cck = (u8tmp[0] << 8) + u8tmp[1];
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                  "OFDM-CCA/OFDM-FA/CCK-FA",
-                  u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
+                "OFDM-CCA/OFDM-FA/CCK-FA",
+                u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
        u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                  "0x6c0/0x6c4/0x6c8(coexTable)",
-                  u32tmp[0], u32tmp[1], u32tmp[2]);
+                "0x6c0/0x6c4/0x6c8(coexTable)",
+                u32tmp[0], u32tmp[1], u32tmp[2]);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                  "0x770(high-pri rx/tx)", coex_sta->high_priority_rx,
-                  coex_sta->high_priority_tx);
+                "0x770(high-pri rx/tx)", coex_sta->high_priority_rx,
+                coex_sta->high_priority_tx);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                  "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
-                  coex_sta->low_priority_tx);
+                "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+                coex_sta->low_priority_tx);
 #if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 1)
        halbtc8723b1ant_monitor_bt_ctr(btcoexist);
 #endif
@@ -2660,12 +2687,14 @@ void ex_halbtc8723b1ant_display_coex_info(struct btc_coexist *btcoexist)
 
 void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (btcoexist->manual_control || btcoexist->stop_coex_dm)
                return;
 
        if (BTC_IPS_ENTER == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], IPS ENTER notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], IPS ENTER notify\n");
                coex_sta->under_ips = true;
 
                halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
@@ -2676,8 +2705,8 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
                                                     NORMAL_EXEC, 0);
                halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
        } else if (BTC_IPS_LEAVE == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], IPS LEAVE notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], IPS LEAVE notify\n");
                coex_sta->under_ips = false;
 
                halbtc8723b1ant_init_hw_config(btcoexist, false);
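
The hunks above establish the pattern that repeats through the rest of this commit: each notify handler gains a local "struct rtl_priv *rtlpriv = btcoexist->adapter;" so that RT_TRACE, which needs the driver context, can replace the btcoexist-private btc_iface_dbg()/btc_alg_dbg() wrappers. A minimal sketch of the before/after shape (the function body here is illustrative only, not a hunk from the patch):

        void example_notify(struct btc_coexist *btcoexist, u8 type)
        {
                /* new: recover the driver context the old wrappers hid */
                struct rtl_priv *rtlpriv = btcoexist->adapter;

                /* was: btc_iface_dbg(INTF_NOTIFY, "[BTCoex], ... notify\n"); */
                RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                         "[BTCoex], ... notify\n");
        }
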
@@ -2688,22 +2717,25 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 
 void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (btcoexist->manual_control || btcoexist->stop_coex_dm)
                return;
 
        if (BTC_LPS_ENABLE == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], LPS ENABLE notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], LPS ENABLE notify\n");
                coex_sta->under_lps = true;
        } else if (BTC_LPS_DISABLE == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], LPS DISABLE notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], LPS DISABLE notify\n");
                coex_sta->under_lps = false;
        }
 }
 
 void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool wifi_connected = false, bt_hs_on = false;
        u32 wifi_link_status = 0;
        u32 num_of_wifi_link = 0;
@@ -2740,15 +2772,15 @@ void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
        }
 
        if (BTC_SCAN_START == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], SCAN START notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], SCAN START notify\n");
                if (!wifi_connected)    /* non-connected scan */
                        btc8723b1ant_action_wifi_not_conn_scan(btcoexist);
                else    /* wifi is connected */
                        btc8723b1ant_action_wifi_conn_scan(btcoexist);
        } else if (BTC_SCAN_FINISH == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], SCAN FINISH notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], SCAN FINISH notify\n");
                if (!wifi_connected)    /* non-connected scan */
                        btc8723b1ant_action_wifi_not_conn(btcoexist);
                else
@@ -2758,6 +2790,7 @@ void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 
 void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool wifi_connected = false, bt_hs_on = false;
        u32 wifi_link_status = 0;
        u32 num_of_wifi_link = 0;
@@ -2789,12 +2822,12 @@ void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
        }
 
        if (BTC_ASSOCIATE_START == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], CONNECT START notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CONNECT START notify\n");
                btc8723b1ant_act_wifi_not_conn_asso_auth(btcoexist);
        } else if (BTC_ASSOCIATE_FINISH == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], CONNECT FINISH notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CONNECT FINISH notify\n");
 
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                                   &wifi_connected);
@@ -2808,6 +2841,7 @@ void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
                                            u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[3] = {0};
        u32 wifi_bw;
        u8 wifiCentralChnl;
@@ -2817,11 +2851,11 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
                return;
 
        if (BTC_MEDIA_CONNECT == type)
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], MEDIA connect notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], MEDIA connect notify\n");
        else
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], MEDIA disconnect notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], MEDIA disconnect notify\n");
 
        /* only 2.4G we need to inform bt the chnl mask */
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
@@ -2842,10 +2876,10 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
        coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
        coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], FW write 0x66 = 0x%x\n",
-                   h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
-                   h2c_parameter[2]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], FW write 0x66 = 0x%x\n",
+                h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+                h2c_parameter[2]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
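
For reference, the three payload bytes stored into wifi_chnl_info above are also what the new RT_TRACE line folds into a single value before the H2C 0x66 write. A small standalone sketch of that packing (the sample byte values are hypothetical; the assignments that produce them sit in context lines elided from this hunk):

        u8 h2c_parameter[3] = { 0x01, 11, 0x30 };  /* hypothetical: state, channel, bw code */
        u32 logged = h2c_parameter[0] << 16 |
                     h2c_parameter[1] << 8 |
                     h2c_parameter[2];             /* prints as 0x10b30 */
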
@@ -2853,6 +2887,7 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
 void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
                                              u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool bt_hs_on = false;
        u32 wifi_link_status = 0;
        u32 num_of_wifi_link = 0;
@@ -2887,8 +2922,8 @@ void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
 
        if (BTC_PACKET_DHCP == type ||
            BTC_PACKET_EAPOL == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], special Packet(%d) notify\n", type);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], special Packet(%d) notify\n", type);
                halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
        }
 }
@@ -2896,6 +2931,7 @@ void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
 void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
                                       u8 *tmp_buf, u8 length)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 bt_info = 0;
        u8 i, rsp_source = 0;
        bool wifi_connected = false;
@@ -2908,19 +2944,19 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
                rsp_source = BT_INFO_SRC_8723B_1ANT_WIFI_FW;
        coex_sta->bt_info_c2h_cnt[rsp_source]++;
 
-       btc_iface_dbg(INTF_NOTIFY,
-                     "[BTCoex], Bt info[%d], length=%d, hex data = [",
-                     rsp_source, length);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Bt info[%d], length=%d, hex data = [",
+                rsp_source, length);
        for (i = 0; i < length; i++) {
                coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
                if (i == 1)
                        bt_info = tmp_buf[i];
                if (i == length - 1)
-                       btc_iface_dbg(INTF_NOTIFY,
-                                     "0x%02x]\n", tmp_buf[i]);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "0x%02x]\n", tmp_buf[i]);
                else
-                       btc_iface_dbg(INTF_NOTIFY,
-                                     "0x%02x, ", tmp_buf[i]);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "0x%02x, ", tmp_buf[i]);
        }
 
        if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rsp_source) {
@@ -2937,8 +2973,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
                 * because bt is reset and loss of the info.
                 */
                if (coex_sta->bt_info_ext & BIT1) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
                        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                                           &wifi_connected);
                        if (wifi_connected)
@@ -2952,8 +2988,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
                if (coex_sta->bt_info_ext & BIT3) {
                        if (!btcoexist->manual_control &&
                            !btcoexist->stop_coex_dm) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
                                halbtc8723b1ant_ignore_wlan_act(btcoexist,
                                                                FORCE_EXEC,
                                                                false);
@@ -3008,30 +3044,30 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
 
        if (!(bt_info&BT_INFO_8723B_1ANT_B_CONNECTION)) {
                coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
        /* connection exists but no busy */
        } else if (bt_info == BT_INFO_8723B_1ANT_B_CONNECTION) {
                coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
        } else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
                (bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY)) {
                coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_SCO_BUSY;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
        } else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) {
                if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
                        coex_dm->auto_tdma_adjust = false;
 
                coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_ACL_BUSY;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
        } else {
                coex_dm->bt_status =
                        BT_8723B_1ANT_BT_STATUS_MAX;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
        }
 
        if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3047,7 +3083,9 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
 
 void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
 {
-       btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
 
        btcoexist->stop_coex_dm = true;
 
@@ -3065,11 +3103,13 @@ void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
 
 void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
 {
-       btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Pnp notify\n");
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
 
        if (BTC_WIFI_PNP_SLEEP == pnp_state) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], Pnp notify to SLEEP\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Pnp notify to SLEEP\n");
                btcoexist->stop_coex_dm = true;
                halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false,
                                           true);
@@ -3079,8 +3119,8 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
                halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
                halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
        } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], Pnp notify to WAKE UP\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Pnp notify to WAKE UP\n");
                btcoexist->stop_coex_dm = false;
                halbtc8723b1ant_init_hw_config(btcoexist, false);
                halbtc8723b1ant_init_coex_dm(btcoexist);
@@ -3090,8 +3130,10 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
 
 void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist)
 {
-       btc_alg_dbg(ALGO_TRACE,
-                   "[BTCoex], *****************Coex DM Reset****************\n");
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], *****************Coex DM Reset****************\n");
 
        halbtc8723b1ant_init_hw_config(btcoexist, false);
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
@@ -3101,36 +3143,37 @@ void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist)
 
 void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
        static u8 dis_ver_info_cnt;
        u32 fw_ver = 0, bt_patch_ver = 0;
 
-       btc_alg_dbg(ALGO_TRACE,
-                   "[BTCoex], ==========================Periodical===========================\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], ==========================Periodical===========================\n");
 
        if (dis_ver_info_cnt <= 5) {
                dis_ver_info_cnt += 1;
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], ****************************************************************\n");
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
-                             board_info->pg_ant_num, board_info->btdm_ant_num,
-                             board_info->btdm_ant_pos);
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
-                             stack_info->profile_notified ? "Yes" : "No",
-                             stack_info->hci_version);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], ****************************************************************\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+                        board_info->pg_ant_num, board_info->btdm_ant_num,
+                        board_info->btdm_ant_pos);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+                        stack_info->profile_notified ? "Yes" : "No",
+                        stack_info->hci_version);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
                                   &bt_patch_ver);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
-                             glcoex_ver_date_8723b_1ant,
-                             glcoex_ver_8723b_1ant, fw_ver,
-                             bt_patch_ver, bt_patch_ver);
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], ****************************************************************\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+                        glcoex_ver_date_8723b_1ant,
+                        glcoex_ver_8723b_1ant, fw_ver,
+                        bt_patch_ver, bt_patch_ver);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], ****************************************************************\n");
        }
 
 #if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
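
The remainder of the diff applies the same conversion to the 2-antenna variant. One structural difference: btc8723b2ant_bt_rssi_state() was a helper with no access to the adapter, so the patch threads a struct btc_coexist pointer through its signature purely to reach rtlpriv, and updates every call site to match. In outline:

        /* new signature, per the hunk below */
        static u8 btc8723b2ant_bt_rssi_state(struct btc_coexist *btcoexist,
                                             u8 level_num, u8 rssi_thresh,
                                             u8 rssi_thresh1);

        /* callers change accordingly, e.g. in btc8723b_need_dec_pwr():
         *   before: bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
         *   after:  bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
         */
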
index 5f488ecaef70b890ba049f6a00e82a76b130d8ee..12125966a911140fd0b32a7d153cf47b0dc75a68 100644
@@ -58,9 +58,11 @@ static u32 glcoex_ver_8723b_2ant = 0x3f;
 /**************************************************************
  * local function start with btc8723b2ant_
  **************************************************************/
-static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
+static u8 btc8723b2ant_bt_rssi_state(struct btc_coexist *btcoexist,
+                                    u8 level_num, u8 rssi_thresh,
                                     u8 rssi_thresh1)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        s32 bt_rssi = 0;
        u8 bt_rssi_state = coex_sta->pre_bt_rssi_state;
 
@@ -72,28 +74,28 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= rssi_thresh +
                                       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to High\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at Low\n");
                        }
                } else {
                        if (bt_rssi < rssi_thresh) {
                                bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to Low\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                   "[BTCoex], BT Rssi thresh error!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], BT Rssi thresh error!!\n");
                        return coex_sta->pre_bt_rssi_state;
                }
 
@@ -102,12 +104,12 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= rssi_thresh +
                                       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to Medium\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at Low\n");
                        }
                } else if ((coex_sta->pre_bt_rssi_state ==
                                                BTC_RSSI_STATE_MEDIUM) ||
@@ -116,26 +118,26 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= rssi_thresh1 +
                                       BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to High\n");
                        } else if (bt_rssi < rssi_thresh) {
                                bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to Low\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at Medium\n");
                        }
                } else {
                        if (bt_rssi < rssi_thresh1) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to Medium\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at High\n");
                        }
                }
        }
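
The state machine above changes only its logging; the hysteresis logic is untouched. The key detail is that BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT is added to the threshold only when climbing out of a lower state, so an RSSI hovering at the threshold cannot flap between states. A compact standalone illustration of the two-level case (names and types here are placeholders, not the driver's):

        enum rssi_state { RSSI_LOW, RSSI_HIGH };

        static enum rssi_state next_state(enum rssi_state prev, int rssi,
                                          int thresh, int tol)
        {
                if (prev == RSSI_LOW)
                        return (rssi >= thresh + tol) ? RSSI_HIGH : RSSI_LOW;
                return (rssi < thresh) ? RSSI_LOW : RSSI_HIGH;  /* no margin on the way down */
        }
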
@@ -149,6 +151,7 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                                       u8 index, u8 level_num,
                                       u8 rssi_thresh, u8 rssi_thresh1)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        s32 wifi_rssi = 0;
        u8 wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
 
@@ -162,28 +165,28 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >= rssi_thresh +
                                         BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to High\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at Low\n");
                        }
                } else {
                        if (wifi_rssi < rssi_thresh) {
                                wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to Low\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                   "[BTCoex], wifi RSSI thresh error!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], wifi RSSI thresh error!!\n");
                        return coex_sta->pre_wifi_rssi_state[index];
                }
 
@@ -194,12 +197,12 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >= rssi_thresh +
                                        BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to Medium\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at Low\n");
                        }
                } else if ((coex_sta->pre_wifi_rssi_state[index] ==
                                                BTC_RSSI_STATE_MEDIUM) ||
@@ -208,26 +211,26 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >= rssi_thresh1 +
                                         BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to High\n");
                        } else if (wifi_rssi < rssi_thresh) {
                                wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to Low\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at Medium\n");
                        }
                } else {
                        if (wifi_rssi < rssi_thresh1) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to Medium\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at High\n");
                        }
                }
        }
@@ -239,6 +242,7 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
 
 static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u32 reg_hp_txrx, reg_lp_txrx, u32tmp;
        u32 reg_hp_tx = 0, reg_hp_rx = 0;
        u32 reg_lp_tx = 0, reg_lp_rx = 0;
@@ -259,12 +263,12 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
        coex_sta->low_priority_tx = reg_lp_tx;
        coex_sta->low_priority_rx = reg_lp_rx;
 
-       btc_alg_dbg(ALGO_BT_MONITOR,
-                   "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
-                   reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
-       btc_alg_dbg(ALGO_BT_MONITOR,
-                   "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
-                   reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+                reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+                reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
 
        /* reset counter */
        btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
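
This monitor runs periodically: it samples the BT high/low-priority traffic counters and then clears them, so each report covers a single interval. The split of each 32-bit register into tx/rx halves happens in context lines not shown in this hunk; a sketch of the idea, under that assumption:

        u32 raw = btcoexist->btc_read_4byte(btcoexist, reg_hp_txrx);
        u32 reg_hp_tx = raw & 0xffff;          /* low 16 bits: tx count */
        u32 reg_hp_rx = (raw >> 16) & 0xffff;  /* high 16 bits: rx count */
        /* ... log both, then reset for the next sampling period: */
        btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
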
@@ -272,15 +276,16 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
 
 static void btc8723b2ant_query_bt_info(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        coex_sta->c2h_bt_info_req_sent = true;
 
        h2c_parameter[0] |= BIT0;       /* trigger */
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
-                   h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+                h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
@@ -386,6 +391,7 @@ static void btc8723b2ant_update_bt_link_info(struct btc_coexist *btcoexist)
 
 static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
        bool bt_hs_on = false;
        u8 algorithm = BT_8723B_2ANT_COEX_ALGO_UNDEFINED;
@@ -394,8 +400,8 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
        if (!bt_link_info->bt_link_exist) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], No BT link exists!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], No BT link exists!!!\n");
                return algorithm;
        }
 
@@ -410,27 +416,29 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
 
        if (num_of_diff_profile == 1) {
                if (bt_link_info->sco_exist) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], SCO only\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], SCO only\n");
                        algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
                } else {
                        if (bt_link_info->hid_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], HID only\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], HID only\n");
                                algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], A2DP only\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], A2DP only\n");
                                algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
                        } else if (bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], PAN(HS) only\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], PAN(HS) only\n");
                                        algorithm =
                                                BT_8723B_2ANT_COEX_ALGO_PANHS;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], PAN(EDR) only\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], PAN(EDR) only\n");
                                        algorithm =
                                                BT_8723B_2ANT_COEX_ALGO_PANEDR;
                                }
@@ -439,21 +447,23 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
        } else if (num_of_diff_profile == 2) {
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], SCO + HID\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], SCO + HID\n");
                                algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (bt_link_info->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], SCO + A2DP ==> SCO\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], SCO + A2DP ==> SCO\n");
                                algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], SCO + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], SCO + PAN(HS)\n");
                                        algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], SCO + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], SCO + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                                }
@@ -461,31 +471,35 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
                } else {
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], HID + A2DP\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], HID + A2DP\n");
                                algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
                        } else if (bt_link_info->hid_exist &&
                                   bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], HID + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], HID + PAN(HS)\n");
                                        algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], HID + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], HID + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (bt_link_info->pan_exist &&
                                   bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], A2DP + PAN(HS)\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex],A2DP + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex],A2DP + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
                                }
@@ -495,32 +509,36 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], SCO + HID + A2DP ==> HID\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], SCO + HID + A2DP ==> HID\n");
                                algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (bt_link_info->hid_exist &&
                                   bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], SCO + HID + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], SCO + HID + PAN(HS)\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], SCO + HID + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], SCO + HID + PAN(EDR)\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (bt_link_info->pan_exist &&
                                   bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], SCO + A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], SCO + A2DP + PAN(HS)\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                                }
@@ -530,13 +548,15 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], HID + A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], HID + A2DP + PAN(HS)\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], HID + A2DP + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], HID + A2DP + PAN(EDR)\n");
                                        algorithm =
                                        BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
                                }
@@ -548,11 +568,13 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
                                        algorithm =
                                            BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
                                }
@@ -564,6 +586,7 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
 
 static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool ret = false;
        bool bt_hs_on = false, wifi_connected = false;
        s32 bt_hs_rssi = 0;
@@ -577,20 +600,20 @@ static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist)
        if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
                return false;
 
-       bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
 
        if (wifi_connected) {
                if (bt_hs_on) {
                        if (bt_hs_rssi > 37) {
-                               btc_alg_dbg(ALGO_TRACE_FW,
-                                           "[BTCoex], Need to decrease bt power for HS mode!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], Need to decrease bt power for HS mode!!\n");
                                ret = true;
                        }
                } else {
                        if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
                            (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                               btc_alg_dbg(ALGO_TRACE_FW,
-                                           "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
                                ret = true;
                        }
                }
@@ -602,6 +625,7 @@ static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist)
 static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
                                                u8 dac_swing_lvl)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        /* There are several type of dacswing
@@ -609,10 +633,10 @@ static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
         */
        h2c_parameter[0] = dac_swing_lvl;
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
 }
@@ -620,6 +644,7 @@ static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
 static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
                                           bool dec_bt_pwr)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        h2c_parameter[0] = 0;
@@ -627,8 +652,8 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
        if (dec_bt_pwr)
                h2c_parameter[0] |= BIT1;
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
                    (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
@@ -637,14 +662,16 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
 static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
                                    bool force_exec, bool dec_bt_pwr)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s Dec BT power = %s\n",
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s Dec BT power = %s\n",
                    force_exec ? "force to" : "", dec_bt_pwr ? "ON" : "OFF");
        coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
                            coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
 
                if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
@@ -658,14 +685,16 @@ static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
 static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
                                          bool force_exec, u8 fw_dac_swing_lvl)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s set FW Dac Swing level = %d\n",
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s set FW Dac Swing level = %d\n",
                    (force_exec ? "force to" : ""), fw_dac_swing_lvl);
        coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
                            coex_dm->pre_fw_dac_swing_lvl,
                            coex_dm->cur_fw_dac_swing_lvl);
 
@@ -682,18 +711,20 @@ static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
 static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
                                                 bool rx_rf_shrink_on)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (rx_rf_shrink_on) {
                /* Shrink RF Rx LPF corner */
-               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                           "[BTCoex], Shrink RF Rx LPF corner!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Shrink RF Rx LPF corner!!\n");
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
                                          0xfffff, 0xffffc);
        } else {
                /* Resume RF Rx LPF corner */
                /* After initialized, we can use coex_dm->btRf0x1eBackup */
                if (btcoexist->initilized) {
-                       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                                   "[BTCoex], Resume RF Rx LPF corner!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Resume RF Rx LPF corner!!\n");
                        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
                                                  0xfffff,
                                                  coex_dm->bt_rf0x1e_backup);
@@ -704,15 +735,17 @@ static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
 static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
                                   bool force_exec, bool rx_rf_shrink_on)
 {
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "[BTCoex], %s turn Rx RF Shrink = %s\n",
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn Rx RF Shrink = %s\n",
                    (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
                                                     "ON" : "OFF"));
        coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
                            coex_dm->pre_rf_rx_lpf_shrink,
                            coex_dm->cur_rf_rx_lpf_shrink);
 
@@ -729,6 +762,7 @@ static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
 static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
                                        bool low_penalty_ra)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[6] = {0};
 
        h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/
@@ -742,9 +776,9 @@ static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
                h2c_parameter[5] = 0xf9;  /*MCS5 or OFDM36*/
        }
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], set WiFi Low-Penalty Retry: %s",
-                   (low_penalty_ra ? "ON!!" : "OFF!!"));
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set WiFi Low-Penalty Retry: %s",
+                (low_penalty_ra ? "ON!!" : "OFF!!"));
 
        btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
 }
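
[Editor's note] A sketch of the visible parts of the 6-byte retry-penalty payload built above: byte 0 carries the opcode (0x6 = Retry_Penalty) and, when the low-penalty rate-adaptive mode is on, byte 5 carries a rate code (0xf9 = MCS5 or OFDM36 per the driver comment). Bytes 1-4 are set on lines elided from this hunk, so they are left zeroed here:

    #include <stdint.h>

    static void build_penalty_h2c(uint8_t buf[6], int low_penalty_ra)
    {
        int i;

        for (i = 0; i < 6; i++)
            buf[i] = 0;
        buf[0] = 0x6;           /* opCode: Retry_Penalty */
        if (low_penalty_ra)
            buf[5] = 0xf9;      /* MCS5 or OFDM36 */
    }
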
@@ -752,18 +786,20 @@ static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
 static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
                                        bool force_exec, bool low_penalty_ra)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        /*return; */
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "[BTCoex], %s turn LowPenaltyRA = %s\n",
-                   (force_exec ? "force to" : ""), (low_penalty_ra ?
-                                                    "ON" : "OFF"));
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn LowPenaltyRA = %s\n",
+                (force_exec ? "force to" : ""), (low_penalty_ra ?
+                                                 "ON" : "OFF"));
        coex_dm->cur_low_penalty_ra = low_penalty_ra;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
-                           coex_dm->pre_low_penalty_ra,
-                           coex_dm->cur_low_penalty_ra);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+                        coex_dm->pre_low_penalty_ra,
+                        coex_dm->cur_low_penalty_ra);
 
                if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
                        return;
@@ -776,9 +812,11 @@ static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
 static void btc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
                                           u32 level)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 val = (u8) level;
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Write SwDacSwing = 0x%x\n", level);
        btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
 }
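
[Editor's note] btc_write_1byte_bitmask() above is a read-modify-write into a register bit field: only the bits selected by the mask change (0x3e = bits 1..5 of register 0x883), with the value shifted up to the mask's lowest set bit. A userspace sketch of that assumed semantics; the real implementation sits behind the btcoexist callback table:

    #include <stdint.h>

    /* RMW a value into the field selected by mask, e.g. mask 0x3e =
     * bits 1..5: clear the field in the old register value, shift the
     * new value up to the mask's lowest set bit, and merge. */
    static uint8_t write_bitmask(uint8_t reg_old, uint8_t mask, uint8_t val)
    {
        unsigned shift = 0;

        while (shift < 8 && !(mask & (1u << shift)))
            shift++;
        return (uint8_t)((reg_old & ~mask) |
                         ((unsigned)(val << shift) & mask));
    }
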
 
@@ -796,20 +834,22 @@ static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
                                   bool force_exec, bool dac_swing_on,
                                   u32 dac_swing_lvl)
 {
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
-                   (force_exec ? "force to" : ""),
-                   (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+                (force_exec ? "force to" : ""),
+                (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
        coex_dm->cur_dac_swing_on = dac_swing_on;
        coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
-                           coex_dm->pre_dac_swing_on,
-                           coex_dm->pre_dac_swing_lvl,
-                           coex_dm->cur_dac_swing_on,
-                           coex_dm->cur_dac_swing_lvl);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+                        coex_dm->pre_dac_swing_on,
+                        coex_dm->pre_dac_swing_lvl,
+                        coex_dm->cur_dac_swing_on,
+                        coex_dm->cur_dac_swing_lvl);
 
                if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
                    (coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -826,12 +866,13 @@ static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
 static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
                                       bool agc_table_en)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 rssi_adjust_val = 0;
 
        /*  BB AGC Gain Table */
        if (agc_table_en) {
-               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                           "[BTCoex], BB Agc Table On!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BB Agc Table On!\n");
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001);
@@ -840,8 +881,8 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001);
        } else {
-               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                           "[BTCoex], BB Agc Table Off!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BB Agc Table Off!\n");
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
                btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
@@ -854,15 +895,15 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
        /* RF Gain */
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
        if (agc_table_en) {
-               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                           "[BTCoex], Agc Table On!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Agc Table On!\n");
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
                                          0xfffff, 0x38fff);
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
                                          0xfffff, 0x38ffe);
        } else {
-               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                           "[BTCoex], Agc Table Off!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Agc Table Off!\n");
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
                                          0xfffff, 0x380c3);
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
@@ -873,15 +914,15 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
 
        if (agc_table_en) {
-               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                           "[BTCoex], Agc Table On!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Agc Table On!\n");
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
                                          0xfffff, 0x38fff);
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
                                          0xfffff, 0x38ffe);
        } else {
-               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                           "[BTCoex], Agc Table Off!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Agc Table Off!\n");
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
                                          0xfffff, 0x380c3);
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
@@ -899,17 +940,19 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
 static void btc8723b2ant_agc_table(struct btc_coexist *btcoexist,
                                   bool force_exec, bool agc_table_en)
 {
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "[BTCoex], %s %s Agc Table\n",
-                   (force_exec ? "force to" : ""),
-                   (agc_table_en ? "Enable" : "Disable"));
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s %s Agc Table\n",
+                (force_exec ? "force to" : ""),
+                (agc_table_en ? "Enable" : "Disable"));
        coex_dm->cur_agc_table_en = agc_table_en;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
-                           coex_dm->pre_agc_table_en,
-                           coex_dm->cur_agc_table_en);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+                        coex_dm->pre_agc_table_en,
+                        coex_dm->cur_agc_table_en);
 
                if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
                        return;
@@ -923,20 +966,22 @@ static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
                                        u32 val0x6c0, u32 val0x6c4,
                                        u32 val0x6c8, u8 val0x6cc)
 {
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
        btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
        btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
        btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
        btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
 
@@ -945,24 +990,26 @@ static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist,
                                    u32 val0x6c4, u32 val0x6c8,
                                    u8 val0x6cc)
 {
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
-                   force_exec ? "force to" : "",
-                   val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+                force_exec ? "force to" : "",
+                val0x6c0, val0x6c4, val0x6c8, val0x6cc);
        coex_dm->cur_val0x6c0 = val0x6c0;
        coex_dm->cur_val0x6c4 = val0x6c4;
        coex_dm->cur_val0x6c8 = val0x6c8;
        coex_dm->cur_val0x6cc = val0x6cc;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
-                           coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
-                           coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
-                           coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
-                           coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+                        coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
+                        coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+                        coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
+                        coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
 
                if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
                    (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1043,14 +1090,15 @@ static void btc8723b_coex_tbl_type(struct btc_coexist *btcoexist,
 static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
                                                bool enable)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        if (enable)
                h2c_parameter[0] |= BIT0;/* function enable*/
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
-                   h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
+                h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
 }
@@ -1058,16 +1106,18 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
 static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
                                         bool force_exec, bool enable)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s turn Ignore WlanAct %s\n",
-                   (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn Ignore WlanAct %s\n",
+                (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
        coex_dm->cur_ignore_wlan_act = enable;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
-                           coex_dm->pre_ignore_wlan_act,
-                           coex_dm->cur_ignore_wlan_act);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+                        coex_dm->pre_ignore_wlan_act,
+                        coex_dm->cur_ignore_wlan_act);
 
                if (coex_dm->pre_ignore_wlan_act ==
                    coex_dm->cur_ignore_wlan_act)
@@ -1081,6 +1131,7 @@ static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
 static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
                                        u8 byte2, u8 byte3, u8 byte4, u8 byte5)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[5];
 
        h2c_parameter[0] = byte1;
@@ -1095,11 +1146,11 @@ static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
        coex_dm->ps_tdma_para[3] = byte4;
        coex_dm->ps_tdma_para[4] = byte5;
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
-                   h2c_parameter[0],
-                   h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
-                   h2c_parameter[3] << 8 | h2c_parameter[4]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+                h2c_parameter[0],
+                h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+                h2c_parameter[3] << 8 | h2c_parameter[4]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
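
[Editor's note] The trace line above folds the five TDMA bytes into one readable value: byte1 prints on its own and bytes 2-5 are packed big-endian into a 32-bit word, which is why the format string is 0x%x%08x. A standalone check of that packing, with sample byte values (not any specific TDMA type):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t b[5] = { 0xe3, 0x1a, 0x1a, 0x1a, 0x1a };   /* sample bytes */
        unsigned tail = (unsigned)b[1] << 24 | b[2] << 16 | b[3] << 8 | b[4];

        /* Prints: FW write 0x60(5bytes)=0xe31a1a1a1a */
        printf("FW write 0x60(5bytes)=0x%x%08x\n", (unsigned)b[0], tail);
        return 0;
    }
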
@@ -1208,20 +1259,22 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
 static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
                                 bool turn_on, u8 type)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s turn %s PS TDMA, type=%d\n",
-                   (force_exec ? "force to" : ""),
-                   (turn_on ? "ON" : "OFF"), type);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+                (force_exec ? "force to" : ""),
+                (turn_on ? "ON" : "OFF"), type);
        coex_dm->cur_ps_tdma_on = turn_on;
        coex_dm->cur_ps_tdma = type;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
-                           coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
-                           coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+                        coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+                        coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
 
                if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
                    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1405,6 +1458,7 @@ static void btc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
 
 static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool common = false, wifi_connected = false;
        bool wifi_busy = false;
        bool bt_hs_on = false, low_pwr_disable = false;
@@ -1419,8 +1473,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
                btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
                                   &low_pwr_disable);
 
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Wifi non-connected idle!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi non-connected idle!!\n");
 
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
                                          0x0);
@@ -1443,8 +1497,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
                                           BTC_SET_ACT_DISABLE_LOW_POWER,
                                           &low_pwr_disable);
 
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Wifi connected + BT non connected-idle!!\n");
 
                        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
                                                  0xfffff, 0x0);
@@ -1470,8 +1524,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
 
                        if (bt_hs_on)
                                return false;
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Wifi connected + BT connected-idle!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Wifi connected + BT connected-idle!!\n");
 
                        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
                                                  0xfffff, 0x0);
@@ -1495,15 +1549,15 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
                                           &low_pwr_disable);
 
                        if (wifi_busy) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
                                common = false;
                        } else {
                                if (bt_hs_on)
                                        return false;
 
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
 
                                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
                                                          0x1, 0xfffff, 0x0);
@@ -1539,10 +1593,12 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
 static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
                          s32 result)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        /* Set PS TDMA for max interval == 1 */
        if (tx_pause) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 1\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 1\n");
 
                if (coex_dm->cur_ps_tdma == 71) {
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1638,8 +1694,8 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 0\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71);
                        coex_dm->tdma_adj_type = 71;
@@ -1735,10 +1791,12 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
 static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
                          s32 result)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        /* Set PS TDMA for max interval == 2 */
        if (tx_pause) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 1\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 1\n");
                if (coex_dm->cur_ps_tdma == 1) {
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
                        coex_dm->tdma_adj_type = 6;
@@ -1819,8 +1877,8 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 0\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
                        coex_dm->tdma_adj_type = 2;
@@ -1906,10 +1964,12 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
 static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
                          s32 result)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        /* Set PS TDMA for max interval == 3 */
        if (tx_pause) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 1\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 1\n");
                if (coex_dm->cur_ps_tdma == 1) {
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
                        coex_dm->tdma_adj_type = 7;
@@ -1990,8 +2050,8 @@ static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 0\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
                        coex_dm->tdma_adj_type = 3;
@@ -2078,18 +2138,19 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                          bool sco_hid, bool tx_pause,
                                          u8 max_interval)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        static s32 up, dn, m, n, wait_count;
        /* 0: no change, +1: increase WiFi duration, -1: decrease WiFi duration */
        s32 result;
        u8 retry_count = 0;
 
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], TdmaDurationAdjust()\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], TdmaDurationAdjust()\n");
 
        if (!coex_dm->auto_tdma_adjust) {
                coex_dm->auto_tdma_adjust = true;
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], first run TdmaDurationAdjust()!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], first run TdmaDurationAdjust()!!\n");
                if (sco_hid) {
                        if (tx_pause) {
                                if (max_interval == 1) {
@@ -2102,11 +2163,6 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                                             NORMAL_EXEC,
                                                             true, 14);
                                        coex_dm->tdma_adj_type = 14;
-                               } else if (max_interval == 3) {
-                                       btc8723b2ant_ps_tdma(btcoexist,
-                                                            NORMAL_EXEC,
-                                                            true, 15);
-                                       coex_dm->tdma_adj_type = 15;
                                } else {
                                        btc8723b2ant_ps_tdma(btcoexist,
                                                             NORMAL_EXEC,
@@ -2124,11 +2180,6 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                                             NORMAL_EXEC,
                                                             true, 10);
                                        coex_dm->tdma_adj_type = 10;
-                               } else if (max_interval == 3) {
-                                       btc8723b2ant_ps_tdma(btcoexist,
-                                                            NORMAL_EXEC,
-                                                            true, 11);
-                                       coex_dm->tdma_adj_type = 11;
                                } else {
                                        btc8723b2ant_ps_tdma(btcoexist,
                                                             NORMAL_EXEC,
@@ -2148,11 +2199,6 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                                             NORMAL_EXEC,
                                                             true, 6);
                                        coex_dm->tdma_adj_type = 6;
-                               } else if (max_interval == 3) {
-                                       btc8723b2ant_ps_tdma(btcoexist,
-                                                            NORMAL_EXEC,
-                                                            true, 7);
-                                       coex_dm->tdma_adj_type = 7;
                                } else {
                                        btc8723b2ant_ps_tdma(btcoexist,
                                                             NORMAL_EXEC,
@@ -2170,11 +2216,6 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                                             NORMAL_EXEC,
                                                             true, 2);
                                        coex_dm->tdma_adj_type = 2;
-                               } else if (max_interval == 3) {
-                                       btc8723b2ant_ps_tdma(btcoexist,
-                                                            NORMAL_EXEC,
-                                                            true, 3);
-                                       coex_dm->tdma_adj_type = 3;
                                } else {
                                        btc8723b2ant_ps_tdma(btcoexist,
                                                             NORMAL_EXEC,
@@ -2193,11 +2234,11 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
        } else {
                /* acquire the BT TRx retry count from BT_Info byte2 */
                retry_count = coex_sta->bt_retry_cnt;
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], retry_count = %d\n", retry_count);
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
-                           up, dn, m, n, wait_count);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], retry_count = %d\n", retry_count);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
+                        up, dn, m, n, wait_count);
                result = 0;
                wait_count++;
                /* no retry in the last 2-second duration */
@@ -2214,8 +2255,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                up = 0;
                                dn = 0;
                                result = 1;
-                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                           "[BTCoex], Increase wifi duration!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], Increase wifi duration!!\n");
                        } /* <=3 retry in the last 2-second duration*/
                } else if (retry_count <= 3) {
                        up--;
@@ -2238,8 +2279,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                                dn = 0;
                                wait_count = 0;
                                result = -1;
-                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                           "[BTCoex], Decrease wifi duration for retry_counter<3!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], Decrease wifi duration for retry_counter<3!!\n");
                        }
                } else {
                        if (wait_count == 1)
@@ -2255,12 +2296,12 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                        dn = 0;
                        wait_count = 0;
                        result = -1;
-                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                   "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
                }
 
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], max Interval = %d\n", max_interval);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], max Interval = %d\n", max_interval);
                if (max_interval == 1)
                        set_tdma_int1(btcoexist, tx_pause, result);
                else if (max_interval == 2)
@@ -2274,9 +2315,9 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
         */
        if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
                bool scan = false, link = false, roam = false;
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
-                           coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], PsTdma type mismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
+                        coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
 
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2286,8 +2327,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
                        btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
                                             coex_dm->tdma_adj_type);
                else
-                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                   "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], roaming/link/scan is in progress, will adjust next time!!!\n");
        }
 }
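
[Editor's note] The auto-adjust loop above is a hysteresis controller driven by the BT retry count sampled every 2 seconds: clean windows bump the up counter toward widening the Wi-Fi slot (result = 1), windows with retries reset it and push toward narrowing (result = -1), and set_tdma_int1/2/3 then translate the vote into a concrete PS TDMA type. Much of the counter arithmetic is elided from this hunk, so this is a simplified standalone sketch of the shape, not the driver's exact thresholds:

    /* Simplified shape of the 2-second TDMA vote: clean windows widen the
     * Wi-Fi slot, retry-laden windows narrow it.  Thresholds (4 and 2)
     * are illustrative only; the driver scales them with its m/n
     * counters. */
    static int tdma_adjust_step(int retry_count, int *up, int *dn)
    {
        if (retry_count == 0) {
            if (++(*up) >= 4) {     /* enough clean windows: widen */
                *up = 0;
                *dn = 0;
                return 1;
            }
        } else {
            *up = 0;
            if (++(*dn) >= 2) {     /* repeated retries: narrow */
                *dn = 0;
                return -1;
            }
        }
        return 0;                   /* no change this window */
    }
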
 
@@ -2357,7 +2398,7 @@ static void btc8723b2ant_action_hid(struct btc_coexist *btcoexist)
 
        wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
                                                       0, 2, 15, 0);
-       bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
 
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
@@ -2422,7 +2463,7 @@ static void btc8723b2ant_action_a2dp(struct btc_coexist *btcoexist)
                                                       0, 2, 15, 0);
        wifi_rssi_state1 = btc8723b2ant_wifi_rssi_state(btcoexist,
                                                        1, 2, 40, 0);
-       bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
 
@@ -2561,7 +2602,7 @@ static void btc8723b2ant_action_pan_edr(struct btc_coexist *btcoexist)
 
        wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
                                                       0, 2, 15, 0);
-       bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
 
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
@@ -2672,7 +2713,7 @@ static void btc8723b2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
 
        wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
                                                       0, 2, 15, 0);
-       bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
 
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
@@ -2736,7 +2777,7 @@ static void btc8723b2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
 
        wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
                                                       0, 2, 15, 0);
-       bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
        if (btc8723b_need_dec_pwr(btcoexist))
@@ -2806,7 +2847,7 @@ static void btc8723b2ant_action_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
 
        wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
                                                       0, 2, 15, 0);
-       bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
 
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
@@ -2870,7 +2911,7 @@ static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
 
        wifi_rssi_state = btc8723b2ant_wifi_rssi_state(btcoexist,
                                                       0, 2, 15, 0);
-       bt_rssi_state = btc8723b2ant_bt_rssi_state(2, 29, 0);
+       bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, 29, 0);
 
        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
 
@@ -2923,28 +2964,29 @@ static void btc8723b2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
 
 static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 algorithm = 0;
 
-       btc_alg_dbg(ALGO_TRACE,
-                   "[BTCoex], RunCoexistMechanism()===>\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], RunCoexistMechanism()===>\n");
 
        if (btcoexist->manual_control) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
                return;
        }
 
        if (coex_sta->under_ips) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], wifi is under IPS !!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], wifi is under IPS !!!\n");
                return;
        }
 
        algorithm = btc8723b2ant_action_algorithm(btcoexist);
        if (coex_sta->c2h_bt_inquiry_page &&
            (BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BT is under inquiry/page scan !!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BT is under inquiry/page scan !!\n");
                btc8723b2ant_action_bt_inquiry(btcoexist);
                return;
        } else {
@@ -2956,75 +2998,76 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        }
 
        coex_dm->cur_algorithm = algorithm;
-       btc_alg_dbg(ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
-                   coex_dm->cur_algorithm);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Algorithm = %d\n",
+                coex_dm->cur_algorithm);
 
        if (btc8723b2ant_is_common_action(btcoexist)) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Action 2-Ant common\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Action 2-Ant common\n");
                coex_dm->auto_tdma_adjust = false;
        } else {
                if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n",
-                                   coex_dm->pre_algorithm,
-                                   coex_dm->cur_algorithm);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n",
+                                coex_dm->pre_algorithm,
+                                coex_dm->cur_algorithm);
                        coex_dm->auto_tdma_adjust = false;
                }
                switch (coex_dm->cur_algorithm) {
                case BT_8723B_2ANT_COEX_ALGO_SCO:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = SCO\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = SCO\n");
                        btc8723b2ant_action_sco(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_HID:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = HID\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = HID\n");
                        btc8723b2ant_action_hid(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_A2DP:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
                        btc8723b2ant_action_a2dp(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
                        btc8723b2ant_action_a2dp_pan_hs(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_PANEDR:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
                        btc8723b2ant_action_pan_edr(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_PANHS:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
                        btc8723b2ant_action_pan_hs(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
                        btc8723b2ant_action_pan_edr_a2dp(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
                        btc8723b2ant_action_pan_edr_hid(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
                        btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist);
                        break;
                case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
                        btc8723b2ant_action_hid_a2dp(btcoexist);
                        break;
                default:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
                        btc8723b2ant_coex_alloff(btcoexist);
                        break;
                }
@@ -3050,10 +3093,11 @@ static void btc8723b2ant_wifioff_hwcfg(struct btc_coexist *btcoexist)
  *********************************************************************/
 void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 u8tmp = 0;
 
-       btc_iface_dbg(INTF_INIT,
-                     "[BTCoex], 2Ant Init HW Config!!\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], 2Ant Init HW Config!!\n");
        coex_dm->bt_rf0x1e_backup =
                btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff);
 
@@ -3078,8 +3122,10 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
 
 void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
-       btc_iface_dbg(INTF_INIT,
-                     "[BTCoex], Coex Mechanism Init!!\n");
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Coex Mechanism Init!!\n");
        btc8723b2ant_init_coex_dm(btcoexist);
 }
 
@@ -3101,13 +3147,13 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
        u8 ap_num = 0;
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n ============[BT Coexist info]============");
+                "\r\n ============[BT Coexist info]============");
 
        if (btcoexist->manual_control) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n ==========[Under Manual Control]============");
+                        "\r\n ==========[Under Manual Control]============");
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n ==========================================");
+                        "\r\n ==========================================");
        }
 
        if (!board_info->bt_exist) {
@@ -3116,21 +3162,21 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
        }
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
-                  "Ant PG number/ Ant mechanism:",
-                  board_info->pg_ant_num, board_info->btdm_ant_num);
+                "Ant PG number/ Ant mechanism:",
+                board_info->pg_ant_num, board_info->btdm_ant_num);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %d",
-                  "BT stack/ hci ext ver",
-                  ((stack_info->profile_notified) ? "Yes" : "No"),
-                  stack_info->hci_version);
+                "BT stack/ hci ext ver",
+                ((stack_info->profile_notified) ? "Yes" : "No"),
+                stack_info->hci_version);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
-                  "CoexVer/ FwVer/ PatchVer",
-                  glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
-                  fw_ver, bt_patch_ver, bt_patch_ver);
+                "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+                "CoexVer/ FwVer/ PatchVer",
+                glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+                fw_ver, bt_patch_ver, bt_patch_ver);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_DOT11_CHNL,
@@ -3138,23 +3184,23 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d(%d)",
-                  "Dot11 channel / HsChnl(HsMode)",
-                  wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+                "Dot11 channel / HsChnl(HsMode)",
+                wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %3ph ",
-                  "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
+                "H2C Wifi inform bt chnl Info", coex_dm->wifi_chnl_info);
 
        btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
        btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
        btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM, &ap_num);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d",
-                  "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num);
+                "Wifi rssi/ HS rssi/ AP#", wifi_rssi, bt_hs_rssi, ap_num);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
-                  "Wifi link/ roam/ scan", link, roam, scan);
+                "Wifi link/ roam/ scan", link, roam, scan);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
@@ -3162,112 +3208,112 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
                           &wifi_traffic_dir);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s / %s/ %s ",
-                  "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
-                  ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
-                  (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
-                  ((!wifi_busy) ? "idle" :
-                  ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
-                  "uplink" : "downlink")));
+                "Wifi status", (wifi_under_5g ? "5G" : "2.4G"),
+                ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
+                (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))),
+                ((!wifi_busy) ? "idle" :
+                ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
+                 "uplink" : "downlink")));
 
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d / %d / %d / %d",
-                  "SCO/HID/PAN/A2DP",
-                  bt_link_info->sco_exist, bt_link_info->hid_exist,
-                  bt_link_info->pan_exist, bt_link_info->a2dp_exist);
+                "SCO/HID/PAN/A2DP",
+                bt_link_info->sco_exist, bt_link_info->hid_exist,
+                bt_link_info->pan_exist, bt_link_info->a2dp_exist);
        btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
 
        bt_info_ext = coex_sta->bt_info_ext;
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
-                  "BT Info A2DP rate",
-                  (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
+                "BT Info A2DP rate",
+                (bt_info_ext & BIT0) ? "Basic rate" : "EDR rate");
 
        for (i = 0; i < BT_INFO_SRC_8723B_2ANT_MAX; i++) {
                if (coex_sta->bt_info_c2h_cnt[i]) {
                        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                                  "\r\n %-35s = %7ph(%d)",
-                                  glbt_info_src_8723b_2ant[i],
-                                  coex_sta->bt_info_c2h[i],
-                                  coex_sta->bt_info_c2h_cnt[i]);
+                                "\r\n %-35s = %7ph(%d)",
+                                glbt_info_src_8723b_2ant[i],
+                                coex_sta->bt_info_c2h[i],
+                                coex_sta->bt_info_c2h_cnt[i]);
                }
        }
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
-                  "PS state, IPS/LPS",
-                  ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
-                  ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+                "PS state, IPS/LPS",
+                ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+                ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
        btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
 
        /* Sw mechanism */
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s", "============[Sw mechanism]============");
+                "\r\n %-35s", "============[Sw mechanism]============");
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d ",
-                  "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
-                  coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
+                "SM1[ShRf/ LpRA/ LimDig]", coex_dm->cur_rf_rx_lpf_shrink,
+                coex_dm->cur_low_penalty_ra, coex_dm->limited_dig);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d/ %d(0x%x) ",
-                  "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
-                  coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
-                  coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+                "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+                coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+                coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
 
        /* Fw mechanism */
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                  "============[Fw mechanism]============");
+                "============[Fw mechanism]============");
 
        ps_tdma_case = coex_dm->cur_ps_tdma;
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %5ph case-%d (auto:%d)",
-                  "PS TDMA", coex_dm->ps_tdma_para,
-                  ps_tdma_case, coex_dm->auto_tdma_adjust);
+                "\r\n %-35s = %5ph case-%d (auto:%d)",
+                "PS TDMA", coex_dm->ps_tdma_para,
+                ps_tdma_case, coex_dm->auto_tdma_adjust);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d ",
-                  "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr,
-                  coex_dm->cur_ignore_wlan_act);
+                "DecBtPwr/ IgnWlanAct", coex_dm->cur_dec_bt_pwr,
+                coex_dm->cur_ignore_wlan_act);
 
        /* Hw setting */
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                  "============[Hw setting]============");
+                "============[Hw setting]============");
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
-                  "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
+                "RF-A, 0x1e initVal", coex_dm->bt_rf0x1e_backup);
 
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x880);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                  "0x778/0x880[29:25]", u8tmp[0],
-                  (u32tmp[0]&0x3e000000) >> 25);
+                "0x778/0x880[29:25]", u8tmp[0],
+                (u32tmp[0] & 0x3e000000) >> 25);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x948);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x67);
        u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x765);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                  "0x948/ 0x67[5] / 0x765",
-                  u32tmp[0], ((u8tmp[0]&0x20) >> 5), u8tmp[1]);
+                "0x948/ 0x67[5] / 0x765",
+                u32tmp[0], ((u8tmp[0] & 0x20) >> 5), u8tmp[1]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x92c);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x930);
        u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x944);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                  "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
-                  u32tmp[0]&0x3, u32tmp[1]&0xff, u32tmp[2]&0x3);
+                "0x92c[1:0]/ 0x930[7:0]/0x944[1:0]",
+                u32tmp[0] & 0x3, u32tmp[1] & 0xff, u32tmp[2] & 0x3);
 
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x39);
        u8tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x40);
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
        u8tmp[2] = btcoexist->btc_read_1byte(btcoexist, 0x64);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
-                  "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
-                  ((u8tmp[0] & 0x8)>>3), u8tmp[1],
-                  ((u32tmp[0]&0x01800000)>>23), u8tmp[2]&0x1);
+                "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+                "0x38[11]/0x40/0x4c[24:23]/0x64[0]",
+                ((u8tmp[0] & 0x8) >> 3), u8tmp[1],
+                ((u32tmp[0] & 0x01800000) >> 23), u8tmp[2] & 0x1);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                  "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
+                "0x550(bcn ctrl)/0x522", u32tmp[0], u8tmp[0]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x49c);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                  "0xc50(dig)/0x49c(null-drop)", u32tmp[0]&0xff, u8tmp[0]);
+                "0xc50(dig)/0x49c(null-drop)", u32tmp[0] & 0xff, u8tmp[0]);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xda0);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0xda4);
@@ -3286,24 +3332,24 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
        fa_cck = (u8tmp[0] << 8) + u8tmp[1];
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                  "OFDM-CCA/OFDM-FA/CCK-FA",
-                  u32tmp[0]&0xffff, fa_ofdm, fa_cck);
+                "OFDM-CCA/OFDM-FA/CCK-FA",
+                u32tmp[0] & 0xffff, fa_ofdm, fa_cck);
 
        u32tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
        u32tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
        u32tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
        u8tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
-                  "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
-                  u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
+                "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+                "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+                u32tmp[0], u32tmp[1], u32tmp[2], u8tmp[0]);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                  "0x770(high-pri rx/tx)",
-                  coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+                "0x770(high-pri rx/tx)",
+                coex_sta->high_priority_rx, coex_sta->high_priority_tx);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                  "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
-                  coex_sta->low_priority_tx);
+                "0x774(low-pri rx/tx)", coex_sta->low_priority_rx,
+                coex_sta->low_priority_tx);
 #if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 1)
        btc8723b2ant_monitor_bt_ctr(btcoexist);
 #endif
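
The status dump above leans on the kernel's "%*ph" printk extension (seen here as %3ph, %5ph and %7ph): a literal byte count before "ph" prints that many bytes of a buffer as space-separated hex, which is why coex_dm->wifi_chnl_info and ps_tdma_para need no manual hex loop. A small sketch of the idiom, with a hypothetical buffer and pr_info() standing in for RT_TRACE:

#include <linux/kernel.h>

static void demo_hex_dump(void)
{
	u8 chnl_info[3] = { 0x01, 0x0b, 0x14 };

	/* "%3ph" emits exactly three bytes: "01 0b 14" */
	pr_info("wifi chnl info = %3ph\n", chnl_info);
}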
@@ -3313,16 +3359,18 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
 
 void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (BTC_IPS_ENTER == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], IPS ENTER notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], IPS ENTER notify\n");
                coex_sta->under_ips = true;
                btc8723b2ant_wifioff_hwcfg(btcoexist);
                btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
                btc8723b2ant_coex_alloff(btcoexist);
        } else if (BTC_IPS_LEAVE == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], IPS LEAVE notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], IPS LEAVE notify\n");
                coex_sta->under_ips = false;
                ex_btc8723b2ant_init_hwconfig(btcoexist);
                btc8723b2ant_init_coex_dm(btcoexist);
@@ -3332,50 +3380,57 @@ void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 
 void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (BTC_LPS_ENABLE == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], LPS ENABLE notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], LPS ENABLE notify\n");
                coex_sta->under_lps = true;
        } else if (BTC_LPS_DISABLE == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], LPS DISABLE notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], LPS DISABLE notify\n");
                coex_sta->under_lps = false;
        }
 }
 
 void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (BTC_SCAN_START == type)
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], SCAN START notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], SCAN START notify\n");
        else if (BTC_SCAN_FINISH == type)
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], SCAN FINISH notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], SCAN FINISH notify\n");
 }
 
 void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (BTC_ASSOCIATE_START == type)
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], CONNECT START notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CONNECT START notify\n");
        else if (BTC_ASSOCIATE_FINISH == type)
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], CONNECT FINISH notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CONNECT FINISH notify\n");
 }
 
 void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
                                         u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[3] = {0};
        u32 wifi_bw;
        u8 wifi_central_chnl;
 
        if (BTC_MEDIA_CONNECT == type)
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], MEDIA connect notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], MEDIA connect notify\n");
        else
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], MEDIA disconnect notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], MEDIA disconnect notify\n");
 
        /* only 2.4G we need to inform bt the chnl mask */
        btcoexist->btc_get(btcoexist,
@@ -3396,10 +3451,10 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
        coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
        coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], FW write 0x66=0x%x\n",
-                   h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
-                   h2c_parameter[2]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], FW write 0x66=0x%x\n",
+                h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+                h2c_parameter[2]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
@@ -3407,14 +3462,17 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
 void ex_btc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
                                           u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (type == BTC_PACKET_DHCP)
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], DHCP Packet notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], DHCP Packet notify\n");
 }
 
 void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
                                    u8 *tmpbuf, u8 length)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 bt_info = 0;
        u8 i, rsp_source = 0;
        bool bt_busy = false, limited_dig = false;
@@ -3427,24 +3485,24 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
                rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
        coex_sta->bt_info_c2h_cnt[rsp_source]++;
 
-       btc_iface_dbg(INTF_NOTIFY,
-                     "[BTCoex], Bt info[%d], length=%d, hex data=[",
-                     rsp_source, length);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Bt info[%d], length=%d, hex data=[",
+                rsp_source, length);
        for (i = 0; i < length; i++) {
                coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
                if (i == 1)
                        bt_info = tmpbuf[i];
                if (i == length-1)
-                       btc_iface_dbg(INTF_NOTIFY,
-                                     "0x%02x]\n", tmpbuf[i]);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "0x%02x]\n", tmpbuf[i]);
                else
-                       btc_iface_dbg(INTF_NOTIFY,
-                                     "0x%02x, ", tmpbuf[i]);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "0x%02x, ", tmpbuf[i]);
        }
 
        if (btcoexist->manual_control) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
                return;
        }
 
@@ -3462,8 +3520,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
                     because bt is reset and loss of the info.
                 */
                if ((coex_sta->bt_info_ext & BIT1)) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
                        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
                                           &wifi_connected);
                        if (wifi_connected)
@@ -3477,8 +3535,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
                }
 
                if ((coex_sta->bt_info_ext & BIT3)) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
                        btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC,
                                                     false);
                } else {
@@ -3531,26 +3589,26 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
 
        if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
                coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
        /* connection exists but no busy */
        } else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) {
                coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
        } else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
                   (bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) {
                coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
        } else if (bt_info&BT_INFO_8723B_2ANT_B_ACL_BUSY) {
                coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
        } else {
                coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
        }
 
        if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3573,7 +3631,9 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
 
 void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
 {
-       btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
 
        btc8723b2ant_wifioff_hwcfg(btcoexist);
        btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
@@ -3582,36 +3642,37 @@ void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
 
 void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
        static u8 dis_ver_info_cnt;
        u32 fw_ver = 0, bt_patch_ver = 0;
 
-       btc_alg_dbg(ALGO_TRACE,
-                   "[BTCoex], ==========================Periodical===========================\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], ==========================Periodical===========================\n");
 
        if (dis_ver_info_cnt <= 5) {
                dis_ver_info_cnt += 1;
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], ****************************************************************\n");
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], ****************************************************************\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
                              board_info->pg_ant_num,
                              board_info->btdm_ant_num,
                              board_info->btdm_ant_pos);
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
                              stack_info->profile_notified ? "Yes" : "No",
                              stack_info->hci_version);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
                                   &bt_patch_ver);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
                              glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
                              fw_ver, bt_patch_ver, bt_patch_ver);
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], ****************************************************************\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], ****************************************************************\n");
        }
 
 #if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
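
Every hunk above, and in the 8821A-1ant file that follows, applies one mechanical conversion: the handler gains a local struct rtl_priv pointer taken from btcoexist->adapter, and each btc_iface_dbg()/btc_alg_dbg() call becomes RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, ...) with the format string kept intact. A minimal before/after sketch of that shape, assuming the driver's usual headers; the handler itself is hypothetical:

/* Before: interface-level debug macro, no per-adapter context. */
static void example_notify_old(struct btc_coexist *btcoexist, u8 type)
{
	btc_iface_dbg(INTF_NOTIFY, "[BTCoex], example notify\n");
}

/* After: routed through the rtlwifi trace facility, so output is gated
 * by this adapter's COMP_BT_COEXIST/DBG_LOUD debug settings.
 */
static void example_notify_new(struct btc_coexist *btcoexist, u8 type)
{
	struct rtl_priv *rtlpriv = btcoexist->adapter;

	RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
		 "[BTCoex], example notify\n");
}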
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index 3ce47c70bfa49935cc1c98140a9e2d1827c6a959..8b689ed9a629bce30472d2b61adb562d24c071af 100644 (file)
@@ -62,9 +62,11 @@ static u32   glcoex_ver_8821a_1ant = 0x41;
  * local function start with halbtc8821a1ant_
  *============================================================
  */
-static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
+static u8 halbtc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist,
+                                       u8 level_num, u8 rssi_thresh,
                                        u8 rssi_thresh1)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        long    bt_rssi = 0;
        u8      bt_rssi_state = coex_sta->pre_bt_rssi_state;
 
@@ -76,28 +78,28 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= (rssi_thresh +
                                        BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to High\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at Low\n");
                        }
                } else {
                        if (bt_rssi < rssi_thresh) {
                                bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to Low\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                   "[BTCoex], BT Rssi thresh error!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], BT Rssi thresh error!!\n");
                        return coex_sta->pre_bt_rssi_state;
                }
 
@@ -106,12 +108,12 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= (rssi_thresh +
                                        BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to Medium\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at Low\n");
                        }
                } else if ((coex_sta->pre_bt_rssi_state ==
                           BTC_RSSI_STATE_MEDIUM) ||
@@ -120,26 +122,26 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >= (rssi_thresh1 +
                                        BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to High\n");
                        } else if (bt_rssi < rssi_thresh) {
                                bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to Low\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at Medium\n");
                        }
                } else {
                        if (bt_rssi < rssi_thresh1) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to Medium\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at High\n");
                        }
                }
        }
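
The branches above implement threshold hysteresis rather than a plain comparison: a state that is currently Low only switches High once the RSSI clears rssi_thresh plus BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT, while a High state only drops once the RSSI falls below the bare rssi_thresh, so readings hovering near the threshold cannot make the state flap between log lines. A condensed two-level sketch of that logic, with shortened names and an illustrative tolerance value:

enum rssi_state { RSSI_LOW, RSSI_HIGH };

static enum rssi_state next_state(enum rssi_state prev, long rssi, long thresh)
{
	const long tol = 2;	/* stands in for BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT */

	if (prev == RSSI_LOW)	/* must clear thresh + tol to rise */
		return (rssi >= thresh + tol) ? RSSI_HIGH : RSSI_LOW;
	return (rssi < thresh) ? RSSI_LOW : RSSI_HIGH;	/* must drop below thresh to fall */
}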
@@ -152,6 +154,7 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
                                        u8 index, u8 level_num, u8 rssi_thresh,
                                        u8 rssi_thresh1)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        long    wifi_rssi = 0;
        u8      wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
 
@@ -165,28 +168,28 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
                        if (wifi_rssi >=
                            (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to High\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at Low\n");
                        }
                } else {
                        if (wifi_rssi < rssi_thresh) {
                                wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to Low\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                   "[BTCoex], wifi RSSI thresh error!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], wifi RSSI thresh error!!\n");
                        return coex_sta->pre_wifi_rssi_state[index];
                }
 
@@ -197,12 +200,12 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
                        if (wifi_rssi >=
                            (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to Medium\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at Low\n");
                        }
                } else if ((coex_sta->pre_wifi_rssi_state[index] ==
                        BTC_RSSI_STATE_MEDIUM) ||
@@ -212,26 +215,26 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
                            (rssi_thresh1 +
                             BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to High\n");
                        } else if (wifi_rssi < rssi_thresh) {
                                wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to Low\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at Medium\n");
                        }
                } else {
                        if (wifi_rssi < rssi_thresh1) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to Medium\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at High\n");
                        }
                }
        }
@@ -414,15 +417,16 @@ static void halbtc8821a1ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
 
 static void halbtc8821a1ant_query_bt_info(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        coex_sta->c2h_bt_info_req_sent = true;
 
        h2c_parameter[0] |= BIT0;       /* trigger*/
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
-                   h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+                h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
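
halbtc8821a1ant_query_bt_info() above shows the host-to-card (H2C) pattern used throughout these files: build a small parameter buffer, set the trigger bit, log the write, then hand the buffer to firmware through the btc_fill_h2c callback with the command ID (0x61 for the BT-info query). A minimal sketch of that call shape, with a hypothetical wrapper name and the command ID made a parameter:

/* Hypothetical helper: one-byte H2C payload with BIT0 as the trigger. */
static void send_h2c_trigger(struct btc_coexist *btcoexist, u8 cmd_id)
{
	u8 h2c_parameter[1] = {0};

	h2c_parameter[0] |= BIT0;
	btcoexist->btc_fill_h2c(btcoexist, cmd_id, 1, h2c_parameter);
}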
@@ -485,6 +489,7 @@ static void halbtc8821a1ant_update_bt_link_info(struct btc_coexist *btcoexist)
 
 static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
        bool    bt_hs_on = false;
        u8      algorithm = BT_8821A_1ANT_COEX_ALGO_UNDEFINED;
@@ -493,8 +498,8 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
 
        if (!bt_link_info->bt_link_exist) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], No BT link exists!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], No BT link exists!!!\n");
                return algorithm;
        }
 
@@ -509,26 +514,28 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
 
        if (num_of_diff_profile == 1) {
                if (bt_link_info->sco_exist) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], BT Profile = SCO only\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], BT Profile = SCO only\n");
                        algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
                } else {
                        if (bt_link_info->hid_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], BT Profile = HID only\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Profile = HID only\n");
                                algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], BT Profile = A2DP only\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Profile = A2DP only\n");
                                algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP;
                        } else if (bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = PAN(HS) only\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = PAN(HS) only\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANHS;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = PAN(EDR) only\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = PAN(EDR) only\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR;
                                }
                        }
@@ -536,50 +543,56 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
        } else if (num_of_diff_profile == 2) {
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], BT Profile = SCO + HID\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Profile = SCO + HID\n");
                                algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
                                algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
                        } else if (bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = SCO + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = SCO + PAN(HS)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
                                }
                        }
                } else {
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], BT Profile = HID + A2DP\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Profile = HID + A2DP\n");
                                algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
                        } else if (bt_link_info->hid_exist &&
                                   bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = HID + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = HID + PAN(HS)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = HID + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = HID + PAN(EDR)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (bt_link_info->pan_exist &&
                                   bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP;
                                }
                        }
@@ -588,29 +601,33 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
                if (bt_link_info->sco_exist) {
                        if (bt_link_info->hid_exist &&
                            bt_link_info->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
                                algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
                        } else if (bt_link_info->hid_exist &&
                                bt_link_info->pan_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (bt_link_info->pan_exist &&
                                bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
                                }
                        }
@@ -619,12 +636,14 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
                                }
                        }
@@ -635,12 +654,14 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
                            bt_link_info->pan_exist &&
                            bt_link_info->a2dp_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
 
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
                                        algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
                                }
                        }
@@ -652,6 +673,7 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
 static void halbtc8821a1ant_set_bt_auto_report(struct btc_coexist *btcoexist,
                                               bool enable_auto_report)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        h2c_parameter[0] = 0;
@@ -659,10 +681,10 @@ static void halbtc8821a1ant_set_bt_auto_report(struct btc_coexist *btcoexist,
        if (enable_auto_report)
                h2c_parameter[0] |= BIT0;
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
-                   (enable_auto_report ? "Enabled!!" : "Disabled!!"),
-                   h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+                (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+                h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
 }
@@ -671,14 +693,17 @@ static void halbtc8821a1ant_bt_auto_report(struct btc_coexist *btcoexist,
                                           bool force_exec,
                                           bool enable_auto_report)
 {
-       btc_alg_dbg(ALGO_TRACE_FW, "[BTCoex], %s BT Auto report = %s\n",
-                   (force_exec ? "force to" : ""), ((enable_auto_report) ?
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s BT Auto report = %s\n",
+                (force_exec ? "force to" : ""), ((enable_auto_report) ?
                                                     "Enabled" : "Disabled"));
        coex_dm->cur_bt_auto_report = enable_auto_report;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
                            coex_dm->pre_bt_auto_report,
                            coex_dm->cur_bt_auto_report);
 
@@ -693,6 +718,7 @@ static void halbtc8821a1ant_bt_auto_report(struct btc_coexist *btcoexist,
 static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist,
                                            bool low_penalty_ra)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[6] = {0};
 
        h2c_parameter[0] = 0x6; /* opCode, 0x6= Retry_Penalty*/
@@ -706,9 +732,9 @@ static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist,
                h2c_parameter[5] = 0xf9;        /*MCS5 or OFDM36*/
        }
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], set WiFi Low-Penalty Retry: %s",
-                   (low_penalty_ra ? "ON!!" : "OFF!!"));
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set WiFi Low-Penalty Retry: %s",
+                (low_penalty_ra ? "ON!!" : "OFF!!"));
 
        btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
 }
@@ -731,20 +757,22 @@ static void halbtc8821a1ant_set_coex_table(struct btc_coexist *btcoexist,
                                           u32 val0x6c0, u32 val0x6c4,
                                           u32 val0x6c8, u8 val0x6cc)
 {
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
        btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
        btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
        btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
        btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
 
@@ -752,8 +780,10 @@ static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist,
                                       bool force_exec, u32 val0x6c0,
                                       u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
 {
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
                    (force_exec ? "force to" : ""), val0x6c0, val0x6c4,
                    val0x6c8, val0x6cc);
        coex_dm->cur_val_0x6c0 = val0x6c0;
@@ -822,14 +852,15 @@ static void halbtc8821a1ant_coex_table_with_type(struct btc_coexist *btcoexist,
 static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
                                                bool enable)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8      h2c_parameter[1] = {0};
 
        if (enable)
                h2c_parameter[0] |= BIT0;       /* function enable*/
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
-                   h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+                h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
 }
@@ -837,16 +868,18 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
 static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
                                            bool force_exec, bool enable)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s turn Ignore WlanAct %s\n",
-                   (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn Ignore WlanAct %s\n",
+                (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
        coex_dm->cur_ignore_wlan_act = enable;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
-                           coex_dm->pre_ignore_wlan_act,
-                           coex_dm->cur_ignore_wlan_act);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+                        coex_dm->pre_ignore_wlan_act,
+                        coex_dm->cur_ignore_wlan_act);
 
                if (coex_dm->pre_ignore_wlan_act ==
                    coex_dm->cur_ignore_wlan_act)
@@ -861,6 +894,7 @@ static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist,
                                          u8 byte1, u8 byte2, u8 byte3,
                                          u8 byte4, u8 byte5)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[5] = {0};
 
        h2c_parameter[0] = byte1;
@@ -875,13 +909,13 @@ static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist,
        coex_dm->ps_tdma_para[3] = byte4;
        coex_dm->ps_tdma_para[4] = byte5;
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
-                   h2c_parameter[0],
-                   h2c_parameter[1] << 24 |
-                   h2c_parameter[2] << 16 |
-                   h2c_parameter[3] << 8 |
-                   h2c_parameter[4]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+                h2c_parameter[0],
+                h2c_parameter[1] << 24 |
+                h2c_parameter[2] << 16 |
+                h2c_parameter[3] << 8 |
+                h2c_parameter[4]);
        btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
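
The log line above folds the last four H2C bytes into one 32-bit value so the whole five-byte command prints as "0x%x%08x". A self-contained sketch of that packing; the buffer contents are invented:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t h2c[5] = { 0x51, 0x20, 0x03, 0x10, 0x11 };  /* sample PS-TDMA bytes */
	uint32_t tail = (uint32_t)h2c[1] << 24 |
			(uint32_t)h2c[2] << 16 |
			(uint32_t)h2c[3] << 8 |
			h2c[4];

	/* byte1 on its own, bytes 2..5 as one word, as in the hunk above */
	printf("[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n", h2c[0], tail);
	return 0;
}
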
 
@@ -898,22 +932,24 @@ static void halbtc8821a1ant_set_lps_rpwm(struct btc_coexist *btcoexist,
 static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
                                     bool force_exec, u8 lps_val, u8 rpwm_val)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
-                   (force_exec ? "force to" : ""), lps_val, rpwm_val);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+                (force_exec ? "force to" : ""), lps_val, rpwm_val);
        coex_dm->cur_lps = lps_val;
        coex_dm->cur_rpwm = rpwm_val;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
-                           coex_dm->cur_lps, coex_dm->cur_rpwm);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
+                        coex_dm->cur_lps, coex_dm->cur_rpwm);
 
                if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
                    (coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
-                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                   "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
-                                   coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
+                                coex_dm->pre_rpwm, coex_dm->cur_rpwm);
 
                        return;
                }
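
halbtc8821a1ant_lps_rpwm, like most setters in this file, records the value last pushed to firmware and returns early when force_exec is false and nothing changed. A hedged sketch of that pre/cur caching pattern, with invented names and a single field instead of the lps/rpwm pair:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

struct coex_cache {
	uint8_t pre_lps, cur_lps;
};

static void push_lps(struct coex_cache *c, bool force_exec, uint8_t lps_val)
{
	c->cur_lps = lps_val;

	if (!force_exec && c->pre_lps == c->cur_lps) {
		printf("skip: lps already 0x%x\n", lps_val);
		return;                  /* unchanged, no firmware write */
	}

	printf("write firmware: lps = 0x%x\n", lps_val);
	c->pre_lps = c->cur_lps;         /* remember what firmware now holds */
}

int main(void)
{
	struct coex_cache c = { 0, 0 };

	push_lps(&c, false, 0x5);        /* first call writes */
	push_lps(&c, false, 0x5);        /* same value, skipped */
	push_lps(&c, true, 0x5);         /* forced, writes anyway */
	return 0;
}
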
@@ -927,8 +963,10 @@ static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
 static void halbtc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist,
                                         bool low_penalty_ra)
 {
-       btc_alg_dbg(ALGO_BT_MONITOR,
-                   "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
 
        halbtc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
 }
@@ -1017,6 +1055,7 @@ static void halbtc8821a1ant_set_ant_path(struct btc_coexist *btcoexist,
 static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
                                    bool force_exec, bool turn_on, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 rssi_adjust_val = 0;
 
        coex_dm->cur_ps_tdma_on = turn_on;
@@ -1024,13 +1063,13 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
 
        if (!force_exec) {
                if (coex_dm->cur_ps_tdma_on) {
-                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                   "[BTCoex], ********** TDMA(on, %d) **********\n",
-                                   coex_dm->cur_ps_tdma);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], ********** TDMA(on, %d) **********\n",
+                                coex_dm->cur_ps_tdma);
                } else {
-                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                   "[BTCoex], ********** TDMA(off, %d) **********\n",
-                                   coex_dm->cur_ps_tdma);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], ********** TDMA(off, %d) **********\n",
+                                coex_dm->cur_ps_tdma);
                }
                if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
                    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1232,6 +1271,7 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
 
 static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool    common = false, wifi_connected = false, wifi_busy = false;
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
@@ -1241,50 +1281,50 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
        if (!wifi_connected &&
            BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
            coex_dm->bt_status) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
                halbtc8821a1ant_sw_mechanism(btcoexist, false);
 
                common = true;
        } else if (wifi_connected &&
                   (BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
                    coex_dm->bt_status)) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi connected + BT non connected-idle!!\n");
                halbtc8821a1ant_sw_mechanism(btcoexist, false);
 
                common = true;
        } else if (!wifi_connected &&
                   (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
                    coex_dm->bt_status)) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
                halbtc8821a1ant_sw_mechanism(btcoexist, false);
 
                common = true;
        } else if (wifi_connected &&
                   (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
                   coex_dm->bt_status)) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Wifi connected + BT connected-idle!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi connected + BT connected-idle!!\n");
                halbtc8821a1ant_sw_mechanism(btcoexist, false);
 
                common = true;
        } else if (!wifi_connected &&
                   (BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE !=
                    coex_dm->bt_status)) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
                halbtc8821a1ant_sw_mechanism(btcoexist, false);
 
                common = true;
        } else {
                if (wifi_busy) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
                } else {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
                }
 
                common = false;
@@ -1296,13 +1336,14 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
 static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                                      u8 wifi_status)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        static long             up, dn, m, n, wait_count;
        /*0: no change, +1: increase WiFi duration, -1: decrease WiFi duration*/
        long                    result;
        u8                      retry_count = 0, bt_info_ext;
 
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], TdmaDurationAdjustForAcl()\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], TdmaDurationAdjustForAcl()\n");
 
        if ((BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
             wifi_status) ||
@@ -1330,8 +1371,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
 
        if (!coex_dm->auto_tdma_adjust) {
                coex_dm->auto_tdma_adjust = true;
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], first run TdmaDurationAdjust()!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], first run TdmaDurationAdjust()!!\n");
 
                halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
                coex_dm->tdma_adj_type = 2;
@@ -1366,8 +1407,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                                up = 0;
                                dn = 0;
                                result = 1;
-                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                           "[BTCoex], Increase wifi duration!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], Increase wifi duration!!\n");
                        }
                } else if (retry_count <= 3) {
                        /* <=3 retry in the last 2-second duration*/
@@ -1397,8 +1438,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                                dn = 0;
                                wait_count = 0;
                                result = -1;
-                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                           "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
                        }
                } else {
                        /* retry count > 3, if retry count > 3 happens once,
@@ -1419,8 +1460,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                        dn = 0;
                        wait_count = 0;
                        result = -1;
-                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                   "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
                }
 
                if (result == -1) {
@@ -1465,9 +1506,9 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                        }
                } else {
                        /*no change*/
-                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                   "[BTCoex], ********** TDMA(on, %d) **********\n",
-                                   coex_dm->cur_ps_tdma);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], ********** TDMA(on, %d) **********\n",
+                                coex_dm->cur_ps_tdma);
                }
 
                if (coex_dm->cur_ps_tdma != 1 &&
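
btc8821a1ant_tdma_dur_adj above is a hysteresis loop over 2-second windows: a run of windows without retries earns WiFi a longer TDMA slot, a retry spike shrinks it immediately, and light retry counts hold the current setting. A simplified standalone sketch; the thresholds, window data and names are illustrative, not the driver's exact tables:

#include <stdio.h>

int main(void)
{
	int duration = 2;       /* TDMA case number; larger means more WiFi airtime */
	int up = 0;             /* consecutive clean windows */
	int retries[] = { 0, 0, 0, 5, 1, 0 };
	unsigned int i;

	for (i = 0; i < sizeof(retries) / sizeof(retries[0]); i++) {
		if (retries[i] == 0) {
			if (++up >= 3) {         /* three clean windows in a row */
				up = 0;
				duration++;
				printf("window %u: increase wifi duration -> %d\n",
				       i, duration);
			}
		} else if (retries[i] > 3) {     /* heavy retries: back off now */
			up = 0;
			duration--;
			printf("window %u: decrease wifi duration -> %d\n",
			       i, duration);
		} else {
			up = 0;                  /* light retries: hold */
		}
	}
	return 0;
}
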
@@ -1566,6 +1607,7 @@ static void halbtc8821a1ant_action_wifi_only(struct btc_coexist *btcoexist)
 
 static void btc8821a1ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        static bool     pre_bt_disabled;
        static u32      bt_disable_cnt;
        bool            bt_active = true, bt_disabled = false;
@@ -1589,25 +1631,25 @@ static void btc8821a1ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
                bt_disabled = false;
                btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
                                   &bt_disabled);
-               btc_alg_dbg(ALGO_BT_MONITOR,
-                           "[BTCoex], BT is enabled !!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BT is enabled !!\n");
        } else {
                bt_disable_cnt++;
-               btc_alg_dbg(ALGO_BT_MONITOR,
-                           "[BTCoex], bt all counters = 0, %d times!!\n",
-                           bt_disable_cnt);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bt all counters = 0, %d times!!\n",
+                        bt_disable_cnt);
                if (bt_disable_cnt >= 2) {
                        bt_disabled = true;
                        btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
                                           &bt_disabled);
-                       btc_alg_dbg(ALGO_BT_MONITOR,
-                                   "[BTCoex], BT is disabled !!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], BT is disabled !!\n");
                        halbtc8821a1ant_action_wifi_only(btcoexist);
                }
        }
        if (pre_bt_disabled != bt_disabled) {
-               btc_alg_dbg(ALGO_BT_MONITOR,
-                           "[BTCoex], BT is from %s to %s!!\n",
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BT is from %s to %s!!\n",
                            (pre_bt_disabled ? "disabled" : "enabled"),
                            (bt_disabled ? "disabled" : "enabled"));
                pre_bt_disabled = bt_disabled;
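
btc8821a1ant_mon_bt_en_dis above debounces the decision: BT is declared disabled only after two consecutive polls in which every activity counter reads zero, so a single quiet sample cannot flip the state. A minimal sketch of the same debounce with invented sample data:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int bt_disable_cnt = 0;
	bool bt_disabled = false;
	int activity[] = { 1, 0, 0, 3 };    /* per-poll counter totals */
	unsigned int i;

	for (i = 0; i < sizeof(activity) / sizeof(activity[0]); i++) {
		if (activity[i] != 0) {
			bt_disable_cnt = 0;      /* any traffic resets the count */
			bt_disabled = false;
		} else if (++bt_disable_cnt >= 2) {
			bt_disabled = true;      /* two quiet polls in a row */
		}
		printf("poll %u: bt %s\n", i,
		       bt_disabled ? "disabled" : "enabled");
	}
	return 0;
}
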
@@ -1726,11 +1768,7 @@ static void btc8821a1ant_act_bt_sco_hid_only_busy(struct btc_coexist *btcoexist,
        /* tdma and coex table*/
        halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
 
-       if (BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
-           wifi_status)
-               halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
-       else
-               halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
+       halbtc8821a1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 1);
 }
 
 static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist,
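
The hunk above also removes an if/else whose two branches made the identical halbtc8821a1ant_coex_table_with_type(..., 1) call, and a later hunk does the same for halbtc8821a1ant_limited_tx: when both arms are byte-for-byte the same, the condition decides nothing and can go. A trivial before/after sketch with illustrative names:

#include <stdio.h>

static void coex_table_with_type(int type)
{
	printf("coex table type %d\n", type);
}

int main(void)
{
	int wifi_status = 1;

	/* before: the test is dead because both arms are identical */
	if (wifi_status == 0)
		coex_table_with_type(1);
	else
		coex_table_with_type(1);

	/* after: only the unconditional call remains */
	coex_table_with_type(1);
	return 0;
}
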
@@ -1740,7 +1778,7 @@ static void btc8821a1ant_act_wifi_con_bt_acl_busy(struct btc_coexist *btcoexist,
 
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
 
-       bt_rssi_state = halbtc8821a1ant_bt_rssi_state(2, 28, 0);
+       bt_rssi_state = halbtc8821a1ant_bt_rssi_state(btcoexist, 2, 28, 0);
 
        if (bt_link_info->hid_only) {
                /*HID*/
@@ -1879,19 +1917,20 @@ static void btc8821a1ant_act_wifi_conn_sp_pkt(struct btc_coexist *btcoexist)
 
 static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool    wifi_busy = false;
        bool    scan = false, link = false, roam = false;
        bool    under_4way = false;
 
-       btc_alg_dbg(ALGO_TRACE,
-                   "[BTCoex], CoexForWifiConnect()===>\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], CoexForWifiConnect()===>\n");
 
        btcoexist->btc_get(btcoexist,
                 BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under_4way);
        if (under_4way) {
                btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
                return;
        }
 
@@ -1900,8 +1939,8 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
        if (scan || link || roam) {
                halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
                return;
        }
 
@@ -1954,6 +1993,7 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
 
 static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8      algorithm = 0;
 
        algorithm = halbtc8821a1ant_action_algorithm(btcoexist);
@@ -1962,58 +2002,58 @@ static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
        if (!halbtc8821a1ant_is_common_action(btcoexist)) {
                switch (coex_dm->cur_algorithm) {
                case BT_8821A_1ANT_COEX_ALGO_SCO:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = SCO\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = SCO\n");
                        halbtc8821a1ant_action_sco(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_HID:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = HID\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = HID\n");
                        halbtc8821a1ant_action_hid(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_A2DP:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = A2DP\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = A2DP\n");
                        halbtc8821a1ant_action_a2dp(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
                        halbtc8821a1ant_action_a2dp_pan_hs(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_PANEDR:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = PAN(EDR)\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = PAN(EDR)\n");
                        halbtc8821a1ant_action_pan_edr(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_PANHS:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = HS mode\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = HS mode\n");
                        halbtc8821a1ant_action_pan_hs(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = PAN+A2DP\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = PAN+A2DP\n");
                        halbtc8821a1ant_action_pan_edr_a2dp(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_PANEDR_HID:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
                        halbtc8821a1ant_action_pan_edr_hid(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
                        btc8821a1ant_action_hid_a2dp_pan_edr(btcoexist);
                        break;
                case BT_8821A_1ANT_COEX_ALGO_HID_A2DP:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = HID+A2DP\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = HID+A2DP\n");
                        halbtc8821a1ant_action_hid_a2dp(btcoexist);
                        break;
                default:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action algorithm = coexist All Off!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action algorithm = coexist All Off!!\n");
                        /*halbtc8821a1ant_coex_all_off(btcoexist);*/
                        break;
                }
@@ -2023,6 +2063,7 @@ static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
 
 static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
        bool    wifi_connected = false, bt_hs_on = false;
        bool    increase_scan_dev_num = false;
@@ -2031,31 +2072,31 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        u8      wifi_rssi_state = BTC_RSSI_STATE_HIGH;
        bool    wifi_under_5g = false;
 
-       btc_alg_dbg(ALGO_TRACE,
-                   "[BTCoex], RunCoexistMechanism()===>\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], RunCoexistMechanism()===>\n");
 
        if (btcoexist->manual_control) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
                return;
        }
 
        if (btcoexist->stop_coex_dm) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
                return;
        }
 
        if (coex_sta->under_ips) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], wifi is under IPS !!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], wifi is under IPS !!!\n");
                return;
        }
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
        if (wifi_under_5g) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
                halbtc8821a1ant_coex_under_5g(btcoexist);
                return;
        }
@@ -2078,16 +2119,8 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
                        wifi_rssi_state =
                                 halbtc8821a1ant_WifiRssiState(btcoexist, 1, 2,
                                                               30, 0);
-                       if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                           (wifi_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                               halbtc8821a1ant_limited_tx(btcoexist,
-                                                          NORMAL_EXEC, 1, 1,
-                                                          1, 1);
-                       } else {
-                               halbtc8821a1ant_limited_tx(btcoexist,
-                                                          NORMAL_EXEC, 1, 1,
-                                                          1, 1);
-                       }
+                       halbtc8821a1ant_limited_tx(btcoexist,
+                                                  NORMAL_EXEC, 1, 1, 1, 1);
                } else {
                        halbtc8821a1ant_limited_tx(btcoexist, NORMAL_EXEC,
                                                   0, 0, 0, 0);
@@ -2121,8 +2154,8 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        if (!wifi_connected) {
                bool    scan = false, link = false, roam = false;
 
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], wifi is non connected-idle !!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], wifi is non connected-idle !!!\n");
 
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2151,11 +2184,12 @@ static void halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
 static void halbtc8821a1ant_init_hw_config(struct btc_coexist *btcoexist,
                                           bool back_up)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8      u1_tmp = 0;
        bool    wifi_under_5g = false;
 
-       btc_iface_dbg(INTF_INIT,
-                     "[BTCoex], 1Ant Init HW Config!!\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], 1Ant Init HW Config!!\n");
 
        if (back_up) {
                coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist,
@@ -2206,8 +2240,10 @@ void ex_halbtc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist)
 
 void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
-       btc_iface_dbg(INTF_INIT,
-                     "[BTCoex], Coex Mechanism Init!!\n");
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Coex Mechanism Init!!\n");
 
        btcoexist->stop_coex_dm = false;
 
@@ -2233,19 +2269,19 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
        u32 fw_ver = 0, bt_patch_ver = 0;
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n ============[BT Coexist info]============");
+                "\r\n ============[BT Coexist info]============");
 
        if (btcoexist->manual_control) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n ============[Under Manual Control]============");
+                        "\r\n ============[Under Manual Control]============");
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n ==========================================");
+                        "\r\n ==========================================");
        }
        if (btcoexist->stop_coex_dm) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n ============[Coex is STOPPED]============");
+                        "\r\n ============[Coex is STOPPED]============");
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n ==========================================");
+                        "\r\n ==========================================");
        }
 
        if (!board_info->bt_exist) {
@@ -2254,27 +2290,27 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
        }
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d/ %d/ %d",
-                  "Ant PG Num/ Ant Mech/ Ant Pos:",
-                  board_info->pg_ant_num,
-                  board_info->btdm_ant_num,
-                  board_info->btdm_ant_pos);
+                "\r\n %-35s = %d/ %d/ %d",
+                "Ant PG Num/ Ant Mech/ Ant Pos:",
+                board_info->pg_ant_num,
+                board_info->btdm_ant_num,
+                board_info->btdm_ant_pos);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
-                  ((stack_info->profile_notified) ? "Yes" : "No"),
-               stack_info->hci_version);
+                "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
+                ((stack_info->profile_notified) ? "Yes" : "No"),
+                stack_info->hci_version);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
                           &bt_patch_ver);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
-                  "CoexVer/ FwVer/ PatchVer",
-                  glcoex_ver_date_8821a_1ant,
-                  glcoex_ver_8821a_1ant,
-                  fw_ver, bt_patch_ver,
-                  bt_patch_ver);
+                "\r\n %-35s = %d_%x/ 0x%x/ 0x%x(%d)",
+                "CoexVer/ FwVer/ PatchVer",
+                glcoex_ver_date_8821a_1ant,
+                glcoex_ver_8821a_1ant,
+                fw_ver, bt_patch_ver,
+                bt_patch_ver);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION,
                           &bt_hs_on);
@@ -2283,27 +2319,27 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_HS_CHNL,
                           &wifi_hs_chnl);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d / %d(%d)",
-                  "Dot11 channel / HsChnl(HsMode)",
-                  wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
+                "\r\n %-35s = %d / %d(%d)",
+                "Dot11 channel / HsChnl(HsMode)",
+                wifi_dot11_chnl, wifi_hs_chnl, bt_hs_on);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %3ph ",
-                  "H2C Wifi inform bt chnl Info",
-                  coex_dm->wifi_chnl_info);
+                "\r\n %-35s = %3ph ",
+                "H2C Wifi inform bt chnl Info",
+                coex_dm->wifi_chnl_info);
 
        btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
        btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi",
-                  (int)wifi_rssi, (int)bt_hs_rssi);
+                "\r\n %-35s = %d/ %d", "Wifi rssi/ HS rssi",
+                (int)wifi_rssi, (int)bt_hs_rssi);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
-                  link, roam, scan);
+                "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
+                link, roam, scan);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G,
                           &wifi_under_5g);
@@ -2314,13 +2350,13 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_TRAFFIC_DIRECTION,
                           &wifi_traffic_dir);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %s / %s/ %s ", "Wifi status",
-                  (wifi_under_5g ? "5G" : "2.4G"),
-                  ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
-                  (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
-                  ((!wifi_busy) ? "idle" :
-                  ((BTC_WIFI_TRAFFIC_TX == wifi_traffic_dir) ?
-                  "uplink" : "downlink")));
+                "\r\n %-35s = %s / %s/ %s ", "Wifi status",
+                (wifi_under_5g ? "5G" : "2.4G"),
+                ((wifi_bw == BTC_WIFI_BW_LEGACY) ? "Legacy" :
+                (((wifi_bw == BTC_WIFI_BW_HT40) ? "HT40" : "HT20"))),
+                ((!wifi_busy) ? "idle" :
+                ((wifi_traffic_dir == BTC_WIFI_TRAFFIC_TX) ?
+                "uplink" : "downlink")));
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                   "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]",
                   ((btcoexist->bt_info.bt_disabled) ? ("disabled") :
@@ -2334,161 +2370,162 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
                   coex_sta->bt_rssi, coex_sta->bt_retry_cnt);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
-                  bt_link_info->sco_exist,
-                  bt_link_info->hid_exist,
-                  bt_link_info->pan_exist,
-                  bt_link_info->a2dp_exist);
+                "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
+                bt_link_info->sco_exist,
+                bt_link_info->hid_exist,
+                bt_link_info->pan_exist,
+                bt_link_info->a2dp_exist);
        btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_BT_LINK_INFO);
 
        bt_info_ext = coex_sta->bt_info_ext;
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %s",
-                  "BT Info A2DP rate",
-                  (bt_info_ext&BIT0) ?
-                  "Basic rate" : "EDR rate");
+                "\r\n %-35s = %s",
+                "BT Info A2DP rate",
+                (bt_info_ext & BIT0) ?
+                "Basic rate" : "EDR rate");
 
        for (i = 0; i < BT_INFO_SRC_8821A_1ANT_MAX; i++) {
                if (coex_sta->bt_info_c2h_cnt[i]) {
                        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                                  "\r\n %-35s = %7ph(%d)",
-                                  glbt_info_src_8821a_1ant[i],
-                                  coex_sta->bt_info_c2h[i],
-                                  coex_sta->bt_info_c2h_cnt[i]);
+                                "\r\n %-35s = %7ph(%d)",
+                                glbt_info_src_8821a_1ant[i],
+                                coex_sta->bt_info_c2h[i],
+                                coex_sta->bt_info_c2h_cnt[i]);
                }
        }
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %s/%s, (0x%x/0x%x)",
-                  "PS state, IPS/LPS, (lps/rpwm)",
-                  ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
-                  ((coex_sta->under_Lps ? "LPS ON" : "LPS OFF")),
-                  btcoexist->bt_info.lps_val,
-                  btcoexist->bt_info.rpwm_val);
+                "\r\n %-35s = %s/%s, (0x%x/0x%x)",
+                "PS state, IPS/LPS, (lps/rpwm)",
+                ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+                ((coex_sta->under_Lps ? "LPS ON" : "LPS OFF")),
+                btcoexist->bt_info.lps_val,
+                btcoexist->bt_info.rpwm_val);
        btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
 
        if (!btcoexist->manual_control) {
                /* Sw mechanism*/
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n %-35s", "============[Sw mechanism]============");
+                        "\r\n %-35s",
+                        "============[Sw mechanism]============");
 
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n %-35s = %d", "SM[LowPenaltyRA]",
-                          coex_dm->cur_low_penalty_ra);
+                        "\r\n %-35s = %d", "SM[LowPenaltyRA]",
+                        coex_dm->cur_low_penalty_ra);
 
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n %-35s = %s/ %s/ %d ",
-                          "DelBA/ BtCtrlAgg/ AggSize",
-                          (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
-                          (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
-                          btcoexist->bt_info.agg_buf_size);
+                        "\r\n %-35s = %s/ %s/ %d ",
+                        "DelBA/ BtCtrlAgg/ AggSize",
+                        (btcoexist->bt_info.reject_agg_pkt ? "Yes" : "No"),
+                        (btcoexist->bt_info.bt_ctrl_buf_size ? "Yes" : "No"),
+                        btcoexist->bt_info.agg_buf_size);
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n %-35s = 0x%x ", "Rate Mask",
-                          btcoexist->bt_info.ra_mask);
+                        "\r\n %-35s = 0x%x ", "Rate Mask",
+                        btcoexist->bt_info.ra_mask);
 
                /* Fw mechanism*/
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                          "============[Fw mechanism]============");
+                        "============[Fw mechanism]============");
 
                ps_tdma_case = coex_dm->cur_ps_tdma;
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n %-35s = %5ph case-%d (auto:%d)",
-                          "PS TDMA",
-                          coex_dm->ps_tdma_para,
-                          ps_tdma_case,
-                          coex_dm->auto_tdma_adjust);
+                        "\r\n %-35s = %5ph case-%d (auto:%d)",
+                        "PS TDMA",
+                        coex_dm->ps_tdma_para,
+                        ps_tdma_case,
+                        coex_dm->auto_tdma_adjust);
 
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n %-35s = 0x%x ",
-                          "Latest error condition(should be 0)",
+                        "\r\n %-35s = 0x%x ",
+                        "Latest error condition(should be 0)",
                           coex_dm->error_condition);
 
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n %-35s = %d ", "IgnWlanAct",
-                          coex_dm->cur_ignore_wlan_act);
+                        "\r\n %-35s = %d ", "IgnWlanAct",
+                        coex_dm->cur_ignore_wlan_act);
        }
 
        /* Hw setting*/
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s", "============[Hw setting]============");
+                "\r\n %-35s", "============[Hw setting]============");
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-                  "backup ARFR1/ARFR2/RL/AMaxTime",
-                  coex_dm->backup_arfr_cnt1,
-                  coex_dm->backup_arfr_cnt2,
-                  coex_dm->backup_retry_limit,
-                  coex_dm->backup_ampdu_max_time);
+                "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+                "backup ARFR1/ARFR2/RL/AMaxTime",
+                coex_dm->backup_arfr_cnt1,
+                coex_dm->backup_arfr_cnt2,
+                coex_dm->backup_retry_limit,
+                coex_dm->backup_ampdu_max_time);
 
        u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x430);
        u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x434);
        u2_tmp[0] = btcoexist->btc_read_2byte(btcoexist, 0x42a);
        u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x456);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
-                  "0x430/0x434/0x42a/0x456",
-                  u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]);
+                "\r\n %-35s = 0x%x/0x%x/0x%x/0x%x",
+                "0x430/0x434/0x42a/0x456",
+                u4_tmp[0], u4_tmp[1], u2_tmp[0], u1_tmp[0]);
 
        u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
        u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc58);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]",
-                  u1_tmp[0], (u4_tmp[0]&0x3e000000) >> 25);
+                "\r\n %-35s = 0x%x/ 0x%x", "0x778/ 0xc58[29:25]",
+                u1_tmp[0], (u4_tmp[0] & 0x3e000000) >> 25);
 
        u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = 0x%x", "0x8db[6:5]",
-                  ((u1_tmp[0]&0x60)>>5));
+                "\r\n %-35s = 0x%x", "0x8db[6:5]",
+                ((u1_tmp[0] & 0x60) >> 5));
 
        u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x975);
        u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                  "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]",
-                  (u4_tmp[0] & 0x30000000)>>28,
-                   u4_tmp[0] & 0xff,
-                   u1_tmp[0] & 0x3);
+                "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                "0xcb4[29:28]/0xcb4[7:0]/0x974[9:8]",
+                (u4_tmp[0] & 0x30000000) >> 28,
+                u4_tmp[0] & 0xff,
+                u1_tmp[0] & 0x3);
 
        u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
        u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
        u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x64);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                  "0x40/0x4c[24:23]/0x64[0]",
-                  u1_tmp[0], ((u4_tmp[0]&0x01800000)>>23), u1_tmp[1]&0x1);
+                "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
+                "0x40/0x4c[24:23]/0x64[0]",
+                u1_tmp[0], ((u4_tmp[0] & 0x01800000) >> 23), u1_tmp[1] & 0x1);
 
        u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
        u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522",
-                  u4_tmp[0], u1_tmp[0]);
+                "\r\n %-35s = 0x%x/ 0x%x", "0x550(bcn ctrl)/0x522",
+                u4_tmp[0], u1_tmp[0]);
 
        u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = 0x%x", "0xc50(dig)",
-                  u4_tmp[0]&0xff);
+                "\r\n %-35s = 0x%x", "0xc50(dig)",
+                u4_tmp[0] & 0xff);
 
        u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48);
        u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5d);
        u1_tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA",
-                  u4_tmp[0], (u1_tmp[0]<<8) + u1_tmp[1]);
+                "\r\n %-35s = 0x%x/ 0x%x", "OFDM-FA/ CCK-FA",
+                u4_tmp[0], (u1_tmp[0] << 8) + u1_tmp[1]);
 
        u4_tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
        u4_tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
        u4_tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
        u1_tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
-                  "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
+                "\r\n %-35s = 0x%x/ 0x%x/ 0x%x/ 0x%x",
+                "0x6c0/0x6c4/0x6c8/0x6cc(coexTable)",
                   u4_tmp[0], u4_tmp[1], u4_tmp[2], u1_tmp[0]);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)",
-                  coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+                "\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)",
+                coex_sta->high_priority_rx, coex_sta->high_priority_tx);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)",
-                  coex_sta->low_priority_rx, coex_sta->low_priority_tx);
+                "\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)",
+                coex_sta->low_priority_rx, coex_sta->low_priority_tx);
 #if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 1)
        halbtc8821a1ant_monitor_bt_ctr(btcoexist);
 #endif
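
The whole status dump above leans on the "%-35s" conversion: each label is left-justified in a fixed 35-column field, which is what keeps the value column aligned from row to row. A tiny sketch of the effect:

#include <stdio.h>

int main(void)
{
	/* same format as the dump above; register values are invented */
	printf("\r\n %-35s = %d/ %d", "0x770(high-pri rx/tx)", 12, 34);
	printf("\r\n %-35s = %d/ %d", "0x774(low-pri rx/tx)", 5, 6);
	printf("\n");
	return 0;
}
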
@@ -2497,12 +2534,14 @@ void ex_halbtc8821a1ant_display_coex_info(struct btc_coexist *btcoexist)
 
 void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (btcoexist->manual_control || btcoexist->stop_coex_dm)
                return;
 
        if (BTC_IPS_ENTER == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], IPS ENTER notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], IPS ENTER notify\n");
                coex_sta->under_ips = true;
                halbtc8821a1ant_set_ant_path(btcoexist,
                                             BTC_ANT_PATH_BT, false, true);
@@ -2511,8 +2550,8 @@ void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
                halbtc8821a1ant_coex_table_with_type(btcoexist,
                                                     NORMAL_EXEC, 0);
        } else if (BTC_IPS_LEAVE == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], IPS LEAVE notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], IPS LEAVE notify\n");
                coex_sta->under_ips = false;
 
                halbtc8821a1ant_run_coexist_mechanism(btcoexist);
@@ -2521,22 +2560,25 @@ void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 
 void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (btcoexist->manual_control || btcoexist->stop_coex_dm)
                return;
 
        if (BTC_LPS_ENABLE == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], LPS ENABLE notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], LPS ENABLE notify\n");
                coex_sta->under_Lps = true;
        } else if (BTC_LPS_DISABLE == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], LPS DISABLE notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], LPS DISABLE notify\n");
                coex_sta->under_Lps = false;
        }
 }
 
 void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool wifi_connected = false, bt_hs_on = false;
 
        if (btcoexist->manual_control ||
@@ -2560,8 +2602,8 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
        }
 
        if (BTC_SCAN_START == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], SCAN START notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], SCAN START notify\n");
                if (!wifi_connected) {
                        /* non-connected scan*/
                        btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
@@ -2570,8 +2612,8 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
                        halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
                }
        } else if (BTC_SCAN_FINISH == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], SCAN FINISH notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], SCAN FINISH notify\n");
                if (!wifi_connected) {
                        /* non-connected scan*/
                        halbtc8821a1ant_action_wifi_not_connected(btcoexist);
@@ -2583,6 +2625,7 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 
 void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool    wifi_connected = false, bt_hs_on = false;
 
        if (btcoexist->manual_control ||
@@ -2600,12 +2643,12 @@ void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
        }
 
        if (BTC_ASSOCIATE_START == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], CONNECT START notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CONNECT START notify\n");
                btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
        } else if (BTC_ASSOCIATE_FINISH == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], CONNECT FINISH notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CONNECT FINISH notify\n");
 
                btcoexist->btc_get(btcoexist,
                         BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
@@ -2621,6 +2664,7 @@ void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
                                            u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[3] = {0};
        u32 wifi_bw;
        u8 wifi_central_chnl;
@@ -2631,11 +2675,11 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
                return;
 
        if (BTC_MEDIA_CONNECT == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], MEDIA connect notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], MEDIA connect notify\n");
        } else {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], MEDIA disconnect notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], MEDIA disconnect notify\n");
        }
 
        /* only 2.4G we need to inform bt the chnl mask*/
@@ -2658,11 +2702,11 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
        coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
        coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], FW write 0x66 = 0x%x\n",
-                   h2c_parameter[0] << 16 |
-                   h2c_parameter[1] << 8 |
-                   h2c_parameter[2]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], FW write 0x66 = 0x%x\n",
+                h2c_parameter[0] << 16 |
+                h2c_parameter[1] << 8 |
+                h2c_parameter[2]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
@@ -2670,6 +2714,7 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
 void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
                                              u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool bt_hs_on = false;
 
        if (btcoexist->manual_control ||
@@ -2690,8 +2735,8 @@ void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
 
        if (BTC_PACKET_DHCP == type ||
            BTC_PACKET_EAPOL == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], special Packet(%d) notify\n", type);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], special Packet(%d) notify\n", type);
                btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
        }
 }
@@ -2699,6 +2744,7 @@ void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
 void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
                                       u8 *tmp_buf, u8 length)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 bt_info = 0;
        u8 i, rsp_source = 0;
        bool wifi_connected = false;
@@ -2715,19 +2761,19 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
                rsp_source = BT_INFO_SRC_8821A_1ANT_WIFI_FW;
        coex_sta->bt_info_c2h_cnt[rsp_source]++;
 
-       btc_iface_dbg(INTF_NOTIFY,
-                     "[BTCoex], Bt info[%d], length = %d, hex data = [",
-                     rsp_source, length);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Bt info[%d], length = %d, hex data = [",
+                rsp_source, length);
        for (i = 0; i < length; i++) {
                coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
                if (i == 1)
                        bt_info = tmp_buf[i];
                if (i == length-1) {
-                       btc_iface_dbg(INTF_NOTIFY,
-                                     "0x%02x]\n", tmp_buf[i]);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "0x%02x]\n", tmp_buf[i]);
                } else {
-                       btc_iface_dbg(INTF_NOTIFY,
-                                     "0x%02x, ", tmp_buf[i]);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "0x%02x, ", tmp_buf[i]);
                }
        }
 
@@ -2744,8 +2790,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
                /* Here we need to resend some wifi info to BT */
                /* because bt is reset and has lost the info. */
                if (coex_sta->bt_info_ext & BIT1) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
                        btcoexist->btc_get(btcoexist,
                                           BTC_GET_BL_WIFI_CONNECTED,
                                           &wifi_connected);
@@ -2761,8 +2807,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
                if ((coex_sta->bt_info_ext & BIT3) && !wifi_under_5g) {
                        if (!btcoexist->manual_control &&
                            !btcoexist->stop_coex_dm) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
                                halbtc8821a1ant_ignore_wlan_act(btcoexist,
                                                                FORCE_EXEC,
                                                                false);
@@ -2770,8 +2816,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
                }
 #if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
                if (!(coex_sta->bt_info_ext & BIT4)) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n");
                        halbtc8821a1ant_bt_auto_report(btcoexist,
                                                       FORCE_EXEC, true);
                }
@@ -2816,28 +2862,28 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
 
        if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) {
                coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
        } else if (bt_info == BT_INFO_8821A_1ANT_B_CONNECTION) {
                /* connection exists but is not busy */
                coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
        } else if ((bt_info&BT_INFO_8821A_1ANT_B_SCO_ESCO) ||
                (bt_info&BT_INFO_8821A_1ANT_B_SCO_BUSY)) {
                coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_SCO_BUSY;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
        } else if (bt_info&BT_INFO_8821A_1ANT_B_ACL_BUSY) {
                if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
                        coex_dm->auto_tdma_adjust = false;
                coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_ACL_BUSY;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
        } else {
                coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_MAX;
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
        }
 
        if ((BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -2854,8 +2900,10 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
 
 void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
 {
-       btc_iface_dbg(INTF_NOTIFY,
-                     "[BTCoex], Halt notify\n");
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Halt notify\n");
 
        btcoexist->stop_coex_dm = true;
 
@@ -2873,20 +2921,22 @@ void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
 
 void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
 {
-       btc_iface_dbg(INTF_NOTIFY,
-                     "[BTCoex], Pnp notify\n");
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Pnp notify\n");
 
        if (BTC_WIFI_PNP_SLEEP == pnp_state) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], Pnp notify to SLEEP\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Pnp notify to SLEEP\n");
                btcoexist->stop_coex_dm = true;
                halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
                halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
                                                 0x0, 0x0);
                halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
        } else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], Pnp notify to WAKE UP\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Pnp notify to WAKE UP\n");
                btcoexist->stop_coex_dm = false;
                halbtc8821a1ant_init_hw_config(btcoexist, false);
                halbtc8821a1ant_init_coex_dm(btcoexist);
@@ -2894,41 +2944,41 @@ void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
        }
 }
 
-void
-ex_halbtc8821a1ant_periodical(
-       struct btc_coexist *btcoexist) {
+void ex_halbtc8821a1ant_periodical(struct btc_coexist *btcoexist)
+{
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        static u8       dis_ver_info_cnt;
        u32             fw_ver = 0, bt_patch_ver = 0;
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
 
-       btc_alg_dbg(ALGO_TRACE,
-                   "[BTCoex], ==========================Periodical===========================\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], ==========================Periodical===========================\n");
 
        if (dis_ver_info_cnt <= 5) {
                dis_ver_info_cnt += 1;
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], ****************************************************************\n");
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], ****************************************************************\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
                              board_info->pg_ant_num,
                              board_info->btdm_ant_num,
                              board_info->btdm_ant_pos);
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
                              stack_info->profile_notified ? "Yes" : "No",
                              stack_info->hci_version);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
                                   &bt_patch_ver);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
                              glcoex_ver_date_8821a_1ant,
                              glcoex_ver_8821a_1ant,
                              fw_ver, bt_patch_ver,
                              bt_patch_ver);
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], ****************************************************************\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], ****************************************************************\n");
        }
 
 #if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
index 81f843bba77160db94b5467920e3100db299b9c4..1717e9ce96caa311e7b6002ebac5ba49286b0aa0 100644 (file)
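
The hunks below apply the same mechanical conversion to the 8821A 2-ant code
(halbtc8821a2ant_*): each touched function gains a local rtlpriv taken from
btcoexist->adapter, and every btc_alg_dbg()/btc_iface_dbg() call becomes an
RT_TRACE() under COMP_BT_COEXIST at DBG_LOUD. Schematically (a sketch of the
pattern, not a hunk from this patch):

        /* before */
        btc_alg_dbg(ALGO_TRACE, "[BTCoex], ...\n");

        /* after -- rtlpriv is the function-local
         *   struct rtl_priv *rtlpriv = btcoexist->adapter;
         */
        RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], ...\n");

Helpers that previously took no context, such as
halbtc8821a2ant_bt_rssi_state(), grow a struct btc_coexist * parameter so
the adapter is reachable, and their callers are updated to match.
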
@@ -65,9 +65,11 @@ static u32   glcoex_ver_8821a_2ant = 0x5050;
  * local functions start with halbtc8821a2ant_
  *============================================================
  */
-static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
+static u8 halbtc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist,
+                                       u8 level_num, u8 rssi_thresh,
                                        u8 rssi_thresh1)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        long    bt_rssi = 0;
        u8      bt_rssi_state = coex_sta->pre_bt_rssi_state;
 
@@ -80,28 +82,28 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                                   BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT;
                        if (bt_rssi >= tmp) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to High\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at Low\n");
                        }
                } else {
                        if (bt_rssi < rssi_thresh) {
                                bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to Low\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                   "[BTCoex], BT Rssi thresh error!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], BT Rssi thresh error!!\n");
                        return coex_sta->pre_bt_rssi_state;
                }
 
@@ -110,12 +112,12 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                        if (bt_rssi >=
                            (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to Medium\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at Low\n");
                        }
                } else if ((coex_sta->pre_bt_rssi_state ==
                           BTC_RSSI_STATE_MEDIUM) ||
@@ -125,26 +127,26 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
                            (rssi_thresh1 +
                             BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
                                bt_rssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to High\n");
                        } else if (bt_rssi < rssi_thresh) {
                                bt_rssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to Low\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at Medium\n");
                        }
                } else {
                        if (bt_rssi < rssi_thresh1) {
                                bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state switch to Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state switch to Medium\n");
                        } else {
                                bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_BT_RSSI_STATE,
-                                           "[BTCoex], BT Rssi state stay at High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], BT Rssi state stay at High\n");
                        }
                }
        }
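
The RSSI classifier above is a hysteresis state machine: moving up requires
the signal to clear the threshold plus BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT,
while moving down only requires falling below the bare threshold, so the
coexistence algorithm does not flap at the boundary. A minimal sketch of the
2-level case (a reduction of the code above, not a replacement for it; the
function name is illustrative):

        /* Illustrative 2-level hysteresis mirroring the logic above. */
        static u8 rssi_state_2level(u8 prev, long rssi, u8 thresh)
        {
                long up = thresh + BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT;

                if (prev == BTC_RSSI_STATE_LOW ||
                    prev == BTC_RSSI_STATE_STAY_LOW)
                        return (rssi >= up) ? BTC_RSSI_STATE_HIGH
                                            : BTC_RSSI_STATE_STAY_LOW;
                return (rssi < thresh) ? BTC_RSSI_STATE_LOW
                                       : BTC_RSSI_STATE_STAY_HIGH;
        }
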
@@ -158,6 +160,7 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                                          u8 index, u8 level_num,
                                          u8 rssi_thresh, u8 rssi_thresh1)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        long    wifi_rssi = 0;
        u8      wifi_rssi_state = coex_sta->pre_wifi_rssi_state[index];
 
@@ -171,28 +174,28 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >=
                            (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to High\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at Low\n");
                        }
                } else {
                        if (wifi_rssi < rssi_thresh) {
                                wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to Low\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at High\n");
                        }
                }
        } else if (level_num == 3) {
                if (rssi_thresh > rssi_thresh1) {
-                       btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                   "[BTCoex], wifi RSSI thresh error!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], wifi RSSI thresh error!!\n");
                        return coex_sta->pre_wifi_rssi_state[index];
                }
 
@@ -203,12 +206,12 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >=
                            (rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to Medium\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at Low\n");
                        }
                } else if ((coex_sta->pre_wifi_rssi_state[index] ==
                           BTC_RSSI_STATE_MEDIUM) ||
@@ -217,26 +220,26 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
                        if (wifi_rssi >= (rssi_thresh1 +
                            BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
                                wifi_rssi_state = BTC_RSSI_STATE_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to High\n");
                        } else if (wifi_rssi < rssi_thresh) {
                                wifi_rssi_state = BTC_RSSI_STATE_LOW;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to Low\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to Low\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at Medium\n");
                        }
                } else {
                        if (wifi_rssi < rssi_thresh1) {
                                wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state switch to Medium\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state switch to Medium\n");
                        } else {
                                wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
-                               btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
-                                           "[BTCoex], wifi RSSI state stay at High\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], wifi RSSI state stay at High\n");
                        }
                }
        }
@@ -247,6 +250,7 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
 
 static void btc8821a2ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        static bool     pre_bt_disabled;
        static u32      bt_disable_cnt;
        bool            bt_active = true, bt_disabled = false;
@@ -268,32 +272,33 @@ static void btc8821a2ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
                bt_disabled = false;
                btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
                                   &bt_disabled);
-               btc_alg_dbg(ALGO_BT_MONITOR,
-                           "[BTCoex], BT is enabled !!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BT is enabled !!\n");
        } else {
                bt_disable_cnt++;
-               btc_alg_dbg(ALGO_BT_MONITOR,
-                           "[BTCoex], bt all counters = 0, %d times!!\n",
-                           bt_disable_cnt);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], bt all counters = 0, %d times!!\n",
+                        bt_disable_cnt);
                if (bt_disable_cnt >= 2) {
                        bt_disabled = true;
                        btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
                                           &bt_disabled);
-                       btc_alg_dbg(ALGO_BT_MONITOR,
-                                   "[BTCoex], BT is disabled !!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], BT is disabled !!\n");
                }
        }
        if (pre_bt_disabled != bt_disabled) {
-               btc_alg_dbg(ALGO_BT_MONITOR,
-                           "[BTCoex], BT is from %s to %s!!\n",
-                           (pre_bt_disabled ? "disabled" : "enabled"),
-                           (bt_disabled ? "disabled" : "enabled"));
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BT is from %s to %s!!\n",
+                        (pre_bt_disabled ? "disabled" : "enabled"),
+                        (bt_disabled ? "disabled" : "enabled"));
                pre_bt_disabled = bt_disabled;
        }
 }
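
btc8821a2ant_mon_bt_en_dis() above debounces the BT-disabled decision: one
poll with all BT traffic counters at zero is not enough, and only once
bt_disable_cnt reaches 2 consecutive zero polls is BT reported disabled,
with BT activity resetting the count. The same idea in isolation (an
illustrative sketch, not driver code):

        /* Declare BT disabled only after two consecutive all-zero polls. */
        static bool bt_disabled_debounced(bool bt_active)
        {
                static u32 zero_polls;

                if (bt_active) {
                        zero_polls = 0;
                        return false;
                }
                return ++zero_polls >= 2;
        }
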
 
 static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u32     reg_hp_txrx, reg_lp_txrx, u4tmp;
        u32     reg_hp_tx = 0, reg_hp_rx = 0, reg_lp_tx = 0, reg_lp_rx = 0;
 
@@ -313,12 +318,12 @@ static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
        coex_sta->low_priority_tx = reg_lp_tx;
        coex_sta->low_priority_rx = reg_lp_rx;
 
-       btc_alg_dbg(ALGO_BT_MONITOR,
-                   "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
                    reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
-       btc_alg_dbg(ALGO_BT_MONITOR,
-                   "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
-                   reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+                reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
 
        /* reset counter */
        btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -326,21 +331,23 @@ static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
 
 static void halbtc8821a2ant_query_bt_info(struct btc_coexist *btcoexist)
 {
-       u8      h2c_parameter[1] = {0};
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       u8 h2c_parameter[1] = {0};
 
        coex_sta->c2h_bt_info_req_sent = true;
 
        h2c_parameter[0] |= BIT0;       /* trigger */
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
-                   h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+                h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
 }
 
 static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
        bool bt_hs_on = false;
        u8 algorithm = BT_8821A_2ANT_COEX_ALGO_UNDEFINED;
@@ -357,8 +364,8 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
                stack_info->bt_link_exist = coex_sta->bt_link_exist;
 
        if (!coex_sta->bt_link_exist) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], No profile exists!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], No profile exists!!!\n");
                return algorithm;
        }
 
@@ -373,26 +380,28 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
 
        if (num_of_diff_profile == 1) {
                if (coex_sta->sco_exist) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], SCO only\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], SCO only\n");
                        algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
                } else {
                        if (coex_sta->hid_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], HID only\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], HID only\n");
                                algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
                        } else if (coex_sta->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], A2DP only\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], A2DP only\n");
                                algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP;
                        } else if (coex_sta->pan_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], PAN(HS) only\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], PAN(HS) only\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANHS;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], PAN(EDR) only\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], PAN(EDR) only\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR;
                                }
                        }
@@ -400,50 +409,56 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
        } else if (num_of_diff_profile == 2) {
                if (coex_sta->sco_exist) {
                        if (coex_sta->hid_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], SCO + HID\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], SCO + HID\n");
                                algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (coex_sta->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], SCO + A2DP ==> SCO\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], SCO + A2DP ==> SCO\n");
                                algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (coex_sta->pan_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], SCO + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], SCO + PAN(HS)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], SCO + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], SCO + PAN(EDR)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                                }
                        }
                } else {
                        if (coex_sta->hid_exist &&
                            coex_sta->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], HID + A2DP\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], HID + A2DP\n");
                                algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
                        } else if (coex_sta->hid_exist &&
                                coex_sta->pan_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], HID + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], HID + PAN(HS)\n");
                                        algorithm =  BT_8821A_2ANT_COEX_ALGO_HID;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], HID + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], HID + PAN(EDR)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (coex_sta->pan_exist &&
                                coex_sta->a2dp_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], A2DP + PAN(HS)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], A2DP + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], A2DP + PAN(EDR)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP;
                                }
                        }
@@ -452,29 +467,33 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
                if (coex_sta->sco_exist) {
                        if (coex_sta->hid_exist &&
                            coex_sta->a2dp_exist) {
-                               btc_alg_dbg(ALGO_TRACE,
-                                           "[BTCoex], SCO + HID + A2DP ==> HID\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], SCO + HID + A2DP ==> HID\n");
                                algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                        } else if (coex_sta->hid_exist &&
                                coex_sta->pan_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], SCO + HID + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], SCO + HID + PAN(HS)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], SCO + HID + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], SCO + HID + PAN(EDR)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                                }
                        } else if (coex_sta->pan_exist &&
                                   coex_sta->a2dp_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], SCO + A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], SCO + A2DP + PAN(HS)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                                }
                        }
@@ -483,12 +502,14 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
                            coex_sta->pan_exist &&
                            coex_sta->a2dp_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], HID + A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], HID + A2DP + PAN(HS)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], HID + A2DP + PAN(EDR)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], HID + A2DP + PAN(EDR)\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
                                }
                        }
@@ -499,12 +520,14 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
                            coex_sta->pan_exist &&
                            coex_sta->a2dp_exist) {
                                if (bt_hs_on) {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
 
                                } else {
-                                       btc_alg_dbg(ALGO_TRACE,
-                                                   "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+                                       RT_TRACE(rtlpriv, COMP_BT_COEXIST,
+                                                DBG_LOUD,
+                                                "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
                                        algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
                                }
                        }
@@ -515,6 +538,7 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
 
 static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool ret = false;
        bool bt_hs_on = false, wifi_connected = false;
        long bt_hs_rssi = 0;
@@ -528,20 +552,20 @@ static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
        if (!btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi))
                return false;
 
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
 
        if (wifi_connected) {
                if (bt_hs_on) {
                        if (bt_hs_rssi > 37) {
-                               btc_alg_dbg(ALGO_TRACE_FW,
-                                           "[BTCoex], Need to decrease bt power for HS mode!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], Need to decrease bt power for HS mode!!\n");
                                ret = true;
                        }
                } else {
                        if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
                            (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                               btc_alg_dbg(ALGO_TRACE_FW,
-                                           "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
                                ret = true;
                        }
                }
@@ -552,17 +576,18 @@ static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
 static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist,
                                              u8 dac_swing_lvl)
 {
-       u8      h2c_parameter[1] = {0};
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       u8 h2c_parameter[1] = {0};
 
        /* There are several types of dacswing
         * 0x18/ 0x10/ 0xc/ 0x8/ 0x4/ 0x6
         */
        h2c_parameter[0] = dac_swing_lvl;
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
 }
@@ -570,16 +595,17 @@ static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
                                              bool dec_bt_pwr)
 {
-       u8                      h2c_parameter[1] = {0};
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+       u8 h2c_parameter[1] = {0};
 
        h2c_parameter[0] = 0;
 
        if (dec_bt_pwr)
                h2c_parameter[0] |= BIT1;
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n",
-                   (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n",
+                (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
 }
@@ -587,15 +613,17 @@ static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
                                       bool force_exec, bool dec_bt_pwr)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s Dec BT power = %s\n",
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s Dec BT power = %s\n",
                    (force_exec ? "force to" : ""),
                    ((dec_bt_pwr) ? "ON" : "OFF"));
        coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n",
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n",
                            coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
 
                if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
@@ -609,6 +637,7 @@ static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
 static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist,
                                              bool bt_lna_cons_on)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[2] = {0};
 
        h2c_parameter[0] = 0x3; /* opCode, 0x3 = BT_SET_LNA_CONSTRAIN */
@@ -616,10 +645,10 @@ static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist,
        if (bt_lna_cons_on)
                h2c_parameter[1] |= BIT0;
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n",
-                   bt_lna_cons_on ? "ON!!" : "OFF!!",
-                   h2c_parameter[0] << 8 | h2c_parameter[1]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n",
+                bt_lna_cons_on ? "ON!!" : "OFF!!",
+                h2c_parameter[0] << 8 | h2c_parameter[1]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
 }
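
Note the framing in the hunk above: H2C 0x69 is a multiplexed command whose
first byte is an opCode (0x3 = BT_SET_LNA_CONSTRAIN here; 0x2 =
BT_SET_PSD_MODE and 0x6 = Retry_Penalty appear in later hunks), with the
operand in the following byte(s). A hedged sketch of a 2-byte write;
send_h2c_0x69 is a hypothetical name for illustration only:

        /* Illustrative wrapper for the opcode-framed H2C 0x69 writes. */
        static void send_h2c_0x69(struct btc_coexist *btcoexist,
                                  u8 opcode, u8 operand)
        {
                u8 h2c_parameter[2] = { opcode, operand };

                btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
        }
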
@@ -627,15 +656,17 @@ static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist,
 static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist,
                                       bool force_exec, bool bt_lna_cons_on)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s BT Constrain = %s\n",
-                   (force_exec ? "force" : ""),
-                   ((bt_lna_cons_on) ? "ON" : "OFF"));
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s BT Constrain = %s\n",
+                (force_exec ? "force" : ""),
+                ((bt_lna_cons_on) ? "ON" : "OFF"));
        coex_dm->cur_bt_lna_constrain = bt_lna_cons_on;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n",
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n",
                            coex_dm->pre_bt_lna_constrain,
                            coex_dm->cur_bt_lna_constrain);
 
@@ -652,16 +683,17 @@ static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist,
                                               u8 bt_psd_mode)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[2] = {0};
 
        h2c_parameter[0] = 0x2; /* opCode, 0x2 = BT_SET_PSD_MODE */
 
        h2c_parameter[1] = bt_psd_mode;
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n",
-                   h2c_parameter[1],
-                   h2c_parameter[0] << 8 | h2c_parameter[1]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n",
+                h2c_parameter[1],
+                h2c_parameter[0] << 8 | h2c_parameter[1]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
 }
@@ -669,15 +701,17 @@ static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist,
                                            bool force_exec, u8 bt_psd_mode)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s BT PSD mode = 0x%x\n",
-                   (force_exec ? "force" : ""), bt_psd_mode);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s BT PSD mode = 0x%x\n",
+                (force_exec ? "force" : ""), bt_psd_mode);
        coex_dm->cur_bt_psd_mode = bt_psd_mode;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n",
-                           coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n",
+                        coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode);
 
                if (coex_dm->pre_bt_psd_mode == coex_dm->cur_bt_psd_mode)
                        return;
@@ -691,6 +725,7 @@ static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
                                               bool enable_auto_report)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[1] = {0};
 
        h2c_parameter[0] = 0;
@@ -698,10 +733,10 @@ static void halbtc8821a2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
        if (enable_auto_report)
                h2c_parameter[0] |= BIT0;
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
-                   (enable_auto_report ? "Enabled!!" : "Disabled!!"),
-                   h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+                (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+                h2c_parameter[0]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
 }
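
These setters all share one caching convention, visible across the
dec_bt_pwr, bt_lna_constrain, bt_psd_mode and bt_auto_report hunks: the
wrapper records the requested value in coex_dm->cur_*, and unless called
with FORCE_EXEC it returns early when cur_* matches the previously applied
pre_*; only a real change (or a forced call) reaches the H2C write. Sketched
in isolation below; this is an assumption-laden skeleton, and the tail that
copies cur_* back into pre_* is not visible in the truncated hunks shown
here:

        /* Illustrative skeleton of the force_exec/cache pattern. */
        static void bt_auto_report_cached(struct btc_coexist *btcoexist,
                                          bool force_exec, bool enable)
        {
                coex_dm->cur_bt_auto_report = enable;

                if (!force_exec &&
                    coex_dm->pre_bt_auto_report ==
                    coex_dm->cur_bt_auto_report)
                        return; /* no change: skip the H2C write */

                halbtc8821a2ant_set_bt_auto_report(btcoexist, enable);
                coex_dm->pre_bt_auto_report = coex_dm->cur_bt_auto_report;
        }
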
@@ -710,15 +745,17 @@ static void halbtc8821a2ant_bt_auto_report(struct btc_coexist *btcoexist,
                                           bool force_exec,
                                           bool enable_auto_report)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s BT Auto report = %s\n",
-                   (force_exec ? "force to" : ""),
-                   ((enable_auto_report) ? "Enabled" : "Disabled"));
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s BT Auto report = %s\n",
+                (force_exec ? "force to" : ""),
+                ((enable_auto_report) ? "Enabled" : "Disabled"));
        coex_dm->cur_bt_auto_report = enable_auto_report;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
                            coex_dm->pre_bt_auto_report,
                            coex_dm->cur_bt_auto_report);
 
@@ -735,16 +772,18 @@ static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
                                             bool force_exec,
                                             u8 fw_dac_swing_lvl)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s set FW Dac Swing level = %d\n",
-                   (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s set FW Dac Swing level = %d\n",
+                (force_exec ? "force to" : ""), fw_dac_swing_lvl);
        coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
-                           coex_dm->pre_fw_dac_swing_lvl,
-                           coex_dm->cur_fw_dac_swing_lvl);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
+                        coex_dm->pre_fw_dac_swing_lvl,
+                        coex_dm->cur_fw_dac_swing_lvl);
 
                if (coex_dm->pre_fw_dac_swing_lvl ==
                    coex_dm->cur_fw_dac_swing_lvl)
@@ -760,10 +799,12 @@ static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
 static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
                                                 bool rx_rf_shrink_on)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (rx_rf_shrink_on) {
                /* Shrink RF Rx LPF corner */
-               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                           "[BTCoex], Shrink RF Rx LPF corner!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Shrink RF Rx LPF corner!!\n");
                btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
                                          0xfffff, 0xffffc);
        } else {
@@ -771,8 +812,8 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
                 * After initialized, we can use coex_dm->bt_rf0x1e_backup
                 */
                if (btcoexist->initilized) {
-                       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                                   "[BTCoex], Resume RF Rx LPF corner!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Resume RF Rx LPF corner!!\n");
                        btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
                                                  0x1e, 0xfffff,
                                                   coex_dm->bt_rf0x1e_backup);
@@ -783,17 +824,19 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist,
                                     bool force_exec, bool rx_rf_shrink_on)
 {
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "[BTCoex], %s turn Rx RF Shrink = %s\n",
-                   (force_exec ? "force to" : ""),
-                   ((rx_rf_shrink_on) ? "ON" : "OFF"));
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn Rx RF Shrink = %s\n",
+                (force_exec ? "force to" : ""),
+                ((rx_rf_shrink_on) ? "ON" : "OFF"));
        coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n",
-                           coex_dm->pre_rf_rx_lpf_shrink,
-                           coex_dm->cur_rf_rx_lpf_shrink);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n",
+                        coex_dm->pre_rf_rx_lpf_shrink,
+                        coex_dm->cur_rf_rx_lpf_shrink);
 
                if (coex_dm->pre_rf_rx_lpf_shrink ==
                    coex_dm->cur_rf_rx_lpf_shrink)
@@ -808,6 +851,7 @@ static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist,
 static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist,
                                             bool low_penalty_ra)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[6] = {0};
 
        h2c_parameter[0] = 0x6; /* opCode, 0x6 = Retry_Penalty */
@@ -824,9 +868,9 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist,
                h2c_parameter[5] = 0xf9;
        }
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], set WiFi Low-Penalty Retry: %s",
-                   (low_penalty_ra ? "ON!!" : "OFF!!"));
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set WiFi Low-Penalty Retry: %s",
+                (low_penalty_ra ? "ON!!" : "OFF!!"));
 
        btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
 }
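
btc8821a2ant_SetSwPenTxRateAdapt() above builds a six-byte host-to-card (H2C)
command: byte 0 carries the opcode (0x6 = Retry_Penalty), the tail bytes carry
retry-penalty values, and the buffer is handed to btc_fill_h2c() under command
id 0x69. A sketch of the call shape -- only values visible in the hunk are
shown; the bytes elided from the hunk are left unset here:

    u8 h2c_parameter[6] = {0};

    h2c_parameter[0] = 0x6;   /* opCode, 0x6 = Retry_Penalty          */
    /* bytes 1..4: set in lines elided from this hunk                 */
    h2c_parameter[5] = 0xf9;  /* final byte in the low-penalty branch */

    /* 0x69 = H2C command id, 6 = payload length */
    btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
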
@@ -834,17 +878,19 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist,
                                           bool force_exec, bool low_penalty_ra)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        /*return;*/
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "[BTCoex], %s turn LowPenaltyRA = %s\n",
-                   (force_exec ? "force to" : ""),
-                   ((low_penalty_ra) ? "ON" : "OFF"));
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn LowPenaltyRA = %s\n",
+                (force_exec ? "force to" : ""),
+                ((low_penalty_ra) ? "ON" : "OFF"));
        coex_dm->cur_low_penalty_ra = low_penalty_ra;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
-                           coex_dm->pre_low_penalty_ra,
-                           coex_dm->cur_low_penalty_ra);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
+                        coex_dm->pre_low_penalty_ra,
+                        coex_dm->cur_low_penalty_ra);
 
                if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
@@ -859,10 +905,11 @@ static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
                                              u32 level)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 val = (u8)level;
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Write SwDacSwing = 0x%x\n", level);
        btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val);
 }
 
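
halbtc8821a2ant_set_dac_swing_reg() relies on btc_write_1byte_bitmask(), a
read-modify-write that replaces only the bits selected by the mask (0x3e of
register 0xc5b above). A hypothetical reference implementation, assuming the
usual shift-to-lowest-mask-bit convention -- the driver's actual callback may
differ:

    /* hypothetical helper illustrating the masked-write semantics */
    static void write_1byte_bitmask(struct btc_coexist *btcoexist,
                                    u32 reg, u8 mask, u8 val)
    {
            u8 cur = btcoexist->btc_read_1byte(btcoexist, reg);
            u8 shift = ffs(mask) - 1;        /* lowest set bit of mask */

            cur &= ~mask;                    /* clear the field        */
            cur |= (val << shift) & mask;    /* insert the new value   */
            btcoexist->btc_write_1byte(btcoexist, reg, cur);
    }
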
@@ -880,21 +927,23 @@ static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist,
                                      bool force_exec, bool dac_swing_on,
                                      u32 dac_swing_lvl)
 {
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
-                   (force_exec ? "force to" : ""),
-                   ((dac_swing_on) ? "ON" : "OFF"),
-                   dac_swing_lvl);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
+                (force_exec ? "force to" : ""),
+                ((dac_swing_on) ? "ON" : "OFF"),
+                dac_swing_lvl);
        coex_dm->cur_dac_swing_on = dac_swing_on;
        coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
-                           coex_dm->pre_dac_swing_on,
-                           coex_dm->pre_dac_swing_lvl,
-                           coex_dm->cur_dac_swing_on,
-                           coex_dm->cur_dac_swing_lvl);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
+                        coex_dm->pre_dac_swing_on,
+                        coex_dm->pre_dac_swing_lvl,
+                        coex_dm->cur_dac_swing_on,
+                        coex_dm->cur_dac_swing_lvl);
 
                if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
                    (coex_dm->pre_dac_swing_lvl ==
@@ -912,13 +961,15 @@ static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist,
                                             bool adc_back_off)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (adc_back_off) {
-               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                           "[BTCoex], BB BackOff Level On!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BB BackOff Level On!\n");
                btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3);
        } else {
-               btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                           "[BTCoex], BB BackOff Level Off!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BB BackOff Level Off!\n");
                btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1);
        }
 }
@@ -926,17 +977,19 @@ static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_adc_back_off(struct btc_coexist *btcoexist,
                                         bool force_exec, bool adc_back_off)
 {
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "[BTCoex], %s turn AdcBackOff = %s\n",
-                   (force_exec ? "force to" : ""),
-                   ((adc_back_off) ? "ON" : "OFF"));
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn AdcBackOff = %s\n",
+                (force_exec ? "force to" : ""),
+                ((adc_back_off) ? "ON" : "OFF"));
        coex_dm->cur_adc_back_off = adc_back_off;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n",
-                           coex_dm->pre_adc_back_off,
-                           coex_dm->cur_adc_back_off);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n",
+                        coex_dm->pre_adc_back_off,
+                        coex_dm->cur_adc_back_off);
 
                if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
                        return;
@@ -950,20 +1003,22 @@ static void halbtc8821a2ant_set_coex_table(struct btc_coexist *btcoexist,
                                           u32 val0x6c0, u32 val0x6c4,
                                           u32 val0x6c8, u8 val0x6cc)
 {
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
        btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
        btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
        btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
 
-       btc_alg_dbg(ALGO_TRACE_SW_EXEC,
-                   "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
        btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
 }
 
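
halbtc8821a2ant_set_coex_table() is a plain register writer: three 32-bit
priority tables at 0x6c0/0x6c4/0x6c8 plus a one-byte control at 0x6cc, each
write traced before it is issued. A usage sketch with values that appear in
later hunks of this patch (the meaning of the individual bit fields is
vendor-defined and not documented here):

    halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC,
                               0x55ff55ff,   /* 0x6c0 */
                               0x5afa5afa,   /* 0x6c4 */
                               0xffff,       /* 0x6c8 */
                               0x3);         /* 0x6cc */
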
@@ -971,28 +1026,30 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist,
                                       bool force_exec, u32 val0x6c0,
                                       u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
 {
-       btc_alg_dbg(ALGO_TRACE_SW,
-                   "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
-                   (force_exec ? "force to" : ""),
-                   val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+                (force_exec ? "force to" : ""),
+                val0x6c0, val0x6c4, val0x6c8, val0x6cc);
        coex_dm->cur_val0x6c0 = val0x6c0;
        coex_dm->cur_val0x6c4 = val0x6c4;
        coex_dm->cur_val0x6c8 = val0x6c8;
        coex_dm->cur_val0x6cc = val0x6cc;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
-                           coex_dm->pre_val0x6c0,
-                           coex_dm->pre_val0x6c4,
-                           coex_dm->pre_val0x6c8,
-                           coex_dm->pre_val0x6cc);
-               btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
-                           "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
-                           coex_dm->cur_val0x6c0,
-                           coex_dm->cur_val0x6c4,
-                           coex_dm->cur_val0x6c8,
-                           coex_dm->cur_val0x6cc);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
+                        coex_dm->pre_val0x6c0,
+                        coex_dm->pre_val0x6c4,
+                        coex_dm->pre_val0x6c8,
+                        coex_dm->pre_val0x6cc);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
+                        coex_dm->cur_val0x6c0,
+                        coex_dm->cur_val0x6c4,
+                        coex_dm->cur_val0x6c8,
+                        coex_dm->cur_val0x6cc);
 
                if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
                    (coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1012,14 +1069,15 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
                                                   bool enable)
 {
+       struct rtl_priv *rtlpriv = btcoex->adapter;
        u8 h2c_parameter[1] = {0};
 
        if (enable)
                h2c_parameter[0] |= BIT0;/* function enable */
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
-                   h2c_parameter[0]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+                h2c_parameter[0]);
 
        btcoex->btc_fill_h2c(btcoex, 0x63, 1, h2c_parameter);
 }
@@ -1027,16 +1085,18 @@ static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
 static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
                                            bool force_exec, bool enable)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s turn Ignore WlanAct %s\n",
-                   (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn Ignore WlanAct %s\n",
+                (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
        coex_dm->cur_ignore_wlan_act = enable;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
-                           coex_dm->pre_ignore_wlan_act,
-                           coex_dm->cur_ignore_wlan_act);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+                        coex_dm->pre_ignore_wlan_act,
+                        coex_dm->cur_ignore_wlan_act);
 
                if (coex_dm->pre_ignore_wlan_act ==
                    coex_dm->cur_ignore_wlan_act)
@@ -1051,6 +1111,7 @@ static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist,
                                          u8 byte1, u8 byte2, u8 byte3,
                                          u8 byte4, u8 byte5)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 h2c_parameter[5];
 
        h2c_parameter[0] = byte1;
@@ -1065,13 +1126,13 @@ static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist,
        coex_dm->ps_tdma_para[3] = byte4;
        coex_dm->ps_tdma_para[4] = byte5;
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
-                   h2c_parameter[0],
-                   h2c_parameter[1] << 24 |
-                   h2c_parameter[2] << 16 |
-                   h2c_parameter[3] << 8 |
-                   h2c_parameter[4]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+                h2c_parameter[0],
+                h2c_parameter[1] << 24 |
+                h2c_parameter[2] << 16 |
+                h2c_parameter[3] << 8 |
+                h2c_parameter[4]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
 }
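
halbtc8821a2ant_set_fw_pstdma() mirrors the five PS-TDMA bytes into
coex_dm->ps_tdma_para[] and logs them as a single value: byte 1 printed on its
own, bytes 2-5 packed big-endian into one 32-bit word by the shift-or chain,
hence the "0x%x%08x" format. The packing, spelled out:

    /* bytes 2..5 of the H2C payload, packed big-endian for the trace */
    u32 packed = (u32)h2c_parameter[1] << 24 |
                 (u32)h2c_parameter[2] << 16 |
                 (u32)h2c_parameter[3] << 8  |
                 (u32)h2c_parameter[4];
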
@@ -1165,20 +1226,22 @@ static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist,
 static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
                                    bool force_exec, bool turn_on, u8 type)
 {
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], %s turn %s PS TDMA, type = %d\n",
-                   (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
-                   type);
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], %s turn %s PS TDMA, type = %d\n",
+                (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
+                type);
        coex_dm->cur_ps_tdma_on = turn_on;
        coex_dm->cur_ps_tdma = type;
 
        if (!force_exec) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
-                           coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
-                           coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
+                        coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
+                        coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
 
                if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
                    (coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1348,6 +1411,7 @@ static void halbtc8821a2ant_bt_inquiry_page(struct btc_coexist *btcoexist)
 
 static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool common = false, wifi_connected = false, wifi_busy = false;
        bool low_pwr_disable = false;
 
@@ -1364,8 +1428,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
                btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
                                   &low_pwr_disable);
 
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Wifi IPS + BT IPS!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi IPS + BT IPS!!\n");
 
                halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
                halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1382,13 +1446,13 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
                                   &low_pwr_disable);
 
                if (wifi_busy) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Wifi Busy + BT IPS!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Wifi Busy + BT IPS!!\n");
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                false, 1);
                } else {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Wifi LPS + BT IPS!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Wifi LPS + BT IPS!!\n");
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                false, 1);
                }
@@ -1406,8 +1470,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
                btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
                                   &low_pwr_disable);
 
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Wifi IPS + BT LPS!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi IPS + BT LPS!!\n");
 
                halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
                halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1423,13 +1487,13 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
                        BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
 
                if (wifi_busy) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Wifi Busy + BT LPS!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Wifi Busy + BT LPS!!\n");
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                false, 1);
                } else {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Wifi LPS + BT LPS!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Wifi LPS + BT LPS!!\n");
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                false, 1);
                }
@@ -1448,8 +1512,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
                btcoexist->btc_set(btcoexist,
                        BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
 
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Wifi IPS + BT Busy!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Wifi IPS + BT Busy!!\n");
 
                halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
                halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1468,12 +1532,12 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
                                   &low_pwr_disable);
 
                if (wifi_busy) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Wifi Busy + BT Busy!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Wifi Busy + BT Busy!!\n");
                        common = false;
                } else {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Wifi LPS + BT Busy!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Wifi LPS + BT Busy!!\n");
                        halbtc8821a2ant_ps_tdma(btcoexist,
                                                NORMAL_EXEC, true, 21);
 
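
halbtc8821a2ant_is_common_action() reduces to a small decision table over the
WiFi and BT activity states: whenever either side is idle (IPS or LPS), the
"common" handling applies -- PS-TDMA is switched off (type 1, or type 21 for
WiFi-LPS + BT-busy) with the baseline DAC swing -- and only Busy + Busy falls
through to the per-profile algorithms. A schematic summary (states collapsed
to booleans; not literal driver code):

    static bool is_common_action(bool wifi_busy, bool bt_busy)
    {
            /* any idle side (IPS/LPS) stays on the common path */
            if (!wifi_busy || !bt_busy)
                    return true;
            /* both busy: defer to the algorithm-specific actions */
            return false;
    }
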
@@ -1494,9 +1558,11 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
 static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause,
                           int result)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (tx_pause) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 1\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 1\n");
 
                if (coex_dm->cur_ps_tdma == 71) {
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1591,8 +1657,8 @@ static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 0\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 71);
@@ -1695,9 +1761,11 @@ static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause,
 static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause,
                           int result)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (tx_pause) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 1\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 1\n");
                if (coex_dm->cur_ps_tdma == 1) {
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 6);
@@ -1786,8 +1854,8 @@ static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 0\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 2);
@@ -1881,9 +1949,11 @@ static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause,
 static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause,
                           int result)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (tx_pause) {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 1\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 1\n");
                if (coex_dm->cur_ps_tdma == 1) {
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 7);
@@ -1972,8 +2042,8 @@ static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause,
                        }
                }
        } else {
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], TxPause = 0\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], TxPause = 0\n");
                if (coex_dm->cur_ps_tdma == 5) {
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
                                                true, 3);
@@ -2068,6 +2138,7 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                                      bool sco_hid, bool tx_pause,
                                      u8 max_interval)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        static long     up, dn, m, n, wait_count;
         /* 0: no change, +1: increase WiFi duration,
          * -1: decrease WiFi duration
@@ -2075,13 +2146,13 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
        int             result;
        u8              retry_count = 0;
 
-       btc_alg_dbg(ALGO_TRACE_FW,
-                   "[BTCoex], TdmaDurationAdjust()\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], TdmaDurationAdjust()\n");
 
        if (coex_dm->reset_tdma_adjust) {
                coex_dm->reset_tdma_adjust = false;
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], first run TdmaDurationAdjust()!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], first run TdmaDurationAdjust()!!\n");
                if (sco_hid) {
                        if (tx_pause) {
                                if (max_interval == 1) {
@@ -2094,11 +2165,6 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                                                                NORMAL_EXEC,
                                                                true, 14);
                                        coex_dm->tdma_adj_type = 14;
-                               } else if (max_interval == 3) {
-                                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 15);
-                                       coex_dm->tdma_adj_type = 15;
                                } else {
                                        halbtc8821a2ant_ps_tdma(btcoexist,
                                                                NORMAL_EXEC,
@@ -2116,11 +2182,6 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                                                                NORMAL_EXEC,
                                                                true, 10);
                                        coex_dm->tdma_adj_type = 10;
-                               } else if (max_interval == 3) {
-                                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 11);
-                                       coex_dm->tdma_adj_type = 11;
                                } else {
                                        halbtc8821a2ant_ps_tdma(btcoexist,
                                                                NORMAL_EXEC,
@@ -2140,11 +2201,6 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                                                                NORMAL_EXEC,
                                                                true, 6);
                                        coex_dm->tdma_adj_type = 6;
-                               } else if (max_interval == 3) {
-                                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 7);
-                                       coex_dm->tdma_adj_type = 7;
                                } else {
                                        halbtc8821a2ant_ps_tdma(btcoexist,
                                                                NORMAL_EXEC,
@@ -2162,11 +2218,6 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                                                                NORMAL_EXEC,
                                                                true, 2);
                                        coex_dm->tdma_adj_type = 2;
-                               } else if (max_interval == 3) {
-                                       halbtc8821a2ant_ps_tdma(btcoexist,
-                                                               NORMAL_EXEC,
-                                                               true, 3);
-                                       coex_dm->tdma_adj_type = 3;
                                } else {
                                        halbtc8821a2ant_ps_tdma(btcoexist,
                                                                NORMAL_EXEC,
@@ -2185,10 +2236,10 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
        } else {
               /* acquire the BT TRx retry count from BT_Info byte2 */
                retry_count = coex_sta->bt_retry_cnt;
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], retry_count = %d\n", retry_count);
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
-                           (int)up, (int)dn, (int)m, (int)n, (int)wait_count);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], retry_count = %d\n", retry_count);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
+                        (int)up, (int)dn, (int)m, (int)n, (int)wait_count);
                result = 0;
                wait_count++;
@@ -2210,8 +2261,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                                up = 0;
                                dn = 0;
                                result = 1;
-                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                           "[BTCoex], Increase wifi duration!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], Increase wifi duration!!\n");
                        }
                } else if (retry_count <= 3) {
                        /* <=3 retry in the last 2-second duration */
@@ -2240,8 +2291,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                                dn = 0;
                                wait_count = 0;
                                result = -1;
-                               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                           "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+                               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                        "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
                        }
                } else {
                        /* retry count > 3, if retry count > 3 happens once,
@@ -2262,12 +2313,12 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                        dn = 0;
                        wait_count = 0;
                        result = -1;
-                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                   "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
                }
 
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], max Interval = %d\n", max_interval);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], max Interval = %d\n", max_interval);
                if (max_interval == 1)
                        btc8821a2_int1(btcoexist, tx_pause, result);
                else if (max_interval == 2)
@@ -2283,8 +2334,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
        if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
                bool    scan = false, link = false, roam = false;
 
-               btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                           "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
-                           coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], PsTdma type mismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
+                        coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
 
                btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
@@ -2295,8 +2346,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
                        halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
                                                coex_dm->tdma_adj_type);
                } else {
-                       btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
-                                   "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
                }
        }
 
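
btc8821a2ant_tdma_dur_adj() is a slow hysteresis controller driven by the BT
retry counter sampled over 2-second intervals: clean intervals eventually
widen the WiFi slot, light retry activity eventually shrinks it, and more
than three retries shrinks it at once. The outcome per branch, with the
up/dn/m/n debounce arithmetic (partly elided from the hunks) reduced to a
comment:

    /* outcome per branch; the first two are debounced by the up/dn/m/n
     * counters before result actually leaves 0
     */
    static int tdma_adjust_result(u8 retry_count)
    {
            if (retry_count == 0)
                    return 1;     /* increase WiFi duration           */
            else if (retry_count <= 3)
                    return -1;    /* decrease WiFi duration           */
            else
                    return -1;    /* >3 retries: decrease immediately */
    }

The resulting direction is then routed to btc8821a2_int1/2/3 according to
max_interval, which pick the concrete PS-TDMA type.
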
@@ -2311,7 +2362,7 @@ static void halbtc8821a2ant_action_sco(struct btc_coexist *btcoexist)
 
        wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
                                                          15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
 
        halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 4);
 
@@ -2337,14 +2388,8 @@ static void halbtc8821a2ant_action_sco(struct btc_coexist *btcoexist)
                 * halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 5);
                 */
 
-               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               false, 0); /*for voice quality*/
-               } else {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               false, 0); /*for voice quality*/
-               }
+               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
+                                       false, 0); /*for voice quality*/
 
                /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
@@ -2395,7 +2440,7 @@ static void halbtc8821a2ant_action_hid(struct btc_coexist *btcoexist)
 
        wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist,
                                                          0, 2, 15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
 
        halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
@@ -2475,7 +2520,7 @@ static void halbtc8821a2ant_action_a2dp(struct btc_coexist *btcoexist)
 
        wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
                                                          15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
 
        /* fw dac swing is called in btc8821a2ant_tdma_dur_adj()
         * halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -2543,7 +2588,7 @@ static void halbtc8821a2ant_action_a2dp_pan_hs(struct btc_coexist *btcoexist)
        bt_info_ext = coex_sta->bt_info_ext;
        wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
                                                          15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
 
        /*fw dac swing is called in btc8821a2ant_tdma_dur_adj()
         *halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -2612,7 +2657,7 @@ static void halbtc8821a2ant_action_pan_edr(struct btc_coexist *btcoexist)
 
        wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
                                                          15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
 
        halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
@@ -2692,7 +2737,7 @@ static void halbtc8821a2ant_action_pan_hs(struct btc_coexist *btcoexist)
 
        wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist,
                                                          0, 2, 15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
 
        halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
@@ -2734,14 +2779,7 @@ static void halbtc8821a2ant_action_pan_hs(struct btc_coexist *btcoexist)
                                                   NORMAL_EXEC, false);
                }
 
-               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               false, 1);
-               } else {
-                       halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
-                                               false, 1);
-               }
+               halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
 
                /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
@@ -2768,7 +2806,7 @@ static void halbtc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
        bt_info_ext = coex_sta->bt_info_ext;
        wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
                                                          15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
 
        halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
@@ -2779,40 +2817,18 @@ static void halbtc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       if (BTC_WIFI_BW_LEGACY == wifi_bw) {
-               /* for HID at 11b/g mode */
-               halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                          0x5afa5afa, 0xffff, 0x3);
-       } else {
-               /* for HID quality & wifi performance balance at 11n mode */
-               halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                          0x5afa5afa, 0xffff, 0x3);
-       }
+       halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
+                                  0x5afa5afa, 0xffff, 0x3);
 
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
                /* fw mechanism */
                if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       if (bt_info_ext&BIT0) {
-                               /* a2dp basic rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist, false,
-                                                         false, 3);
-                       } else {
-                               /* a2dp edr rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist, false,
-                                                         false, 3);
-                       }
-               } else {
-                       if (bt_info_ext&BIT0) {
-                               /* a2dp basic rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist, false,
-                                                         true, 3);
-                       } else {
-                               /* a2dp edr rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist, false,
-                                                         true, 3);
-                       }
-               }
+                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+                       btc8821a2ant_tdma_dur_adj(btcoexist, false,
+                                                 false, 3);
+               else
+                       btc8821a2ant_tdma_dur_adj(btcoexist, false,
+                                                 true, 3);
 
                /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
@@ -2826,31 +2842,14 @@ static void halbtc8821a2ant_action_pan_edr_a2dp(struct btc_coexist *btcoexist)
                                              false, false);
                        btc8821a2ant_sw_mech2(btcoexist, false, false,
                                              false, 0x18);
-               };
+               }
        } else {
                /* fw mechanism */
                if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       if (bt_info_ext&BIT0) {
-                               /* a2dp basic rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist, false,
-                                                         false, 3);
-                       } else {
-                               /* a2dp edr rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist, false,
-                                                         false, 3);
-                       }
-               } else {
-                       if (bt_info_ext&BIT0) {
-                               /* a2dp basic rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist, false,
-                                                         true, 3);
-                       } else {
-                               /* a2dp edr rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist, false,
-                                                         true, 3);
-                       }
-               }
+                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
+                       btc8821a2ant_tdma_dur_adj(btcoexist, false, false, 3);
+               else
+                       btc8821a2ant_tdma_dur_adj(btcoexist, false, true, 3);
 
                /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
@@ -2875,7 +2874,7 @@ static void halbtc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
 
        wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
                                                          15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
 
        halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
@@ -2886,15 +2885,8 @@ static void halbtc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       if (BTC_WIFI_BW_LEGACY == wifi_bw) {
-               /* for HID at 11b/g mode */
-               halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                          0x5a5f5a5f, 0xffff, 0x3);
-       } else {
-               /* for HID quality & wifi performance balance at 11n mode */
-               halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                          0x5a5f5a5f, 0xffff, 0x3);
-       }
+       halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
+                                  0x5a5f5a5f, 0xffff, 0x3);
 
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
                halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 3);
@@ -2958,7 +2950,7 @@ static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
        bt_info_ext = coex_sta->bt_info_ext;
        wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist,
                                                          0, 2, 15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
 
        halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
 
@@ -2969,40 +2961,12 @@ static void btc8821a2ant_act_hid_a2dp_pan_edr(struct btc_coexist *btcoexist)
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       if (BTC_WIFI_BW_LEGACY == wifi_bw) {
-               /* for HID at 11b/g mode */
-               halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                          0x5a5a5a5a, 0xffff, 0x3);
-       } else {
-               /* for HID quality & wifi performance balance at 11n mode */
-               halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                          0x5a5a5a5a, 0xffff, 0x3);
-       }
+       halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
+                                  0x5a5a5a5a, 0xffff, 0x3);
 
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
                /* fw mechanism */
-               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       if (bt_info_ext&BIT0) {
-                               /* a2dp basic rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist, true,
-                                                         true, 3);
-                       } else {
-                               /* a2dp edr rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist, true,
-                                                         true, 3);
-                       }
-               } else {
-                       if (bt_info_ext&BIT0) {
-                               /* a2dp basic rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist, true,
-                                                         true, 3);
-                       } else {
-                               /* a2dp edr rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist, true,
-                                                         true, 3);
-                       }
-               }
+               btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 3);
 
                /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
@@ -3066,7 +3030,7 @@ static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
        bt_info_ext = coex_sta->bt_info_ext;
        wifi_rssi_state = halbtc8821a2ant_wifi_rssi_state(btcoexist, 0, 2,
                                                          15, 0);
-       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(2, 35, 0);
+       bt_rssi_state = halbtc8821a2ant_bt_rssi_state(btcoexist, 2, 35, 0);
 
        if (halbtc8821a2ant_need_to_dec_bt_pwr(btcoexist))
                halbtc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, true);
@@ -3075,40 +3039,12 @@ static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
 
-       if (BTC_WIFI_BW_LEGACY == wifi_bw) {
-               /* for HID at 11b/g mode */
-               halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                          0x5f5b5f5b, 0xffffff, 0x3);
-       } else {
-               /*for HID quality & wifi performance balance at 11n mode*/
-               halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
-                                          0x5f5b5f5b, 0xffffff, 0x3);
-       }
+       halbtc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
+                                  0x5f5b5f5b, 0xffffff, 0x3);
 
        if (BTC_WIFI_BW_HT40 == wifi_bw) {
                /* fw mechanism */
-               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       if (bt_info_ext&BIT0) {
-                               /* a2dp basic rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist,
-                                                         true, true, 2);
-                       } else {
-                               /* a2dp edr rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist,
-                                                         true, true, 2);
-                       }
-               } else {
-                       if (bt_info_ext&BIT0) {
-                               /* a2dp basic rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist,
-                                                         true, true, 2);
-                       } else {
-                               /* a2dp edr rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist,
-                                                         true, true, 2);
-                       }
-               }
+               btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 2);
 
                /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
@@ -3125,29 +3061,7 @@ static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
                }
        } else {
                /* fw mechanism */
-               if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
-                   (bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
-                       if (bt_info_ext&BIT0) {
-                               /* a2dp basic rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist,
-                                                         true, true, 2);
-
-                       } else {
-                               /* a2dp edr rate */
-                               btc8821a2ant_tdma_dur_adj(btcoexist,
-                                                         true, true, 2);
-                       }
-               } else {
-                       if (bt_info_ext&BIT0) {
-                               /*a2dp basic rate*/
-                               btc8821a2ant_tdma_dur_adj(btcoexist,
-                                                         true, true, 2);
-                       } else {
-                               /*a2dp edr rate*/
-                               btc8821a2ant_tdma_dur_adj(btcoexist,
-                                                         true, true, 2);
-                       }
-               }
+               btc8821a2ant_tdma_dur_adj(btcoexist, true, true, 2);
 
                /* sw mechanism */
                if ((wifi_rssi_state == BTC_RSSI_STATE_HIGH) ||
@@ -3167,12 +3081,13 @@ static void halbtc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
 
 static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        bool    wifi_under_5g = false;
        u8      algorithm = 0;
 
        if (btcoexist->manual_control) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Manual control!!!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Manual control!!!\n");
                return;
        }
 
@@ -3180,8 +3095,8 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
                BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
 
        if (wifi_under_5g) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
                halbtc8821a2ant_coex_under_5g(btcoexist);
                return;
        }
@@ -3189,82 +3104,82 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
        algorithm = halbtc8821a2ant_action_algorithm(btcoexist);
        if (coex_sta->c2h_bt_inquiry_page &&
            (BT_8821A_2ANT_COEX_ALGO_PANHS != algorithm)) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], BT is under inquiry/page scan !!\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BT is under inquiry/page scan !!\n");
                halbtc8821a2ant_bt_inquiry_page(btcoexist);
                return;
        }
 
        coex_dm->cur_algorithm = algorithm;
-       btc_alg_dbg(ALGO_TRACE,
-                   "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
 
        if (halbtc8821a2ant_is_common_action(btcoexist)) {
-               btc_alg_dbg(ALGO_TRACE,
-                           "[BTCoex], Action 2-Ant common\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Action 2-Ant common\n");
                coex_dm->reset_tdma_adjust = true;
        } else {
                if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
                                    coex_dm->pre_algorithm,
                                    coex_dm->cur_algorithm);
                        coex_dm->reset_tdma_adjust = true;
                }
                switch (coex_dm->cur_algorithm) {
                case BT_8821A_2ANT_COEX_ALGO_SCO:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = SCO\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = SCO\n");
                        halbtc8821a2ant_action_sco(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_HID:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = HID\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = HID\n");
                        halbtc8821a2ant_action_hid(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_A2DP:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
                        halbtc8821a2ant_action_a2dp(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
                        halbtc8821a2ant_action_a2dp_pan_hs(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_PANEDR:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
                        halbtc8821a2ant_action_pan_edr(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_PANHS:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
                        halbtc8821a2ant_action_pan_hs(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
                        halbtc8821a2ant_action_pan_edr_a2dp(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_PANEDR_HID:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
                        halbtc8821a2ant_action_pan_edr_hid(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
                        btc8821a2ant_act_hid_a2dp_pan_edr(btcoexist);
                        break;
                case BT_8821A_2ANT_COEX_ALGO_HID_A2DP:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
                        halbtc8821a2ant_action_hid_a2dp(btcoexist);
                        break;
                default:
-                       btc_alg_dbg(ALGO_TRACE,
-                                   "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
                        halbtc8821a2ant_coex_all_off(btcoexist);
                        break;
                }
@@ -3281,10 +3196,11 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
  */
 void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8 u1tmp = 0;
 
-       btc_iface_dbg(INTF_INIT,
-                     "[BTCoex], 2Ant Init HW Config!!\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], 2Ant Init HW Config!!\n");
 
        /* backup rf 0x1e value */
        coex_dm->bt_rf0x1e_backup =
@@ -3312,13 +3228,12 @@ void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
        btcoexist->btc_write_1byte_bitmask(btcoexist, 0x40, 0x20, 0x1);
 }
 
-void
-ex_halbtc8821a2ant_init_coex_dm(
-       struct btc_coexist *btcoexist
-       )
+void ex_halbtc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
 {
-       btc_iface_dbg(INTF_INIT,
-                     "[BTCoex], Coex Mechanism Init!!\n");
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Coex Mechanism Init!!\n");
 
        halbtc8821a2ant_init_coex_dm(btcoexist);
 }
@@ -3341,7 +3256,7 @@ ex_halbtc8821a2ant_display_coex_info(
        u32 fw_ver = 0, bt_patch_ver = 0;
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n ============[BT Coexist info]============");
+                "\r\n ============[BT Coexist info]============");
 
        if (!board_info->bt_exist) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n BT not exists !!!");
@@ -3349,23 +3264,23 @@ ex_halbtc8821a2ant_display_coex_info(
        }
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
-                  board_info->pg_ant_num, board_info->btdm_ant_num);
+                "\r\n %-35s = %d/ %d ", "Ant PG number/ Ant mechanism:",
+                board_info->pg_ant_num, board_info->btdm_ant_num);
 
        if (btcoexist->manual_control) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n %-35s", "[Action Manual control]!!");
+                        "\r\n %-35s", "[Action Manual control]!!");
        }
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
+                "\r\n %-35s = %s / %d", "BT stack/ hci ext ver",
                   ((stack_info->profile_notified) ? "Yes" : "No"),
                   stack_info->hci_version);
 
        btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER, &bt_patch_ver);
        btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
+                "\r\n %-35s = %d_%d/ 0x%x/ 0x%x(%d)",
                   "CoexVer/ FwVer/ PatchVer",
                   glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
                   fw_ver, bt_patch_ver, bt_patch_ver);
@@ -3377,26 +3292,26 @@ ex_halbtc8821a2ant_display_coex_info(
        btcoexist->btc_get(btcoexist,
                BTC_GET_U1_WIFI_HS_CHNL, &wifi_hs_chnl);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d / %d(%d)",
+                "\r\n %-35s = %d / %d(%d)",
                   "Dot11 channel / HsMode(HsChnl)",
                   wifi_dot_11_chnl, bt_hs_on, wifi_hs_chnl);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %3ph ",
+                "\r\n %-35s = %3ph ",
                   "H2C Wifi inform bt chnl Info",
                   coex_dm->wifi_chnl_info);
 
        btcoexist->btc_get(btcoexist, BTC_GET_S4_WIFI_RSSI, &wifi_rssi);
        btcoexist->btc_get(btcoexist, BTC_GET_S4_HS_RSSI, &bt_hs_rssi);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi",
+                "\r\n %-35s = %ld/ %ld", "Wifi rssi/ HS rssi",
                   wifi_rssi, bt_hs_rssi);
 
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
        btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
+                "\r\n %-35s = %d/ %d/ %d ", "Wifi link/ roam/ scan",
                   link, roam, scan);
 
        btcoexist->btc_get(btcoexist,
@@ -3408,7 +3323,7 @@ ex_halbtc8821a2ant_display_coex_info(
        btcoexist->btc_get(btcoexist,
                BTC_GET_U4_WIFI_TRAFFIC_DIRECTION, &wifi_traffic_dir);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %s / %s/ %s ", "Wifi status",
+                "\r\n %-35s = %s / %s/ %s ", "Wifi status",
                   (wifi_under_5g ? "5G" : "2.4G"),
                   ((BTC_WIFI_BW_LEGACY == wifi_bw) ? "Legacy" :
                    (((BTC_WIFI_BW_HT40 == wifi_bw) ? "HT40" : "HT20"))),
@@ -3417,7 +3332,7 @@ ex_halbtc8821a2ant_display_coex_info(
                     "uplink" : "downlink")));
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]",
+                "\r\n %-35s = [%s/ %d/ %d] ", "BT [status/ rssi/ retryCnt]",
                   ((coex_sta->c2h_bt_inquiry_page) ? ("inquiry/page scan") :
                    ((BT_8821A_2ANT_BT_STATUS_IDLE == coex_dm->bt_status)
                     ? "idle" : ((BT_8821A_2ANT_BT_STATUS_CON_IDLE ==
@@ -3426,7 +3341,7 @@ ex_halbtc8821a2ant_display_coex_info(
 
        if (stack_info->profile_notified) {
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
+                        "\r\n %-35s = %d / %d / %d / %d", "SCO/HID/PAN/A2DP",
                           stack_info->sco_exist, stack_info->hid_exist,
                           stack_info->pan_exist, stack_info->a2dp_exist);
 
@@ -3436,117 +3351,117 @@ ex_halbtc8821a2ant_display_coex_info(
 
        bt_info_ext = coex_sta->bt_info_ext;
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s",
-                  "BT Info A2DP rate",
+                "BT Info A2DP rate",
                   (bt_info_ext&BIT0) ? "Basic rate" : "EDR rate");
 
        for (i = 0; i < BT_INFO_SRC_8821A_2ANT_MAX; i++) {
                if (coex_sta->bt_info_c2h_cnt[i]) {
                        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                                  "\r\n %-35s = %7ph(%d)",
-                                  glbt_info_src_8821a_2ant[i],
-                                  coex_sta->bt_info_c2h[i],
-                                  coex_sta->bt_info_c2h_cnt[i]);
+                                "\r\n %-35s = %7ph(%d)",
+                                glbt_info_src_8821a_2ant[i],
+                                coex_sta->bt_info_c2h[i],
+                                coex_sta->bt_info_c2h_cnt[i]);
                }
        }
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %s/%s",
-                  "PS state, IPS/LPS",
-                  ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
-                  ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
+                "PS state, IPS/LPS",
+                ((coex_sta->under_ips ? "IPS ON" : "IPS OFF")),
+                ((coex_sta->under_lps ? "LPS ON" : "LPS OFF")));
        btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_FW_PWR_MODE_CMD);
 
        /* Sw mechanism*/
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                  "============[Sw mechanism]============");
+                "============[Sw mechanism]============");
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d/ %d/ %d/ %d ",
-                  "SM1[ShRf/ LpRA/ LimDig/ btLna]",
-                  coex_dm->cur_rf_rx_lpf_shrink, coex_dm->cur_low_penalty_ra,
-                  coex_dm->limited_dig, coex_dm->cur_bt_lna_constrain);
+                "\r\n %-35s = %d/ %d/ %d/ %d ",
+                "SM1[ShRf/ LpRA/ LimDig/ btLna]",
+                coex_dm->cur_rf_rx_lpf_shrink, coex_dm->cur_low_penalty_ra,
+                coex_dm->limited_dig, coex_dm->cur_bt_lna_constrain);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = %d/ %d/ %d(0x%x) ",
-                  "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
-                  coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
-                  coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
+                "\r\n %-35s = %d/ %d/ %d(0x%x) ",
+                "SM2[AgcT/ AdcB/ SwDacSwing(lvl)]",
+                coex_dm->cur_agc_table_en, coex_dm->cur_adc_back_off,
+                coex_dm->cur_dac_swing_on, coex_dm->cur_dac_swing_lvl);
 
        /* Fw mechanism*/
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s",
-                  "============[Fw mechanism]============");
+                "============[Fw mechanism]============");
 
        if (!btcoexist->manual_control) {
                ps_tdma_case = coex_dm->cur_ps_tdma;
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n %-35s = %5ph case-%d",
-                          "PS TDMA",
-                          coex_dm->ps_tdma_para, ps_tdma_case);
+                        "\r\n %-35s = %5ph case-%d",
+                        "PS TDMA",
+                        coex_dm->ps_tdma_para, ps_tdma_case);
 
                RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                          "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct",
-                          coex_dm->cur_dec_bt_pwr,
-                          coex_dm->cur_ignore_wlan_act);
+                        "\r\n %-35s = %d/ %d ", "DecBtPwr/ IgnWlanAct",
+                        coex_dm->cur_dec_bt_pwr,
+                        coex_dm->cur_ignore_wlan_act);
        }
 
        /* Hw setting*/
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s", "============[Hw setting]============");
+                "\r\n %-35s", "============[Hw setting]============");
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
-                  "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal",
-                  coex_dm->bt_rf0x1e_backup);
+                "\r\n %-35s = 0x%x", "RF-A, 0x1e initVal",
+                coex_dm->bt_rf0x1e_backup);
 
        u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x778);
        u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0x6cc);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x ",
-                  "0x778 (W_Act)/ 0x6cc (CoTab Sel)",
-                  u1tmp[0], u1tmp[1]);
+                "0x778 (W_Act)/ 0x6cc (CoTab Sel)",
+                u1tmp[0], u1tmp[1]);
 
        u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x8db);
        u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xc5b);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                  "0x8db(ADC)/0xc5b[29:25](DAC)",
-                  ((u1tmp[0]&0x60)>>5), ((u1tmp[1]&0x3e)>>1));
+                "0x8db(ADC)/0xc5b[29:25](DAC)",
+                ((u1tmp[0] & 0x60) >> 5), ((u1tmp[1] & 0x3e) >> 1));
 
        u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xcb4);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                  "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)",
-                  u4tmp[0]&0xff, ((u4tmp[0]&0x30000000)>>28));
+                "0xcb4[7:0](ctrl)/ 0xcb4[29:28](val)",
+                u4tmp[0] & 0xff, ((u4tmp[0] & 0x30000000) >> 28));
 
        u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x40);
        u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x4c);
        u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x974);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                  "0x40/ 0x4c[24:23]/ 0x974",
-                  u1tmp[0], ((u4tmp[0]&0x01800000)>>23), u4tmp[1]);
+                "0x40/ 0x4c[24:23]/ 0x974",
+                u1tmp[0], ((u4tmp[0] & 0x01800000) >> 23), u4tmp[1]);
 
        u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x550);
        u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x522);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                  "0x550(bcn ctrl)/0x522",
-                  u4tmp[0], u1tmp[0]);
+                "0x550(bcn ctrl)/0x522",
+                u4tmp[0], u1tmp[0]);
 
        u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xc50);
        u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa0a);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                  "0xc50(DIG)/0xa0a(CCK-TH)",
-                  u4tmp[0], u1tmp[0]);
+                "0xc50(DIG)/0xa0a(CCK-TH)",
+                u4tmp[0], u1tmp[0]);
 
        u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0xf48);
        u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0xa5b);
        u1tmp[1] = btcoexist->btc_read_1byte(btcoexist, 0xa5c);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x",
-                  "OFDM-FA/ CCK-FA",
-                  u4tmp[0], (u1tmp[0]<<8) + u1tmp[1]);
+                "OFDM-FA/ CCK-FA",
+                u4tmp[0], (u1tmp[0] << 8) + u1tmp[1]);
 
        u4tmp[0] = btcoexist->btc_read_4byte(btcoexist, 0x6c0);
        u4tmp[1] = btcoexist->btc_read_4byte(btcoexist, 0x6c4);
        u4tmp[2] = btcoexist->btc_read_4byte(btcoexist, 0x6c8);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x/ 0x%x/ 0x%x",
-                  "0x6c0/0x6c4/0x6c8",
-                  u4tmp[0], u4tmp[1], u4tmp[2]);
+                "0x6c0/0x6c4/0x6c8",
+                u4tmp[0], u4tmp[1], u4tmp[2]);
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
-                  "0x770 (hi-pri Rx/Tx)",
-                  coex_sta->high_priority_rx, coex_sta->high_priority_tx);
+                "0x770 (hi-pri Rx/Tx)",
+                coex_sta->high_priority_rx, coex_sta->high_priority_tx);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = %d/ %d",
                   "0x774(low-pri Rx/Tx)",
                   coex_sta->low_priority_rx, coex_sta->low_priority_tx);
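
The hunk above also normalizes spacing around the mask-and-shift expressions used to pull fields out of register readbacks (e.g. `(u1tmp[0] & 0x60) >> 5`). A self-contained user-space sketch of that idiom, with hypothetical readback values but the same masks the trace lines report:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t  reg_8db = 0x7c;              /* hypothetical readback of 0x8db */
            uint8_t  reg_c5b = 0x2a;              /* hypothetical readback of 0xc5b */
            uint32_t reg_cb4 = 0x30000055;        /* hypothetical readback of 0xcb4 */

            uint8_t  adc  = (reg_8db & 0x60) >> 5;        /* byte bits [6:5]  */
            uint8_t  dac  = (reg_c5b & 0x3e) >> 1;        /* byte bits [5:1]  */
            uint32_t ctrl = reg_cb4 & 0xff;               /* bits [7:0]       */
            uint32_t val  = (reg_cb4 & 0x30000000) >> 28; /* bits [29:28]     */

            printf("ADC=%u DAC=%u ctrl=0x%x val=%u\n",
                   adc, dac, (unsigned)ctrl, (unsigned)val);
            return 0;
    }
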
@@ -3554,22 +3469,24 @@ ex_halbtc8821a2ant_display_coex_info(
        /* Tx mgnt queue hang or not; 0x41b should be 0xf, e.g. 0xd ==> hang */
        u1tmp[0] = btcoexist->btc_read_1byte(btcoexist, 0x41b);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "\r\n %-35s = 0x%x",
-                  "0x41b (mgntQ hang chk == 0xf)",
-                  u1tmp[0]);
+                "0x41b (mgntQ hang chk == 0xf)",
+                u1tmp[0]);
 
        btcoexist->btc_disp_dbg_msg(btcoexist, BTC_DBG_DISP_COEX_STATISTICS);
 }
 
 void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (BTC_IPS_ENTER == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], IPS ENTER notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], IPS ENTER notify\n");
                coex_sta->under_ips = true;
                halbtc8821a2ant_coex_all_off(btcoexist);
        } else if (BTC_IPS_LEAVE == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], IPS LEAVE notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], IPS LEAVE notify\n");
                coex_sta->under_ips = false;
                /*halbtc8821a2ant_init_coex_dm(btcoexist);*/
        }
@@ -3577,52 +3494,59 @@ void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
 
 void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (BTC_LPS_ENABLE == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], LPS ENABLE notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], LPS ENABLE notify\n");
                coex_sta->under_lps = true;
        } else if (BTC_LPS_DISABLE == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], LPS DISABLE notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], LPS DISABLE notify\n");
                coex_sta->under_lps = false;
        }
 }
 
 void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (BTC_SCAN_START == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], SCAN START notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], SCAN START notify\n");
        } else if (BTC_SCAN_FINISH == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], SCAN FINISH notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], SCAN FINISH notify\n");
        }
 }
 
 void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (BTC_ASSOCIATE_START == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], CONNECT START notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CONNECT START notify\n");
        } else if (BTC_ASSOCIATE_FINISH == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], CONNECT FINISH notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CONNECT FINISH notify\n");
        }
 }
 
 void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
                                            u8 type)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8      h2c_parameter[3] = {0};
        u32     wifi_bw;
        u8      wifi_central_chnl;
 
        if (BTC_MEDIA_CONNECT == type) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], MEDIA connect notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], MEDIA connect notify\n");
        } else {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], MEDIA disconnect notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], MEDIA disconnect notify\n");
        }
 
        /* only in 2.4G do we need to inform bt of the chnl mask */
@@ -3643,26 +3567,29 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
        coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
        coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
 
-       btc_alg_dbg(ALGO_TRACE_FW_EXEC,
-                   "[BTCoex], FW write 0x66 = 0x%x\n",
-                   h2c_parameter[0] << 16 |
-                   h2c_parameter[1] << 8 |
-                   h2c_parameter[2]);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], FW write 0x66 = 0x%x\n",
+                h2c_parameter[0] << 16 |
+                h2c_parameter[1] << 8 |
+                h2c_parameter[2]);
 
        btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
 }
 
 void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
                                              u8 type) {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
        if (type == BTC_PACKET_DHCP) {
-               btc_iface_dbg(INTF_NOTIFY,
-                             "[BTCoex], DHCP Packet notify\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], DHCP Packet notify\n");
        }
 }
 
 void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
                                       u8 *tmp_buf, u8 length)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        u8              bt_info = 0;
        u8              i, rsp_source = 0;
        static u32      set_bt_lna_cnt, set_bt_psd_mode;
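
The media-status hunk above logs the three H2C bytes it passes to btc_fill_h2c() as one packed value (`h2c_parameter[0] << 16 | h2c_parameter[1] << 8 | h2c_parameter[2]`). A standalone check of that packing; the payload bytes are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* hypothetical H2C payload: connect flag, channel 6, bw mask */
            uint8_t h2c_parameter[3] = { 0x01, 0x06, 0x14 };
            uint32_t packed = h2c_parameter[0] << 16 |
                              h2c_parameter[1] << 8 |
                              h2c_parameter[2];

            printf("[BTCoex], FW write 0x66 = 0x%x\n", (unsigned)packed); /* 0x10614 */
            return 0;
    }
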
@@ -3676,19 +3603,19 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
                rsp_source = BT_INFO_SRC_8821A_2ANT_WIFI_FW;
        coex_sta->bt_info_c2h_cnt[rsp_source]++;
 
-       btc_iface_dbg(INTF_NOTIFY,
-                     "[BTCoex], Bt info[%d], length = %d, hex data = [",
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Bt info[%d], length = %d, hex data = [",
                      rsp_source, length);
        for (i = 0; i < length; i++) {
                coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
                if (i == 1)
                        bt_info = tmp_buf[i];
                if (i == length-1) {
-                       btc_iface_dbg(INTF_NOTIFY,
-                                     "0x%02x]\n", tmp_buf[i]);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "0x%02x]\n", tmp_buf[i]);
                } else {
-                       btc_iface_dbg(INTF_NOTIFY,
-                                     "0x%02x, ", tmp_buf[i]);
+                       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                                "0x%02x, ", tmp_buf[i]);
                }
        }
 
@@ -3814,8 +3741,10 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
 
 void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
 {
-       btc_iface_dbg(INTF_NOTIFY,
-                     "[BTCoex], Halt notify\n");
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
+
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], Halt notify\n");
 
        halbtc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
        ex_halbtc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
@@ -3823,36 +3752,37 @@ void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
 
 void ex_halbtc8821a2ant_periodical(struct btc_coexist *btcoexist)
 {
+       struct rtl_priv *rtlpriv = btcoexist->adapter;
        static u8       dis_ver_info_cnt;
        u32             fw_ver = 0, bt_patch_ver = 0;
        struct btc_board_info *board_info = &btcoexist->board_info;
        struct btc_stack_info *stack_info = &btcoexist->stack_info;
 
-       btc_alg_dbg(ALGO_TRACE,
-                   "[BTCoex], ==========================Periodical===========================\n");
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "[BTCoex], ==========================Periodical===========================\n");
 
        if (dis_ver_info_cnt <= 5) {
                dis_ver_info_cnt += 1;
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], ****************************************************************\n");
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
-                             board_info->pg_ant_num,
-                             board_info->btdm_ant_num,
-                             board_info->btdm_ant_pos);
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
-                             stack_info->profile_notified ? "Yes" : "No",
-                             stack_info->hci_version);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], ****************************************************************\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+                        board_info->pg_ant_num,
+                        board_info->btdm_ant_num,
+                        board_info->btdm_ant_pos);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+                        stack_info->profile_notified ? "Yes" : "No",
+                        stack_info->hci_version);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
                                   &bt_patch_ver);
                btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
-                             glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
-                             fw_ver, bt_patch_ver, bt_patch_ver);
-               btc_iface_dbg(INTF_INIT,
-                             "[BTCoex], ****************************************************************\n");
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+                        glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
+                        fw_ver, bt_patch_ver, bt_patch_ver);
+               RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                        "[BTCoex], ****************************************************************\n");
        }
 
        halbtc8821a2ant_query_bt_info(btcoexist);
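
Every hunk in this file applies the same mechanical conversion: the driver-local btc_alg_dbg()/btc_iface_dbg() wrappers (deleted from the header further below) become calls to the shared RT_TRACE() macro, which needs the struct rtl_priv that btcoexist carries in ->adapter. A minimal before/after sketch; the function is hypothetical and shown in kernel context, not standalone:

    /* before: local printk wrapper, no rtl_priv required */
    static void example_notify(struct btc_coexist *btcoexist)
    {
            btc_iface_dbg(INTF_NOTIFY, "[BTCoex], example notify\n");
    }

    /* after: shared macro, gated by mod_params->debug_mask/debug_level */
    static void example_notify(struct btc_coexist *btcoexist)
    {
            struct rtl_priv *rtlpriv = btcoexist->adapter;

            RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
                     "[BTCoex], example notify\n");
    }
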
index 91cc1397b150652219e668c5289fced3e93c3472..150aeb8e79d1a4870abbc6e6f8174e3ae7fb6990 100644
@@ -141,11 +141,40 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
 
        if (rtlphy->current_channel != 0)
                chnl = rtlphy->current_channel;
-       btc_alg_dbg(ALGO_TRACE,
-                   "static halbtc_get_wifi_central_chnl:%d\n", chnl);
+       RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+                "static halbtc_get_wifi_central_chnl:%d\n", chnl);
        return chnl;
 }
 
+u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv)
+{
+       return rtlpriv->btcoexist.btc_info.single_ant_path;
+}
+
+u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
+{
+       return rtlpriv->btcoexist.btc_info.bt_type;
+}
+
+u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
+{
+       u8 num;
+
+       if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
+               num = 2;
+       else
+               num = 1;
+
+       return num;
+}
+
+u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv)
+{
+       struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+       return rtlhal->package_type;
+}
+
 static void halbtc_leave_lps(struct btc_coexist *btcoexist)
 {
        struct rtl_priv *rtlpriv;
@@ -335,6 +364,9 @@ static bool halbtc_get(void *void_btcoexist, u8 get_type, void *out_buf)
        case BTC_GET_U4_BT_PATCH_VER:
                *u32_tmp = halbtc_get_bt_patch_version(btcoexist);
                break;
+       case BTC_GET_U4_VENDOR:
+               *u32_tmp = BTC_VENDOR_OTHER;
+               break;
        case BTC_GET_U1_WIFI_DOT11_CHNL:
                *u8_tmp = rtlphy->current_channel;
                break;
index 3d308ebbe0488d42c3365f4d7640dd4c6babc6e8..601bbe1d22b35ff911064c3db5638108eca91f94 100644
@@ -116,18 +116,6 @@ extern u32 btc_dbg_type[];
 #define                WIFI_P2P_GO_CONNECTED                   BIT3
 #define                WIFI_P2P_GC_CONNECTED                   BIT4
 
-#define        btc_alg_dbg(dbgflag, fmt, ...)                                  \
-do {                                                                   \
-       if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] & dbgflag))        \
-               printk(KERN_DEBUG fmt, ##__VA_ARGS__);                  \
-} while (0)
-#define        btc_iface_dbg(dbgflag, fmt, ...)                                \
-do {                                                                   \
-       if (unlikely(btc_dbg_type[BTC_MSG_INTERFACE] & dbgflag))        \
-               printk(KERN_DEBUG fmt, ##__VA_ARGS__);                  \
-} while (0)
-
-
 #define        BTC_RSSI_HIGH(_rssi_)   \
        ((_rssi_ == BTC_RSSI_STATE_HIGH ||      \
          _rssi_ == BTC_RSSI_STATE_STAY_HIGH) ? true : false)
@@ -228,6 +216,7 @@ enum btc_get_type {
        BTC_GET_U4_WIFI_FW_VER,
        BTC_GET_U4_WIFI_LINK_STATUS,
        BTC_GET_U4_BT_PATCH_VER,
+       BTC_GET_U4_VENDOR,
 
        /* type u1Byte */
        BTC_GET_U1_WIFI_DOT11_CHNL,
@@ -245,6 +234,12 @@ enum btc_get_type {
        BTC_GET_MAX
 };
 
+enum btc_vendor {
+       BTC_VENDOR_LENOVO,
+       BTC_VENDOR_ASUS,
+       BTC_VENDOR_OTHER
+};
+
 enum btc_set_type {
        /* type bool */
        BTC_SET_BL_BT_DISABLE,
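
The new BTC_GET_U4_VENDOR selector rides the existing btc_get() query path; the halbtc_get() hunk above always answers BTC_VENDOR_OTHER. A caller-side sketch, in kernel context with a hypothetical variable name:

    u32 vendor = BTC_VENDOR_OTHER;

    btcoexist->btc_get(btcoexist, BTC_GET_U4_VENDOR, &vendor);
    if (vendor == BTC_VENDOR_LENOVO) {
            /* vendor-specific coexistence tweaks would branch here;
             * this implementation reports BTC_VENDOR_OTHER for all boards.
             */
    }
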
@@ -263,6 +258,7 @@ enum btc_set_type {
        /* type trigger some action */
        BTC_SET_ACT_GET_BT_RSSI,
        BTC_SET_ACT_AGGREGATE_CTRL,
+       BTC_SET_ACT_ANTPOSREGRISTRY_CTRL,
 
        /********* for 1Ant **********/
        /* type bool */
index d3fd9211b3a48fd1c8b495069fccbb0085dbb140..46e0fa6be273345ea030d56a0b45f52548c1bce4 100644
@@ -178,17 +178,6 @@ struct rtl_btc_ops *rtl_btc_get_ops_pointer(void)
 }
 EXPORT_SYMBOL(rtl_btc_get_ops_pointer);
 
-u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv)
-{
-       u8 num;
-
-       if (rtlpriv->btcoexist.btc_info.ant_num == ANT_X2)
-               num = 2;
-       else
-               num = 1;
-
-       return num;
-}
 
 enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw)
 {
@@ -209,11 +198,6 @@ u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv)
        return rtlpriv->btcoexist.btc_info.btcoexist;
 }
 
-u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv)
-{
-       return rtlpriv->btcoexist.btc_info.bt_type;
-}
-
 MODULE_AUTHOR("Page He <page_he@realsil.com.cn>");
 MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
 MODULE_AUTHOR("Larry Finger    <Larry.FInger@lwfinger.net>");
index ccd5a0f91e3b1bf626ca53cef370d7f717c08f03..fff5117e1c4e5aa23b734a93e6d7532b4995be8c 100644
@@ -46,9 +46,12 @@ void rtl_btc_special_packet_notify(struct rtl_priv *rtlpriv, u8 pkt_type);
 
 struct rtl_btc_ops *rtl_btc_get_ops_pointer(void);
 
-u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv);
 u8 rtl_get_hwpg_bt_exist(struct rtl_priv *rtlpriv);
 u8 rtl_get_hwpg_bt_type(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_ant_num(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_single_ant_path(struct rtl_priv *rtlpriv);
+u8 rtl_get_hwpg_package_type(struct rtl_priv *rtlpriv);
+
 enum rt_media_status mgnt_link_status_query(struct ieee80211_hw *hw);
 
 #endif
index 8fe8b4cfae6c5aecb7ed58c1043ddc95f08d437f..f7a7dcbf945ef67e5a1f2b991906c1c598de0eab 100644
@@ -45,12 +45,13 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
 
        u32 target_command;
        u32 target_content = 0;
-       u8 entry_i;
+       int entry_i;
 
        RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_DMESG, "Key content :",
                      key_cont_128, 16);
 
-       for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
+       /* 0-1 config + mac, 2-5 fill 128-bit key, 6-7 are reserved */
+       for (entry_i = CAM_CONTENT_COUNT - 1; entry_i >= 0; entry_i--) {
                target_command = entry_i + CAM_CONTENT_COUNT * entry_no;
                target_command = target_command | BIT(31) | BIT(16);
 
@@ -102,7 +103,6 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
                                        target_content);
                        rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
                                        target_command);
-                       udelay(100);
 
                        RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
                                 "WRITE A4: %x\n", target_content);
@@ -285,8 +285,7 @@ u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr)
        u8 i, *addr;
 
        if (NULL == sta_addr) {
-               RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
-                        "sta_addr is NULL.\n");
+               pr_err("sta_addr is NULL.\n");
                return TOTAL_CAM_ENTRY;
        }
        /* Does STA already exist? */
@@ -298,9 +297,8 @@ u8 rtl_cam_get_free_entry(struct ieee80211_hw *hw, u8 *sta_addr)
        /* Get a free CAM entry. */
        for (entry_idx = 4; entry_idx < TOTAL_CAM_ENTRY; entry_idx++) {
                if ((bitmap & BIT(0)) == 0) {
-                       RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
-                                "-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n",
-                                rtlpriv->sec.hwsec_cam_bitmap, entry_idx);
+                       pr_err("-----hwsec_cam_bitmap: 0x%x entry_idx=%d\n",
+                              rtlpriv->sec.hwsec_cam_bitmap, entry_idx);
                        rtlpriv->sec.hwsec_cam_bitmap |= BIT(0) << entry_idx;
                        memcpy(rtlpriv->sec.hwsec_cam_sta_addr[entry_idx],
                               sta_addr, ETH_ALEN);
@@ -319,14 +317,12 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
        u8 i, *addr;
 
        if (NULL == sta_addr) {
-               RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
-                        "sta_addr is NULL.\n");
+               pr_err("sta_addr is NULL.\n");
                return;
        }
 
        if (is_zero_ether_addr(sta_addr)) {
-               RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
-                        "sta_addr is %pM\n", sta_addr);
+               pr_err("sta_addr is %pM\n", sta_addr);
                return;
        }
        /* Does STA already exist? */
index ded1493fee9c975742ede341b991b4446a48e3d7..a4f8e326a2bc171cd4c00bed64d24c240b5c3ee7 100644
@@ -117,8 +117,7 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
        }
 found_alt:
        if (firmware->size > rtlpriv->max_fw_size) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Firmware is too big!\n");
+               pr_err("Firmware is too big!\n");
                release_firmware(firmware);
                return;
        }
@@ -234,6 +233,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        int err = 0;
+       u8 retry_limit = 0x30;
 
        if (mac->vif) {
                RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
@@ -272,6 +272,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
                                (u8 *)(&mac->basic_rates));
 
+               retry_limit = 0x07;
                break;
        case NL80211_IFTYPE_P2P_GO:
                mac->p2p = P2P_ROLE_GO;
@@ -288,6 +289,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
                        mac->basic_rates = 0xff0;
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
                                              (u8 *)(&mac->basic_rates));
+
+               retry_limit = 0x07;
                break;
        case NL80211_IFTYPE_MESH_POINT:
                RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
@@ -301,10 +304,12 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
                        mac->basic_rates = 0xff0;
                rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
                                (u8 *)(&mac->basic_rates));
+
+               retry_limit = 0x07;
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "operation mode %d is not support!\n", vif->type);
+               pr_err("operation mode %d is not supported!\n",
+                      vif->type);
                err = -EOPNOTSUPP;
                goto out;
        }
@@ -322,6 +327,10 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
        memcpy(mac->mac_addr, vif->addr, ETH_ALEN);
        rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
 
+       mac->retry_long = retry_limit;
+       mac->retry_short = retry_limit;
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
+                       (u8 *)(&retry_limit));
 out:
        mutex_unlock(&rtlpriv->locks.conf_mutex);
        return err;
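
rtl_op_add_interface() now seeds a per-vif retry limit: 0x30 by default, tightened to 0x07 in the interface-type arms patched above, then mirrored into mac->retry_long/short and programmed through HW_VAR_RETRY_LIMIT. A condensed in-context sketch (not standalone; the exact set of case labels is assumed from the visible hunks):

    u8 retry_limit = 0x30;                  /* default for other vif types */

    switch (vif->type) {
    case NL80211_IFTYPE_STATION:            /* assumed: the arms setting 0x07 */
    case NL80211_IFTYPE_P2P_GO:
    case NL80211_IFTYPE_MESH_POINT:
            retry_limit = 0x07;
            break;
    default:
            break;
    }

    mac->retry_long = retry_limit;
    mac->retry_short = retry_limit;
    rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT, (u8 *)&retry_limit);
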
@@ -646,10 +655,15 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
                RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
                         "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
                         hw->conf.long_frame_max_tx_count);
-               mac->retry_long = hw->conf.long_frame_max_tx_count;
-               mac->retry_short = hw->conf.long_frame_max_tx_count;
-               rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
+               /* at bring-up everything changes (changed == ~0), which
+                * indicates the first open, so use our default value instead
+                * of wiphy's.
+                */
+               if (changed != ~0) {
+                       mac->retry_long = hw->conf.long_frame_max_tx_count;
+                       mac->retry_short = hw->conf.long_frame_max_tx_count;
+                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
                                (u8 *)(&hw->conf.long_frame_max_tx_count));
+               }
        }
 
        if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
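
Per the comment above, mac80211 hands the driver changed == ~0 on the first configuration after bring-up, and the new guard keeps the driver's default retry limit in that case instead of adopting wiphy's. A tiny standalone illustration of the all-bits-set sentinel:

    #include <stdio.h>

    int main(void)
    {
            unsigned int changed = ~0;      /* every change flag set: first open */

            if (changed != ~0)
                    printf("apply wiphy retry limits\n");
            else
                    printf("first open: keep the driver default retry limit\n");
            return 0;
    }
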
@@ -764,9 +778,8 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
                        default:
                                        mac->bw_40 = false;
                                        mac->bw_80 = false;
-                                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                                "switch case %#x not processed\n",
-                                                channel_type);
+                                       pr_err("switch case %#x not processed\n",
+                                              channel_type);
                                        break;
                        }
                }
@@ -1399,8 +1412,7 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
                         "IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid);
                return rtl_rx_agg_stop(hw, sta, tid);
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "IEEE80211_AMPDU_ERR!!!!:\n");
+               pr_err("IEEE80211_AMPDU_ERR!!!!:\n");
                return -EOPNOTSUPP;
        }
        return 0;
@@ -1532,12 +1544,11 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                key_type = AESCMAC_ENCRYPTION;
                RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CMAC\n");
                RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
-                        "HW don't support CMAC encrypiton, use software CMAC encrypiton\n");
+                        "HW don't support CMAC encryption, use software CMAC encryption\n");
                err = -EOPNOTSUPP;
                goto out_unlock;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "alg_err:%x!!!!:\n", key->cipher);
+               pr_err("alg_err:%x!!!!:\n", key->cipher);
                goto out_unlock;
        }
        if (key_type == WEP40_ENCRYPTION ||
@@ -1613,8 +1624,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                        RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
                                 "set pairwise key\n");
                        if (!sta) {
-                               RT_ASSERT(false,
-                                         "pairwise key without mac_addr\n");
+                               WARN_ONCE(true,
+                                         "rtlwifi: pairwise key without mac_addr\n");
 
                                err = -EOPNOTSUPP;
                                goto out_unlock;
@@ -1662,8 +1673,7 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "cmd_err:%x!!!!:\n", cmd);
+               pr_err("cmd_err:%x!!!!:\n", cmd);
        }
 out_unlock:
        mutex_unlock(&rtlpriv->locks.conf_mutex);
@@ -1804,8 +1814,8 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
                                         "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n");
                                return true;
                        default:
-                               RT_ASSERT(false,
-                                         "rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n");
+                               WARN_ONCE(true,
+                                         "rtlwifi: rtl_hal_pwrseqcmdparsing(): Unknown CMD!!\n");
                                break;
                        }
                }
index 33905bbacad212443d33dde648f5bf10511eb9bd..7ecac6116d5dfa01a91b36ee7c6def6bdf31ef54 100644
 
 #include <linux/moduleparam.h>
 
-void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
+#ifdef CONFIG_RTLWIFI_DEBUG
+void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level,
+                   const char *fmt, ...)
 {
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 i;
+       if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
+                    (level <= rtlpriv->cfg->mod_params->debug_level))) {
+               struct va_format vaf;
+               va_list args;
 
-       rtlpriv->dbg.global_debugcomponents =
-           COMP_ERR | COMP_FW | COMP_INIT | COMP_RECV | COMP_SEND |
-           COMP_MLME | COMP_SCAN | COMP_INTR | COMP_LED | COMP_SEC |
-           COMP_BEACON | COMP_RATE | COMP_RXDESC | COMP_DIG | COMP_TXAGC |
-           COMP_POWER | COMP_POWER_TRACKING | COMP_BB_POWERSAVING | COMP_SWAS |
-           COMP_RF | COMP_TURBO | COMP_RATR | COMP_CMD |
-           COMP_EFUSE | COMP_QOS | COMP_MAC80211 | COMP_REGD | COMP_CHAN |
-           COMP_EASY_CONCURRENT | COMP_EFUSE | COMP_QOS | COMP_MAC80211 |
-           COMP_REGD | COMP_CHAN | COMP_BT_COEXIST;
+               va_start(args, fmt);
 
+               vaf.fmt = fmt;
+               vaf.va = &args;
 
-       for (i = 0; i < DBGP_TYPE_MAX; i++)
-               rtlpriv->dbg.dbgp_type[i] = 0;
+               pr_info(":<%lx> %pV", in_interrupt(), &vaf);
 
-       /*Init Debug flag enable condition */
+               va_end(args);
+       }
 }
-EXPORT_SYMBOL_GPL(rtl_dbgp_flag_init);
+EXPORT_SYMBOL_GPL(_rtl_dbg_trace);
 
-#ifdef CONFIG_RTLWIFI_DEBUG
-void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level,
-                   const char *modname, const char *fmt, ...)
+void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
+                   const char *fmt, ...)
 {
-       if (unlikely((comp & rtlpriv->dbg.global_debugcomponents) &&
-                    (level <= rtlpriv->dbg.global_debuglevel))) {
+       if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
+                    (level <= rtlpriv->cfg->mod_params->debug_level))) {
                struct va_format vaf;
                va_list args;
 
@@ -63,13 +60,25 @@ void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level,
                vaf.fmt = fmt;
                vaf.va = &args;
 
-               printk(KERN_DEBUG "%s:%ps:<%lx-%x> %pV",
-                      modname, __builtin_return_address(0),
-                      in_interrupt(), in_atomic(),
-                      &vaf);
+               pr_info("%pV", &vaf);
 
                va_end(args);
        }
 }
-EXPORT_SYMBOL_GPL(_rtl_dbg_trace);
+EXPORT_SYMBOL_GPL(_rtl_dbg_print);
+
+void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
+                        const char *titlestring,
+                        const void *hexdata, int hexdatalen)
+{
+       if (unlikely(((comp) & rtlpriv->cfg->mod_params->debug_mask) &&
+                    ((level) <= rtlpriv->cfg->mod_params->debug_level))) {
+               pr_info("In process \"%s\" (pid %i): %s\n",
+                       current->comm, current->pid, titlestring);
+               print_hex_dump_bytes("", DUMP_PREFIX_NONE,
+                                    hexdata, hexdatalen);
+       }
+}
+EXPORT_SYMBOL_GPL(_rtl_dbg_print_data);
+
 #endif
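
The reworked _rtl_dbg_trace()/_rtl_dbg_print() gate on the module parameters debug_mask and debug_level, then hand the caller's varargs to pr_info() through struct va_format and %pV so the message is formatted exactly once. A user-space analog of that gate using vprintf; the mask and level constants are illustrative only:

    #include <stdarg.h>
    #include <stdio.h>

    static unsigned int debug_mask = 0x2;   /* hypothetical COMP_* bit  */
    static int debug_level = 5;             /* hypothetical DBG_* level */

    static void dbg_trace(unsigned int comp, int level, const char *fmt, ...)
    {
            va_list args;

            if (!((comp & debug_mask) && level <= debug_level))
                    return;                 /* filtered, fmt never expanded */

            va_start(args, fmt);
            vprintf(fmt, args);
            va_end(args);
    }

    int main(void)
    {
            dbg_trace(0x2, 2, "[BTCoex], Algorithm = %d\n", 7);  /* printed  */
            dbg_trace(0x4, 2, "never shown\n");                  /* filtered */
            return 0;
    }
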
index 6156a79328c1168086747d57812b9a5592941049..bf5339f1c1bcaad8457392b2404ea76faf74adac 100644
@@ -36,7 +36,7 @@
  *unexpected HW behavior, HW BUG
  *and so on.
  */
-#define DBG_EMERG                      0
+/*#define DBG_EMERG                    0 */
 
 /*
  *Abnormal, rare, or unexpected cases.
@@ -166,55 +166,36 @@ enum dbgp_flag_e {
 
 #ifdef CONFIG_RTLWIFI_DEBUG
 
-#define RT_ASSERT(_exp, fmt, ...)                                      \
-do {                                                                   \
-       if (!(_exp)) {                                                  \
-               printk(KERN_DEBUG KBUILD_MODNAME ":%s(): " fmt,         \
-                      __func__, ##__VA_ARGS__);                        \
-       }                                                               \
-} while (0)
-
-
 struct rtl_priv;
 
-__printf(5, 6)
+__printf(4, 5)
 void _rtl_dbg_trace(struct rtl_priv *rtlpriv, int comp, int level,
-                   const char *modname, const char *fmt, ...);
+                   const char *fmt, ...);
+
+__printf(4, 5)
+void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
+                   const char *fmt, ...);
+
+void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
+                        const char *titlestring,
+                        const void *hexdata, int hexdatalen);
 
 #define RT_TRACE(rtlpriv, comp, level, fmt, ...)                       \
        _rtl_dbg_trace(rtlpriv, comp, level,                            \
-                      KBUILD_MODNAME, fmt, ##__VA_ARGS__)
+                      fmt, ##__VA_ARGS__)
 
 #define RTPRINT(rtlpriv, dbgtype, dbgflag, fmt, ...)                   \
-do {                                                                   \
-       if (unlikely(rtlpriv->dbg.dbgp_type[dbgtype] & dbgflag)) {      \
-               printk(KERN_DEBUG KBUILD_MODNAME ": " fmt,              \
-                      ##__VA_ARGS__);                                  \
-       }                                                               \
-} while (0)
+       _rtl_dbg_print(rtlpriv, dbgtype, dbgflag, fmt, ##__VA_ARGS__)
 
 #define RT_PRINT_DATA(rtlpriv, _comp, _level, _titlestring, _hexdata,  \
                      _hexdatalen)                                      \
-do {                                                                   \
-       if (unlikely(((_comp) & rtlpriv->dbg.global_debugcomponents) && \
-                    (_level <= rtlpriv->dbg.global_debuglevel))) {     \
-               printk(KERN_DEBUG "%s: In process \"%s\" (pid %i): %s\n", \
-                      KBUILD_MODNAME, current->comm, current->pid,     \
-                      _titlestring);                                   \
-               print_hex_dump_bytes("", DUMP_PREFIX_NONE,              \
-                                    _hexdata, _hexdatalen);            \
-       }                                                               \
-} while (0)
+       _rtl_dbg_print_data(rtlpriv, _comp, _level,                     \
+                           _titlestring, _hexdata, _hexdatalen)
 
 #else
 
 struct rtl_priv;
 
-__printf(2, 3)
-static inline void RT_ASSERT(int exp, const char *fmt, ...)
-{
-}
-
 __printf(4, 5)
 static inline void RT_TRACE(struct rtl_priv *rtlpriv,
                            int comp, int level,
@@ -237,6 +218,4 @@ static inline void RT_PRINT_DATA(struct rtl_priv *rtlpriv,
 }
 
 #endif
-
-void rtl_dbgp_flag_init(struct ieee80211_hw *hw);
 #endif
index 7becfef6cd5ce31954b920efb72730baed836943..ef9acd466cca95920b797951e806f68546b30865 100644
@@ -31,6 +31,9 @@ static const u8 MAX_PGPKT_SIZE = 9;
 static const u8 PGPKT_DATA_SIZE = 8;
 static const int EFUSE_MAX_SIZE = 512;
 
+#define START_ADDRESS          0x1000
+#define REG_MCUFWDL            0x0080
+
 static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = {
        {0, 0, 0, 2},
        {0, 1, 0, 2},
@@ -70,8 +73,6 @@ static void efuse_word_enable_data_read(u8 word_en, u8 *sourdata,
                                        u8 *targetdata);
 static u8 enable_efuse_data_write(struct ieee80211_hw *hw,
                                  u16 efuse_addr, u8 word_en, u8 *data);
-static void efuse_power_switch(struct ieee80211_hw *hw, u8 write,
-                              u8 pwrstate);
 static u16 efuse_get_current_size(struct ieee80211_hw *hw);
 static u8 efuse_calculate_word_cnts(u8 word_en);
 
@@ -1121,7 +1122,7 @@ static u8 enable_efuse_data_write(struct ieee80211_hw *hw,
        return badworden;
 }
 
-static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
+void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1207,6 +1208,7 @@ static void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate)
                }
        }
 }
+EXPORT_SYMBOL(efuse_power_switch);
 
 static u16 efuse_get_current_size(struct ieee80211_hw *hw)
 {
@@ -1259,8 +1261,7 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
                break;
 
        case EEPROM_93C46:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "RTL8XXX did not boot from eeprom, check it !!\n");
+               pr_err("RTL8XXX did not boot from eeprom, check it !!\n");
                return 1;
 
        default:
@@ -1321,3 +1322,45 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
        return 0;
 }
 EXPORT_SYMBOL_GPL(rtl_get_hwinfo);
+
+void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 *pu4byteptr = (u8 *)buffer;
+       u32 i;
+
+       for (i = 0; i < size; i++)
+               rtl_write_byte(rtlpriv, (START_ADDRESS + i), *(pu4byteptr + i));
+}
+EXPORT_SYMBOL_GPL(rtl_fw_block_write);
+
+void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer,
+                      u32 size)
+{
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       u8 value8;
+       u8 u8page = (u8)(page & 0x07);
+
+       value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
+
+       rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
+       rtl_fw_block_write(hw, buffer, size);
+}
+EXPORT_SYMBOL_GPL(rtl_fw_page_write);
+
+void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
+{
+       u32 fwlen = *pfwlen;
+       u8 remain = (u8)(fwlen % 4);
+
+       remain = (remain == 0) ? 0 : (4 - remain);
+
+       while (remain > 0) {
+               pfwbuf[fwlen] = 0;
+               fwlen++;
+               remain--;
+       }
+
+       *pfwlen = fwlen;
+}
+EXPORT_SYMBOL_GPL(rtl_fill_dummy);
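The three exported helpers above consolidate firmware-download code that each chip driver previously carried as a private copy (the rtl8188ee and rtl8192c duplicates are deleted later in this diff). Note that the shared rtl_fw_block_write() pushes one byte per iteration, where the removed per-chip versions wrote 4-byte dwords with a byte tail. rtl_fill_dummy() zero-pads the image to the next 4-byte boundary, so callers must leave up to three spare bytes in the buffer. A stand-alone user-space sketch of that padding arithmetic (types stubbed locally; not kernel code):

    #include <stdio.h>

    typedef unsigned char u8;
    typedef unsigned int u32;

    /* Same arithmetic as rtl_fill_dummy(): zero-pad to a 4-byte boundary.
     * The buffer must have room for up to 3 extra bytes. */
    static void fill_dummy(u8 *buf, u32 *len)
    {
            u32 fwlen = *len;
            u8 remain = (u8)(fwlen % 4);

            remain = (remain == 0) ? 0 : (4 - remain);
            while (remain > 0) {
                    buf[fwlen++] = 0;
                    remain--;
            }
            *len = fwlen;
    }

    int main(void)
    {
            u8 buf[16] = "abcdefghij";      /* 10 payload bytes */
            u32 len = 10;

            fill_dummy(buf, &len);
            printf("padded length: %u\n", len);     /* prints 12 */
            return 0;
    }

In the driver the headroom is guaranteed because the firmware buffer is vzalloc'd at a fixed 0x8000 bytes (see rtl88e_init_sw_vars further down), sized well above the images it loads.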
index 51aa1210def5f2e9e8cea9d634345ea6b3d35c36..952fdc288f0e6f0d2e74551840456e7f83160192 100644 (file)
@@ -109,7 +109,12 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
 void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
 void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
 void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
+void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate);
 int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
                   int max_size, u8 *hwinfo, int *params);
+void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen);
+void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, const u8 *buffer,
+                      u32 size);
+void rtl_fw_block_write(struct ieee80211_hw *hw, const u8 *buffer, u32 size);
 
 #endif
index 8bfe020edd3a24aaa9f8ec7c194f5b8fa7093d1f..2e6b888bd417f0aeef5202a231fbbfb9e5ab2652 100644 (file)
@@ -174,9 +174,8 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
                }
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n",
-                        rtlpci->const_support_pciaspm);
+               pr_err("switch case %#x not processed\n",
+                      rtlpci->const_support_pciaspm);
                break;
        }
 
@@ -1214,6 +1213,10 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
        mac->current_ampdu_density = 7;
        mac->current_ampdu_factor = 3;
 
+       /*Retry Limit*/
+       mac->retry_short = 7;
+       mac->retry_long = 7;
+
        /*QOS*/
        rtlpci->acm_method = EACMWAY2_SW;
 
@@ -1247,9 +1250,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
                                         &buffer_desc_dma);
 
                if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Cannot allocate TX ring (prio = %d)\n",
-                                prio);
+                       pr_err("Cannot allocate TX ring (prio = %d)\n",
+                              prio);
                        return -ENOMEM;
                }
 
@@ -1266,8 +1268,7 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
                                     sizeof(*desc) * entries, &desc_dma);
 
        if (!desc || (unsigned long)desc & 0xFF) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Cannot allocate TX ring (prio = %d)\n", prio);
+               pr_err("Cannot allocate TX ring (prio = %d)\n", prio);
                return -ENOMEM;
        }
 
@@ -1314,8 +1315,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
                                          &rtlpci->rx_ring[rxring_idx].dma);
                if (!rtlpci->rx_ring[rxring_idx].buffer_desc ||
                    (ulong)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Cannot allocate RX ring\n");
+                       pr_err("Cannot allocate RX ring\n");
                        return -ENOMEM;
                }
 
@@ -1338,8 +1338,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
                                          &rtlpci->rx_ring[rxring_idx].dma);
                if (!rtlpci->rx_ring[rxring_idx].desc ||
                    (unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Cannot allocate RX ring\n");
+                       pr_err("Cannot allocate RX ring\n");
                        return -ENOMEM;
                }
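All four ring allocations in these TX/RX init paths reject any buffer with a non-zero value in the low eight address bits, i.e. the DMA descriptor rings must be 256-byte aligned. A stand-alone sketch of the same test, with posix_memalign standing in for the kernel's DMA allocator:

    #define _POSIX_C_SOURCE 200112L
    #include <stdio.h>
    #include <stdlib.h>

    /* Any of the low eight address bits set means the ring is unusable
     * for this DMA engine. */
    static int aligned_256(const void *p)
    {
            return ((unsigned long)p & 0xFF) == 0;
    }

    int main(void)
    {
            void *p;

            if (posix_memalign(&p, 256, 4096) != 0)
                    return 1;
            printf("aligned: %d\n", aligned_256(p));        /* prints 1 */
            free(p);
            return 0;
    }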
 
@@ -1799,15 +1798,13 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
 
 static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
 {
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
        int err;
 
        _rtl_pci_init_struct(hw, pdev);
 
        err = _rtl_pci_init_trx_ring(hw);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "tx ring initialization failed\n");
+               pr_err("tx ring initialization failed\n");
                return err;
        }
 
@@ -1820,6 +1817,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+       struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
 
        int err;
 
@@ -1837,6 +1835,8 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
                         "Failed to config hardware!\n");
                return err;
        }
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
+                       &rtlmac->retry_long);
 
        rtlpriv->cfg->ops->enable_interrupt(hw);
        RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");
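Together with the retry_short/retry_long defaults of 7 seeded in _rtl_pci_init_struct() earlier in this diff, this hunk pushes the long-retry limit into hardware through the driver's ops table once hw_init() succeeds. A user-space sketch of that indirection — the hw argument is dropped, and the names and register packing are illustrative only:

    #include <stdio.h>

    typedef unsigned char u8;

    enum { HW_VAR_RETRY_LIMIT };    /* stand-in for the real variable id */

    struct hw_ops {
            void (*set_hw_reg)(int var, u8 *val);
    };

    /* Hypothetical backend: a real chip driver would fold the value into
     * its retry-limit register here. */
    static void demo_set_hw_reg(int var, u8 *val)
    {
            if (var == HW_VAR_RETRY_LIMIT)
                    printf("retry limit register <- %u\n", (unsigned)*val);
    }

    int main(void)
    {
            struct hw_ops ops = { .set_hw_reg = demo_set_hw_reg };
            u8 retry_long = 7;      /* default seeded at init */

            ops.set_hw_reg(HW_VAR_RETRY_LIMIT, &retry_long);
            return 0;
    }

The same call is added to rtl_ps_enable_nic() further down, so the limit is re-applied whenever the NIC is re-enabled out of power save.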
@@ -2174,15 +2174,15 @@ int rtl_pci_probe(struct pci_dev *pdev,
 
        err = pci_enable_device(pdev);
        if (err) {
-               RT_ASSERT(false, "%s : Cannot enable new PCI device\n",
+               WARN_ONCE(true, "%s : Cannot enable new PCI device\n",
                          pci_name(pdev));
                return err;
        }
 
        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-                       RT_ASSERT(false,
-                                 "Unable to obtain 32bit DMA for consistent allocations\n");
+                       WARN_ONCE(true,
+                                 "rtlwifi: Unable to obtain 32bit DMA for consistent allocations\n");
                        err = -ENOMEM;
                        goto fail1;
                }
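The probe path pins the device to a 32-bit DMA mask before mapping anything. For reference, the kernel's DMA_BIT_MASK() macro is essentially the following; the n == 64 branch avoids an undefined full-width shift:

    #include <stdio.h>

    /* Essentially <linux/dma-mapping.h>: a mask with the low n bits set. */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
            printf("DMA_BIT_MASK(32) = 0x%llx\n", DMA_BIT_MASK(32));
            /* prints DMA_BIT_MASK(32) = 0xffffffff */
            return 0;
    }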
@@ -2193,7 +2193,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
        hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
                                sizeof(struct rtl_priv), &rtl_ops);
        if (!hw) {
-               RT_ASSERT(false,
+               WARN_ONCE(true,
                          "%s : ieee80211 alloc failed\n", pci_name(pdev));
                err = -ENOMEM;
                goto fail1;
@@ -2219,20 +2219,10 @@ int rtl_pci_probe(struct pci_dev *pdev,
        rtlpriv->intf_ops = &rtl_pci_ops;
        rtlpriv->glb_var = &rtl_global_var;
 
-       /*
-        *init dbgp flags before all
-        *other functions, because we will
-        *use it in other funtions like
-        *RT_TRACE/RT_PRINT/RTL_PRINT_DATA
-        *you can not use these macro
-        *before this
-        */
-       rtl_dbgp_flag_init(hw);
-
        /* MEM map */
        err = pci_request_regions(pdev, KBUILD_MODNAME);
        if (err) {
-               RT_ASSERT(false, "Can't obtain PCI resources\n");
+               WARN_ONCE(true, "rtlwifi: Can't obtain PCI resources\n");
                goto fail1;
        }
 
@@ -2245,7 +2235,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
                        (unsigned long)pci_iomap(pdev,
                        rtlpriv->cfg->bar_id, pmem_len);
        if (rtlpriv->io.pci_mem_start == 0) {
-               RT_ASSERT(false, "Can't map PCI mem\n");
+               WARN_ONCE(true, "rtlwifi: Can't map PCI mem\n");
                err = -ENOMEM;
                goto fail2;
        }
@@ -2275,7 +2265,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
        rtlpriv->cfg->ops->read_eeprom_info(hw);
 
        if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
+               pr_err("Can't init_sw_vars\n");
                err = -ENODEV;
                goto fail3;
        }
@@ -2287,34 +2277,25 @@ int rtl_pci_probe(struct pci_dev *pdev,
        /* Init mac80211 sw */
        err = rtl_init_core(hw);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Can't allocate sw for mac80211\n");
+               pr_err("Can't allocate sw for mac80211\n");
                goto fail3;
        }
 
        /* Init PCI sw */
        err = rtl_pci_init(hw, pdev);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to init PCI\n");
+               pr_err("Failed to init PCI\n");
                goto fail3;
        }
 
        err = ieee80211_register_hw(hw);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Can't register mac80211 hw.\n");
+               pr_err("Can't register mac80211 hw.\n");
                err = -ENODEV;
                goto fail3;
        }
        rtlpriv->mac80211.mac80211_registered = 1;
 
-       err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
-       if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "failed to create sysfs device attributes\n");
-               goto fail3;
-       }
-
        /*init rfkill */
        rtl_init_rfkill(hw);    /* Init PCI sw */
 
@@ -2364,8 +2345,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
        wait_for_completion(&rtlpriv->firmware_loading_complete);
        clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
 
-       sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);
-
        /*ieee80211_unregister_hw will call ops_stop */
        if (rtlmac->mac80211_registered == 1) {
                ieee80211_unregister_hw(hw);
index 578b1d900bfbcd9f7a4a8f39b73a9248651e4a7c..d9039ea10ba4d4231875d67a641caf37cae15dc8 100644 (file)
@@ -271,10 +271,10 @@ struct mp_adapter {
 };
 
 struct rtl_pci_priv {
+       struct bt_coexist_info bt_coexist;
+       struct rtl_led_ctl ledctl;
        struct rtl_pci dev;
        struct mp_adapter ndis_adapter;
-       struct rtl_led_ctl ledctl;
-       struct bt_coexist_info bt_coexist;
 };
 
 #define rtl_pcipriv(hw)                (((struct rtl_pci_priv *)(rtl_priv(hw))->priv))
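bt_coexist and ledctl move to the head of rtl_pci_priv — presumably to keep them at the same offsets as in the USB counterpart struct, since the rest of this diff retargets shared code from rtl_pcipriv(hw)->ledctl and ->bt_coexist to rtlpriv->ledctl and rtlpriv->btcoexist, and that only stays safe if every interface lays the common members out identically. A toy demonstration of why member order matters when one generic pointer serves two layouts (struct contents are stand-ins):

    #include <stdio.h>
    #include <stddef.h>

    struct bt_coexist_info { int state[4]; };   /* stand-in fields */
    struct rtl_led_ctl { int leds[2]; };

    struct pci_priv {       /* common members first, as in this hunk */
            struct bt_coexist_info bt_coexist;
            struct rtl_led_ctl ledctl;
            int pci_only_state;
    };

    struct usb_priv {
            struct bt_coexist_info bt_coexist;
            struct rtl_led_ctl ledctl;
            int usb_only_state;
    };

    int main(void)
    {
            /* The leading members share offsets across both layouts. */
            printf("pci ledctl @ %zu, usb ledctl @ %zu\n",
                   offsetof(struct pci_priv, ledctl),
                   offsetof(struct usb_priv, ledctl));
            return 0;
    }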
index d0ffc4d508cff2df70606c9a88845a22884c7bc9..0d152877d9698e47e222d7509179855f3f43c0d1 100644 (file)
@@ -34,6 +34,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
 
        /*<1> reset trx ring */
        if (rtlhal->interface == INTF_PCI)
@@ -46,6 +47,8 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
        /*<2> Enable Adapter */
        if (rtlpriv->cfg->ops->hw_init(hw))
                return false;
+       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
+                       &rtlmac->retry_long);
        RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
 
        /*<3> Enable Interrupt */
@@ -150,8 +153,7 @@ static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
                break;
 
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", state_toset);
+               pr_err("switch case %#x not processed\n", state_toset);
                break;
        }
 
index ce8621a0f7aa3b0a4ad2dbd759c02cc38d08307b..951d257cd4c01721cf266f99aa71f661f5f5aa24 100644 (file)
@@ -267,8 +267,7 @@ static void *rtl_rate_alloc_sta(void *ppriv,
 
        rate_priv = kzalloc(sizeof(struct rtl_rate_priv), gfp);
        if (!rate_priv) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Unable to allocate private rc structure\n");
+               pr_err("Unable to allocate private rc structure\n");
                return NULL;
        }
 
index 6ee6bf8e7eafd86b45feca53ff742307c704821f..558c31bf5c807dda6b56539b39f3bdc3e29292f3 100644 (file)
@@ -440,7 +440,7 @@ int rtl_regd_init(struct ieee80211_hw *hw,
 
        if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
                RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
-                        "rtl: EEPROM indicates invalid contry code, world wide 13 should be used\n");
+                        "rtl: EEPROM indicates invalid country code, world wide 13 should be used\n");
 
                rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
        }
index 5360d533235944a94fe931ab76ce4e04439a77ab..21ed9ad3be7ad0c209a296e2114981742aa23799 100644 (file)
@@ -27,6 +27,7 @@
 #include "../pci.h"
 #include "../base.h"
 #include "../core.h"
+#include "../efuse.h"
 #include "reg.h"
 #include "def.h"
 #include "fw.h"
@@ -53,63 +54,6 @@ static void _rtl88e_enable_fw_download(struct ieee80211_hw *hw, bool enable)
        }
 }
 
-static void _rtl88e_fw_block_write(struct ieee80211_hw *hw,
-                                  const u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 blocksize = sizeof(u32);
-       u8 *bufferptr = (u8 *)buffer;
-       u32 *pu4BytePtr = (u32 *)buffer;
-       u32 i, offset, blockcount, remainsize;
-
-       blockcount = size / blocksize;
-       remainsize = size % blocksize;
-
-       for (i = 0; i < blockcount; i++) {
-               offset = i * blocksize;
-               rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
-                               *(pu4BytePtr + i));
-       }
-
-       if (remainsize) {
-               offset = blockcount * blocksize;
-               bufferptr += offset;
-               for (i = 0; i < remainsize; i++) {
-                       rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
-                                                offset + i), *(bufferptr + i));
-               }
-       }
-}
-
-static void _rtl88e_fw_page_write(struct ieee80211_hw *hw,
-                                 u32 page, const u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 value8;
-       u8 u8page = (u8) (page & 0x07);
-
-       value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
-
-       rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
-       _rtl88e_fw_block_write(hw, buffer, size);
-}
-
-static void _rtl88e_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
-{
-       u32 fwlen = *pfwlen;
-       u8 remain = (u8) (fwlen % 4);
-
-       remain = (remain == 0) ? 0 : (4 - remain);
-
-       while (remain > 0) {
-               pfwbuf[fwlen] = 0;
-               fwlen++;
-               remain--;
-       }
-
-       *pfwlen = fwlen;
-}
-
 static void _rtl88e_write_fw(struct ieee80211_hw *hw,
                             enum version_8188e version, u8 *buffer, u32 size)
 {
@@ -120,27 +64,24 @@ static void _rtl88e_write_fw(struct ieee80211_hw *hw,
 
        RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size);
 
-       _rtl88e_fill_dummy(bufferptr, &size);
+       rtl_fill_dummy(bufferptr, &size);
 
        pagenums = size / FW_8192C_PAGE_SIZE;
        remainsize = size % FW_8192C_PAGE_SIZE;
 
-       if (pagenums > 8) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Page numbers should not greater then 8\n");
-       }
+       if (pagenums > 8)
+               pr_err("Page numbers should not be greater than 8\n");
 
        for (page = 0; page < pagenums; page++) {
                offset = page * FW_8192C_PAGE_SIZE;
-               _rtl88e_fw_page_write(hw, page, (bufferptr + offset),
-                                     FW_8192C_PAGE_SIZE);
+               rtl_fw_page_write(hw, page, (bufferptr + offset),
+                                 FW_8192C_PAGE_SIZE);
        }
 
        if (remainsize) {
                offset = pagenums * FW_8192C_PAGE_SIZE;
                page = pagenums;
-               _rtl88e_fw_page_write(hw, page, (bufferptr + offset),
-                                     remainsize);
+               rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize);
        }
 }
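_rtl88e_write_fw() now drives the shared helpers: pad the image to 4 bytes, stream whole FW_8192C_PAGE_SIZE pages, then issue one short write for the tail. The loop skeleton as a stand-alone sketch (the page size value is a stand-in):

    #include <stdio.h>

    #define FW_PAGE_SIZE 4096u      /* stand-in for FW_8192C_PAGE_SIZE */

    /* Mirrors _rtl88e_write_fw(): whole pages first, then the remainder. */
    static void write_fw(unsigned int size)
    {
            unsigned int pagenums = size / FW_PAGE_SIZE;
            unsigned int remainsize = size % FW_PAGE_SIZE;
            unsigned int page;

            for (page = 0; page < pagenums; page++)
                    printf("page %u: %u bytes\n", page, FW_PAGE_SIZE);
            if (remainsize)
                    printf("page %u: %u bytes (tail)\n", pagenums, remainsize);
    }

    int main(void)
    {
            write_fw(10000);        /* 2 full pages + a 1808-byte tail */
            return 0;
    }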
 
@@ -157,15 +98,10 @@ static int _rtl88e_fw_free_to_go(struct ieee80211_hw *hw)
                 (!(value32 & FWDL_CHKSUM_RPT)));
 
        if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "chksum report faill ! REG_MCUFWDL:0x%08x .\n",
-                         value32);
+               pr_err("chksum report fail! REG_MCUFWDL:0x%08x.\n",
+                      value32);
                goto exit;
        }
-
-       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
-
        value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
        value32 |= MCUFWDL_RDY;
        value32 &= ~WINTINI_RDY;
@@ -176,20 +112,15 @@ static int _rtl88e_fw_free_to_go(struct ieee80211_hw *hw)
 
        do {
                value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
-               if (value32 & WINTINI_RDY) {
-                       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                                "Polling FW ready success!! REG_MCUFWDL:0x%08x.\n",
-                                 value32);
-                       err = 0;
-                       goto exit;
-               }
+               if (value32 & WINTINI_RDY)
+                       return 0;
 
                udelay(FW_8192C_POLLING_DELAY);
 
        } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
 
-       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32);
+               pr_err("Polling FW ready fail!! REG_MCUFWDL:0x%08x.\n",
+              value32);
 
 exit:
        return err;
@@ -234,13 +165,8 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
        _rtl88e_enable_fw_download(hw, false);
 
        err = _rtl88e_fw_free_to_go(hw);
-       if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Firmware is not ready to run!\n");
-       } else {
-               RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
-                        "Firmware is ready to run!\n");
-       }
+       if (err)
+               pr_err("Firmware is not ready to run!\n");
 
        return 0;
 }
@@ -309,8 +235,7 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
        while (!write_sucess) {
                wait_writeh2c_limit--;
                if (wait_writeh2c_limit == 0) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Write H2C fail because no trigger for FW INT!\n");
+                       pr_err("Write H2C fail because no trigger for FW INT!\n");
                        break;
                }
 
@@ -434,8 +359,8 @@ void rtl88e_fill_h2c_cmd(struct ieee80211_hw *hw,
        u32 tmp_cmdbuf[2];
 
        if (!rtlhal->fw_ready) {
-               RT_ASSERT(false,
-                         "return H2C cmd because of Fw download fail!!!\n");
+               WARN_ONCE(true,
+                         "rtl8188ee: error H2C cmd because of Fw download fail!!!\n");
                return;
        }
 
index 37d6efc3d240d267a179b04be3ea0207a64fb3cb..0ba26d27d11cb21ab3798c8ebc9507442c036610 100644 (file)
@@ -358,8 +358,7 @@ void rtl88ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
        case HAL_DEF_WOWLAN:
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", variable);
+               pr_err("switch case %#x not processed\n", variable);
                break;
        }
 }
@@ -572,9 +571,8 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                acm_ctrl &= (~ACMHW_VOQEN);
                                break;
                        default:
-                               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                        "switch case %#x not processed\n",
-                                        e_aci);
+                               pr_err("switch case %#x not processed\n",
+                                      e_aci);
                                break;
                        }
                }
@@ -737,8 +735,7 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                    2, array);
                break; }
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", variable);
+               pr_err("switch case %#x not processed\n", variable);
                break;
        }
 }
@@ -759,9 +756,8 @@ static bool _rtl88ee_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
                        break;
 
                if (count > POLLING_LLT_THRESHOLD) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Failed to polling write LLT done at address %d!\n",
-                                address);
+                       pr_err("Failed to poll write LLT done at address %d!\n",
+                              address);
                        status = false;
                        break;
                }
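The LLT write path polls a status register under a hard iteration bound and now reports the timeout with pr_err() rather than the per-driver trace macro. The general shape of such a bounded poll, sketched in user space (the threshold is illustrative):

    #include <stdio.h>

    #define POLL_THRESHOLD 20       /* illustrative bound */

    /* Retry until the device reports done or the budget runs out,
     * then fail loudly exactly once. */
    static int poll_done(int (*ready)(void))
    {
            int count = 0;

            do {
                    if (ready())
                            return 1;
            } while (++count <= POLL_THRESHOLD);

            fprintf(stderr, "polling timed out\n");
            return 0;
    }

    static int never_ready(void) { return 0; }

    int main(void)
    {
            poll_done(never_ready);         /* prints the timeout message */
            return 0;
    }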
@@ -821,19 +817,18 @@ static bool _rtl88ee_llt_table_init(struct ieee80211_hw *hw)
 static void _rtl88ee_gen_refresh_led_state(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+       struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
 
        if (rtlpriv->rtlhal.up_first_time)
                return;
 
        if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
-               rtl88ee_sw_led_on(hw, pLed0);
+               rtl88ee_sw_led_on(hw, pled0);
        else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
-               rtl88ee_sw_led_on(hw, pLed0);
+               rtl88ee_sw_led_on(hw, pled0);
        else
-               rtl88ee_sw_led_off(hw, pLed0);
+               rtl88ee_sw_led_off(hw, pled0);
 }
 
 static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
@@ -1096,7 +1091,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
 
        rtstatus = _rtl88ee_init_mac(hw);
        if (rtstatus != true) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+               pr_info("Init MAC failed\n");
                err = 1;
                goto exit;
        }
@@ -1252,8 +1247,7 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
                         "Set Network type to AP!\n");
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Network type %d not support!\n", type);
+               pr_err("Network type %d not supported!\n", type);
                return 1;
                break;
        }
@@ -1352,7 +1346,7 @@ void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci)
                rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
                break;
        default:
-               RT_ASSERT(false, "invalid aci: %d !\n", aci);
+               WARN_ONCE(true, "rtl8188ee: invalid aci: %d !\n", aci);
                break;
        }
 }
@@ -1936,14 +1930,13 @@ exit:
 static void _rtl88ee_hal_customized_behavior(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
-       pcipriv->ledctl.led_opendrain = true;
+       rtlpriv->ledctl.led_opendrain = true;
 
        switch (rtlhal->oem_id) {
        case RT_CID_819X_HP:
-               pcipriv->ledctl.led_opendrain = true;
+               rtlpriv->ledctl.led_opendrain = true;
                break;
        case RT_CID_819X_LENOVO:
        case RT_CID_DEFAULT:
@@ -1987,7 +1980,7 @@ void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw)
                rtlefuse->autoload_failflag = false;
                _rtl88ee_read_adapter_info(hw);
        } else {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+               pr_err("Autoload ERR!!\n");
        }
        _rtl88ee_hal_customized_behavior(hw);
 }
@@ -2354,8 +2347,8 @@ void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index,
                        enc_algo = CAM_AES;
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "switch case %#x not processed\n", enc_algo);
+                       pr_err("switch case %#x not processed\n",
+                              enc_algo);
                        enc_algo = CAM_TKIP;
                        break;
                }
@@ -2373,9 +2366,7 @@ void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index,
                                        entry_id =
                                          rtl_cam_get_free_entry(hw, p_macaddr);
                                        if (entry_id >=  TOTAL_CAM_ENTRY) {
-                                               RT_TRACE(rtlpriv, COMP_SEC,
-                                                        DBG_EMERG,
-                                                        "Can not find free hw security cam entry\n");
+                                               pr_err("Can not find free hw security cam entry\n");
                                                return;
                                        }
                                } else {
index 6ea7fd7bb527c511a88ae0ec280dc92ae689f423..df3e214460db7e7fed02f8cf372addef1817ba4d 100644 (file)
@@ -67,7 +67,6 @@ void rtl88ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
 void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        u8 ledcfg;
 
        RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
@@ -79,7 +78,7 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
        case LED_PIN_LED0:
                ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
                ledcfg &= 0xf0;
-               if (pcipriv->ledctl.led_opendrain) {
+               if (rtlpriv->ledctl.led_opendrain) {
                        rtl_write_byte(rtlpriv, REG_LEDCFG2,
                                       (ledcfg | BIT(3) | BIT(5) | BIT(6)));
                        ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
@@ -104,24 +103,26 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 
 void rtl88ee_init_sw_leds(struct ieee80211_hw *hw)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-       _rtl88ee_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0);
-       _rtl88ee_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       _rtl88ee_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+       _rtl88ee_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
 }
 
 static void _rtl88ee_sw_led_control(struct ieee80211_hw *hw,
                                    enum led_ctl_mode ledaction)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-       struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
+
        switch (ledaction) {
        case LED_CTL_POWER_ON:
        case LED_CTL_LINK:
        case LED_CTL_NO_LINK:
-               rtl88ee_sw_led_on(hw, pLed0);
+               rtl88ee_sw_led_on(hw, pled0);
                break;
        case LED_CTL_POWER_OFF:
-               rtl88ee_sw_led_off(hw, pLed0);
+               rtl88ee_sw_led_off(hw, pled0);
                break;
        default:
                break;
index fffaa92eda812fc91f03cfb2b429e11efee4006d..14a2560626141bfaf05d8fd3b06ac30899dad8f4 100644 (file)
@@ -176,7 +176,7 @@ static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw,
        offset &= 0xff;
        newoffset = offset;
        if (RT_CANNOT_IO(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
+               pr_err("return all one\n");
                return 0xFFFFFFFF;
        }
        tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
@@ -220,7 +220,7 @@ static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw,
        struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
 
        if (RT_CANNOT_IO(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
+               pr_err("stop\n");
                return;
        }
        offset &= 0xff;
@@ -373,7 +373,7 @@ static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw)
 
        rtstatus = phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG);
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
+               pr_err("Write BB Reg Fail!!\n");
                return false;
        }
 
@@ -383,13 +383,13 @@ static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw)
                  phy_config_bb_with_pghdr(hw, BASEBAND_CONFIG_PHY_REG);
        }
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
+               pr_err("BB_PG Reg Fail!!\n");
                return false;
        }
        rtstatus =
          phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB);
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+               pr_err("AGC Table Fail\n");
                return false;
        }
        rtlphy->cck_high_power =
@@ -1095,8 +1095,7 @@ void rtl88e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
                                                      (u8 *)&iotype);
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Unknown Scan Backup operation.\n");
+                       pr_err("Unknown Scan Backup operation.\n");
                        break;
                }
        }
@@ -1137,8 +1136,8 @@ void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
                rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_err("unknown bandwidth: %#X\n",
+                      rtlphy->current_chan_bw);
                break;
        }
 
@@ -1162,8 +1161,8 @@ void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
                               HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_err("unknown bandwidth: %#X\n",
+                      rtlphy->current_chan_bw);
                break;
        }
        rtl88e_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
@@ -1231,8 +1230,8 @@ u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw)
                return 0;
        if (rtlphy->set_bwmode_inprogress)
                return 0;
-       RT_ASSERT((rtlphy->current_channel <= 14),
-                 "WIRELESS_MODE_G but channel>14");
+       WARN_ONCE((rtlphy->current_channel > 14),
+                 "rtl8188ee: WIRELESS_MODE_G but channel>14");
        rtlphy->sw_chnl_inprogress = true;
        rtlphy->sw_chnl_stage = 0;
        rtlphy->sw_chnl_step = 0;
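Every RT_ASSERT() conversion in this series inverts the predicate: RT_ASSERT(exp, ...) complained when exp was false, while WARN_ONCE(condition, ...) fires when condition is true — hence `current_channel <= 14` above becomes `> 14`, and `channel >= 1 && channel <= 14` becomes `channel < 1 || channel > 14` in the next hunk. A minimal stand-in with the same polarity (the once-only bookkeeping is dropped for brevity):

    #include <stdio.h>

    /* Warns when cond is TRUE, like the kernel's WARN_ONCE(); evaluates
     * to the condition so it can also be used inside an if (). */
    #define WARN_ONCE(cond, ...) \
            ((cond) ? (fprintf(stderr, __VA_ARGS__), 1) : 0)

    int main(void)
    {
            int channel = 15;

            WARN_ONCE(channel < 1 || channel > 14,
                      "rtl8188ee: illegal channel for Zebra: %d\n", channel);
            return 0;
    }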
@@ -1280,8 +1279,8 @@ static bool _rtl88e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
 
        rfdependcmdcnt = 0;
 
-       RT_ASSERT((channel >= 1 && channel <= 14),
-                 "illegal channel for Zebra: %d\n", channel);
+       WARN_ONCE((channel < 1 || channel > 14),
+                 "rtl8188ee: illegal channel for Zebra: %d\n", channel);
 
        _rtl88e_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
                                         MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
@@ -1303,8 +1302,8 @@ static bool _rtl88e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
                        currentcmd = &postcommoncmd[*step];
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Invalid 'stage' = %d, Check it!\n", *stage);
+                       pr_err("Invalid 'stage' = %d, Check it!\n",
+                              *stage);
                        return true;
                }
 
@@ -1367,7 +1366,7 @@ static bool _rtl88e_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
        struct swchnlcmd *pcmd;
 
        if (cmdtable == NULL) {
-               RT_ASSERT(false, "cmdtable cannot be NULL.\n");
+               WARN_ONCE(true, "rtl8188ee: cmdtable cannot be NULL.\n");
                return false;
        }
 
index 26ac4c2903c7373e299be867ce867f9d842d17d8..30798b12a363cb40a6c6f92b5b3256d38413b50f 100644 (file)
@@ -51,8 +51,7 @@ void rtl88e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
                              rtlphy->rfreg_chnlval[0]);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", bandwidth);
+               pr_err("unknown bandwidth: %#X\n", bandwidth);
                break;
        }
 }
index f361808def47af36272213111413748546783b76..7661cfa5303209dda82d67b29bca3d9d046427cc 100644 (file)
@@ -131,8 +131,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
        rtlpci->irq_mask[1] = (u32) (IMR_RXFOVW | 0);
        rtlpci->sys_irq_mask = (u32) (HSIMR_PDN_INT_EN | HSIMR_RON_INT_EN);
 
-       /* for debug level */
-       rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
        /* for LPS & IPS */
        rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
@@ -165,8 +163,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
        /* for firmware buf */
        rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
        if (!rtlpriv->rtlhal.pfirmware) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Can't alloc buffer for fw.\n");
+               pr_info("Can't alloc buffer for fw.\n");
                return 1;
        }
 
@@ -177,8 +174,7 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
                                      rtlpriv->io.dev, GFP_KERNEL, hw,
                                      rtl_fw_cb);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Failed to request firmware!\n");
+               pr_info("Failed to request firmware!\n");
                return 1;
        }
 
@@ -278,7 +274,8 @@ static struct rtl_mod_params rtl88ee_mod_params = {
        .swctrl_lps = false,
        .fwctrl_lps = false,
        .msi_support = true,
-       .debug = DBG_EMERG,
+       .debug_level = 0,
+       .debug_mask = 0,
 };
 
 static const struct rtl_hal_cfg rtl88ee_hal_cfg = {
@@ -394,7 +391,8 @@ MODULE_DESCRIPTION("Realtek 8188E 802.11n PCI wireless");
 MODULE_FIRMWARE("rtlwifi/rtl8188efw.bin");
 
 module_param_named(swenc, rtl88ee_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl88ee_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl88ee_mod_params.debug_level, int, 0644);
+module_param_named(debug_mask, rtl88ee_mod_params.debug_mask, ullong, 0644);
 module_param_named(ips, rtl88ee_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl88ee_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl88ee_mod_params.fwctrl_lps, bool, 0444);
@@ -406,7 +404,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
 MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
 MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
 MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
 MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
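The single load-time `debug` knob becomes two runtime-writable parameters: `debug_level` (verbosity 0-5) and `debug_mask` (a 64-bit per-component bitmap); note the permission change from 0444 to 0644. A plausible sketch of how a two-knob filter gates a message — the gating expression and names here are assumptions for illustration, not the driver's exact code:

    #include <stdio.h>

    typedef unsigned long long u64;

    static int debug_level = 2;             /* module parameter stand-ins */
    static u64 debug_mask = 1ULL << 3;      /* pretend bit 3 = firmware */

    /* A message passes only if its verbosity fits the level AND its
     * component bit is enabled in the mask. */
    static void dbg_print(int comp_bit, int level, const char *msg)
    {
            if (level <= debug_level && (debug_mask & (1ULL << comp_bit)))
                    printf("%s", msg);
    }

    int main(void)
    {
            dbg_print(3, 1, "firmware trace shown\n");
            dbg_print(5, 1, "phy trace suppressed\n");
            return 0;
    }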
index 3e3b88664883cb0b88b009dc04bc6bc6244d32e3..09c908d4cf91f710e7f6c4c22c04d52baed30d5f 100644 (file)
@@ -760,7 +760,7 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
                        SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val);
                        break;
                default:
-                       RT_ASSERT(false, "ERR txdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8188ee: ERR txdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
@@ -779,7 +779,7 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
                        SET_RX_DESC_EOR(pdesc, 1);
                        break;
                default:
-                       RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8188ee: ERR rxdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
@@ -799,7 +799,7 @@ u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name)
                        ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
                        break;
                default:
-                       RT_ASSERT(false, "ERR txdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8188ee: ERR txdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
@@ -815,7 +815,7 @@ u32 rtl88ee_get_desc(u8 *pdesc, bool istx, u8 desc_name)
                        ret = GET_RX_DESC_BUFF_ADDR(pdesc);
                        break;
                default:
-                       RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8188ee: ERR rxdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
index bdc132bef822b4b5a7e65b25663d18b65ca00615..0b5a06ffa482660cba47c3e9b32bcdd15e47da7c 100644 (file)
@@ -638,7 +638,6 @@ EXPORT_SYMBOL(rtl92c_dm_init_edca_turbo);
 static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 
        static u64 last_txok_cnt;
@@ -651,20 +650,20 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
        u32 edca_be_dl = 0x5ea42b;
        bool bt_change_edca = false;
 
-       if ((last_bt_edca_ul != rtlpcipriv->bt_coexist.bt_edca_ul) ||
-           (last_bt_edca_dl != rtlpcipriv->bt_coexist.bt_edca_dl)) {
+       if ((last_bt_edca_ul != rtlpriv->btcoexist.bt_edca_ul) ||
+           (last_bt_edca_dl != rtlpriv->btcoexist.bt_edca_dl)) {
                rtlpriv->dm.current_turbo_edca = false;
-               last_bt_edca_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
-               last_bt_edca_dl = rtlpcipriv->bt_coexist.bt_edca_dl;
+               last_bt_edca_ul = rtlpriv->btcoexist.bt_edca_ul;
+               last_bt_edca_dl = rtlpriv->btcoexist.bt_edca_dl;
        }
 
-       if (rtlpcipriv->bt_coexist.bt_edca_ul != 0) {
-               edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_ul;
+       if (rtlpriv->btcoexist.bt_edca_ul != 0) {
+               edca_be_ul = rtlpriv->btcoexist.bt_edca_ul;
                bt_change_edca = true;
        }
 
-       if (rtlpcipriv->bt_coexist.bt_edca_dl != 0) {
-               edca_be_ul = rtlpcipriv->bt_coexist.bt_edca_dl;
+       if (rtlpriv->btcoexist.bt_edca_dl != 0) {
+               edca_be_ul = rtlpriv->btcoexist.bt_edca_dl;
                bt_change_edca = true;
        }
 
@@ -673,7 +672,7 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
                return;
        }
 
-       if ((!mac->ht_enable) && (!rtlpcipriv->bt_coexist.bt_coexistence)) {
+       if ((!mac->ht_enable) && (!rtlpriv->btcoexist.bt_coexistence)) {
                if (!(edca_be_ul & 0xffff0000))
                        edca_be_ul |= 0x005e0000;
 
@@ -1471,7 +1470,6 @@ EXPORT_SYMBOL(rtl92c_dm_watchdog);
 u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
        long undec_sm_pwdb;
        u8 curr_bt_rssi_state = 0x00;
 
@@ -1510,8 +1508,8 @@ u8 rtl92c_bt_rssi_state_change(struct ieee80211_hw *hw)
        else
                curr_bt_rssi_state &= (~BT_RSSI_STATE_BG_EDCA_LOW);
 
-       if (curr_bt_rssi_state != rtlpcipriv->bt_coexist.bt_rssi_state) {
-               rtlpcipriv->bt_coexist.bt_rssi_state = curr_bt_rssi_state;
+       if (curr_bt_rssi_state != rtlpriv->btcoexist.bt_rssi_state) {
+               rtlpriv->btcoexist.bt_rssi_state = curr_bt_rssi_state;
                return true;
        } else {
                return false;
@@ -1522,7 +1520,6 @@ EXPORT_SYMBOL(rtl92c_bt_rssi_state_change);
 static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
 
        u32 polling, ratio_tx, ratio_pri;
        u32 bt_tx, bt_pri;
@@ -1542,14 +1539,14 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
                return false;
 
        bt_state &= BIT_OFFSET_LEN_MASK_32(0, 1);
-       if (bt_state != rtlpcipriv->bt_coexist.bt_cur_state) {
-               rtlpcipriv->bt_coexist.bt_cur_state = bt_state;
+       if (bt_state != rtlpriv->btcoexist.bt_cur_state) {
+               rtlpriv->btcoexist.bt_cur_state = bt_state;
 
-               if (rtlpcipriv->bt_coexist.reg_bt_sco == 3) {
-                       rtlpcipriv->bt_coexist.bt_service = BT_IDLE;
+               if (rtlpriv->btcoexist.reg_bt_sco == 3) {
+                       rtlpriv->btcoexist.bt_service = BT_IDLE;
 
                        bt_state = bt_state |
-                         ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
+                         ((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
                          0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
                          BIT_OFFSET_LEN_MASK_32(2, 1);
                        rtl_write_byte(rtlpriv, 0x4fd, bt_state);
@@ -1559,10 +1556,10 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
 
        ratio_tx = bt_tx * 1000 / polling;
        ratio_pri = bt_pri * 1000 / polling;
-       rtlpcipriv->bt_coexist.ratio_tx = ratio_tx;
-       rtlpcipriv->bt_coexist.ratio_pri = ratio_pri;
+       rtlpriv->btcoexist.ratio_tx = ratio_tx;
+       rtlpriv->btcoexist.ratio_pri = ratio_pri;
 
-       if (bt_state && rtlpcipriv->bt_coexist.reg_bt_sco == 3) {
+       if (bt_state && rtlpriv->btcoexist.reg_bt_sco == 3) {
 
                if ((ratio_tx < 30)  && (ratio_pri < 30))
                        cur_service_type = BT_IDLE;
@@ -1577,17 +1574,17 @@ static bool rtl92c_bt_state_change(struct ieee80211_hw *hw)
                else
                        cur_service_type = BT_OTHER_ACTION;
 
-               if (cur_service_type != rtlpcipriv->bt_coexist.bt_service) {
-                       rtlpcipriv->bt_coexist.bt_service = cur_service_type;
+               if (cur_service_type != rtlpriv->btcoexist.bt_service) {
+                       rtlpriv->btcoexist.bt_service = cur_service_type;
                        bt_state = bt_state |
-                          ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
+                          ((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
                           0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
-                          ((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) ?
+                          ((rtlpriv->btcoexist.bt_service != BT_IDLE) ?
                           0 : BIT_OFFSET_LEN_MASK_32(2, 1));
 
                        /* Add interrupt migration when bt is not ini
                         * idle state (no traffic). */
-                       if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
+                       if (rtlpriv->btcoexist.bt_service != BT_IDLE) {
                                rtl_write_word(rtlpriv, 0x504, 0x0ccc);
                                rtl_write_byte(rtlpriv, 0x506, 0x54);
                                rtl_write_byte(rtlpriv, 0x507, 0x54);
@@ -1626,80 +1623,77 @@ static bool rtl92c_bt_wifi_connect_change(struct ieee80211_hw *hw)
 static void rtl92c_bt_set_normal(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
-
-
-       if (rtlpcipriv->bt_coexist.bt_service == BT_OTHERBUSY) {
-               rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea72b;
-               rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea72b;
-       } else if (rtlpcipriv->bt_coexist.bt_service == BT_BUSY) {
-               rtlpcipriv->bt_coexist.bt_edca_ul = 0x5eb82f;
-               rtlpcipriv->bt_coexist.bt_edca_dl = 0x5eb82f;
-       } else if (rtlpcipriv->bt_coexist.bt_service == BT_SCO) {
-               if (rtlpcipriv->bt_coexist.ratio_tx > 160) {
-                       rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea72f;
-                       rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea72f;
+
+       if (rtlpriv->btcoexist.bt_service == BT_OTHERBUSY) {
+               rtlpriv->btcoexist.bt_edca_ul = 0x5ea72b;
+               rtlpriv->btcoexist.bt_edca_dl = 0x5ea72b;
+       } else if (rtlpriv->btcoexist.bt_service == BT_BUSY) {
+               rtlpriv->btcoexist.bt_edca_ul = 0x5eb82f;
+               rtlpriv->btcoexist.bt_edca_dl = 0x5eb82f;
+       } else if (rtlpriv->btcoexist.bt_service == BT_SCO) {
+               if (rtlpriv->btcoexist.ratio_tx > 160) {
+                       rtlpriv->btcoexist.bt_edca_ul = 0x5ea72f;
+                       rtlpriv->btcoexist.bt_edca_dl = 0x5ea72f;
                } else {
-                       rtlpcipriv->bt_coexist.bt_edca_ul = 0x5ea32b;
-                       rtlpcipriv->bt_coexist.bt_edca_dl = 0x5ea42b;
+                       rtlpriv->btcoexist.bt_edca_ul = 0x5ea32b;
+                       rtlpriv->btcoexist.bt_edca_dl = 0x5ea42b;
                }
        } else {
-               rtlpcipriv->bt_coexist.bt_edca_ul = 0;
-               rtlpcipriv->bt_coexist.bt_edca_dl = 0;
+               rtlpriv->btcoexist.bt_edca_ul = 0;
+               rtlpriv->btcoexist.bt_edca_dl = 0;
        }
 
-       if ((rtlpcipriv->bt_coexist.bt_service != BT_IDLE) &&
-            (rtlpriv->mac80211.mode == WIRELESS_MODE_G ||
+       if ((rtlpriv->btcoexist.bt_service != BT_IDLE) &&
+           (rtlpriv->mac80211.mode == WIRELESS_MODE_G ||
             (rtlpriv->mac80211.mode == (WIRELESS_MODE_G | WIRELESS_MODE_B))) &&
-            (rtlpcipriv->bt_coexist.bt_rssi_state &
+           (rtlpriv->btcoexist.bt_rssi_state &
             BT_RSSI_STATE_BG_EDCA_LOW)) {
-               rtlpcipriv->bt_coexist.bt_edca_ul = 0x5eb82b;
-               rtlpcipriv->bt_coexist.bt_edca_dl = 0x5eb82b;
+               rtlpriv->btcoexist.bt_edca_ul = 0x5eb82b;
+               rtlpriv->btcoexist.bt_edca_dl = 0x5eb82b;
        }
 }
 
 static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
 
 
        /* Only enable HW BT coexist when BT in "Busy" state. */
        if (rtlpriv->mac80211.vendor == PEER_CISCO &&
-           rtlpcipriv->bt_coexist.bt_service == BT_OTHER_ACTION) {
+           rtlpriv->btcoexist.bt_service == BT_OTHER_ACTION) {
                rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
        } else {
-               if ((rtlpcipriv->bt_coexist.bt_service == BT_BUSY) &&
-                   (rtlpcipriv->bt_coexist.bt_rssi_state &
+               if ((rtlpriv->btcoexist.bt_service == BT_BUSY) &&
+                   (rtlpriv->btcoexist.bt_rssi_state &
                     BT_RSSI_STATE_NORMAL_POWER)) {
                        rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
-               } else if ((rtlpcipriv->bt_coexist.bt_service ==
+               } else if ((rtlpriv->btcoexist.bt_service ==
                            BT_OTHER_ACTION) && (rtlpriv->mac80211.mode <
                            WIRELESS_MODE_N_24G) &&
-                           (rtlpcipriv->bt_coexist.bt_rssi_state &
+                           (rtlpriv->btcoexist.bt_rssi_state &
                            BT_RSSI_STATE_SPECIAL_LOW)) {
                        rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
-               } else if (rtlpcipriv->bt_coexist.bt_service == BT_PAN) {
+               } else if (rtlpriv->btcoexist.bt_service == BT_PAN) {
                        rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
                } else {
                        rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
                }
        }
 
-       if (rtlpcipriv->bt_coexist.bt_service == BT_PAN)
+       if (rtlpriv->btcoexist.bt_service == BT_PAN)
                rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x10100);
        else
                rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x0);
 
-       if (rtlpcipriv->bt_coexist.bt_rssi_state &
+       if (rtlpriv->btcoexist.bt_rssi_state &
            BT_RSSI_STATE_NORMAL_POWER) {
                rtl92c_bt_set_normal(hw);
        } else {
-               rtlpcipriv->bt_coexist.bt_edca_ul = 0;
-               rtlpcipriv->bt_coexist.bt_edca_dl = 0;
+               rtlpriv->btcoexist.bt_edca_ul = 0;
+               rtlpriv->btcoexist.bt_edca_dl = 0;
        }
 
-       if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
+       if (rtlpriv->btcoexist.bt_service != BT_IDLE) {
                rtlpriv->cfg->ops->set_rfreg(hw,
                                 RF90_PATH_A,
                                 0x1e,
@@ -1707,12 +1701,12 @@ static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte)
        } else {
                rtlpriv->cfg->ops->set_rfreg(hw,
                     RF90_PATH_A, 0x1e, 0xf0,
-                    rtlpcipriv->bt_coexist.bt_rfreg_origin_1e);
+                    rtlpriv->btcoexist.bt_rfreg_origin_1e);
        }
 
        if (!rtlpriv->dm.dynamic_txpower_enable) {
-               if (rtlpcipriv->bt_coexist.bt_service != BT_IDLE) {
-                       if (rtlpcipriv->bt_coexist.bt_rssi_state &
+               if (rtlpriv->btcoexist.bt_service != BT_IDLE) {
+                       if (rtlpriv->btcoexist.bt_rssi_state &
                                BT_RSSI_STATE_TXPOWER_LOW) {
                                rtlpriv->dm.dynamic_txhighpower_lvl =
                                                        TXHIGHPWRLEVEL_BT2;
@@ -1732,37 +1726,34 @@ static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte)
 static void rtl92c_check_bt_change(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        u8 tmp1byte = 0;
 
        if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version) &&
-           rtlpcipriv->bt_coexist.bt_coexistence)
+           rtlpriv->btcoexist.bt_coexistence)
                tmp1byte |= BIT(5);
-       if (rtlpcipriv->bt_coexist.bt_cur_state) {
-               if (rtlpcipriv->bt_coexist.bt_ant_isolation)
+       if (rtlpriv->btcoexist.bt_cur_state) {
+               if (rtlpriv->btcoexist.bt_ant_isolation)
                        rtl92c_bt_ant_isolation(hw, tmp1byte);
        } else {
                rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, tmp1byte);
                rtlpriv->cfg->ops->set_rfreg(hw, RF90_PATH_A, 0x1e, 0xf0,
-                               rtlpcipriv->bt_coexist.bt_rfreg_origin_1e);
+                               rtlpriv->btcoexist.bt_rfreg_origin_1e);
 
-               rtlpcipriv->bt_coexist.bt_edca_ul = 0;
-               rtlpcipriv->bt_coexist.bt_edca_dl = 0;
+               rtlpriv->btcoexist.bt_edca_ul = 0;
+               rtlpriv->btcoexist.bt_edca_dl = 0;
        }
 }
 
 void rtl92c_dm_bt_coexist(struct ieee80211_hw *hw)
 {
-       struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
-
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
        bool wifi_connect_change;
        bool bt_state_change;
        bool rssi_state_change;
 
-       if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
-            (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) {
-
+       if ((rtlpriv->btcoexist.bt_coexistence) &&
+           (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4)) {
                wifi_connect_change = rtl92c_bt_wifi_connect_change(hw);
                bt_state_change = rtl92c_bt_state_change(hw);
                rssi_state_change = rtl92c_bt_rssi_state_change(hw);
index 7d152466152b8b799ac97bd7e4fb697c7268a19a..c7a77467b20e3401023f09c63334043179c932a1 100644 (file)
@@ -27,6 +27,7 @@
 #include "../pci.h"
 #include "../base.h"
 #include "../core.h"
+#include "../efuse.h"
 #include "../rtl8192ce/reg.h"
 #include "../rtl8192ce/def.h"
 #include "fw_common.h"
@@ -68,63 +69,6 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
        }
 }
 
-static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
-                                  const u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 blocksize = sizeof(u32);
-       u8 *bufferptr = (u8 *)buffer;
-       u32 *pu4byteptr = (u32 *)buffer;
-       u32 i, offset, blockcount, remainsize;
-
-       blockcount = size / blocksize;
-       remainsize = size % blocksize;
-
-       for (i = 0; i < blockcount; i++) {
-               offset = i * blocksize;
-               rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
-                               *(pu4byteptr + i));
-       }
-
-       if (remainsize) {
-               offset = blockcount * blocksize;
-               bufferptr += offset;
-               for (i = 0; i < remainsize; i++) {
-                       rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
-                                                offset + i), *(bufferptr + i));
-               }
-       }
-}
-
-static void _rtl92c_fw_page_write(struct ieee80211_hw *hw,
-                                 u32 page, const u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 value8;
-       u8 u8page = (u8) (page & 0x07);
-
-       value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
-
-       rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
-       _rtl92c_fw_block_write(hw, buffer, size);
-}
-
-static void _rtl92c_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
-{
-       u32 fwlen = *pfwlen;
-       u8 remain = (u8) (fwlen % 4);
-
-       remain = (remain == 0) ? 0 : (4 - remain);
-
-       while (remain > 0) {
-               pfwbuf[fwlen] = 0;
-               fwlen++;
-               remain--;
-       }
-
-       *pfwlen = fwlen;
-}
-
 static void _rtl92c_write_fw(struct ieee80211_hw *hw,
                             enum version_8192c version, u8 *buffer, u32 size)
 {
@@ -140,30 +84,28 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
                u32 page, offset;
 
                if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE)
-                       _rtl92c_fill_dummy(bufferptr, &size);
+                       rtl_fill_dummy(bufferptr, &size);
 
                pageNums = size / FW_8192C_PAGE_SIZE;
                remainsize = size % FW_8192C_PAGE_SIZE;
 
-               if (pageNums > 4) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Page numbers should not greater then 4\n");
-               }
+               if (pageNums > 4)
+                       pr_err("Page numbers should not be greater than 4\n");
 
                for (page = 0; page < pageNums; page++) {
                        offset = page * FW_8192C_PAGE_SIZE;
-                       _rtl92c_fw_page_write(hw, page, (bufferptr + offset),
-                                             FW_8192C_PAGE_SIZE);
+                       rtl_fw_page_write(hw, page, (bufferptr + offset),
+                                         FW_8192C_PAGE_SIZE);
                }
 
                if (remainsize) {
                        offset = pageNums * FW_8192C_PAGE_SIZE;
                        page = pageNums;
-                       _rtl92c_fw_page_write(hw, page, (bufferptr + offset),
-                                             remainsize);
+                       rtl_fw_page_write(hw, page, (bufferptr + offset),
+                                         remainsize);
                }
        } else {
-               _rtl92c_fw_block_write(hw, buffer, size);
+               rtl_fw_block_write(hw, buffer, size);
        }
 }
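
Annotation: for images larger than one page, the deleted _rtl92c_fw_page_write() selected the target page through the low three bits of REG_MCUFWDL + 2 before block-writing; the shared rtl_fw_page_write() is assumed to do the same. A standalone model of the paging loop above (the page-size value and the stubbed primitive are assumptions):

        #include <stdint.h>
        #include <stdio.h>

        #define FW_PAGE_SIZE 4096u      /* assumed FW_8192C_PAGE_SIZE */

        /* Stand-in for rtl_fw_page_write(): select the page, then
         * block-write the payload. */
        static void page_write(uint32_t page, const uint8_t *buf,
                               uint32_t len)
        {
                printf("page %u: %u bytes\n", page, len);
        }

        static void write_fw(const uint8_t *buf, uint32_t size)
        {
                uint32_t pages = size / FW_PAGE_SIZE;
                uint32_t rem = size % FW_PAGE_SIZE;
                uint32_t page;

                for (page = 0; page < pages; page++)
                        page_write(page, buf + page * FW_PAGE_SIZE,
                                   FW_PAGE_SIZE);
                if (rem)
                        page_write(pages, buf + pages * FW_PAGE_SIZE, rem);
        }
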
 
@@ -180,15 +122,10 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw)
                 (!(value32 & FWDL_ChkSum_rpt)));
 
        if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "chksum report faill ! REG_MCUFWDL:0x%08x .\n",
-                         value32);
+               pr_err("chksum report fail! REG_MCUFWDL:0x%08x.\n",
+                      value32);
                goto exit;
        }
-
-       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
-
        value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
        value32 |= MCUFWDL_RDY;
        value32 &= ~WINTINI_RDY;
@@ -198,20 +135,15 @@ static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw)
 
        do {
                value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
-               if (value32 & WINTINI_RDY) {
-                       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                                "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
-                                       value32);
-                       err = 0;
-                       goto exit;
-               }
+               if (value32 & WINTINI_RDY)
+                       return 0;
 
                mdelay(FW_8192C_POLLING_DELAY);
 
        } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
 
-       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n", value32);
+       pr_err("Polling FW ready fail! REG_MCUFWDL:0x%08x.\n",
+              value32);
 
 exit:
        return err;
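
Annotation: both waits in this function follow the same poll-with-timeout shape: spin on REG_MCUFWDL until the checksum report or WINTINI_RDY bit comes up, and bail out with pr_err() after a bounded number of tries. The WINTINI_RDY success path now returns 0 directly instead of setting err and jumping to exit, which is behaviourally identical. A standalone model of the pattern, with stand-ins for the register and delay primitives:

        #include <stdbool.h>
        #include <stdint.h>

        /* read_reg() and delay_ms() stand in for rtl_read_dword()
         * and mdelay(). */
        static bool poll_bit_set(uint32_t (*read_reg)(void), uint32_t mask,
                                 unsigned int tries,
                                 void (*delay_ms)(unsigned int))
        {
                while (tries--) {
                        if (read_reg() & mask)
                                return true;    /* e.g. WINTINI_RDY set */
                        delay_ms(5);            /* polling-delay stand-in */
                }
                return false;                   /* timed out: log pr_err() */
        }
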
@@ -250,13 +182,8 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
        _rtl92c_enable_fw_download(hw, false);
 
        err = _rtl92c_fw_free_to_go(hw);
-       if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Firmware is not ready to run!\n");
-       } else {
-               RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                        "Firmware is ready to run!\n");
-       }
+       if (err)
+               pr_err("Firmware is not ready to run!\n");
 
        return 0;
 }
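
Annotation: note that rtl92c_download_fw() still returns 0 unconditionally here; a failed _rtl92c_fw_free_to_go() is logged via pr_err() but never propagated to the caller, exactly as before this cleanup.
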
@@ -327,8 +254,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
        while (!bwrite_sucess) {
                wait_writeh2c_limmit--;
                if (wait_writeh2c_limmit == 0) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Write H2C fail because no trigger for FW INT!\n");
+                       pr_err("Write H2C fail because no trigger for FW INT!\n");
                        break;
                }
 
@@ -485,8 +411,8 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
        u32 tmp_cmdbuf[2];
 
        if (!rtlhal->fw_ready) {
-               RT_ASSERT(false,
-                         "return H2C cmd because of Fw download fail!!!\n");
+               WARN_ONCE(true,
+                         "rtl8192c-common: return H2C cmd because of Fw download fail!!!\n");
                return;
        }
 
@@ -510,7 +436,7 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw)
        while (u1b_tmp & BIT(2)) {
                delay--;
                if (delay == 0) {
-                       RT_ASSERT(false, "8051 reset fail.\n");
+                       WARN_ONCE(true, "rtl8192c-common: 8051 reset fail.\n");
                        break;
                }
                udelay(50);
index 94dd25cf1ca86a6a3fe6a4ff55efb5b8fc9074a5..7c6e5d91439d0816bfab371bacbaf7542cb2b7c6 100644 (file)
@@ -77,7 +77,7 @@ EXPORT_SYMBOL(rtl92c_phy_set_bb_reg);
 u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
                                  enum radio_path rfpath, u32 offset)
 {
-       RT_ASSERT(false, "deprecated!\n");
+       WARN_ONCE(true, "rtl8192c-common: _rtl92c_phy_fw_rf_serial_read deprecated!\n");
        return 0;
 }
 EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_read);
@@ -86,7 +86,7 @@ void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
                                    enum radio_path rfpath, u32 offset,
                                    u32 data)
 {
-       RT_ASSERT(false, "deprecated!\n");
+       WARN_ONCE(true, "rtl8192c-common: _rtl92c_phy_fw_rf_serial_write deprecated!\n");
 }
 EXPORT_SYMBOL(_rtl92c_phy_fw_rf_serial_write);
 
@@ -104,7 +104,7 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
        offset &= 0x3f;
        newoffset = offset;
        if (RT_CANNOT_IO(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
+               pr_err("return all one\n");
                return 0xFFFFFFFF;
        }
        tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
@@ -152,7 +152,7 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
        struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
 
        if (RT_CANNOT_IO(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
+               pr_err("stop\n");
                return;
        }
        offset &= 0x3f;
@@ -209,7 +209,7 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
        rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
                                                 BASEBAND_CONFIG_PHY_REG);
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
+               pr_err("Write BB Reg Fail!!\n");
                return false;
        }
        if (rtlphy->rf_type == RF_1T2R) {
@@ -222,13 +222,13 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
                                                   BASEBAND_CONFIG_PHY_REG);
        }
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
+               pr_err("BB_PG Reg Fail!!\n");
                return false;
        }
        rtstatus = rtlpriv->cfg->ops->config_bb_with_headerfile(hw,
                                                 BASEBAND_CONFIG_AGC_TAB);
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+               pr_err("AGC Table Fail\n");
                return false;
        }
        rtlphy->cck_high_power =
@@ -745,8 +745,8 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
                return 0;
        if (rtlphy->set_bwmode_inprogress)
                return 0;
-       RT_ASSERT((rtlphy->current_channel <= 14),
-                 "WIRELESS_MODE_G but channel>14");
+       WARN_ONCE((rtlphy->current_channel > 14),
+                 "rtl8192c-common: WIRELESS_MODE_G but channel>14");
        rtlphy->sw_chnl_inprogress = true;
        rtlphy->sw_chnl_stage = 0;
        rtlphy->sw_chnl_step = 0;
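
Annotation: the RT_ASSERT() to WARN_ONCE() conversions in this series invert the tested condition, because the two macros fire on opposite senses: RT_ASSERT(cond, ...) logged when cond was false, while WARN_ONCE(cond, ...) warns when cond is true (and, in the real kernel, also dumps a backtrace and latches after the first hit). A runnable model of the equivalence:

        #include <stdio.h>

        /* Models only the condition sense, not the backtrace/latch. */
        #define RT_ASSERT_MODEL(cond, msg) \
                do { if (!(cond)) puts(msg); } while (0)
        #define WARN_ONCE_MODEL(cond, msg) \
                do { if (cond) puts(msg); } while (0)

        int main(void)
        {
                int channel = 36;       /* deliberately outside 1..14 */

                RT_ASSERT_MODEL(channel >= 1 && channel <= 14,
                                "illegal channel");
                WARN_ONCE_MODEL(channel < 1 || channel > 14,
                                "illegal channel");
                return 0;               /* both print: the tests agree */
        }
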
@@ -792,7 +792,7 @@ static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
        struct swchnlcmd *pcmd;
 
        if (cmdtable == NULL) {
-               RT_ASSERT(false, "cmdtable cannot be NULL.\n");
+               WARN_ONCE(true, "rtl8192c-common: cmdtable cannot be NULL.\n");
                return false;
        }
 
@@ -837,8 +837,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
 
        rfdependcmdcnt = 0;
 
-       RT_ASSERT((channel >= 1 && channel <= 14),
-                 "illegal channel for Zebra: %d\n", channel);
+       WARN_ONCE((channel < 1 || channel > 14),
+                 "rtl8192c-common: illegal channel for Zebra: %d\n", channel);
 
        _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
                                         MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
@@ -860,8 +860,8 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
                        currentcmd = &postcommoncmd[*step];
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Invalid 'stage' = %d, Check it!\n", *stage);
+                       pr_err("Invalid 'stage' = %d, Check it!\n",
+                              *stage);
                        return true;
                }
 
index 4483d40ecad1b7adec14077caec71fbeb2f83545..9956026bae0ad098308d873a754904b8ce9023a7 100644 (file)
@@ -140,8 +140,7 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
        case HAL_DEF_WOWLAN:
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", variable);
+               pr_err("switch case %#x not processed\n", variable);
                break;
        }
 }
@@ -149,7 +148,6 @@ void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
 void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -277,8 +275,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                        u8 *p_regtoset = NULL;
                        u8 index = 0;
 
-                       if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
-                           (rtlpcipriv->bt_coexist.bt_coexist_type ==
+                       if ((rtlpriv->btcoexist.bt_coexistence) &&
+                           (rtlpriv->btcoexist.bt_coexist_type ==
                            BT_CSR_BC4))
                                p_regtoset = regtoset_bt;
                        else
@@ -364,9 +362,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                        acm_ctrl &= (~AcmHw_VoqEn);
                                        break;
                                default:
-                                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                                "switch case %#x not processed\n",
-                                                e_aci);
+                                       pr_err("switch case %#x not processed\n",
+                                              e_aci);
                                        break;
                                }
                        }
@@ -551,8 +548,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2, array);
                break; }
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %d not processed\n", variable);
+               pr_err("switch case %d not processed\n", variable);
                break;
        }
 }
@@ -573,9 +569,8 @@ static bool _rtl92ce_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
                        break;
 
                if (count > POLLING_LLT_THRESHOLD) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Failed to polling write LLT done at address %d!\n",
-                                address);
+                       pr_err("Failed to polling write LLT done at address %d!\n",
+                              address);
                        status = false;
                        break;
                }
@@ -659,26 +654,25 @@ static bool _rtl92ce_llt_table_init(struct ieee80211_hw *hw)
 
 static void _rtl92ce_gen_refresh_led_state(struct ieee80211_hw *hw)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+       struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
 
        if (rtlpci->up_first_time)
                return;
 
        if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
-               rtl92ce_sw_led_on(hw, pLed0);
+               rtl92ce_sw_led_on(hw, pled0);
        else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
-               rtl92ce_sw_led_on(hw, pLed0);
+               rtl92ce_sw_led_on(hw, pled0);
        else
-               rtl92ce_sw_led_off(hw, pLed0);
+               rtl92ce_sw_led_off(hw, pled0);
 }
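
Annotation: the recurring rtl_pci_priv/rtl_usb_priv to rtl_priv conversions in this and the following hunks all stem from one change: the bt_coexist and ledctl state moves out of the bus-specific private structs into the core rtl_priv (as btcoexist and ledctl), so the PCI and USB paths share one copy. The pLed0 to pled0 renames just drop CamelCase per kernel naming style. A minimal sketch of the layout change, with stand-in types and hypothetical _old/_new names (the real structs in wifi.h carry far more fields):

        /* stand-in types, not the real definitions */
        struct bt_coexist_info { int bt_coexistence; int bt_coexist_type; };
        struct rtl_led_ctl { int led_opendrain; };

        /* old: each bus glue kept a private copy */
        struct rtl_pci_priv_old { struct bt_coexist_info bt_coexist;
                                  struct rtl_led_ctl ledctl; };
        struct rtl_usb_priv_old { struct rtl_led_ctl ledctl; };

        /* new: one copy in the core private data, shared by PCI and USB */
        struct rtl_priv_new { struct bt_coexist_info btcoexist;
                              struct rtl_led_ctl ledctl; };
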
 
 static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
@@ -687,7 +681,7 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
        u16 retry;
 
        rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
-       if (rtlpcipriv->bt_coexist.bt_coexistence) {
+       if (rtlpriv->btcoexist.bt_coexistence) {
                u32 value32;
                value32 = rtl_read_dword(rtlpriv, REG_APS_FSMCO);
                value32 |= (SOP_ABG | SOP_AMB | XOP_BTCK);
@@ -696,7 +690,7 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
        rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
        rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, 0x0F);
 
-       if (rtlpcipriv->bt_coexist.bt_coexistence) {
+       if (rtlpriv->btcoexist.bt_coexistence) {
                u32 u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL);
 
                u4b_tmp &= (~0x00024800);
@@ -730,7 +724,7 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
        rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL + 1, 0x82);
        udelay(2);
 
-       if (rtlpcipriv->bt_coexist.bt_coexistence) {
+       if (rtlpriv->btcoexist.bt_coexistence) {
                bytetmp = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL+2) & 0xfd;
                rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL+2, bytetmp);
        }
@@ -802,7 +796,6 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
 {
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
        u8 reg_bw_opmode;
        u32 reg_prsr;
 
@@ -832,8 +825,8 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
        rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000);
        rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504);
 
-       if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
-           (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4))
+       if ((rtlpriv->btcoexist.bt_coexistence) &&
+           (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4))
                rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x97427431);
        else
                rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0xb972a841);
@@ -852,8 +845,8 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
        rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
        rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
 
-       if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
-           (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4)) {
+       if ((rtlpriv->btcoexist.bt_coexistence) &&
+           (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4)) {
                rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
                rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0402);
        } else {
@@ -861,8 +854,8 @@ static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
                rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
        }
 
-       if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
-            (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4))
+       if ((rtlpriv->btcoexist.bt_coexistence) &&
+           (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4))
                rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03086666);
        else
                rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x086666);
@@ -963,7 +956,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
        rtlpriv->intf_ops->disable_aspm(hw);
        rtstatus = _rtl92ce_init_mac(hw);
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+               pr_err("Init MAC failed\n");
                err = 1;
                goto exit;
        }
@@ -1128,8 +1121,7 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
                break;
        }
 
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
-                "Chip Version ID: %s\n", versionid);
+       pr_info("Chip Version ID: %s\n", versionid);
 
        switch (version & 0x3) {
        case CHIP_88C:
@@ -1143,8 +1135,7 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
                break;
        default:
                rtlphy->rf_type = RF_1T1R;
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "ERROR RF_Type is set!!\n");
+               pr_err("ERROR RF_Type is set!!\n");
                break;
        }
 
@@ -1193,8 +1184,7 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
                         "Set Network type to Mesh Point!\n");
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Network type %d not supported!\n", type);
+               pr_err("Network type %d not supported!\n", type);
                return 1;
 
        }
@@ -1292,7 +1282,7 @@ void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci)
                rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
                break;
        default:
-               RT_ASSERT(false, "invalid aci: %d !\n", aci);
+               WARN_ONCE(true, "rtl8192ce: invalid aci: %d !\n", aci);
                break;
        }
 }
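
Annotation: rtl92ce_set_qos() programs one packed EDCA parameter word per access category. A hypothetical decomposition of a value like 0x2f3222 into TXOP limit, ECWmax, ECWmin and AIFS fields, assuming the conventional Realtek register layout (a datasheet detail this diff does not show):

        #include <stdint.h>

        /* Hypothetical helper; field layout is an assumption. */
        static uint32_t edca_param(uint16_t txop, uint8_t ecwmax,
                                   uint8_t ecwmin, uint8_t aifs)
        {
                return ((uint32_t)txop << 16) |
                       ((uint32_t)(ecwmax & 0xf) << 12) |
                       ((uint32_t)(ecwmin & 0xf) << 8) |
                       aifs;
        }
        /* edca_param(0x2f, 0x3, 0x2, 0x22) == 0x2f3222 */
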
@@ -1320,7 +1310,6 @@ void rtl92ce_disable_interrupt(struct ieee80211_hw *hw)
 static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
        u8 u1b_tmp;
        u32 u4b_tmp;
@@ -1338,9 +1327,9 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
        rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
        rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00000000);
        u1b_tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL);
-       if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
-            ((rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) ||
-            (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC8))) {
+       if ((rtlpriv->btcoexist.bt_coexistence) &&
+           ((rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) ||
+            (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC8))) {
                rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00F30000 |
                                (u1b_tmp << 8));
        } else {
@@ -1352,7 +1341,7 @@ static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
        rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);
        if (!IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
                rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
-       if (rtlpcipriv->bt_coexist.bt_coexistence) {
+       if (rtlpriv->btcoexist.bt_coexistence) {
                u4b_tmp = rtl_read_dword(rtlpriv, REG_AFE_XTAL_CTRL);
                u4b_tmp |= 0x03824800;
                rtl_write_dword(rtlpriv, REG_AFE_XTAL_CTRL, u4b_tmp);
@@ -1731,12 +1720,11 @@ exit:
 static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
        switch (rtlhal->oem_id) {
        case RT_CID_819X_HP:
-               pcipriv->ledctl.led_opendrain = true;
+               rtlpriv->ledctl.led_opendrain = true;
                break;
        case RT_CID_819X_LENOVO:
        case RT_CID_DEFAULT:
@@ -1780,7 +1768,7 @@ void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
                rtlefuse->autoload_failflag = false;
                _rtl92ce_read_adapter_info(hw);
        } else {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+               pr_err("Autoload ERR!!\n");
        }
        _rtl92ce_hal_customized_behavior(hw);
 }
@@ -1789,7 +1777,6 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
                struct ieee80211_sta *sta)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
        struct rtl_phy *rtlphy = &(rtlpriv->phy);
        struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -1845,12 +1832,12 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
                break;
        }
 
-       if ((rtlpcipriv->bt_coexist.bt_coexistence) &&
-           (rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) &&
-           (rtlpcipriv->bt_coexist.bt_cur_state) &&
-           (rtlpcipriv->bt_coexist.bt_ant_isolation) &&
-           ((rtlpcipriv->bt_coexist.bt_service == BT_SCO) ||
-           (rtlpcipriv->bt_coexist.bt_service == BT_BUSY)))
+       if ((rtlpriv->btcoexist.bt_coexistence) &&
+           (rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) &&
+           (rtlpriv->btcoexist.bt_cur_state) &&
+           (rtlpriv->btcoexist.bt_ant_isolation) &&
+           ((rtlpriv->btcoexist.bt_service == BT_SCO) ||
+           (rtlpriv->btcoexist.bt_service == BT_BUSY)))
                ratr_value &= 0x0fffcfc0;
        else
                ratr_value &= 0x0FFFFFFF;
@@ -2152,8 +2139,8 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
                        enc_algo = CAM_AES;
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "switch case %#x not processed\n", enc_algo);
+                       pr_err("switch case %#x not processed\n",
+                              enc_algo);
                        enc_algo = CAM_TKIP;
                        break;
                }
@@ -2171,9 +2158,7 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
                                        entry_id = rtl_cam_get_free_entry(hw,
                                                                 p_macaddr);
                                        if (entry_id >=  TOTAL_CAM_ENTRY) {
-                                               RT_TRACE(rtlpriv, COMP_SEC,
-                                                        DBG_EMERG,
-                                                        "Can not find free hw security cam entry\n");
+                                               pr_err("Can not find free hw security cam entry\n");
                                                return;
                                        }
                                } else {
@@ -2246,65 +2231,64 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
 
 static void rtl8192ce_bt_var_init(struct ieee80211_hw *hw)
 {
-       struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
-
-       rtlpcipriv->bt_coexist.bt_coexistence =
-                       rtlpcipriv->bt_coexist.eeprom_bt_coexist;
-       rtlpcipriv->bt_coexist.bt_ant_num =
-                       rtlpcipriv->bt_coexist.eeprom_bt_ant_num;
-       rtlpcipriv->bt_coexist.bt_coexist_type =
-                       rtlpcipriv->bt_coexist.eeprom_bt_type;
-
-       if (rtlpcipriv->bt_coexist.reg_bt_iso == 2)
-               rtlpcipriv->bt_coexist.bt_ant_isolation =
-                       rtlpcipriv->bt_coexist.eeprom_bt_ant_isol;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       rtlpriv->btcoexist.bt_coexistence =
+                       rtlpriv->btcoexist.eeprom_bt_coexist;
+       rtlpriv->btcoexist.bt_ant_num =
+                       rtlpriv->btcoexist.eeprom_bt_ant_num;
+       rtlpriv->btcoexist.bt_coexist_type =
+                       rtlpriv->btcoexist.eeprom_bt_type;
+
+       if (rtlpriv->btcoexist.reg_bt_iso == 2)
+               rtlpriv->btcoexist.bt_ant_isolation =
+                       rtlpriv->btcoexist.eeprom_bt_ant_isol;
        else
-               rtlpcipriv->bt_coexist.bt_ant_isolation =
-                       rtlpcipriv->bt_coexist.reg_bt_iso;
-
-       rtlpcipriv->bt_coexist.bt_radio_shared_type =
-                       rtlpcipriv->bt_coexist.eeprom_bt_radio_shared;
-
-       if (rtlpcipriv->bt_coexist.bt_coexistence) {
-
-               if (rtlpcipriv->bt_coexist.reg_bt_sco == 1)
-                       rtlpcipriv->bt_coexist.bt_service = BT_OTHER_ACTION;
-               else if (rtlpcipriv->bt_coexist.reg_bt_sco == 2)
-                       rtlpcipriv->bt_coexist.bt_service = BT_SCO;
-               else if (rtlpcipriv->bt_coexist.reg_bt_sco == 4)
-                       rtlpcipriv->bt_coexist.bt_service = BT_BUSY;
-               else if (rtlpcipriv->bt_coexist.reg_bt_sco == 5)
-                       rtlpcipriv->bt_coexist.bt_service = BT_OTHERBUSY;
+               rtlpriv->btcoexist.bt_ant_isolation =
+                       rtlpriv->btcoexist.reg_bt_iso;
+
+       rtlpriv->btcoexist.bt_radio_shared_type =
+                       rtlpriv->btcoexist.eeprom_bt_radio_shared;
+
+       if (rtlpriv->btcoexist.bt_coexistence) {
+               if (rtlpriv->btcoexist.reg_bt_sco == 1)
+                       rtlpriv->btcoexist.bt_service = BT_OTHER_ACTION;
+               else if (rtlpriv->btcoexist.reg_bt_sco == 2)
+                       rtlpriv->btcoexist.bt_service = BT_SCO;
+               else if (rtlpriv->btcoexist.reg_bt_sco == 4)
+                       rtlpriv->btcoexist.bt_service = BT_BUSY;
+               else if (rtlpriv->btcoexist.reg_bt_sco == 5)
+                       rtlpriv->btcoexist.bt_service = BT_OTHERBUSY;
                else
-                       rtlpcipriv->bt_coexist.bt_service = BT_IDLE;
+                       rtlpriv->btcoexist.bt_service = BT_IDLE;
 
-               rtlpcipriv->bt_coexist.bt_edca_ul = 0;
-               rtlpcipriv->bt_coexist.bt_edca_dl = 0;
-               rtlpcipriv->bt_coexist.bt_rssi_state = 0xff;
+               rtlpriv->btcoexist.bt_edca_ul = 0;
+               rtlpriv->btcoexist.bt_edca_dl = 0;
+               rtlpriv->btcoexist.bt_rssi_state = 0xff;
        }
 }
 
 void rtl8192ce_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
                                              bool auto_load_fail, u8 *hwinfo)
 {
-       struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
        u8 val;
 
        if (!auto_load_fail) {
-               rtlpcipriv->bt_coexist.eeprom_bt_coexist =
+               rtlpriv->btcoexist.eeprom_bt_coexist =
                                        ((hwinfo[RF_OPTION1] & 0xe0) >> 5);
                val = hwinfo[RF_OPTION4];
-               rtlpcipriv->bt_coexist.eeprom_bt_type = ((val & 0xe) >> 1);
-               rtlpcipriv->bt_coexist.eeprom_bt_ant_num = (val & 0x1);
-               rtlpcipriv->bt_coexist.eeprom_bt_ant_isol = ((val & 0x10) >> 4);
-               rtlpcipriv->bt_coexist.eeprom_bt_radio_shared =
+               rtlpriv->btcoexist.eeprom_bt_type = ((val & 0xe) >> 1);
+               rtlpriv->btcoexist.eeprom_bt_ant_num = (val & 0x1);
+               rtlpriv->btcoexist.eeprom_bt_ant_isol = ((val & 0x10) >> 4);
+               rtlpriv->btcoexist.eeprom_bt_radio_shared =
                                                         ((val & 0x20) >> 5);
        } else {
-               rtlpcipriv->bt_coexist.eeprom_bt_coexist = 0;
-               rtlpcipriv->bt_coexist.eeprom_bt_type = BT_2WIRE;
-               rtlpcipriv->bt_coexist.eeprom_bt_ant_num = ANT_X2;
-               rtlpcipriv->bt_coexist.eeprom_bt_ant_isol = 0;
-               rtlpcipriv->bt_coexist.eeprom_bt_radio_shared = BT_RADIO_SHARED;
+               rtlpriv->btcoexist.eeprom_bt_coexist = 0;
+               rtlpriv->btcoexist.eeprom_bt_type = BT_2WIRE;
+               rtlpriv->btcoexist.eeprom_bt_ant_num = ANT_X2;
+               rtlpriv->btcoexist.eeprom_bt_ant_isol = 0;
+               rtlpriv->btcoexist.eeprom_bt_radio_shared = BT_RADIO_SHARED;
        }
 
        rtl8192ce_bt_var_init(hw);
@@ -2312,14 +2296,14 @@ void rtl8192ce_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
 
 void rtl8192ce_bt_reg_init(struct ieee80211_hw *hw)
 {
-       struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
 
        /* 0:Low, 1:High, 2:From Efuse. */
-       rtlpcipriv->bt_coexist.reg_bt_iso = 2;
+       rtlpriv->btcoexist.reg_bt_iso = 2;
        /* 0:Idle, 1:None-SCO, 2:SCO, 3:From Counter. */
-       rtlpcipriv->bt_coexist.reg_bt_sco = 3;
+       rtlpriv->btcoexist.reg_bt_sco = 3;
        /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */
-       rtlpcipriv->bt_coexist.reg_bt_sco = 0;
+       rtlpriv->btcoexist.reg_bt_sco = 0;
 }
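
Annotation: worth flagging while these fields are renamed: rtl8192ce_bt_reg_init() stores 3 into reg_bt_sco ("From Counter") and then immediately overwrites it with 0 under the A-MPDU comment, so the second store clobbers the first. The comment suggests the 0 was meant for a separate A-MPDU-control field (a hypothetical reg_bt_ampdu); the conversion preserves the existing behaviour rather than changing it.
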
 
 
@@ -2327,23 +2311,22 @@ void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_phy *rtlphy = &(rtlpriv->phy);
-       struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
 
        u8 u1_tmp;
 
-       if (rtlpcipriv->bt_coexist.bt_coexistence &&
-           ((rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC4) ||
-             rtlpcipriv->bt_coexist.bt_coexist_type == BT_CSR_BC8)) {
+       if (rtlpriv->btcoexist.bt_coexistence &&
+           ((rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC4) ||
+             rtlpriv->btcoexist.bt_coexist_type == BT_CSR_BC8)) {
 
-               if (rtlpcipriv->bt_coexist.bt_ant_isolation)
+               if (rtlpriv->btcoexist.bt_ant_isolation)
                        rtl_write_byte(rtlpriv, REG_GPIO_MUXCFG, 0xa0);
 
                u1_tmp = rtl_read_byte(rtlpriv, 0x4fd) &
                         BIT_OFFSET_LEN_MASK_32(0, 1);
                u1_tmp = u1_tmp |
-                        ((rtlpcipriv->bt_coexist.bt_ant_isolation == 1) ?
+                        ((rtlpriv->btcoexist.bt_ant_isolation == 1) ?
                         0 : BIT_OFFSET_LEN_MASK_32(1, 1)) |
-                        ((rtlpcipriv->bt_coexist.bt_service == BT_SCO) ?
+                        ((rtlpriv->btcoexist.bt_service == BT_SCO) ?
                         0 : BIT_OFFSET_LEN_MASK_32(2, 1));
                rtl_write_byte(rtlpriv, 0x4fd, u1_tmp);
 
index 833193b751f73673cf3c0460a79a3427e3a6ee04..7edf5af9046eb69eafe82600f1dcbcce3f486d49 100644 (file)
@@ -57,8 +57,8 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
                rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", pled->ledpin);
+               pr_err("switch case %#x not processed\n",
+                      pled->ledpin);
                break;
        }
        pled->ledon = true;
@@ -67,7 +67,6 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
 void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        u8 ledcfg;
 
        RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
@@ -80,7 +79,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
                break;
        case LED_PIN_LED0:
                ledcfg &= 0xf0;
-               if (pcipriv->ledctl.led_opendrain)
+               if (rtlpriv->ledctl.led_opendrain)
                        rtl_write_byte(rtlpriv, REG_LEDCFG2,
                                       (ledcfg | BIT(1) | BIT(5) | BIT(6)));
                else
@@ -92,8 +91,7 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
                rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", pled->ledpin);
+               pr_info("switch case %#x not processed\n", pled->ledpin);
                break;
        }
        pled->ledon = false;
@@ -101,24 +99,26 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 
 void rtl92ce_init_sw_leds(struct ieee80211_hw *hw)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-       _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
-       _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       _rtl92ce_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+       _rtl92ce_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
 }
 
 static void _rtl92ce_sw_led_control(struct ieee80211_hw *hw,
                                    enum led_ctl_mode ledaction)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-       struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
+
        switch (ledaction) {
        case LED_CTL_POWER_ON:
        case LED_CTL_LINK:
        case LED_CTL_NO_LINK:
-               rtl92ce_sw_led_on(hw, pLed0);
+               rtl92ce_sw_led_on(hw, pled0);
                break;
        case LED_CTL_POWER_OFF:
-               rtl92ce_sw_led_off(hw, pLed0);
+               rtl92ce_sw_led_off(hw, pled0);
                break;
        default:
                break;
index d1b6a8fe7b6a5c64094da84efafe5f85003d1f5f..7c6d7fc1ef9a7082df71cf12cb7a09c64c22818e 100644 (file)
@@ -297,10 +297,10 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                break;
        case RF90_PATH_C:
        case RF90_PATH_D:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", rfpath);
+               pr_info("Incorrect rfpath %#x\n", rfpath);
                break;
        default:
+               pr_info("switch case %#x not processed\n", rfpath);
                break;
        }
        return true;
@@ -340,8 +340,7 @@ void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
                rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_info("unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
                break;
        }
 
@@ -365,8 +364,8 @@ void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
                               HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_err("unknown bandwidth: %#X\n",
+                      rtlphy->current_chan_bw);
                break;
        }
        rtl92ce_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
@@ -546,8 +545,8 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
                        break;
                }
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", rfpwr_state);
+               pr_err("switch case %#x not processed\n",
+                      rfpwr_state);
                bresult = false;
                break;
        }
index 7cae6350437c72395a5a9bed876329a474a7bb55..e68ed7f37c79874d77819ece2387f449e6d8728a 100644 (file)
@@ -51,8 +51,7 @@ void rtl92ce_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
                              rtlphy->rfreg_chnlval[0]);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", bandwidth);
+               pr_err("unknown bandwidth: %#X\n", bandwidth);
                break;
        }
 }
index a33a06d58a9ae8496e9baa2cd7a6e2ac1f3087e2..bcbb0c60f1f12bc9d1cd706510d5a97dbc5e3ae3 100644 (file)
@@ -130,8 +130,6 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
 
        rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD | 0);
 
-       /* for debug level */
-       rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
        /* for LPS & IPS */
        rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
@@ -158,8 +156,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
        /* for firmware buf */
        rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
        if (!rtlpriv->rtlhal.pfirmware) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Can't alloc buffer for fw\n");
+               pr_err("Can't alloc buffer for fw\n");
                return 1;
        }
 
@@ -178,8 +175,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
                                      rtlpriv->io.dev, GFP_KERNEL, hw,
                                      rtl_fw_cb);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Failed to request firmware!\n");
+               pr_err("Failed to request firmware!\n");
                return 1;
        }
 
@@ -254,7 +250,8 @@ static struct rtl_mod_params rtl92ce_mod_params = {
        .inactiveps = true,
        .swctrl_lps = false,
        .fwctrl_lps = true,
-       .debug = DBG_EMERG,
+       .debug_level = 0,
+       .debug_mask = 0,
 };
 
 static const struct rtl_hal_cfg rtl92ce_hal_cfg = {
@@ -371,7 +368,8 @@ MODULE_FIRMWARE("rtlwifi/rtl8192cfwU.bin");
 MODULE_FIRMWARE("rtlwifi/rtl8192cfwU_B.bin");
 
 module_param_named(swenc, rtl92ce_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl92ce_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl92ce_mod_params.debug_level, int, 0644);
+module_param_named(debug_mask, rtl92ce_mod_params.debug_mask, ullong, 0644);
 module_param_named(ips, rtl92ce_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl92ce_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl92ce_mod_params.fwctrl_lps, bool, 0444);
@@ -379,7 +377,8 @@ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
 MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
 MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
 MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
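
Annotation: the old single debug parameter is split into debug_level (verbosity, 0-5) and debug_mask (a per-component u64 bitmask), and the permission change from 0444 to 0644 makes both writable at runtime through sysfs. A usage sketch; the bitmask value is an arbitrary illustration:

        modprobe rtl8192ce debug_level=3 debug_mask=0x1
        echo 5 > /sys/module/rtl8192ce/parameters/debug_level
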
 
index 2ab4a00246cca14fa77af6ba98a176f29917884f..3616ba21959d1edeca7df314ba2b7a648dab255d 100644 (file)
@@ -670,7 +670,7 @@ void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
                        SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
                        break;
                default:
-                       RT_ASSERT(false, "ERR txdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8192ce: ERR txdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
@@ -690,7 +690,7 @@ void rtl92ce_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
                        SET_RX_DESC_EOR(pdesc, 1);
                        break;
                default:
-                       RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8192ce: ERR rxdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
@@ -710,7 +710,7 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name)
                        ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc);
                        break;
                default:
-                       RT_ASSERT(false, "ERR txdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8192ce: ERR txdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
@@ -726,7 +726,7 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name)
                        ret = GET_RX_DESC_BUFF_ADDR(p_desc);
                        break;
                default:
-                       RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8192ce: ERR rxdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
index 5c7da0cfc684eb6039705479bda3690f5347016b..f95a64507f17f1ea44b49c1f76892f058cad0c57 100644 (file)
@@ -393,12 +393,11 @@ exit:
 static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
        switch (rtlhal->oem_id) {
        case RT_CID_819X_HP:
-               usb_priv->ledctl.led_opendrain = true;
+               rtlpriv->ledctl.led_opendrain = true;
                break;
        case RT_CID_819X_LENOVO:
        case RT_CID_DEFAULT:
@@ -452,8 +451,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
                        break;
                }
                if (pollingCount++ > 100) {
-                       RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
-                                "Failed to polling REG_APS_FSMCO[PFM_ALDN] done!\n");
+                       pr_err("Failed to polling REG_APS_FSMCO[PFM_ALDN] done!\n");
                        return -ENODEV;
                }
        } while (true);
@@ -486,8 +484,7 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
                        break;
                }
                if (pollingCount++ > 1000) {
-                       RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
-                                "Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n");
+                       pr_err("Failed to polling REG_APS_FSMCO[APFM_ONMAC] done!\n");
                        return -ENODEV;
                }
        } while (true);
@@ -687,7 +684,6 @@ static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw,
                                                      u8 queue_sel)
 {
        u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
 
        if (!wmm_enable) { /* typical setting */
                beQ     = QUEUE_LOW;
@@ -705,8 +701,7 @@ static void _rtl92cu_init_chipN_three_out_ep_priority(struct ieee80211_hw *hw,
                hiQ     = QUEUE_HIGH;
        }
        _rtl92c_init_chipN_reg_priority(hw, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Tx queue select :0x%02x..\n",
-                queue_sel);
+       pr_info("Tx queue select :0x%02x..\n", queue_sel);
 }
 
 static void _rtl92cu_init_chipN_queue_priority(struct ieee80211_hw *hw,
@@ -765,8 +760,7 @@ static void _rtl92cu_init_chipT_queue_priority(struct ieee80211_hw *hw,
                break;
        }
        rtl_write_byte(rtlpriv, (REG_TRXDMA_CTRL+1), hq_sele);
-       RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Tx queue select :0x%02x..\n",
-                hq_sele);
+       pr_info("Tx queue select :0x%02x..\n", hq_sele);
 }
 
 static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw,
@@ -848,8 +842,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
        err = _rtl92cu_init_power_on(hw);
 
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Failed to init power on!\n");
+               pr_err("Failed to init power on!\n");
                return err;
        }
        if (!wmm_enable) {
@@ -860,8 +853,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
                                        : WMM_CHIP_A_TX_PAGE_BOUNDARY;
        }
        if (false == rtl92c_init_llt_table(hw, boundary)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Failed to init LLT Table!\n");
+               pr_err("Failed to init LLT Table!\n");
                return -EINVAL;
        }
        _rtl92cu_init_queue_reserved_page(hw, wmm_enable, out_ep_nums,
@@ -986,7 +978,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
        rtlhal->hw_type = HARDWARE_TYPE_RTL8192CU;
        err = _rtl92cu_init_mac(hw);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n");
+               pr_err("init mac failed!\n");
                goto exit;
        }
        err = rtl92c_download_fw(hw);
@@ -1099,8 +1091,7 @@ static void  _ResetDigitalProcedure1(struct ieee80211_hw *hw, bool bWithoutHWSM)
                                udelay(50);
                        }
                        if (retry_cnts >= 100) {
-                               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                        "#####=> 8051 reset failed!.........................\n");
+                               pr_err("8051 reset failed!.........................\n");
                                /* if 8051 reset fail, reset MAC. */
                                rtl_write_byte(rtlpriv,
                                               REG_SYS_FUNC_EN + 1,
@@ -1340,8 +1331,7 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
                         "Set Network type to AP!\n");
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Network type %d not supported!\n", type);
+               pr_err("Network type %d not supported!\n", type);
                goto error_out;
        }
        rtl_write_byte(rtlpriv, MSR, bt_msr);
@@ -1555,8 +1545,7 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
        case HAL_DEF_WOWLAN:
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", variable);
+               pr_err("switch case %#x not processed\n", variable);
                break;
        }
 }
@@ -1790,7 +1779,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                                u4b_ac_param);
                                break;
                        default:
-                               RT_ASSERT(false, "invalid aci: %d !\n",
+                               WARN_ONCE(true, "rtl8192cu: invalid aci: %d !\n",
                                          e_aci);
                                break;
                        }
@@ -1926,8 +1915,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                        break;
                }
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", variable);
+               pr_err("switch case %#x not processed\n", variable);
                break;
        }
 }
index c6240813ff7b3034a81a58aee10d88b3127ae554..66d2784de67dbdf44a2551b2be0e0ab668713615 100644 (file)
@@ -57,8 +57,8 @@ void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
                rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", pled->ledpin);
+               pr_err("switch case %#x not processed\n",
+                      pled->ledpin);
                break;
        }
        pled->ledon = true;
@@ -67,7 +67,6 @@ void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
 void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
        u8 ledcfg;
 
        RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
@@ -78,7 +77,7 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
                break;
        case LED_PIN_LED0:
                ledcfg &= 0xf0;
-               if (usbpriv->ledctl.led_opendrain)
+               if (rtlpriv->ledctl.led_opendrain)
                        rtl_write_byte(rtlpriv, REG_LEDCFG2,
                                       (ledcfg | BIT(1) | BIT(5) | BIT(6)));
                else
@@ -90,8 +89,8 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
                rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", pled->ledpin);
+               pr_err("switch case %#x not processed\n",
+                      pled->ledpin);
                break;
        }
        pled->ledon = false;
@@ -99,16 +98,18 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 
 void rtl92cu_init_sw_leds(struct ieee80211_hw *hw)
 {
-       struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
-       _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led0), LED_PIN_LED0);
-       _rtl92cu_init_led(hw, &(usbpriv->ledctl.sw_led1), LED_PIN_LED1);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       _rtl92cu_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+       _rtl92cu_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
 }
 
 void rtl92cu_deinit_sw_leds(struct ieee80211_hw *hw)
 {
-       struct rtl_usb_priv *usbpriv = rtl_usbpriv(hw);
-       _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led0));
-       _rtl92cu_deInit_led(&(usbpriv->ledctl.sw_led1));
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       _rtl92cu_deInit_led(&rtlpriv->ledctl.sw_led0);
+       _rtl92cu_deInit_led(&rtlpriv->ledctl.sw_led1);
 }
 
 static void _rtl92cu_sw_led_control(struct ieee80211_hw *hw,
index cf212f694db5011f0ef45f25a21c2c582a58cca6..1b124eade846dce8df328a6b3ed087064ce78c97 100644 (file)
@@ -157,9 +157,8 @@ bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
                if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
                        break;
                if (count > POLLING_LLT_THRESHOLD) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Failed to polling write LLT done at address %d! _LLT_OP_VALUE(%x)\n",
-                                address, _LLT_OP_VALUE(value));
+                       pr_err("Failed to polling write LLT done at address %d! _LLT_OP_VALUE(%x)\n",
+                              address, _LLT_OP_VALUE(value));
                        status = false;
                        break;
                }
@@ -262,8 +261,7 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
                        enc_algo = CAM_AES;
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "illegal switch case\n");
+                       pr_err("illegal switch case\n");
                        enc_algo = CAM_TKIP;
                        break;
                }
@@ -280,9 +278,7 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
                                        entry_id = rtl_cam_get_free_entry(hw,
                                                                 p_macaddr);
                                        if (entry_id >=  TOTAL_CAM_ENTRY) {
-                                               RT_TRACE(rtlpriv, COMP_SEC,
-                                                        DBG_EMERG,
-                                                        "Can not find free hw security cam entry\n");
+                                               pr_err("Can not find free hw security cam entry\n");
                                                return;
                                        }
                                } else {
index f35f435c094eea2cc397748b0b0f207369f748b3..f068dd5317a7b30e649a41268639c483a5371338 100644 (file)
@@ -274,8 +274,7 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                break;
        case RF90_PATH_C:
        case RF90_PATH_D:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", rfpath);
+               pr_err("switch case %#x not processed\n", rfpath);
                break;
        default:
                break;
@@ -314,8 +313,8 @@ void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
                rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_err("unknown bandwidth: %#X\n",
+                      rtlphy->current_chan_bw);
                break;
        }
        switch (rtlphy->current_chan_bw) {
@@ -336,8 +335,8 @@ void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
                               HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_err("unknown bandwidth: %#X\n",
+                      rtlphy->current_chan_bw);
                break;
        }
        rtl92cu_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
@@ -509,8 +508,8 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
                _rtl92c_phy_set_rf_sleep(hw);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", rfpwr_state);
+               pr_err("switch case %#x not processed\n",
+                      rfpwr_state);
                bresult = false;
                break;
        }
index 5e3183024aa01d56f99171b359b4cb5c7e7e6797..9cff6bc4049c993a78ab7db6b512110a213834a0 100644 (file)
@@ -51,8 +51,7 @@ void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
                              rtlphy->rfreg_chnlval[0]);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", bandwidth);
+               pr_err("unknown bandwidth: %#X\n", bandwidth);
                break;
        }
 }
index b84e13ac6ead554b28b25f36b4d2e0508157cfd0..96c923b3feb4cdbf6317cd3e6b3371c5e7f37b17 100644 (file)
@@ -61,15 +61,13 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
        rtlpriv->dm.dm_flag = 0;
        rtlpriv->dm.disable_framebursting = false;
        rtlpriv->dm.thermalvalue = 0;
-       rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
        rtlpriv->cfg->mod_params->sw_crypto =
                rtlpriv->cfg->mod_params->sw_crypto;
 
        /* for firmware buf */
        rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
        if (!rtlpriv->rtlhal.pfirmware) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Can't alloc buffer for fw\n");
+               pr_err("Can't alloc buffer for fw\n");
                return 1;
        }
        if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) &&
@@ -158,13 +156,16 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
 
 static struct rtl_mod_params rtl92cu_mod_params = {
        .sw_crypto = 0,
-       .debug = DBG_EMERG,
+       .debug_level = 0,
+       .debug_mask = 0,
 };
 
 module_param_named(swenc, rtl92cu_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl92cu_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl92cu_mod_params.debug_level, int, 0644);
+module_param_named(debug_mask, rtl92cu_mod_params.debug_mask, ullong, 0644);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
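
The split of the old read-only debug parameter into debug_level and debug_mask recurs for every rtlwifi driver in this merge; both new knobs use mode 0644, so they can be changed after load through /sys/module/<driver>/parameters/ rather than requiring a module reload. A self-contained sketch of the declaration pattern, with a hypothetical params struct standing in for rtl_mod_params:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* Hypothetical miniature of the rtl_mod_params change. */
    static struct {
            int debug_level;        /* verbosity 0-5 */
            u64 debug_mask;         /* per-component enable bits */
    } params = {
            .debug_level = 0,
            .debug_mask = 0,
    };

    module_param_named(debug_level, params.debug_level, int, 0644);
    module_param_named(debug_mask, params.debug_mask, ullong, 0644);
    MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
    MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
    MODULE_LICENSE("GPL");
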
 
 static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
        /* rx */
index 1ea878fa7901266ace020a0ff683d6e36b359ec1..1611e42479d97eb9b5f8322f36af119c2747cac5 100644 (file)
@@ -241,7 +241,7 @@ u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index)
                break;
        default:
                hw_queue_index = RTL_TXQ_BE;
-               RT_ASSERT(false, "QSLT_BE queue, skb_queue:%d\n",
+               WARN_ONCE(true, "rtl8192cu: QSLT_BE queue, skb_queue:%d\n",
                          mac80211_queue_index);
                break;
        }
@@ -477,14 +477,14 @@ static void _rtl_fill_usb_tx_desc(u8 *txdesc)
  */
 static void _rtl_tx_desc_checksum(u8 *txdesc)
 {
-       u16 *ptr = (u16 *)txdesc;
+       __le16 *ptr = (__le16 *)txdesc;
        u16     checksum = 0;
        u32 index;
 
        /* Clear first */
        SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
        for (index = 0; index < 16; index++)
-               checksum = checksum ^ (*(ptr + index));
+               checksum = checksum ^ le16_to_cpu(*(ptr + index));
        SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
 }
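
The checksum change is an endianness fix rather than a cleanup: the TX descriptor is a little-endian wire format, and XOR-ing raw u16 loads yields a byte-swapped checksum on big-endian hosts, while converting each word with le16_to_cpu() gives the same value on any CPU. A userspace model of the corrected loop; le16_to_cpu is re-implemented locally so the snippet stands alone:

    #include <stdint.h>
    #include <stdio.h>

    /* Byte-order independent read of a little-endian 16-bit word. */
    static uint16_t le16_to_cpu_model(const uint8_t *p)
    {
            return (uint16_t)(p[0] | (p[1] << 8));
    }

    /* Model of the fixed _rtl_tx_desc_checksum(): XOR the first 16 LE
     * words of the descriptor in CPU order (checksum field zeroed first). */
    static uint16_t txdesc_checksum(const uint8_t *txdesc)
    {
            uint16_t checksum = 0;
            int i;

            for (i = 0; i < 16; i++)
                    checksum ^= le16_to_cpu_model(txdesc + 2 * i);
            return checksum;
    }

    int main(void)
    {
            uint8_t desc[32] = { 0x01, 0x02, 0x03, 0x04 };  /* toy descriptor */

            printf("checksum = %#06x\n", txdesc_checksum(desc));  /* 0x0602 */
            return 0;
    }
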
 
index df88e39301c2dabf089a3089b49cf710fb98eff1..487eec89bc29c5ec01b87547cc1648df73a6a0d4 100644 (file)
@@ -92,129 +92,107 @@ struct rx_drv_info_92c {
        u8 reserve:4;
 } __packed;
 
-/* Define a macro that takes a le32 word, converts it to host ordering,
- * right shifts by a specified count, creates a mask of the specified
- * bit count, and extracts that number of bits.
- */
-
-#define SHIFT_AND_MASK_LE(__pdesc, __shift, __bits)            \
-       ((le32_to_cpu(*(((__le32 *)(__pdesc)))) >> (__shift)) & \
-       BIT_LEN_MASK_32(__bits))
-
-/* Define a macro that clears a bit field in an le32 word and
- * sets the specified value into that bit field. The resulting
- * value remains in le32 ordering; however, it is properly converted
- * to host ordering for the clear and set operations before conversion
- * back to le32.
- */
-
-#define SET_BITS_OFFSET_LE(__pdesc, __shift, __len, __val)     \
-       (*(__le32 *)(__pdesc) =                                 \
-       (cpu_to_le32((le32_to_cpu(*((__le32 *)(__pdesc))) &     \
-       (~(BIT_OFFSET_LEN_MASK_32((__shift), __len)))) |                \
-       (((u32)(__val) & BIT_LEN_MASK_32(__len)) << (__shift)))));
-
 /* macros to read various fields in RX descriptor */
 
 /* DWORD 0 */
 #define GET_RX_DESC_PKT_LEN(__rxdesc)          \
-       SHIFT_AND_MASK_LE((__rxdesc), 0, 14)
+       LE_BITS_TO_4BYTE((__rxdesc), 0, 14)
 #define GET_RX_DESC_CRC32(__rxdesc)            \
-       SHIFT_AND_MASK_LE(__rxdesc, 14, 1)
+       LE_BITS_TO_4BYTE(__rxdesc, 14, 1)
 #define GET_RX_DESC_ICV(__rxdesc)              \
-       SHIFT_AND_MASK_LE(__rxdesc, 15, 1)
+       LE_BITS_TO_4BYTE(__rxdesc, 15, 1)
 #define GET_RX_DESC_DRVINFO_SIZE(__rxdesc)     \
-       SHIFT_AND_MASK_LE(__rxdesc, 16, 4)
+       LE_BITS_TO_4BYTE(__rxdesc, 16, 4)
 #define GET_RX_DESC_SECURITY(__rxdesc)         \
-       SHIFT_AND_MASK_LE(__rxdesc, 20, 3)
+       LE_BITS_TO_4BYTE(__rxdesc, 20, 3)
 #define GET_RX_DESC_QOS(__rxdesc)              \
-       SHIFT_AND_MASK_LE(__rxdesc, 23, 1)
+       LE_BITS_TO_4BYTE(__rxdesc, 23, 1)
 #define GET_RX_DESC_SHIFT(__rxdesc)            \
-       SHIFT_AND_MASK_LE(__rxdesc, 24, 2)
+       LE_BITS_TO_4BYTE(__rxdesc, 24, 2)
 #define GET_RX_DESC_PHY_STATUS(__rxdesc)       \
-       SHIFT_AND_MASK_LE(__rxdesc, 26, 1)
+       LE_BITS_TO_4BYTE(__rxdesc, 26, 1)
 #define GET_RX_DESC_SWDEC(__rxdesc)            \
-       SHIFT_AND_MASK_LE(__rxdesc, 27, 1)
+       LE_BITS_TO_4BYTE(__rxdesc, 27, 1)
 #define GET_RX_DESC_LAST_SEG(__rxdesc)         \
-       SHIFT_AND_MASK_LE(__rxdesc, 28, 1)
+       LE_BITS_TO_4BYTE(__rxdesc, 28, 1)
 #define GET_RX_DESC_FIRST_SEG(__rxdesc)                \
-       SHIFT_AND_MASK_LE(__rxdesc, 29, 1)
+       LE_BITS_TO_4BYTE(__rxdesc, 29, 1)
 #define GET_RX_DESC_EOR(__rxdesc)              \
-       SHIFT_AND_MASK_LE(__rxdesc, 30, 1)
+       LE_BITS_TO_4BYTE(__rxdesc, 30, 1)
 #define GET_RX_DESC_OWN(__rxdesc)              \
-       SHIFT_AND_MASK_LE(__rxdesc, 31, 1)
+       LE_BITS_TO_4BYTE(__rxdesc, 31, 1)
 
 /* DWORD 1 */
 #define GET_RX_DESC_MACID(__rxdesc)            \
-       SHIFT_AND_MASK_LE(__rxdesc+4, 0, 5)
+       LE_BITS_TO_4BYTE(__rxdesc + 4, 0, 5)
 #define GET_RX_DESC_TID(__rxdesc)              \
-       SHIFT_AND_MASK_LE(__rxdesc+4, 5, 4)
+       LE_BITS_TO_4BYTE(__rxdesc + 4, 5, 4)
 #define GET_RX_DESC_PAGGR(__rxdesc)            \
-       SHIFT_AND_MASK_LE(__rxdesc+4, 14, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 4, 14, 1)
 #define GET_RX_DESC_FAGGR(__rxdesc)            \
-       SHIFT_AND_MASK_LE(__rxdesc+4, 15, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 4, 15, 1)
 #define GET_RX_DESC_A1_FIT(__rxdesc)           \
-       SHIFT_AND_MASK_LE(__rxdesc+4, 16, 4)
+       LE_BITS_TO_4BYTE(__rxdesc + 4, 16, 4)
 #define GET_RX_DESC_A2_FIT(__rxdesc)           \
-       SHIFT_AND_MASK_LE(__rxdesc+4, 20, 4)
+       LE_BITS_TO_4BYTE(__rxdesc + 4, 20, 4)
 #define GET_RX_DESC_PAM(__rxdesc)              \
-       SHIFT_AND_MASK_LE(__rxdesc+4, 24, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 4, 24, 1)
 #define GET_RX_DESC_PWR(__rxdesc)              \
-       SHIFT_AND_MASK_LE(__rxdesc+4, 25, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 4, 25, 1)
 #define GET_RX_DESC_MORE_DATA(__rxdesc)                \
-       SHIFT_AND_MASK_LE(__rxdesc+4, 26, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 4, 26, 1)
 #define GET_RX_DESC_MORE_FRAG(__rxdesc)                \
-       SHIFT_AND_MASK_LE(__rxdesc+4, 27, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 4, 27, 1)
 #define GET_RX_DESC_TYPE(__rxdesc)             \
-       SHIFT_AND_MASK_LE(__rxdesc+4, 28, 2)
+       LE_BITS_TO_4BYTE(__rxdesc + 4, 28, 2)
 #define GET_RX_DESC_MC(__rxdesc)               \
-       SHIFT_AND_MASK_LE(__rxdesc+4, 30, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 4, 30, 1)
 #define GET_RX_DESC_BC(__rxdesc)               \
-       SHIFT_AND_MASK_LE(__rxdesc+4, 31, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 4, 31, 1)
 
 /* DWORD 2 */
 #define GET_RX_DESC_SEQ(__rxdesc)              \
-       SHIFT_AND_MASK_LE(__rxdesc+8, 0, 12)
+       LE_BITS_TO_4BYTE(__rxdesc + 8, 0, 12)
 #define GET_RX_DESC_FRAG(__rxdesc)             \
-       SHIFT_AND_MASK_LE(__rxdesc+8, 12, 4)
+       LE_BITS_TO_4BYTE(__rxdesc + 8, 12, 4)
 #define GET_RX_DESC_USB_AGG_PKTNUM(__rxdesc)   \
-       SHIFT_AND_MASK_LE(__rxdesc+8, 16, 8)
+       LE_BITS_TO_4BYTE(__rxdesc + 8, 16, 8)
 #define GET_RX_DESC_NEXT_IND(__rxdesc)         \
-       SHIFT_AND_MASK_LE(__rxdesc+8, 30, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 8, 30, 1)
 
 /* DWORD 3 */
 #define GET_RX_DESC_RX_MCS(__rxdesc)           \
-       SHIFT_AND_MASK_LE(__rxdesc+12, 0, 6)
+       LE_BITS_TO_4BYTE(__rxdesc + 12, 0, 6)
 #define GET_RX_DESC_RX_HT(__rxdesc)            \
-       SHIFT_AND_MASK_LE(__rxdesc+12, 6, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 12, 6, 1)
 #define GET_RX_DESC_AMSDU(__rxdesc)            \
-       SHIFT_AND_MASK_LE(__rxdesc+12, 7, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 12, 7, 1)
 #define GET_RX_DESC_SPLCP(__rxdesc)            \
-       SHIFT_AND_MASK_LE(__rxdesc+12, 8, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 12, 8, 1)
 #define GET_RX_DESC_BW(__rxdesc)               \
-       SHIFT_AND_MASK_LE(__rxdesc+12, 9, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 12, 9, 1)
 #define GET_RX_DESC_HTC(__rxdesc)              \
-       SHIFT_AND_MASK_LE(__rxdesc+12, 10, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 12, 10, 1)
 #define GET_RX_DESC_TCP_CHK_RPT(__rxdesc)      \
-       SHIFT_AND_MASK_LE(__rxdesc+12, 11, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 12, 11, 1)
 #define GET_RX_DESC_IP_CHK_RPT(__rxdesc)       \
-       SHIFT_AND_MASK_LE(__rxdesc+12, 12, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 12, 12, 1)
 #define GET_RX_DESC_TCP_CHK_VALID(__rxdesc)    \
-       SHIFT_AND_MASK_LE(__rxdesc+12, 13, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 12, 13, 1)
 #define GET_RX_DESC_HWPC_ERR(__rxdesc)         \
-       SHIFT_AND_MASK_LE(__rxdesc+12, 14, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 12, 14, 1)
 #define GET_RX_DESC_HWPC_IND(__rxdesc)         \
-       SHIFT_AND_MASK_LE(__rxdesc+12, 15, 1)
+       LE_BITS_TO_4BYTE(__rxdesc + 12, 15, 1)
 #define GET_RX_DESC_IV0(__rxdesc)              \
-       SHIFT_AND_MASK_LE(__rxdesc+12, 16, 16)
+       LE_BITS_TO_4BYTE(__rxdesc + 12, 16, 16)
 
 /* DWORD 4 */
 #define GET_RX_DESC_IV1(__rxdesc)              \
-       SHIFT_AND_MASK_LE(__rxdesc+16, 0, 32)
+       LE_BITS_TO_4BYTE(__rxdesc + 16, 0, 32)
 
 /* DWORD 5 */
 #define GET_RX_DESC_TSFL(__rxdesc)             \
-       SHIFT_AND_MASK_LE(__rxdesc+20, 0, 32)
+       LE_BITS_TO_4BYTE(__rxdesc + 20, 0, 32)
 
 /*======================= tx desc ============================================*/
 
@@ -222,182 +200,182 @@ struct rx_drv_info_92c {
 
 /* Dword 0 */
 #define SET_TX_DESC_PKT_SIZE(__txdesc, __value)                \
-       SET_BITS_OFFSET_LE(__txdesc, 0, 16, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc, 0, 16, __value)
 #define SET_TX_DESC_OFFSET(__txdesc, __value)          \
-       SET_BITS_OFFSET_LE(__txdesc, 16, 8, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc, 16, 8, __value)
 #define SET_TX_DESC_BMC(__txdesc, __value)             \
-       SET_BITS_OFFSET_LE(__txdesc, 24, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc, 24, 1, __value)
 #define SET_TX_DESC_HTC(__txdesc, __value)             \
-       SET_BITS_OFFSET_LE(__txdesc, 25, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc, 25, 1, __value)
 #define SET_TX_DESC_LAST_SEG(__txdesc, __value)                \
-       SET_BITS_OFFSET_LE(__txdesc, 26, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc, 26, 1, __value)
 #define SET_TX_DESC_FIRST_SEG(__txdesc, __value)       \
-        SET_BITS_OFFSET_LE(__txdesc, 27, 1, __value)
+        SET_BITS_TO_LE_4BYTE(__txdesc, 27, 1, __value)
 #define SET_TX_DESC_LINIP(__txdesc, __value)           \
-       SET_BITS_OFFSET_LE(__txdesc, 28, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc, 28, 1, __value)
 #define SET_TX_DESC_NO_ACM(__txdesc, __value)          \
-       SET_BITS_OFFSET_LE(__txdesc, 29, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc, 29, 1, __value)
 #define SET_TX_DESC_GF(__txdesc, __value)              \
-       SET_BITS_OFFSET_LE(__txdesc, 30, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc, 30, 1, __value)
 #define SET_TX_DESC_OWN(__txdesc, __value)             \
-       SET_BITS_OFFSET_LE(__txdesc, 31, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc, 31, 1, __value)
 
 
 /* Dword 1 */
 #define SET_TX_DESC_MACID(__txdesc, __value)           \
-       SET_BITS_OFFSET_LE(__txdesc+4, 0, 5, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 4, 0, 5, __value)
 #define SET_TX_DESC_AGG_ENABLE(__txdesc, __value)      \
-       SET_BITS_OFFSET_LE(__txdesc+4, 5, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 4, 5, 1, __value)
 #define SET_TX_DESC_AGG_BREAK(__txdesc, __value)       \
-       SET_BITS_OFFSET_LE(__txdesc+4, 6, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 4, 6, 1, __value)
 #define SET_TX_DESC_RDG_ENABLE(__txdesc, __value)      \
-       SET_BITS_OFFSET_LE(__txdesc+4, 7, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 4, 7, 1, __value)
 #define SET_TX_DESC_QUEUE_SEL(__txdesc, __value)       \
-       SET_BITS_OFFSET_LE(__txdesc+4, 8, 5, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 4, 8, 5, __value)
 #define SET_TX_DESC_RDG_NAV_EXT(__txdesc, __value)     \
-       SET_BITS_OFFSET_LE(__txdesc+4, 13, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 4, 13, 1, __value)
 #define SET_TX_DESC_LSIG_TXOP_EN(__txdesc, __value)    \
-       SET_BITS_OFFSET_LE(__txdesc+4, 14, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 4, 14, 1, __value)
 #define SET_TX_DESC_PIFS(__txdesc, __value)            \
-       SET_BITS_OFFSET_LE(__txdesc+4, 15, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 4, 15, 1, __value)
 #define SET_TX_DESC_RATE_ID(__txdesc, __value)         \
-       SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 4, 16, 4, __value)
 #define SET_TX_DESC_RA_BRSR_ID(__txdesc, __value)      \
-       SET_BITS_OFFSET_LE(__txdesc+4, 16, 4, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 4, 16, 4, __value)
 #define SET_TX_DESC_NAV_USE_HDR(__txdesc, __value)     \
-       SET_BITS_OFFSET_LE(__txdesc+4, 20, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 4, 20, 1, __value)
 #define SET_TX_DESC_EN_DESC_ID(__txdesc, __value)      \
-       SET_BITS_OFFSET_LE(__txdesc+4, 21, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 4, 21, 1, __value)
 #define SET_TX_DESC_SEC_TYPE(__txdesc, __value)                \
-       SET_BITS_OFFSET_LE(__txdesc+4, 22, 2, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 4, 22, 2, __value)
 #define SET_TX_DESC_PKT_OFFSET(__txdesc, __value)      \
-       SET_BITS_OFFSET_LE(__txdesc+4, 26, 5, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 4, 26, 5, __value)
 
 /* Dword 2 */
 #define SET_TX_DESC_RTS_RC(__txdesc, __value)          \
-       SET_BITS_OFFSET_LE(__txdesc+8, 0, 6, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 8, 0, 6, __value)
 #define SET_TX_DESC_DATA_RC(__txdesc, __value)         \
-       SET_BITS_OFFSET_LE(__txdesc+8, 6, 6, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 8, 6, 6, __value)
 #define SET_TX_DESC_BAR_RTY_TH(__txdesc, __value)      \
-       SET_BITS_OFFSET_LE(__txdesc+8, 14, 2, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 8, 14, 2, __value)
 #define SET_TX_DESC_MORE_FRAG(__txdesc, __value)       \
-       SET_BITS_OFFSET_LE(__txdesc+8, 17, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 8, 17, 1, __value)
 #define SET_TX_DESC_RAW(__txdesc, __value)             \
-       SET_BITS_OFFSET_LE(__txdesc+8, 18, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 8, 18, 1, __value)
 #define SET_TX_DESC_CCX(__txdesc, __value)             \
-       SET_BITS_OFFSET_LE(__txdesc+8, 19, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 8, 19, 1, __value)
 #define SET_TX_DESC_AMPDU_DENSITY(__txdesc, __value)   \
-       SET_BITS_OFFSET_LE(__txdesc+8, 20, 3, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 8, 20, 3, __value)
 #define SET_TX_DESC_ANTSEL_A(__txdesc, __value)                \
-       SET_BITS_OFFSET_LE(__txdesc+8, 24, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 8, 24, 1, __value)
 #define SET_TX_DESC_ANTSEL_B(__txdesc, __value)                \
-       SET_BITS_OFFSET_LE(__txdesc+8, 25, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 8, 25, 1, __value)
 #define SET_TX_DESC_TX_ANT_CCK(__txdesc, __value)      \
-       SET_BITS_OFFSET_LE(__txdesc+8, 26, 2, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 8, 26, 2, __value)
 #define SET_TX_DESC_TX_ANTL(__txdesc, __value)         \
-       SET_BITS_OFFSET_LE(__txdesc+8, 28, 2, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 8, 28, 2, __value)
 #define SET_TX_DESC_TX_ANT_HT(__txdesc, __value)       \
-       SET_BITS_OFFSET_LE(__txdesc+8, 30, 2, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 8, 30, 2, __value)
 
 /* Dword 3 */
 #define SET_TX_DESC_NEXT_HEAP_PAGE(__txdesc, __value)  \
-       SET_BITS_OFFSET_LE(__txdesc+12, 0, 8, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 12, 0, 8, __value)
 #define SET_TX_DESC_TAIL_PAGE(__txdesc, __value)       \
-       SET_BITS_OFFSET_LE(__txdesc+12, 8, 8, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 12, 8, 8, __value)
 #define SET_TX_DESC_SEQ(__txdesc, __value)             \
-       SET_BITS_OFFSET_LE(__txdesc+12, 16, 12, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 12, 16, 12, __value)
 #define SET_TX_DESC_PKT_ID(__txdesc, __value)          \
-       SET_BITS_OFFSET_LE(__txdesc+12, 28, 4, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 12, 28, 4, __value)
 
 /* Dword 4 */
 #define SET_TX_DESC_RTS_RATE(__txdesc, __value)                \
-       SET_BITS_OFFSET_LE(__txdesc+16, 0, 5, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 0, 5, __value)
 #define SET_TX_DESC_AP_DCFE(__txdesc, __value)         \
-       SET_BITS_OFFSET_LE(__txdesc+16, 5, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 5, 1, __value)
 #define SET_TX_DESC_QOS(__txdesc, __value)             \
-       SET_BITS_OFFSET_LE(__txdesc+16, 6, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 6, 1, __value)
 #define SET_TX_DESC_HWSEQ_EN(__txdesc, __value)                \
-       SET_BITS_OFFSET_LE(__txdesc+16, 7, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 7, 1, __value)
 #define SET_TX_DESC_USE_RATE(__txdesc, __value)                \
-       SET_BITS_OFFSET_LE(__txdesc+16, 8, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 8, 1, __value)
 #define SET_TX_DESC_DISABLE_RTS_FB(__txdesc, __value)  \
-       SET_BITS_OFFSET_LE(__txdesc+16, 9, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 9, 1, __value)
 #define SET_TX_DESC_DISABLE_FB(__txdesc, __value)      \
-       SET_BITS_OFFSET_LE(__txdesc+16, 10, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 10, 1, __value)
 #define SET_TX_DESC_CTS2SELF(__txdesc, __value)                \
-       SET_BITS_OFFSET_LE(__txdesc+16, 11, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 11, 1, __value)
 #define SET_TX_DESC_RTS_ENABLE(__txdesc, __value)      \
-       SET_BITS_OFFSET_LE(__txdesc+16, 12, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 12, 1, __value)
 #define SET_TX_DESC_HW_RTS_ENABLE(__txdesc, __value)   \
-       SET_BITS_OFFSET_LE(__txdesc+16, 13, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 13, 1, __value)
 #define SET_TX_DESC_WAIT_DCTS(__txdesc, __value)       \
-       SET_BITS_OFFSET_LE(__txdesc+16, 18, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 18, 1, __value)
 #define SET_TX_DESC_CTS2AP_EN(__txdesc, __value)       \
-       SET_BITS_OFFSET_LE(__txdesc+16, 19, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 19, 1, __value)
 #define SET_TX_DESC_DATA_SC(__txdesc, __value)         \
-       SET_BITS_OFFSET_LE(__txdesc+16, 20, 2, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 20, 2, __value)
 #define SET_TX_DESC_DATA_STBC(__txdesc, __value)       \
-       SET_BITS_OFFSET_LE(__txdesc+16, 22, 2, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 22, 2, __value)
 #define SET_TX_DESC_DATA_SHORT(__txdesc, __value)      \
-       SET_BITS_OFFSET_LE(__txdesc+16, 24, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 24, 1, __value)
 #define SET_TX_DESC_DATA_BW(__txdesc, __value)         \
-       SET_BITS_OFFSET_LE(__txdesc+16, 25, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 25, 1, __value)
 #define SET_TX_DESC_RTS_SHORT(__txdesc, __value)       \
-       SET_BITS_OFFSET_LE(__txdesc+16, 26, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 26, 1, __value)
 #define SET_TX_DESC_RTS_BW(__txdesc, __value)          \
-       SET_BITS_OFFSET_LE(__txdesc+16, 27, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 27, 1, __value)
 #define SET_TX_DESC_RTS_SC(__txdesc, __value)          \
-       SET_BITS_OFFSET_LE(__txdesc+16, 28, 2, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 28, 2, __value)
 #define SET_TX_DESC_RTS_STBC(__txdesc, __value)                \
-       SET_BITS_OFFSET_LE(__txdesc+16, 30, 2, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 16, 30, 2, __value)
 
 /* Dword 5 */
 #define SET_TX_DESC_TX_RATE(__pdesc, __val)            \
-       SET_BITS_OFFSET_LE(__pdesc+20, 0, 6, __val)
+       SET_BITS_TO_LE_4BYTE(__pdesc + 20, 0, 6, __val)
 #define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val)       \
-       SET_BITS_OFFSET_LE(__pdesc+20, 6, 1, __val)
+       SET_BITS_TO_LE_4BYTE(__pdesc + 20, 6, 1, __val)
 #define SET_TX_DESC_CCX_TAG(__pdesc, __val)            \
-       SET_BITS_OFFSET_LE(__pdesc+20, 7, 1, __val)
+       SET_BITS_TO_LE_4BYTE(__pdesc + 20, 7, 1, __val)
 #define SET_TX_DESC_DATA_RATE_FB_LIMIT(__txdesc, __value) \
-       SET_BITS_OFFSET_LE(__txdesc+20, 8, 5, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 20, 8, 5, __value)
 #define SET_TX_DESC_RTS_RATE_FB_LIMIT(__txdesc, __value) \
-       SET_BITS_OFFSET_LE(__txdesc+20, 13, 4, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 20, 13, 4, __value)
 #define SET_TX_DESC_RETRY_LIMIT_ENABLE(__txdesc, __value) \
-       SET_BITS_OFFSET_LE(__txdesc+20, 17, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 20, 17, 1, __value)
 #define SET_TX_DESC_DATA_RETRY_LIMIT(__txdesc, __value)        \
-       SET_BITS_OFFSET_LE(__txdesc+20, 18, 6, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 20, 18, 6, __value)
 #define SET_TX_DESC_USB_TXAGG_NUM(__txdesc, __value)   \
-       SET_BITS_OFFSET_LE(__txdesc+20, 24, 8, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 20, 24, 8, __value)
 
 /* Dword 6 */
 #define SET_TX_DESC_TXAGC_A(__txdesc, __value)         \
-       SET_BITS_OFFSET_LE(__txdesc+24, 0, 5, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 24, 0, 5, __value)
 #define SET_TX_DESC_TXAGC_B(__txdesc, __value)         \
-       SET_BITS_OFFSET_LE(__txdesc+24, 5, 5, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 24, 5, 5, __value)
 #define SET_TX_DESC_USB_MAX_LEN(__txdesc, __value)     \
-       SET_BITS_OFFSET_LE(__txdesc+24, 10, 1, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 24, 10, 1, __value)
 #define SET_TX_DESC_MAX_AGG_NUM(__txdesc, __value)     \
-       SET_BITS_OFFSET_LE(__txdesc+24, 11, 5, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 24, 11, 5, __value)
 #define SET_TX_DESC_MCSG1_MAX_LEN(__txdesc, __value)   \
-       SET_BITS_OFFSET_LE(__txdesc+24, 16, 4, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 24, 16, 4, __value)
 #define SET_TX_DESC_MCSG2_MAX_LEN(__txdesc, __value)   \
-       SET_BITS_OFFSET_LE(__txdesc+24, 20, 4, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 24, 20, 4, __value)
 #define SET_TX_DESC_MCSG3_MAX_LEN(__txdesc, __value)   \
-       SET_BITS_OFFSET_LE(__txdesc+24, 24, 4, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 24, 24, 4, __value)
 #define SET_TX_DESC_MCSG7_MAX_LEN(__txdesc, __value)   \
-       SET_BITS_OFFSET_LE(__txdesc+24, 28, 4, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 24, 28, 4, __value)
 
 /* Dword 7 */
 #define SET_TX_DESC_TX_DESC_CHECKSUM(__txdesc, __value) \
-       SET_BITS_OFFSET_LE(__txdesc+28, 0, 16, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 28, 0, 16, __value)
 #define SET_TX_DESC_MCSG4_MAX_LEN(__txdesc, __value)   \
-       SET_BITS_OFFSET_LE(__txdesc+28, 16, 4, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 28, 16, 4, __value)
 #define SET_TX_DESC_MCSG5_MAX_LEN(__txdesc, __value)   \
-       SET_BITS_OFFSET_LE(__txdesc+28, 20, 4, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 28, 20, 4, __value)
 #define SET_TX_DESC_MCSG6_MAX_LEN(__txdesc, __value)   \
-       SET_BITS_OFFSET_LE(__txdesc+28, 24, 4, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 28, 24, 4, __value)
 #define SET_TX_DESC_MCSG15_MAX_LEN(__txdesc, __value)  \
-       SET_BITS_OFFSET_LE(__txdesc+28, 28, 4, __value)
+       SET_BITS_TO_LE_4BYTE(__txdesc + 28, 28, 4, __value)
 
 
 int  rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw);
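
SHIFT_AND_MASK_LE and SET_BITS_OFFSET_LE were file-local duplicates of bit-field helpers that rtlwifi's common headers already provide, so the descriptor accessors above switch to the shared LE_BITS_TO_4BYTE/SET_BITS_TO_LE_4BYTE names with no change in behaviour. A plain-C model of the two operations, on the assumption that both treat their target as a little-endian 32-bit dword and do the shift and mask in host order:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t le32_read(const uint8_t *p)
    {
            return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
                   ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    static void le32_write(uint8_t *p, uint32_t v)
    {
            p[0] = (uint8_t)v;
            p[1] = (uint8_t)(v >> 8);
            p[2] = (uint8_t)(v >> 16);
            p[3] = (uint8_t)(v >> 24);
    }

    static uint32_t bit_len_mask(unsigned int len)
    {
            return len >= 32 ? 0xffffffffu : (1u << len) - 1;
    }

    /* LE_BITS_TO_4BYTE(addr, shift, len): extract a bit field. */
    static uint32_t le_bits_to_4byte(const uint8_t *addr, unsigned int shift,
                                     unsigned int len)
    {
            return (le32_read(addr) >> shift) & bit_len_mask(len);
    }

    /* SET_BITS_TO_LE_4BYTE(addr, shift, len, val): clear then set a field. */
    static void set_bits_to_le_4byte(uint8_t *addr, unsigned int shift,
                                     unsigned int len, uint32_t val)
    {
            uint32_t dw = le32_read(addr);

            dw &= ~(bit_len_mask(len) << shift);
            dw |= (val & bit_len_mask(len)) << shift;
            le32_write(addr, dw);
    }

    int main(void)
    {
            uint8_t rxdesc[4] = { 0 };

            set_bits_to_le_4byte(rxdesc, 16, 4, 0xA);
            printf("drvinfo size = %#x\n", le_bits_to_4byte(rxdesc, 16, 4));
            return 0;
    }

GET_RX_DESC_DRVINFO_SIZE(__rxdesc) above, for instance, is just this read at dword 0 with shift 16 and width 4.
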
index 17f6903c14bbe6f33067b83529a6f124a0b0270d..88faeab2574f0b15a4edb43dc96772010eb61ef8 100644 (file)
@@ -26,6 +26,7 @@
 #include "../wifi.h"
 #include "../pci.h"
 #include "../base.h"
+#include "../efuse.h"
 #include "reg.h"
 #include "def.h"
 #include "fw.h"
@@ -59,86 +60,31 @@ static void _rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable)
        }
 }
 
-static void _rtl92d_fw_block_write(struct ieee80211_hw *hw,
-                                  const u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 blocksize = sizeof(u32);
-       u8 *bufferptr = (u8 *) buffer;
-       u32 *pu4BytePtr = (u32 *) buffer;
-       u32 i, offset, blockCount, remainSize;
-
-       blockCount = size / blocksize;
-       remainSize = size % blocksize;
-       for (i = 0; i < blockCount; i++) {
-               offset = i * blocksize;
-               rtl_write_dword(rtlpriv, (FW_8192D_START_ADDRESS + offset),
-                               *(pu4BytePtr + i));
-       }
-       if (remainSize) {
-               offset = blockCount * blocksize;
-               bufferptr += offset;
-               for (i = 0; i < remainSize; i++) {
-                       rtl_write_byte(rtlpriv, (FW_8192D_START_ADDRESS +
-                                                offset + i), *(bufferptr + i));
-               }
-       }
-}
-
-static void _rtl92d_fw_page_write(struct ieee80211_hw *hw,
-                                 u32 page, const u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 value8;
-       u8 u8page = (u8) (page & 0x07);
-
-       value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
-       rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
-       _rtl92d_fw_block_write(hw, buffer, size);
-}
-
-static void _rtl92d_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
-{
-       u32 fwlen = *pfwlen;
-       u8 remain = (u8) (fwlen % 4);
-
-       remain = (remain == 0) ? 0 : (4 - remain);
-       while (remain > 0) {
-               pfwbuf[fwlen] = 0;
-               fwlen++;
-               remain--;
-       }
-       *pfwlen = fwlen;
-}
-
 static void _rtl92d_write_fw(struct ieee80211_hw *hw,
                             enum version_8192d version, u8 *buffer, u32 size)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       u8 *bufferPtr = buffer;
-       u32 pagenums, remainSize;
+       u8 *bufferptr = buffer;
+       u32 pagenums, remainsize;
        u32 page, offset;
 
        RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
        if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE)
-               _rtl92d_fill_dummy(bufferPtr, &size);
+               rtl_fill_dummy(bufferptr, &size);
        pagenums = size / FW_8192D_PAGE_SIZE;
-       remainSize = size % FW_8192D_PAGE_SIZE;
-       if (pagenums > 8) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Page numbers should not greater then 8\n");
-       }
+       remainsize = size % FW_8192D_PAGE_SIZE;
+       if (pagenums > 8)
+               pr_err("Page numbers should not greater then 8\n");
        for (page = 0; page < pagenums; page++) {
                offset = page * FW_8192D_PAGE_SIZE;
-               _rtl92d_fw_page_write(hw, page, (bufferPtr + offset),
-                                     FW_8192D_PAGE_SIZE);
+               rtl_fw_page_write(hw, page, (bufferptr + offset),
+                                 FW_8192D_PAGE_SIZE);
        }
-       if (remainSize) {
+       if (remainsize) {
                offset = pagenums * FW_8192D_PAGE_SIZE;
                page = pagenums;
-               _rtl92d_fw_page_write(hw, page, (bufferPtr + offset),
-                                     remainSize);
+               rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize);
        }
 }
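
_rtl92d_fw_block_write(), _rtl92d_fw_page_write() and _rtl92d_fill_dummy() disappear because near-identical copies lived in the other rtlwifi chip drivers as well; the shared rtl_fill_dummy() and rtl_fw_page_write() replacements are what the newly added "../efuse.h" include brings in. The download loop keeps its shape, sketched here in runnable form; PAGE_SZ stands in for FW_8192D_PAGE_SIZE, whose real value is not shown in this diff:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SZ 4096    /* stand-in for FW_8192D_PAGE_SIZE */

    /* Model of rtl_fill_dummy(): zero-pad to a 4-byte multiple so the
     * block writer can push whole dwords. */
    static void fill_dummy(uint8_t *buf, uint32_t *len)
    {
            while (*len % 4)
                    buf[(*len)++] = 0;
    }

    /* Model of the paged download loop in _rtl92d_write_fw(). */
    static void write_fw(uint8_t *buf, uint32_t size)
    {
            uint32_t pagenums, remainsize, page;

            fill_dummy(buf, &size);
            pagenums = size / PAGE_SZ;
            remainsize = size % PAGE_SZ;
            if (pagenums > 8)
                    fprintf(stderr, "image exceeds the 8-page window\n");
            for (page = 0; page < pagenums; page++)
                    printf("page %u: %u bytes at offset %u\n",
                           page, (uint32_t)PAGE_SZ, page * PAGE_SZ);
            if (remainsize)
                    printf("page %u: %u bytes at offset %u\n",
                           pagenums, remainsize, pagenums * PAGE_SZ);
    }

    int main(void)
    {
            static uint8_t image[3 * PAGE_SZ + 10];

            write_fw(image, sizeof(image) - 3); /* forces padding + remainder */
            return 0;
    }
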
 
@@ -153,13 +99,10 @@ static int _rtl92d_fw_free_to_go(struct ieee80211_hw *hw)
        } while ((counter++ < FW_8192D_POLLING_TIMEOUT_COUNT) &&
                 (!(value32 & FWDL_ChkSum_rpt)));
        if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "chksum report faill ! REG_MCUFWDL:0x%08x\n",
-                        value32);
+               pr_err("chksum report fail! REG_MCUFWDL:0x%08x\n",
+                      value32);
                return -EIO;
        }
-       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                "Checksum report OK ! REG_MCUFWDL:0x%08x\n", value32);
        value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
        value32 |= MCUFWDL_RDY;
        rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
@@ -182,7 +125,7 @@ void rtl92d_firmware_selfreset(struct ieee80211_hw *hw)
                udelay(50);
                u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
        }
-       RT_ASSERT((delay > 0), "8051 reset failed!\n");
+       WARN_ONCE((delay <= 0), "rtl8192de: 8051 reset failed!\n");
        RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
                 "=====> 8051 reset success (%d)\n", delay);
 }
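
Note the polarity flip every RT_ASSERT conversion requires: RT_ASSERT(cond, ...) complained when cond was false, whereas the kernel's WARN_ONCE(cond, ...) fires when cond is true, and only on the first occurrence per call site, so RT_ASSERT((delay > 0), ...) above becomes WARN_ONCE((delay <= 0), ...). A compact userspace model of both behaviours; the real WARN_ONCE also dumps a backtrace, omitted here:

    #include <stdio.h>
    #include <stdbool.h>

    #define RT_ASSERT_MODEL(cond, fmt, ...)                                 \
            do {                                                            \
                    if (!(cond))                                            \
                            fprintf(stderr, "assert: " fmt, ##__VA_ARGS__); \
            } while (0)

    /* Uses a GCC statement expression, like the kernel's own WARN_ONCE. */
    #define WARN_ONCE_MODEL(cond, fmt, ...)                                 \
            ({                                                              \
                    static bool warned;                                     \
                    if ((cond) && !warned) {                                \
                            warned = true;                                  \
                            fprintf(stderr, "warn: " fmt, ##__VA_ARGS__);   \
                    }                                                       \
                    !!(cond);                                               \
            })

    int main(void)
    {
            int delay = 0, i;

            RT_ASSERT_MODEL(delay > 0, "8051 reset failed!\n");  /* old form */
            for (i = 0; i < 3; i++)           /* new form: prints only once */
                    WARN_ONCE_MODEL(delay <= 0, "8051 reset failed!\n");
            return 0;
    }
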
@@ -326,13 +269,8 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
        value &= (~BIT(5));
        rtl_write_byte(rtlpriv, 0x1f, value);
        spin_unlock_irqrestore(&globalmutex_for_fwdownload, flags);
-       if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "fw is not ready to run!\n");
-               goto exit;
-       } else {
-               RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "fw is ready to run!\n");
-       }
+       if (err)
+               pr_err("fw is not ready to run!\n");
 exit:
        err = _rtl92d_fw_init(hw);
        return err;
@@ -407,8 +345,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
        while (!bwrite_success) {
                wait_writeh2c_limmit--;
                if (wait_writeh2c_limmit == 0) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Write H2C fail because no trigger for FW INT!\n");
+                       pr_err("Write H2C fail because no trigger for FW INT!\n");
                        break;
                }
                boxnum = rtlhal->last_hmeboxnum;
@@ -430,8 +367,8 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
                        box_extreg = REG_HMEBOX_EXT_3;
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "switch case %#x not processed\n", boxnum);
+                       pr_err("switch case %#x not processed\n",
+                              boxnum);
                        break;
                }
                isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
@@ -507,8 +444,8 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
                                               boxcontent[idx]);
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "switch case %#x not processed\n", cmd_len);
+                       pr_err("switch case %#x not processed\n",
+                              cmd_len);
                        break;
                }
                bwrite_success = true;
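
Some context for the loop these hunks sit in: H2C commands reach the firmware through a small ring of mailbox registers (the REG_HMEBOX_EXT_0..3 selection in the switch above), the driver remembers last_hmeboxnum, waits for the firmware to consume a box before reusing it, and gives up with the "no trigger for FW INT" error once the retry budget runs out. An illustrative model of that rotation; fw_consumed() stands in for _rtl92d_check_fw_read_last_h2c():

    #include <stdio.h>

    #define H2C_BOXES 4

    static unsigned int last_hmeboxnum;

    static int fw_consumed(unsigned int boxnum)
    {
            return 1;       /* pretend firmware always keeps up */
    }

    static int fill_h2c(unsigned int cmd)
    {
            int retries = 100;      /* stands in for wait_writeh2c_limmit */

            while (retries--) {
                    unsigned int box = last_hmeboxnum;

                    if (!fw_consumed(box))
                            continue;       /* box still owned by firmware */
                    printf("cmd %#x -> HMEBOX_%u\n", cmd, box);
                    last_hmeboxnum = (box + 1) % H2C_BOXES;
                    return 0;
            }
            fprintf(stderr, "Write H2C fail because no trigger for FW INT!\n");
            return -1;
    }

    int main(void)
    {
            unsigned int i;

            for (i = 0; i < 6; i++)
                    fill_h2c(0x10 + i);
            return 0;
    }
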
index fcb14c5db172b889ed5039f04bc5226758097336..cf28d25c551ff58e20d017c4aef2b2575055747d 100644 (file)
@@ -163,8 +163,7 @@ void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
        case HAL_DEF_WOWLAN:
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", variable);
+               pr_err("switch case %#x not processed\n", variable);
                break;
        }
 }
@@ -358,9 +357,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                acm_ctrl &= (~ACMHW_VOQEN);
                                break;
                        default:
-                               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                        "switch case %#x not processed\n",
-                                        e_aci);
+                               pr_err("switch case %#x not processed\n",
+                                      e_aci);
                                break;
                        }
                }
@@ -500,8 +498,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                break;
        }
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", variable);
+               pr_err("switch case %#x not processed\n", variable);
                break;
        }
 }
@@ -520,9 +517,8 @@ static bool _rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
                if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
                        break;
                if (count > POLLING_LLT_THRESHOLD) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Failed to polling write LLT done at address %d!\n",
-                                address);
+                       pr_err("Failed to polling write LLT done at address %d!\n",
+                              address);
                        status = false;
                        break;
                }
@@ -618,19 +614,19 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
 
 static void _rtl92de_gen_refresh_led_state(struct ieee80211_hw *hw)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+       struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
 
        if (rtlpci->up_first_time)
                return;
        if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
-               rtl92de_sw_led_on(hw, pLed0);
+               rtl92de_sw_led_on(hw, pled0);
        else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
-               rtl92de_sw_led_on(hw, pLed0);
+               rtl92de_sw_led_on(hw, pled0);
        else
-               rtl92de_sw_led_off(hw, pLed0);
+               rtl92de_sw_led_off(hw, pled0);
 }
 
 static bool _rtl92de_init_mac(struct ieee80211_hw *hw)
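
_rtl92de_gen_refresh_led_state() now takes sw_led0 from the common rtl_priv instead of the PCI-only rtl_pci_priv, and the LED hunks below make the same move: this merge relocates ledctl into rtl_priv so the PCI and USB front ends share one LED implementation, normalizing the Hungarian pLed0 naming to pled0 along the way. A structural sketch with abbreviated, assumed layouts:

    #include <stdbool.h>

    /* Names mirror the diff; the surrounding layouts are abbreviated. */
    struct rtl_led_sketch {
            bool ledon;
            int ledpin;
    };

    struct rtl_ledctl_sketch {
            bool led_opendrain;
            struct rtl_led_sketch sw_led0;
            struct rtl_led_sketch sw_led1;
    };

    struct rtl_priv_sketch {
            /* ...state common to the PCI and USB front ends... */
            struct rtl_ledctl_sketch ledctl;    /* was in rtl_pci_priv */
    };
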
@@ -920,7 +916,7 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
        /* rtlpriv->intf_ops->disable_aspm(hw); */
        rtstatus = _rtl92de_init_mac(hw);
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+               pr_err("Init MAC failed\n");
                err = 1;
                spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags);
                return err;
@@ -1119,11 +1115,8 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
                         "Set Network type to AP!\n");
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Network type %d not supported!\n", type);
+               pr_err("Network type %d not supported!\n", type);
                return 1;
-               break;
-
        }
        rtl_write_byte(rtlpriv, MSR, bt_msr);
        rtlpriv->cfg->ops->led_control(hw, ledaction);
@@ -1732,7 +1725,7 @@ static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw)
                break;
        default:
                chipver |= CHIP_92D_D_CUT;
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown CUT!\n");
+               pr_err("Unknown CUT!\n");
                break;
        }
        rtlpriv->rtlhal.version = chipver;
@@ -1816,7 +1809,7 @@ void rtl92de_read_eeprom_info(struct ieee80211_hw *hw)
                rtlefuse->autoload_failflag = false;
                _rtl92de_read_adapter_info(hw);
        } else {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+               pr_err("Autoload ERR!!\n");
        }
        return;
 }
@@ -2169,8 +2162,8 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
                        enc_algo = CAM_AES;
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "switch case %#x not processed\n", enc_algo);
+                       pr_err("switch case %#x not processed\n",
+                              enc_algo);
                        enc_algo = CAM_TKIP;
                        break;
                }
@@ -2186,9 +2179,7 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
                                        entry_id = rtl_cam_get_free_entry(hw,
                                                                 p_macaddr);
                                        if (entry_id >=  TOTAL_CAM_ENTRY) {
-                                               RT_TRACE(rtlpriv, COMP_SEC,
-                                                        DBG_EMERG,
-                                                        "Can not find free hw security cam entry\n");
+                                               pr_err("Can not find free hw security cam entry\n");
                                                return;
                                        }
                                } else {
index c22b8a215c877c99c656c7db7b054c2a8ace60a3..8851038c9ebaa6df7006d29a23b3c3185b950e7a 100644 (file)
@@ -66,8 +66,8 @@ void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
                rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", pled->ledpin);
+               pr_err("switch case %#x not processed\n",
+                      pled->ledpin);
                break;
        }
        pled->ledon = true;
@@ -76,7 +76,6 @@ void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
 void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        u8 ledcfg;
 
        RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
@@ -89,7 +88,7 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
                break;
        case LED_PIN_LED0:
                ledcfg &= 0xf0;
-               if (pcipriv->ledctl.led_opendrain)
+               if (rtlpriv->ledctl.led_opendrain)
                        rtl_write_byte(rtlpriv, REG_LEDCFG2,
                                       (ledcfg | BIT(1) | BIT(5) | BIT(6)));
                else
@@ -101,8 +100,8 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
                rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", pled->ledpin);
+               pr_err("switch case %#x not processed\n",
+                      pled->ledpin);
                break;
        }
        pled->ledon = false;
@@ -110,24 +109,26 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 
 void rtl92de_init_sw_leds(struct ieee80211_hw *hw)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-       _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
-       _rtl92ce_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       _rtl92ce_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+       _rtl92ce_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
 }
 
 static void _rtl92ce_sw_led_control(struct ieee80211_hw *hw,
                                    enum led_ctl_mode ledaction)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-       struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
+
        switch (ledaction) {
        case LED_CTL_POWER_ON:
        case LED_CTL_LINK:
        case LED_CTL_NO_LINK:
-               rtl92de_sw_led_on(hw, pLed0);
+               rtl92de_sw_led_on(hw, pled0);
                break;
        case LED_CTL_POWER_OFF:
-               rtl92de_sw_led_off(hw, pLed0);
+               rtl92de_sw_led_off(hw, pled0);
                break;
        default:
                break;
index 424f54babd03e8cf22e1c7c30b67959331abcf57..de98d88199d6ec19b5a11fc6837eaf79436feb61 100644 (file)
@@ -716,7 +716,7 @@ static bool _rtl92d_phy_bb_config(struct ieee80211_hw *hw)
        rtstatus = _rtl92d_phy_config_bb_with_headerfile(hw,
                BASEBAND_CONFIG_PHY_REG);
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
+               pr_err("Write BB Reg Fail!!\n");
                return false;
        }
 
@@ -731,13 +731,13 @@ static bool _rtl92d_phy_bb_config(struct ieee80211_hw *hw)
                        BASEBAND_CONFIG_PHY_REG);
        }
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
+               pr_err("BB_PG Reg Fail!!\n");
                return false;
        }
        rtstatus = _rtl92d_phy_config_bb_with_headerfile(hw,
                BASEBAND_CONFIG_AGC_TAB);
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+               pr_err("AGC Table Fail\n");
                return false;
        }
        rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
@@ -833,8 +833,7 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                break;
        case RF90_PATH_C:
        case RF90_PATH_D:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", rfpath);
+               pr_err("switch case %#x not processed\n", rfpath);
                break;
        }
        return true;
@@ -987,8 +986,8 @@ void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
                rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_err("unknown bandwidth: %#X\n",
+                      rtlphy->current_chan_bw);
                break;
        }
        switch (rtlphy->current_chan_bw) {
@@ -1019,8 +1018,8 @@ void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
                        HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_err("unknown bandwidth: %#X\n",
+                      rtlphy->current_chan_bw);
                break;
 
        }
@@ -2700,7 +2699,7 @@ static bool _rtl92d_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
        struct swchnlcmd *pcmd;
 
        if (cmdtable == NULL) {
-               RT_ASSERT(false, "cmdtable cannot be NULL\n");
+               WARN_ONCE(true, "rtl8192de: cmdtable cannot be NULL\n");
                return false;
        }
        if (cmdtableidx >= cmdtablesz)
@@ -2842,9 +2841,8 @@ static bool _rtl92d_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
                        rtl92d_phy_reload_iqk_setting(hw, channel);
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "switch case %#x not processed\n",
-                                currentcmd->cmdid);
+                       pr_err("switch case %#x not processed\n",
+                              currentcmd->cmdid);
                        break;
                }
                break;
@@ -2893,17 +2891,17 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
                 * 5G and 2.4G band. */
                if (channel <= 14)
                        return 0;
-               RT_ASSERT((channel > 14), "5G but channel<=14\n");
+               WARN_ONCE((channel <= 14), "rtl8192de: 5G but channel<=14\n");
                break;
        case BAND_ON_2_4G:
                /* Get first channel error when change between
                 * 5G and 2.4G band. */
                if (channel > 14)
                        return 0;
-               RT_ASSERT((channel <= 14), "2G but channel>14\n");
+               WARN_ONCE((channel > 14), "rtl8192de: 2G but channel>14\n");
                break;
        default:
-               RT_ASSERT(false, "Invalid WirelessMode(%#x)!!\n",
+               WARN_ONCE(true, "rtl8192de: Invalid WirelessMode(%#x)!!\n",
                          rtlpriv->mac80211.mode);
                break;
        }
@@ -2956,9 +2954,8 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
                rtl92d_dm_write_dig(hw);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n",
-                        rtlphy->current_io_type);
+               pr_err("switch case %#x not processed\n",
+                      rtlphy->current_io_type);
                break;
        }
        rtlphy->set_io_inprogress = false;
@@ -2988,8 +2985,8 @@ bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
                        postprocessing = true;
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "switch case %#x not processed\n", iotype);
+                       pr_err("switch case %#x not processed\n",
+                              iotype);
                        break;
                }
        } while (false);
@@ -3176,8 +3173,8 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
                _rtl92d_phy_set_rfsleep(hw);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", rfpwr_state);
+               pr_err("switch case %#x not processed\n",
+                      rfpwr_state);
                bresult = false;
                break;
        }
@@ -3336,7 +3333,7 @@ void rtl92d_phy_set_poweron(struct ieee80211_hw *hw)
                        }
                }
                if (i == 200)
-                       RT_ASSERT(false, "Another mac power off over time\n");
+                       WARN_ONCE(true, "rtl8192de: Another mac power off over time\n");
        }
 }
 
index 9dc9e915513ea985a7ade3aac3dc9668779a51fc..021d3c538ac28be143e4277ca5254b2a712d4d99 100644 (file)
@@ -63,8 +63,7 @@ void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
                }
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", bandwidth);
+               pr_err("unknown bandwidth: %#X\n", bandwidth);
                break;
        }
 }
index 2d65e40952921924b42ecfbc5daed49471ba8b3b..16132c66e5e1b37db21d0cd81163f8c38152ee3e 100644 (file)
@@ -140,8 +140,6 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
 
        rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD);
 
-       /* for debug level */
-       rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
        /* for LPS & IPS */
        rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
@@ -171,8 +169,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
        /* for firmware buf */
        rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
        if (!rtlpriv->rtlhal.pfirmware) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Can't alloc buffer for fw\n");
+               pr_err("Can't alloc buffer for fw\n");
                return 1;
        }
 
@@ -185,8 +182,7 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
                                      rtlpriv->io.dev, GFP_KERNEL, hw,
                                      rtl_fw_cb);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Failed to request firmware!\n");
+               pr_err("Failed to request firmware!\n");
                return 1;
        }
 
@@ -256,7 +252,8 @@ static struct rtl_mod_params rtl92de_mod_params = {
        .inactiveps = true,
        .swctrl_lps = true,
        .fwctrl_lps = false,
-       .debug = DBG_EMERG,
+       .debug_level = 0,
+       .debug_mask = 0,
 };
 
 static const struct rtl_hal_cfg rtl92de_hal_cfg = {
@@ -366,15 +363,17 @@ MODULE_DESCRIPTION("Realtek 8192DE 802.11n Dual Mac PCI wireless");
 MODULE_FIRMWARE("rtlwifi/rtl8192defw.bin");
 
 module_param_named(swenc, rtl92de_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl92de_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl92de_mod_params.debug_level, int, 0644);
 module_param_named(ips, rtl92de_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444);
+module_param_named(debug_mask, rtl92de_mod_params.debug_mask, ullong, 0644);
 MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
 MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
 MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
 MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
 
@@ -402,7 +401,7 @@ static int __init rtl92de_module_init(void)
 
        ret = pci_register_driver(&rtl92de_driver);
        if (ret)
-               RT_ASSERT(false, "No device found\n");
+               WARN_ONCE(true, "rtl8192de: No device found\n");
        return ret;
 }
 
index 5fb37564957cd9831c6c14d33908ef369e4cf5e3..5c9c8741134fb344898008c0cad7b432b7b9f547 100644 (file)
@@ -794,7 +794,7 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
                        SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
                        break;
                default:
-                       RT_ASSERT(false, "ERR txdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
@@ -814,7 +814,7 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
                        SET_RX_DESC_EOR(pdesc, 1);
                        break;
                default:
-                       RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
@@ -834,7 +834,7 @@ u32 rtl92de_get_desc(u8 *p_desc, bool istx, u8 desc_name)
                        ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc);
                        break;
                default:
-                       RT_ASSERT(false, "ERR txdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
@@ -848,7 +848,7 @@ u32 rtl92de_get_desc(u8 *p_desc, bool istx, u8 desc_name)
                        ret = GET_RX_DESC_PKT_LEN(pdesc);
                        break;
                default:
-                       RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
index b3f6a9ed15d44b03d7e8f5a210068d624e8396cb..9fec345a42a01c1a03fd74f6b4b9f14cfd148598 100644 (file)
@@ -27,6 +27,7 @@
 #include "../pci.h"
 #include "../base.h"
 #include "../core.h"
+#include "../efuse.h"
 #include "reg.h"
 #include "def.h"
 #include "fw.h"
@@ -48,64 +49,6 @@ static void _rtl92ee_enable_fw_download(struct ieee80211_hw *hw, bool enable)
        }
 }
 
-static void _rtl92ee_fw_block_write(struct ieee80211_hw *hw,
-                                   const u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 blocksize = sizeof(u32);
-       u8 *bufferptr = (u8 *)buffer;
-       u32 *pu4byteptr = (u32 *)buffer;
-       u32 i, offset, blockcount, remainsize;
-
-       blockcount = size / blocksize;
-       remainsize = size % blocksize;
-
-       for (i = 0; i < blockcount; i++) {
-               offset = i * blocksize;
-               rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
-                               *(pu4byteptr + i));
-       }
-
-       if (remainsize) {
-               offset = blockcount * blocksize;
-               bufferptr += offset;
-               for (i = 0; i < remainsize; i++) {
-                       rtl_write_byte(rtlpriv,
-                                      (FW_8192C_START_ADDRESS + offset + i),
-                                      *(bufferptr + i));
-               }
-       }
-}
-
-static void _rtl92ee_fw_page_write(struct ieee80211_hw *hw, u32 page,
-                                  const u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 value8;
-       u8 u8page = (u8)(page & 0x07);
-
-       value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
-       rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
-
-       _rtl92ee_fw_block_write(hw, buffer, size);
-}
-
-static void _rtl92ee_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
-{
-       u32 fwlen = *pfwlen;
-       u8 remain = (u8)(fwlen % 4);
-
-       remain = (remain == 0) ? 0 : (4 - remain);
-
-       while (remain > 0) {
-               pfwbuf[fwlen] = 0;
-               fwlen++;
-               remain--;
-       }
-
-       *pfwlen = fwlen;
-}
-
 static void _rtl92ee_write_fw(struct ieee80211_hw *hw,
                              enum version_8192e version,
                              u8 *buffer, u32 size)
@@ -117,28 +60,25 @@ static void _rtl92ee_write_fw(struct ieee80211_hw *hw,
 
        RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "FW size is %d bytes,\n", size);
 
-       _rtl92ee_fill_dummy(bufferptr, &size);
+       rtl_fill_dummy(bufferptr, &size);
 
        pagenums = size / FW_8192C_PAGE_SIZE;
        remainsize = size % FW_8192C_PAGE_SIZE;
 
-       if (pagenums > 8) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Page numbers should not greater then 8\n");
-       }
+       if (pagenums > 8)
+               pr_err("Page numbers should not greater then 8\n");
 
        for (page = 0; page < pagenums; page++) {
                offset = page * FW_8192C_PAGE_SIZE;
-               _rtl92ee_fw_page_write(hw, page, (bufferptr + offset),
-                                      FW_8192C_PAGE_SIZE);
+               rtl_fw_page_write(hw, page, (bufferptr + offset),
+                                 FW_8192C_PAGE_SIZE);
                udelay(2);
        }
 
        if (remainsize) {
                offset = pagenums * FW_8192C_PAGE_SIZE;
                page = pagenums;
-               _rtl92ee_fw_page_write(hw, page, (bufferptr + offset),
-                                      remainsize);
+               rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize);
        }
 }
 
@@ -155,15 +95,10 @@ static int _rtl92ee_fw_free_to_go(struct ieee80211_hw *hw)
                 (!(value32 & FWDL_CHKSUM_RPT)));
 
        if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "chksum report faill ! REG_MCUFWDL:0x%08x .\n",
-                         value32);
+               pr_err("chksum report fail! REG_MCUFWDL:0x%08x\n",
+                      value32);
                goto exit;
        }
-
-       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
-
        value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
        value32 |= MCUFWDL_RDY;
        value32 &= ~WINTINI_RDY;
@@ -174,21 +109,15 @@ static int _rtl92ee_fw_free_to_go(struct ieee80211_hw *hw)
 
        do {
                value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
-               if (value32 & WINTINI_RDY) {
-                       RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD ,
-                                "Polling FW ready success!! REG_MCUFWDL:0x%08x. count = %d\n",
-                                value32, counter);
-                       err = 0;
-                       goto exit;
-               }
+               if (value32 & WINTINI_RDY)
+                       return 0;
 
                udelay(FW_8192C_POLLING_DELAY*10);
 
        } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
 
-       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                "Polling FW ready fail!! REG_MCUFWDL:0x%08x. count = %d\n",
-                value32, counter);
+       pr_err("Polling FW ready fail!! REG_MCUFWDL:0x%08x. count = %d\n",
+              value32, counter);
 
 exit:
        return err;
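
_rtl92ee_fw_free_to_go() runs the same bounded-retry shape twice: first it waits for FWDL_CHKSUM_RPT after the download, then, having set MCUFWDL_RDY and cleared WINTINI_RDY, it waits for the MCU to raise WINTINI_RDY (the success branch now simply returns 0 instead of tracing). A hedged sketch of that pattern; the callbacks and the delay value are illustrative, not driver API:

    #include <stdbool.h>
    #include <stdint.h>

    static bool poll_fw_bit(uint32_t (*read_reg)(void), uint32_t mask,
                            unsigned int max_tries,
                            void (*delay_us)(unsigned int))
    {
            unsigned int i;

            for (i = 0; i < max_tries; i++) {
                    if (read_reg() & mask)
                            return true;    /* bit came up within budget */
                    delay_us(50);
            }
            return false;                   /* caller logs the timeout */
    }
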
@@ -240,13 +169,6 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
        _rtl92ee_enable_fw_download(hw, false);
 
        err = _rtl92ee_fw_free_to_go(hw);
-       if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Firmware is not ready to run!\n");
-       } else {
-               RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD ,
-                        "Firmware is ready to run!\n");
-       }
 
        return 0;
 }
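
With the success/failure traces gone, note that rtl92ee_download_fw() still returns 0 even when _rtl92ee_fw_free_to_go() reports an error; err is computed but not propagated, exactly as in the pre-patch code. The overall handshake the surrounding hunks implement, as a compact sketch (each helper merely stands in for one of this file's static functions):

    #include <stdbool.h>

    static void enable_fw_download(bool enable); /* _rtl92ee_enable_fw_download() */
    static void write_fw_pages(void);            /* _rtl92ee_write_fw(), shown above */
    static int fw_free_to_go(void);              /* checksum + ready polling */

    static int download_fw_sketch(void)
    {
            enable_fw_download(true);
            write_fw_pages();
            enable_fw_download(false);
            return fw_free_to_go();
    }
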
@@ -462,8 +384,8 @@ void rtl92ee_fill_h2c_cmd(struct ieee80211_hw *hw,
        u32 tmp_cmdbuf[2];
 
        if (!rtlhal->fw_ready) {
-               RT_ASSERT(false,
-                         "return H2C cmd because of Fw download fail!!!\n");
+               WARN_ONCE(true,
+                         "rtl8192ee: H2C cmd dropped because firmware download failed!\n");
                return;
        }
 
@@ -842,8 +764,8 @@ static void _rtl92ee_c2h_ra_report_handler(struct ieee80211_hw *hw,
        rtl92ee_dm_dynamic_arfb_select(hw, rate, collision_state);
 }
 
-static void _rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id,
-                                        u8 c2h_cmd_len, u8 *tmp_buf)
+void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id,
+                                u8 c2h_cmd_len, u8 *tmp_buf)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
@@ -898,5 +820,14 @@ void rtl92ee_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len)
        RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_TRACE,
                      "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len);
 
-       _rtl92ee_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf);
+       switch (c2h_cmd_id) {
+       case C2H_8192E_BT_INFO:
+       case C2H_8192E_BT_MP:
+               rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf);
+               break;
+       default:
+               rtl92ee_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len,
+                                           tmp_buf);
+               break;
+       }
 }
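
The packet handler no longer parses everything inline: the BT-coexistence reports (C2H_8192E_BT_INFO, C2H_8192E_BT_MP) are queued through rtl_c2hcmd_enqueue() for deferred handling, presumably because they are too heavy for the receive path, and are replayed later through the .c2h_content_parsing hook registered further down in this series; every other command ID is still parsed on the spot by the newly exported rtl92ee_c2h_content_parsing(). A hedged sketch of the consumer side (standalone types; the real code holds a lock while unlinking):

    #include <stdint.h>
    #include <stdlib.h>

    struct c2h_cmd {
            uint8_t id, len;
            uint8_t buf[64];
            struct c2h_cmd *next;
    };

    /* Drain the deferred queue, replaying each event through the
     * per-chip parser (stands in for .c2h_content_parsing). */
    static void c2h_work_sketch(struct c2h_cmd **queue,
                                void (*parse)(uint8_t, uint8_t, uint8_t *))
    {
            struct c2h_cmd *c;

            while ((c = *queue) != NULL) {
                    *queue = c->next;
                    parse(c->id, c->len, c->buf);
                    free(c);
            }
    }
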
index 069da1e7e80a481fbd17179917fdca018536a77d..72da3f92f02c57911ee307e5c74e22d8750e97fb 100644 (file)
@@ -185,5 +185,6 @@ void rtl92ee_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus);
 void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
 void rtl92ee_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
 void rtl92ee_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len);
-
+void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id,
+                                u8 c2h_cmd_len, u8 *tmp_buf);
 #endif
index ebf663e1a81a0ab8a5613508b82690e623592822..56ca7f5351eaba043a792764fe80c5860a0a9aae 100644 (file)
@@ -735,9 +735,8 @@ static bool _rtl92ee_llt_table_init(struct ieee80211_hw *hw)
 static void _rtl92ee_gen_refresh_led_state(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       struct rtl_led *pled0 = &pcipriv->ledctl.sw_led0;
+       struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
 
        if (rtlpriv->rtlhal.up_first_time)
                return;
@@ -1006,7 +1005,7 @@ static void _rtl92ee_hw_configure(struct ieee80211_hw *hw)
        rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x100a);
 
        /* Note Data sheet don't define */
-       rtl_write_word(rtlpriv, 0x4C7, 0x80);
+       rtl_write_byte(rtlpriv, 0x4C7, 0x80);
 
        rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x20);
 
@@ -1320,7 +1319,7 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
                rtl_write_byte(rtlpriv, 0x65, 1);
        }
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+               pr_err("Init MAC failed\n");
                err = 1;
                return err;
        }
@@ -1485,8 +1484,7 @@ static int _rtl92ee_set_media_status(struct ieee80211_hw *hw,
                         "Set Network type to AP!\n");
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Network type %d not support!\n", type);
+               pr_err("Network type %d not supported!\n", type);
                return 1;
        }
 
@@ -1582,7 +1580,7 @@ void rtl92ee_set_qos(struct ieee80211_hw *hw, int aci)
                rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
                break;
        default:
-               RT_ASSERT(false, "invalid aci: %d !\n", aci);
+               WARN_ONCE(true, "rtl8192ee: invalid aci: %d !\n", aci);
                break;
        }
 }
@@ -2167,10 +2165,9 @@ exit:
 static void _rtl92ee_hal_customized_behavior(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
-       pcipriv->ledctl.led_opendrain = true;
+       rtlpriv->ledctl.led_opendrain = true;
 
        RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
                 "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
@@ -2206,7 +2203,7 @@ void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw)
                rtlefuse->autoload_failflag = false;
                _rtl92ee_read_adapter_info(hw);
        } else {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+               pr_err("Autoload ERR!!\n");
        }
        _rtl92ee_hal_customized_behavior(hw);
 
@@ -2484,9 +2481,7 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index,
                                        entry_id = rtl_cam_get_free_entry(hw,
                                                                     p_macaddr);
                                        if (entry_id >=  TOTAL_CAM_ENTRY) {
-                                               RT_TRACE(rtlpriv, COMP_SEC,
-                                                        DBG_EMERG,
-                                                        "Can not find free hw security cam entry\n");
+                                               pr_err("Can not find free hw security cam entry\n");
                                                return;
                                        }
                                } else {
index 47da05dd30763db47754b524a73e39e17fd1aa79..96c64785108baa07174ccf1ee650cca67b424503 100644 (file)
@@ -99,26 +99,26 @@ void rtl92ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 
 void rtl92ee_init_sw_leds(struct ieee80211_hw *hw)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
 
-       _rtl92ee_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0);
-       _rtl92ee_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1);
+       _rtl92ee_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+       _rtl92ee_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
 }
 
 static void _rtl92ee_sw_led_control(struct ieee80211_hw *hw,
                                    enum led_ctl_mode ledaction)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-       struct rtl_led *pLed0 = &pcipriv->ledctl.sw_led0;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
 
        switch (ledaction) {
        case LED_CTL_POWER_ON:
        case LED_CTL_LINK:
        case LED_CTL_NO_LINK:
-               rtl92ee_sw_led_on(hw, pLed0);
+               rtl92ee_sw_led_on(hw, pled0);
                break;
        case LED_CTL_POWER_OFF:
-               rtl92ee_sw_led_off(hw, pLed0);
+               rtl92ee_sw_led_off(hw, pled0);
                break;
        default:
                break;
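
This hunk and the LED hunks that follow are one mechanical move: sw_led0/sw_led1 and the led_opendrain quirk flag migrate from the PCI-only struct rtl_pci_priv into the bus-independent struct rtl_priv, so the LED state no longer needs a per-bus copy (the pLed0 to pled0 rename rides along). A layout sketch, assuming the rtl_led_ctl naming from rtlwifi's wifi.h, with unrelated members elided:

    #include <stdbool.h>

    struct rtl_led { int ledpin; bool ledon; }; /* stub for the sketch */

    struct rtl_led_ctl {
            bool led_opendrain;             /* open-drain LED wiring quirk */
            struct rtl_led sw_led0;
            struct rtl_led sw_led1;
    };

    struct rtl_priv_sketch {                /* stands in for struct rtl_priv */
            /* ... */
            struct rtl_led_ctl ledctl;      /* moved from struct rtl_pci_priv */
    };
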
index 5ad7e753c357f8671a9b5f518f1661205c57e898..8b072ee8e0d579258ad7659eca759fce60f9eb67 100644 (file)
@@ -170,7 +170,7 @@ static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw,
        offset &= 0xff;
        newoffset = offset;
        if (RT_CANNOT_IO(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
+               pr_err("return all one\n");
                return 0xFFFFFFFF;
        }
        tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
@@ -214,7 +214,7 @@ static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw,
        struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
 
        if (RT_CANNOT_IO(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
+               pr_err("stop\n");
                return;
        }
        offset &= 0xff;
@@ -650,7 +650,7 @@ static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw)
 
        rtstatus = phy_config_bb_with_hdr_file(hw, BASEBAND_CONFIG_PHY_REG);
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
+               pr_err("Write BB Reg Fail!!\n");
                return false;
        }
 
@@ -662,12 +662,12 @@ static bool _rtl92ee_phy_bb8192ee_config_parafile(struct ieee80211_hw *hw)
        }
        _rtl92ee_phy_txpower_by_rate_configuration(hw);
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
+               pr_err("BB_PG Reg Fail!!\n");
                return false;
        }
        rtstatus = phy_config_bb_with_hdr_file(hw, BASEBAND_CONFIG_AGC_TAB);
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+               pr_err("AGC Table Fail\n");
                return false;
        }
        rtlphy->cck_high_power = (bool)(rtl_get_bbreg(hw,
@@ -1176,7 +1176,7 @@ static u8 _rtl92ee_phy_get_ratesection_intxpower_byrate(enum radio_path path,
                rate_section = 7;
                break;
        default:
-               RT_ASSERT(true, "Rate_Section is Illegal\n");
+               WARN_ONCE(true, "rtl8192ee: Rate_Section is Illegal\n");
                break;
        }
        return rate_section;
@@ -1239,7 +1239,7 @@ static u8 _rtl92ee_get_txpower_by_rate(struct ieee80211_hw *hw,
                shift = 24;
                break;
        default:
-               RT_ASSERT(true, "Rate_Section is Illegal\n");
+               WARN_ONCE(true, "rtl8192ee: Rate_Section is Illegal\n");
                break;
        }
 
@@ -1675,8 +1675,7 @@ void rtl92ee_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
                                                      (u8 *)&iotype);
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Unknown Scan Backup operation.\n");
+                       pr_err("Unknown Scan Backup operation.\n");
                        break;
                }
        }
@@ -1717,8 +1716,8 @@ void rtl92ee_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
                rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_err("unknown bandwidth: %#X\n",
+                      rtlphy->current_chan_bw);
                break;
        }
 
@@ -1742,8 +1741,8 @@ void rtl92ee_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
                               HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_err("unknown bandwidth: %#X\n",
+                      rtlphy->current_chan_bw);
                break;
        }
        rtl92ee_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
@@ -1811,8 +1810,8 @@ u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw)
                return 0;
        if (rtlphy->set_bwmode_inprogress)
                return 0;
-       RT_ASSERT((rtlphy->current_channel <= 14),
-                 "WIRELESS_MODE_G but channel>14");
+       WARN_ONCE((rtlphy->current_channel > 14),
+                 "rtl8192ee: WIRELESS_MODE_G but channel>14");
        rtlphy->sw_chnl_inprogress = true;
        rtlphy->sw_chnl_stage = 0;
        rtlphy->sw_chnl_step = 0;
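
Every RT_ASSERT conversion in this series carries a polarity flip: RT_ASSERT(cond, msg) complained when cond was false, whereas WARN_ONCE(cond, msg) warns (once, with a backtrace) when cond is true, so each predicate is negated, as in current_channel <= 14 above becoming current_channel > 14. A standalone illustration that both forms flag the same bad case:

    #include <stdio.h>

    /* Old style: report when the asserted condition does NOT hold. */
    #define RT_ASSERT_STYLE(cond, msg)  do { if (!(cond)) puts(msg); } while (0)
    /* New style: report when the warned-about condition DOES hold. */
    #define WARN_ONCE_STYLE(cond, msg)  do { if (cond) puts(msg); } while (0)

    int main(void)
    {
            int channel = 15;       /* out of range for 2.4 GHz */

            RT_ASSERT_STYLE(channel <= 14, "channel > 14");
            WARN_ONCE_STYLE(channel > 14, "channel > 14"); /* same trigger */
            return 0;
    }
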
@@ -1860,8 +1859,8 @@ static bool _rtl92ee_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
 
        rfdependcmdcnt = 0;
 
-       RT_ASSERT((channel >= 1 && channel <= 14),
-                 "illegal channel for Zebra: %d\n", channel);
+       WARN_ONCE((channel < 1 || channel > 14),
+                 "rtl8192ee: illegal channel for Zebra: %d\n", channel);
 
        _rtl92ee_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
                                          MAX_RFDEPENDCMD_CNT,
@@ -1884,8 +1883,8 @@ static bool _rtl92ee_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
                        currentcmd = &postcommoncmd[*step];
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Invalid 'stage' = %d, Check it!\n" , *stage);
+                       pr_err("Invalid 'stage' = %d, Check it!\n",
+                              *stage);
                        return true;
                }
 
@@ -1948,7 +1947,7 @@ static bool _rtl92ee_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
        struct swchnlcmd *pcmd;
 
        if (cmdtable == NULL) {
-               RT_ASSERT(false, "cmdtable cannot be NULL.\n");
+               WARN_ONCE(true, "rtl8192ee: cmdtable cannot be NULL.\n");
                return false;
        }
 
index 73716c07d43313e92c0a1717c1b0ac222a8e4369..bc76a91da762dee80cd72dc10435f03a7b9a74ae 100644 (file)
@@ -55,8 +55,7 @@ void rtl92ee_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
                              rtlphy->rfreg_chnlval[0]);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", bandwidth);
+               pr_err("unknown bandwidth: %#X\n", bandwidth);
                break;
        }
 }
index 46b605de36e722301bf1decb7602ec11c3314739..48820bc497d805716a6c0f018feaf8fad97d8917 100644 (file)
@@ -133,8 +133,6 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
                                     0);
        rtlpci->irq_mask[1] = (u32)(IMR_RXFOVW | 0);
 
-       /* for debug level */
-       rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
        /* for LPS & IPS */
        rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
@@ -165,8 +163,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
        /* for firmware buf */
        rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
        if (!rtlpriv->rtlhal.pfirmware) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Can't alloc buffer for fw\n");
+               pr_err("Can't alloc buffer for fw\n");
                return 1;
        }
 
@@ -179,8 +176,7 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
                                      rtlpriv->io.dev, GFP_KERNEL, hw,
                                      rtl_fw_cb);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Failed to request firmware!\n");
+               pr_err("Failed to request firmware!\n");
                return 1;
        }
 
@@ -252,6 +248,7 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = {
        .fill_h2c_cmd = rtl92ee_fill_h2c_cmd,
        .get_btc_status = rtl92ee_get_btc_status,
        .rx_command_packet = rtl92ee_rx_command_packet,
+       .c2h_content_parsing = rtl92ee_c2h_content_parsing,
 };
 
 static struct rtl_mod_params rtl92ee_mod_params = {
@@ -260,7 +257,8 @@ static struct rtl_mod_params rtl92ee_mod_params = {
        .swctrl_lps = false,
        .fwctrl_lps = true,
        .msi_support = true,
-       .debug = DBG_EMERG,
+       .debug_level = 0,
+       .debug_mask = 0,
 };
 
 static const struct rtl_hal_cfg rtl92ee_hal_cfg = {
@@ -370,7 +368,8 @@ MODULE_DESCRIPTION("Realtek 8192EE 802.11n PCI wireless");
 MODULE_FIRMWARE("rtlwifi/rtl8192eefw.bin");
 
 module_param_named(swenc, rtl92ee_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl92ee_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl92ee_mod_params.debug_level, int, 0644);
+module_param_named(debug_mask, rtl92ee_mod_params.debug_mask, ullong, 0644);
 module_param_named(ips, rtl92ee_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl92ee_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl92ee_mod_params.fwctrl_lps, bool, 0444);
@@ -382,7 +381,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
 MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
 MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
 MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
 MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
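
Together with the global_debuglevel assignment removed above, these hunks replace the single load-time debug parameter with two knobs that are writable at runtime (mode 0644): debug_level, a 0-5 verbosity threshold, and debug_mask, a 64-bit per-component filter (hence the ullong parameter type). A hedged sketch of how a trace macro can gate on both; the macro, component bit, and names are illustrative, not rtlwifi's:

    #include <stdint.h>
    #include <stdio.h>

    static int debug_level;             /* module parameter, 0..5 */
    static uint64_t debug_mask;         /* module parameter, bitmask */

    #define COMP_FW_SKETCH   (1ULL << 4) /* illustrative component bit */
    #define DBG_TRACE_SKETCH 3

    #define rtl_dbg_sketch(comp, level, fmt, ...)                           \
            do {                                                            \
                    if ((debug_mask & (comp)) && (level) <= debug_level)    \
                            printf(fmt, ##__VA_ARGS__);                     \
            } while (0)

Loading with, say, debug_level=5 debug_mask=0xffffffffffffffff would then enable every component at full verbosity.
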
index 2d48ccd02ac85da1b056fe7199656f3f6f71071f..07440e9a8ca26ae0b259cf46760c188cc7422cc5 100644 (file)
@@ -991,8 +991,9 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
                        SET_RX_DESC_EOR(pdesc, 1);
                        break;
                default:
-                       RT_ASSERT(false,
-                                 "ERR rxdesc :%d not process\n", desc_name);
+                       WARN_ONCE(true,
+                                 "rtl8192ee: ERR rxdesc :%d not processed\n",
+                                 desc_name);
                        break;
                }
        }
@@ -1011,8 +1012,9 @@ u32 rtl92ee_get_desc(u8 *pdesc, bool istx, u8 desc_name)
                        ret = GET_TXBUFFER_DESC_ADDR_LOW(pdesc, 1);
                        break;
                default:
-                       RT_ASSERT(false,
-                                 "ERR txdesc :%d not process\n", desc_name);
+                       WARN_ONCE(true,
+                                 "rtl8192ee: ERR txdesc :%d not processed\n",
+                                 desc_name);
                        break;
                }
        } else {
@@ -1027,8 +1029,9 @@ u32 rtl92ee_get_desc(u8 *pdesc, bool istx, u8 desc_name)
                        ret = GET_RX_DESC_BUFF_ADDR(pdesc);
                        break;
                default:
-                       RT_ASSERT(false,
-                                 "ERR rxdesc :%d not process\n", desc_name);
+                       WARN_ONCE(true,
+                                 "rtl8192ee: ERR rxdesc :%d not processed\n",
+                                 desc_name);
                        break;
                }
        }
index 32f9207b5cf5d7eb45180a5fb51321c4b70c501b..1922e78ad6bdd7a7cd7390907db7ebb48d70b11e 100644 (file)
@@ -113,8 +113,7 @@ static u8 _rtl92s_firmware_header_map_rftype(struct ieee80211_hw *hw)
        case RF_2T2R:
                return 0x22;
        default:
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown RF type(%x)\n",
-                        rtlphy->rf_type);
+               pr_err("Unknown RF type(%x)\n", rtlphy->rf_type);
                break;
        }
        return 0x22;
@@ -168,9 +167,7 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
        _rtl92s_fw_set_rqpn(hw);
 
        if (buffer_len >= MAX_FIRMWARE_CODE_SIZE) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Size over FIRMWARE_CODE_SIZE!\n");
-
+               pr_err("Size over FIRMWARE_CODE_SIZE!\n");
                return false;
        }
 
@@ -239,9 +236,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
                } while (pollingcnt--);
 
                if (!(cpustatus & IMEM_CHK_RPT) || (pollingcnt <= 0)) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "FW_STATUS_LOAD_IMEM FAIL CPU, Status=%x\n",
-                                cpustatus);
+                       pr_err("FW_STATUS_LOAD_IMEM FAIL CPU, Status=%x\n",
+                              cpustatus);
                        goto status_check_fail;
                }
                break;
@@ -257,17 +253,15 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
                } while (pollingcnt--);
 
                if (!(cpustatus & EMEM_CHK_RPT) || (pollingcnt <= 0)) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "FW_STATUS_LOAD_EMEM FAIL CPU, Status=%x\n",
-                                cpustatus);
+                       pr_err("FW_STATUS_LOAD_EMEM FAIL CPU, Status=%x\n",
+                              cpustatus);
                        goto status_check_fail;
                }
 
                /* Turn On CPU */
                rtstatus = _rtl92s_firmware_enable_cpu(hw);
                if (!rtstatus) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Enable CPU fail!\n");
+                       pr_err("Enable CPU fail!\n");
                        goto status_check_fail;
                }
                break;
@@ -282,9 +276,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
                } while (pollingcnt--);
 
                if (!(cpustatus & DMEM_CODE_DONE) || (pollingcnt <= 0)) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Polling DMEM code done fail ! cpustatus(%#x)\n",
-                                cpustatus);
+                       pr_err("Polling DMEM code done fail ! cpustatus(%#x)\n",
+                              cpustatus);
                        goto status_check_fail;
                }
 
@@ -308,9 +301,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
 
                if (((cpustatus & LOAD_FW_READY) != LOAD_FW_READY) ||
                    (pollingcnt <= 0)) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Polling Load Firmware ready fail ! cpustatus(%x)\n",
-                                cpustatus);
+                       pr_err("Polling Load Firmware ready fail ! cpustatus(%x)\n",
+                              cpustatus);
                        goto status_check_fail;
                }
 
@@ -331,8 +323,7 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
                break;
 
        default:
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
-                        "Unknown status check!\n");
+               pr_err("Unknown status check!\n");
                rtstatus = false;
                break;
        }
@@ -380,8 +371,7 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
        /* 2. Retrieve IMEM image. */
        if ((pfwheader->img_imem_size == 0) || (pfwheader->img_imem_size >
            sizeof(firmware->fw_imem))) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "memory for data image is less than IMEM required\n");
+               pr_err("memory for data image is less than IMEM required\n");
                goto fail;
        } else {
                puc_mappedfile += fwhdr_size;
@@ -393,8 +383,7 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
 
        /* 3. Retriecve EMEM image. */
        if (pfwheader->img_sram_size > sizeof(firmware->fw_emem)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "memory for data image is less than EMEM required\n");
+               pr_err("memory for data image is less than EMEM required\n");
                goto fail;
        } else {
                puc_mappedfile += firmware->fw_imem_len;
@@ -428,8 +417,7 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
                                        RT_8192S_FIRMWARE_HDR_EXCLUDE_PRI_SIZE;
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Unexpected Download step!!\n");
+                       pr_err("Unexpected Download step!!\n");
                        goto fail;
                }
 
@@ -438,14 +426,14 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
                                ul_filelength);
 
                if (!rtstatus) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "fail!\n");
+                       pr_err("fail!\n");
                        goto fail;
                }
 
                /* <3> Check whether load FW process is ready */
                rtstatus = _rtl92s_firmware_checkready(hw, fwstatus);
                if (!rtstatus) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "fail!\n");
+                       pr_err("rtl8192se: firmware fail!\n");
                        goto fail;
                }
 
index 26e06b2837c3a97d7d74e2b84aee047d820766de..ba1bd782238b62273432e2aeeb83ea61d212d2d0 100644 (file)
@@ -75,11 +75,9 @@ void rtl92se_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                }
        case HAL_DEF_WOWLAN:
                break;
-       default: {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", variable);
-                       break;
-               }
+       default:
+               pr_err("switch case %#x not processed\n", variable);
+               break;
        }
 }
 
@@ -294,9 +292,8 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                                        acm_ctrl &= (~AcmHw_VoqEn);
                                        break;
                                default:
-                                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                                "switch case %#x not processed\n",
-                                                e_aci);
+                                       pr_err("switch case %#x not processed\n",
+                                              e_aci);
                                        break;
                                }
                        }
@@ -431,8 +428,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
                }
                break; }
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", variable);
+               pr_err("switch case %#x not processed\n", variable);
                break;
        }
 
@@ -745,9 +741,8 @@ static void _rtl92se_macconfig_before_fwdownload(struct ieee80211_hw *hw)
        } while (pollingcnt--);
 
        if (pollingcnt <= 0) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Polling TXDMA_INIT_VALUE timeout!! Current TCR(%#x)\n",
-                        tmpu1b);
+               pr_err("Polling TXDMA_INIT_VALUE timeout!! Current TCR(%#x)\n",
+                      tmpu1b);
                tmpu1b = rtl_read_byte(rtlpriv, CMDR);
                rtl_write_byte(rtlpriv, CMDR, tmpu1b & (~TXDMA_EN));
                udelay(2);
@@ -758,13 +753,12 @@ static void _rtl92se_macconfig_before_fwdownload(struct ieee80211_hw *hw)
        /* After MACIO reset,we must refresh LED state. */
        if ((ppsc->rfoff_reason == RF_CHANGE_BY_IPS) ||
           (ppsc->rfoff_reason == 0)) {
-               struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-               struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+               struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
                enum rf_pwrstate rfpwr_state_toset;
                rfpwr_state_toset = _rtl92se_rf_onoff_detect(hw);
 
                if (rfpwr_state_toset == ERFON)
-                       rtl92se_sw_led_on(hw, pLed0);
+                       rtl92se_sw_led_on(hw, pled0);
        }
 }
 
@@ -1004,7 +998,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
 
        /* 3. Initialize MAC/PHY Config by MACPHY_reg.txt */
        if (!rtl92s_phy_mac_config(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "MAC Config failed\n");
+               pr_err("MAC Config failed\n");
                err = rtstatus;
                goto exit;
        }
@@ -1024,7 +1018,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
 
        /* 4. Initialize BB After MAC Config PHY_reg.txt, AGC_Tab.txt */
        if (!rtl92s_phy_bb_config(hw)) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "BB Config failed\n");
+               pr_err("BB Config failed\n");
                err = rtstatus;
                goto exit;
        }
@@ -1194,8 +1188,7 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
                         "Set Network type to AP!\n");
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Network type %d not supported!\n", type);
+               pr_err("Network type %d not supported!\n", type);
                return 1;
 
        }
@@ -1251,7 +1244,7 @@ void rtl92se_set_qos(struct ieee80211_hw *hw, int aci)
                rtl_write_dword(rtlpriv, EDCAPARA_VO, 0x2f3222);
                break;
        default:
-               RT_ASSERT(false, "invalid aci: %d !\n", aci);
+               WARN_ONCE(true, "rtl8192se: invalid aci: %d !\n", aci);
                break;
        }
 }
@@ -1401,16 +1394,15 @@ static void _rtl92se_gen_refreshledstate(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-       struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+       struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
 
        if (rtlpci->up_first_time == 1)
                return;
 
        if (rtlpriv->psc.rfoff_reason == RF_CHANGE_BY_IPS)
-               rtl92se_sw_led_on(hw, pLed0);
+               rtl92se_sw_led_on(hw, pled0);
        else
-               rtl92se_sw_led_off(hw, pLed0);
+               rtl92se_sw_led_off(hw, pled0);
 }
 
 
@@ -1685,8 +1677,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
                break;
 
        case EEPROM_93C46:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "RTL819X Not boot from eeprom, check it !!\n");
+               pr_err("RTL819X Not boot from eeprom, check it !!\n");
                return;
 
        default:
@@ -2030,7 +2021,7 @@ void rtl92se_read_eeprom_info(struct ieee80211_hw *hw)
                rtlefuse->autoload_failflag = false;
                _rtl92se_read_adapter_info(hw);
        } else {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+               pr_err("Autoload ERR!!\n");
                rtlefuse->autoload_failflag = true;
        }
 }
@@ -2463,8 +2454,8 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
                        enc_algo = CAM_AES;
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "switch case %#x not processed\n", enc_algo);
+                       pr_err("switch case %#x not processed\n",
+                              enc_algo);
                        enc_algo = CAM_TKIP;
                        break;
                }
@@ -2481,9 +2472,7 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
                                        entry_id = rtl_cam_get_free_entry(hw,
                                                                 p_macaddr);
                                        if (entry_id >=  TOTAL_CAM_ENTRY) {
-                                               RT_TRACE(rtlpriv,
-                                                        COMP_SEC, DBG_EMERG,
-                                                        "Can not find free hw security cam entry\n");
+                                               pr_err("Can not find free hw security cam entry\n");
                                                return;
                                        }
                                } else {
index 870007801f6b41eb9caf4767191ed7980b6c546b..33c307aca9111ad1b243db894b3bd23e8fc123bf 100644 (file)
@@ -38,9 +38,10 @@ static void _rtl92se_init_led(struct ieee80211_hw *hw,
 
 void rtl92se_init_sw_leds(struct ieee80211_hw *hw)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-       _rtl92se_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
-       _rtl92se_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       _rtl92se_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+       _rtl92se_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
 }
 
 void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
@@ -63,8 +64,8 @@ void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
                rtl_write_byte(rtlpriv, LEDCFG, ledcfg & 0x0f);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", pled->ledpin);
+               pr_err("switch case %#x not processed\n",
+                      pled->ledpin);
                break;
        }
        pled->ledon = true;
@@ -73,7 +74,6 @@ void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
 void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 {
        struct rtl_priv *rtlpriv;
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        u8 ledcfg;
 
        rtlpriv = rtl_priv(hw);
@@ -89,7 +89,7 @@ void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
                break;
        case LED_PIN_LED0:
                ledcfg &= 0xf0;
-               if (pcipriv->ledctl.led_opendrain)
+               if (rtlpriv->ledctl.led_opendrain)
                        rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(1)));
                else
                        rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(3)));
@@ -99,8 +99,8 @@ void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
                rtl_write_byte(rtlpriv, LEDCFG, (ledcfg | BIT(3)));
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", pled->ledpin);
+               pr_err("switch case %#x not processed\n",
+                      pled->ledpin);
                break;
        }
        pled->ledon = false;
@@ -109,16 +109,17 @@ void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 static void _rtl92se_sw_led_control(struct ieee80211_hw *hw,
                                    enum led_ctl_mode ledaction)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-       struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
+
        switch (ledaction) {
        case LED_CTL_POWER_ON:
        case LED_CTL_LINK:
        case LED_CTL_NO_LINK:
-               rtl92se_sw_led_on(hw, pLed0);
+               rtl92se_sw_led_on(hw, pled0);
                break;
        case LED_CTL_POWER_OFF:
-               rtl92se_sw_led_off(hw, pLed0);
+               rtl92se_sw_led_off(hw, pled0);
                break;
        default:
                break;
index fcb9216af82d1731863a604d0bee03ade5be6720..86cb853f7169b2085407cd8e2310ded7f205ff4d 100644 (file)
@@ -235,7 +235,6 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
 void rtl92s_phy_scan_operation_backup(struct ieee80211_hw *hw,
                                      u8 operation)
 {
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
        if (!is_hal_stop(rtlhal)) {
@@ -247,8 +246,7 @@ void rtl92s_phy_scan_operation_backup(struct ieee80211_hw *hw,
                        rtl92s_phy_set_fw_cmd(hw, FW_CMD_RESUME_DM_BY_SCAN);
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Unknown operation\n");
+                       pr_err("Unknown operation\n");
                        break;
                }
        }
@@ -288,8 +286,8 @@ void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw,
                rtl_write_byte(rtlpriv, BW_OPMODE, reg_bw_opmode);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_err("unknown bandwidth: %#X\n",
+                      rtlphy->current_chan_bw);
                break;
        }
 
@@ -313,8 +311,8 @@ void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw,
                        rtl_write_byte(rtlpriv, RFPGA0_ANALOGPARAMETER2, 0x18);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_err("unknown bandwidth: %#X\n",
+                      rtlphy->current_chan_bw);
                break;
        }
 
@@ -330,7 +328,7 @@ static bool _rtl92s_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
        struct swchnlcmd *pcmd;
 
        if (cmdtable == NULL) {
-               RT_ASSERT(false, "cmdtable cannot be NULL\n");
+               WARN_ONCE(true, "rtl8192se: cmdtable cannot be NULL\n");
                return false;
        }
 
@@ -374,8 +372,8 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
 
        rfdependcmdcnt = 0;
 
-       RT_ASSERT((channel >= 1 && channel <= 14),
-                 "invalid channel for Zebra: %d\n", channel);
+       WARN_ONCE((channel < 1 || channel > 14),
+                 "rtl8192se: invalid channel for Zebra: %d\n", channel);
 
        _rtl92s_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
                                         MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
@@ -437,9 +435,8 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
                        }
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "switch case %#x not processed\n",
-                                currentcmd->cmdid);
+                       pr_err("switch case %#x not processed\n",
+                              currentcmd->cmdid);
                        break;
                }
 
@@ -644,8 +641,8 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
                        _rtl92se_phy_set_rf_sleep(hw);
                        break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", rfpwr_state);
+               pr_err("switch case %#x not processed\n",
+                      rfpwr_state);
                bresult = false;
                break;
        }
@@ -937,8 +934,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw)
        }
 
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
-                        "Write BB Reg Fail!!\n");
+               pr_err("Write BB Reg Fail!!\n");
                goto phy_BB8190_Config_ParaFile_Fail;
        }
 
@@ -951,8 +947,7 @@ static bool _rtl92s_phy_bb_config_parafile(struct ieee80211_hw *hw)
                                                 BASEBAND_CONFIG_PHY_REG);
        }
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
-                        "_rtl92s_phy_bb_config_parafile(): BB_PG Reg Fail!!\n");
+               pr_err("_rtl92s_phy_bb_config_parafile(): BB_PG Reg Fail!!\n");
                goto phy_BB8190_Config_ParaFile_Fail;
        }
 
@@ -1077,12 +1072,10 @@ bool rtl92s_phy_bb_config(struct ieee80211_hw *hw)
            (rtlphy->rf_type == RF_1T2R && rf_num != 2) ||
            (rtlphy->rf_type == RF_2T2R && rf_num != 2) ||
            (rtlphy->rf_type == RF_2T2R_GREEN && rf_num != 2)) {
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
-                        "RF_Type(%x) does not match RF_Num(%x)!!\n",
-                        rtlphy->rf_type, rf_num);
-               RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG,
-                        "path1 0x%x, path2 0x%x, pathmap 0x%x\n",
-                        path1, path2, pathmap);
+               pr_err("RF_Type(%x) does not match RF_Num(%x)!!\n",
+                      rtlphy->rf_type, rf_num);
+               pr_err("path1 0x%x, path2 0x%x, pathmap 0x%x\n",
+                      path1, path2, pathmap);
        }
 
        return rtstatus;
@@ -1221,7 +1214,7 @@ void rtl92s_phy_chk_fwcmd_iodone(struct ieee80211_hw *hw)
        } while (--pollingcnt);
 
        if (pollingcnt == 0)
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Set FW Cmd fail!!\n");
+               pr_err("Set FW Cmd fail!!\n");
 }
 
 
index bd2fa7735866486b280d1c8f8e8f0274b21b98f0..ea5b8ec45ec9b17e85039bfa52bd16f26f42b4fd 100644 (file)
@@ -523,8 +523,7 @@ void rtl92s_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
                                        rtlphy->rfreg_chnlval[0]);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", bandwidth);
+               pr_err("unknown bandwidth: %#X\n", bandwidth);
                break;
        }
 }
index 998cefbd7e89104b7797d5bf419fe567dea35ff2..2006b09ea74ffac4888e2700f72329e8630d48a7 100644 (file)
@@ -96,8 +96,7 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
                return;
        }
        if (firmware->size > rtlpriv->max_fw_size) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Firmware is too big!\n");
+               pr_err("Firmware is too big!\n");
                rtlpriv->max_fw_size = 0;
                release_firmware(firmware);
                return;
@@ -179,8 +178,6 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
 
        rtlpci->first_init = true;
 
-       /* for debug level */
-       rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
        /* for LPS & IPS */
        rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
@@ -218,8 +215,7 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
                                      rtlpriv->io.dev, GFP_KERNEL, hw,
                                      rtl92se_fw_cb);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Failed to request firmware!\n");
+               pr_err("Failed to request firmware!\n");
                return 1;
        }
 
@@ -299,7 +295,8 @@ static struct rtl_mod_params rtl92se_mod_params = {
        .inactiveps = true,
        .swctrl_lps = true,
        .fwctrl_lps = false,
-       .debug = DBG_EMERG,
+       .debug_level = 0,
+       .debug_mask = 0,
 };
 
 /* Because memory R/W bursting will cause system hang/crash
@@ -418,7 +415,8 @@ MODULE_DESCRIPTION("Realtek 8192S/8191S 802.11n PCI wireless");
 MODULE_FIRMWARE("rtlwifi/rtl8192sefw.bin");
 
 module_param_named(swenc, rtl92se_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl92se_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl92se_mod_params.debug_level, int, 0644);
+module_param_named(debug_mask, rtl92se_mod_params.debug_mask, ullong, 0644);
 module_param_named(ips, rtl92se_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444);
@@ -426,7 +424,8 @@ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
 MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
 MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
 MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
 
index 9a5a113992211d5641af7cae2d4d1412b06f9319..12cef01e593bb2f2550db876817b98c2107f2467 100644 (file)
@@ -583,7 +583,7 @@ void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
                        SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
                        break;
                default:
-                       RT_ASSERT(false, "ERR txdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8192se: ERR txdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
@@ -603,7 +603,7 @@ void rtl92se_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
                        SET_RX_STATUS_DESC_EOR(pdesc, 1);
                        break;
                default:
-                       RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8192se: ERR rxdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
@@ -623,7 +623,7 @@ u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name)
                        ret = GET_TX_DESC_TX_BUFFER_ADDRESS(desc);
                        break;
                default:
-                       RT_ASSERT(false, "ERR txdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8192se: ERR txdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
@@ -639,7 +639,7 @@ u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name)
                        ret = GET_RX_STATUS_DESC_BUFF_ADDR(desc);
                        break;
                default:
-                       RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8192se: ERR rxdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
index e5505387260b49269eb9b3b605ca5b61eac3ccaf..a954a87b0ed9bf9f14f937a6e4b8d98e3836306b 100644 (file)
@@ -99,8 +99,7 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
        while (!bwrite_sucess) {
                wait_writeh2c_limmit--;
                if (wait_writeh2c_limmit == 0) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Write H2C fail because no trigger for FW INT!\n");
+                       pr_err("Write H2C fail because no trigger for FW INT!\n");
                        break;
                }
 
@@ -123,8 +122,8 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
                        box_extreg = REG_HMEBOX_EXT_3;
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "switch case %#x not processed\n", boxnum);
+                       pr_err("switch case %#x not processed\n",
+                              boxnum);
                        break;
                }
 
@@ -229,8 +228,8 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
                        }
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "switch case %#x not processed\n", cmd_len);
+                       pr_err("switch case %#x not processed\n",
+                              cmd_len);
                        break;
                }
 
@@ -259,8 +258,8 @@ void rtl8723e_fill_h2c_cmd(struct ieee80211_hw *hw,
        u32 tmp_cmdbuf[2];
 
        if (!rtlhal->fw_ready) {
-               RT_ASSERT(false,
-                         "return H2C cmd because of Fw download fail!!!\n");
+               WARN_ONCE(true,
+                         "rtl8723ae: H2C cmd dropped because firmware download failed!\n");
                return;
        }
        memset(tmp_cmdbuf, 0, 8);
index f8be0bd7e3269c128f226a53fdc4535256faf85e..859c045bd37c48b4c77b755126d403ace7d56b81 100644 (file)
@@ -570,9 +570,8 @@ static bool _rtl8723e_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
                        break;
 
                if (count > POLLING_LLT_THRESHOLD) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Failed to polling write LLT done at address %d!\n",
-                                address);
+                       pr_err("Failed to polling write LLT done at address %d!\n",
+                              address);
                        status = false;
                        break;
                }
@@ -665,9 +664,8 @@ static bool _rtl8723e_llt_table_init(struct ieee80211_hw *hw)
 static void _rtl8723e_gen_refresh_led_state(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       struct rtl_led *pled0 = &pcipriv->ledctl.sw_led0;
+       struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
 
        if (rtlpriv->rtlhal.up_first_time)
                return;
@@ -961,7 +959,7 @@ int rtl8723e_hw_init(struct ieee80211_hw *hw)
        rtlpriv->intf_ops->disable_aspm(hw);
        rtstatus = _rtl8712e_init_mac(hw);
        if (rtstatus != true) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+               pr_err("Init MAC failed\n");
                err = 1;
                goto exit;
        }
@@ -1107,8 +1105,7 @@ static enum version_8723e _rtl8723e_read_chip_version(struct ieee80211_hw *hw)
                         "Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT.\n");
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Chip Version ID: Unknown. Bug?\n");
+               pr_err("Chip Version ID: Unknown. Bug?\n");
                break;
        }
 
@@ -1157,8 +1154,7 @@ static int _rtl8723e_set_media_status(struct ieee80211_hw *hw,
                        "Set Network type to AP!\n");
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                       "Network type %d not support!\n", type);
+               pr_err("Network type %d not supported!\n", type);
                return 1;
                break;
        }
@@ -1256,7 +1252,7 @@ void rtl8723e_set_qos(struct ieee80211_hw *hw, int aci)
                rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
                break;
        default:
-               RT_ASSERT(false, "invalid aci: %d !\n", aci);
+               WARN_ONCE(true, "rtl8723ae: invalid aci: %d !\n", aci);
                break;
        }
 }
@@ -1793,13 +1789,12 @@ exit:
 static void _rtl8723e_hal_customized_behavior(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
-       pcipriv->ledctl.led_opendrain = true;
+       rtlpriv->ledctl.led_opendrain = true;
        switch (rtlhal->oem_id) {
        case RT_CID_819X_HP:
-               pcipriv->ledctl.led_opendrain = true;
+               rtlpriv->ledctl.led_opendrain = true;
                break;
        case RT_CID_819X_LENOVO:
        case RT_CID_DEFAULT:
@@ -1852,7 +1847,7 @@ void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw)
        } else {
                rtlefuse->autoload_failflag = true;
                _rtl8723e_read_adapter_info(hw, false);
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+               pr_err("Autoload ERR!!\n");
        }
        _rtl8723e_hal_customized_behavior(hw);
 }
@@ -2245,9 +2240,7 @@ void rtl8723e_set_key(struct ieee80211_hw *hw, u32 key_index,
                                        entry_id =
                                          rtl_cam_get_free_entry(hw, p_macaddr);
                                        if (entry_id >=  TOTAL_CAM_ENTRY) {
-                                               RT_TRACE(rtlpriv, COMP_SEC,
-                                                        DBG_EMERG,
-                                                        "Can not find free hw security cam entry\n");
+                                               pr_err("Can not find free hw security cam entry\n");
                                                return;
                                        }
                                } else {
index 77c10047cb2070a7b7630394c6f48c32775982c7..d567b0df0e9f68f9954ba9728a15b256f88c1fdb 100644 (file)
@@ -58,8 +58,8 @@ void rtl8723e_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
                rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", pled->ledpin);
+               pr_err("switch case %#x not processed\n",
+                      pled->ledpin);
                break;
        }
        pled->ledon = true;
@@ -68,7 +68,6 @@ void rtl8723e_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
 void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        u8 ledcfg;
 
        RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
@@ -81,7 +80,7 @@ void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
                break;
        case LED_PIN_LED0:
                ledcfg &= 0xf0;
-               if (pcipriv->ledctl.led_opendrain) {
+               if (rtlpriv->ledctl.led_opendrain) {
                        ledcfg &= 0x90; /* Set to software control. */
                        rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3)));
                        ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
@@ -100,8 +99,8 @@ void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", pled->ledpin);
+               pr_err("switch case %#x not processed\n",
+                      pled->ledpin);
                break;
        }
        pled->ledon = false;
@@ -109,24 +108,26 @@ void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 
 void rtl8723e_init_sw_leds(struct ieee80211_hw *hw)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-       _rtl8723e_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0);
-       _rtl8723e_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       _rtl8723e_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+       _rtl8723e_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
 }
 
 static void _rtl8723e_sw_led_control(struct ieee80211_hw *hw,
                                     enum led_ctl_mode ledaction)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-       struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
+
        switch (ledaction) {
        case LED_CTL_POWER_ON:
        case LED_CTL_LINK:
        case LED_CTL_NO_LINK:
-               rtl8723e_sw_led_on(hw, pLed0);
+               rtl8723e_sw_led_on(hw, pled0);
                break;
        case LED_CTL_POWER_OFF:
-               rtl8723e_sw_led_off(hw, pLed0);
+               rtl8723e_sw_led_off(hw, pled0);
                break;
        default:
                break;
index 17b58cb32d55146f8d25e23c0a9faa0df32ec082..5cf29f5a4b5470b86e64f54ffc4bbae4256d7260 100644 (file)
@@ -133,7 +133,7 @@ static void _rtl8723e_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
                                             enum radio_path rfpath, u32 offset,
                                             u32 data)
 {
-       RT_ASSERT(false, "deprecated!\n");
+       WARN_ONCE(true, "rtl8723ae: _rtl8723e_phy_fw_rf_serial_write deprecated!\n");
 }
 
 static void _rtl8723e_phy_bb_config_1t(struct ieee80211_hw *hw)
@@ -213,7 +213,7 @@ static bool _rtl8723e_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
        rtstatus = _rtl8723e_phy_config_bb_with_headerfile(hw,
                                                BASEBAND_CONFIG_PHY_REG);
        if (rtstatus != true) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
+               pr_err("Write BB Reg Fail!!\n");
                return false;
        }
 
@@ -227,13 +227,13 @@ static bool _rtl8723e_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
                                        BASEBAND_CONFIG_PHY_REG);
        }
        if (rtstatus != true) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
+               pr_err("BB_PG Reg Fail!!\n");
                return false;
        }
        rtstatus =
          _rtl8723e_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB);
        if (rtstatus != true) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+               pr_err("AGC Table Fail\n");
                return false;
        }
        rtlphy->cck_high_power = (bool) (rtl_get_bbreg(hw,
@@ -749,8 +749,7 @@ void rtl8723e_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
                                                      (u8 *)&iotype);
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Unknown Scan Backup operation.\n");
+                       pr_err("Unknown Scan Backup operation.\n");
                        break;
                }
        }
@@ -791,8 +790,8 @@ void rtl8723e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
                rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_err("unknown bandwidth: %#X\n",
+                      rtlphy->current_chan_bw);
                break;
        }
 
@@ -816,8 +815,8 @@ void rtl8723e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
                               HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_err("unknown bandwidth: %#X\n",
+                      rtlphy->current_chan_bw);
                break;
        }
        rtl8723e_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
@@ -885,8 +884,8 @@ u8 rtl8723e_phy_sw_chnl(struct ieee80211_hw *hw)
                return 0;
        if (rtlphy->set_bwmode_inprogress)
                return 0;
-       RT_ASSERT((rtlphy->current_channel <= 14),
-                 "WIRELESS_MODE_G but channel>14");
+       WARN_ONCE((rtlphy->current_channel > 14),
+                 "rtl8723ae: WIRELESS_MODE_G but channel>14");
        rtlphy->sw_chnl_inprogress = true;
        rtlphy->sw_chnl_stage = 0;
        rtlphy->sw_chnl_step = 0;
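
Note the inverted test in the conversion above: RT_ASSERT(cond, ...) complained when cond was false, whereas WARN_ONCE(cond, ...) fires when cond is true, hence current_channel <= 14 becomes current_channel > 14. WARN_ONCE also returns the condition's value and emits a stack trace only on the first hit, which permits the usual guard idiom (a sketch, not code from this patch):

    if (WARN_ONCE(channel < 1 || channel > 14,
                  "illegal channel: %d\n", channel))
            return false;   /* warned once with a backtrace; bail out */
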
@@ -954,8 +953,8 @@ static bool _rtl8723e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
 
        rfdependcmdcnt = 0;
 
-       RT_ASSERT((channel >= 1 && channel <= 14),
-                 "illegal channel for Zebra: %d\n", channel);
+       WARN_ONCE((channel < 1 || channel > 14),
+                 "rtl8723ae: illegal channel for Zebra: %d\n", channel);
 
        rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
                                         MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
@@ -977,8 +976,8 @@ static bool _rtl8723e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
                        currentcmd = &postcommoncmd[*step];
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Invalid 'stage' = %d, Check it!\n", *stage);
+                       pr_err("Invalid 'stage' = %d, Check it!\n",
+                              *stage);
                        return true;
                }
 
index 422771778e031648254a29627f741f97fb5f4483..89958b64b52de18360b2a3df65b91905bbaffd38 100644 (file)
@@ -51,8 +51,7 @@ void rtl8723e_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
                              rtlphy->rfreg_chnlval[0]);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", bandwidth);
+               pr_err("unknown bandwidth: %#X\n", bandwidth);
                break;
        }
 }
index c51a9e8234e92417877537e3ce5d566a120e2bd1..7bf9f255792048b9a03a8b6a6117f77c51bd5535 100644 (file)
@@ -145,8 +145,6 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
                 (u32)(PHIMR_RXFOVW |
                                0);
 
-       /* for debug level */
-       rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
        /* for LPS & IPS */
        rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
@@ -172,8 +170,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
        /* for firmware buf */
        rtlpriv->rtlhal.pfirmware = vzalloc(0x6000);
        if (!rtlpriv->rtlhal.pfirmware) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Can't alloc buffer for fw.\n");
+               pr_err("Can't alloc buffer for fw.\n");
                return 1;
        }
 
@@ -186,8 +183,7 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
                                      rtlpriv->io.dev, GFP_KERNEL, hw,
                                      rtl_fw_cb);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Failed to request firmware!\n");
+               pr_err("Failed to request firmware!\n");
                return 1;
        }
        return 0;
@@ -270,7 +266,8 @@ static struct rtl_mod_params rtl8723e_mod_params = {
        .inactiveps = true,
        .swctrl_lps = false,
        .fwctrl_lps = true,
-       .debug = DBG_EMERG,
+       .debug_level = 0,
+       .debug_mask = 0,
        .msi_support = false,
        .disable_watchdog = false,
 };
@@ -384,7 +381,8 @@ MODULE_DESCRIPTION("Realtek 8723E 802.11n PCI wireless");
 MODULE_FIRMWARE("rtlwifi/rtl8723efw.bin");
 
 module_param_named(swenc, rtl8723e_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl8723e_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl8723e_mod_params.debug_level, int, 0644);
+module_param_named(debug_mask, rtl8723e_mod_params.debug_mask, ullong, 0644);
 module_param_named(ips, rtl8723e_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl8723e_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl8723e_mod_params.fwctrl_lps, bool, 0444);
@@ -396,7 +394,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
 MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
 MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
 MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
 MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
 
 static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
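
The old single debug parameter splits into a verbosity level and a per-component bit mask, and both move from mode 0444 to 0644, so they can be changed at runtime through /sys/module/rtl8723ae/parameters/debug_level and .../debug_mask. A hedged sketch of how such a pair is typically consumed; the macro and field names here are assumptions, not taken from this diff:

    /* Emit only when the component bit is set in debug_mask AND the
     * message level fits under debug_level (0 keeps everything quiet
     * except unconditional pr_err() output).
     */
    #define rtl_dbg_gate(params, comp, level, fmt, ...)                \
    do {                                                               \
            if (((comp) & (params)->debug_mask) &&                     \
                (level) <= (params)->debug_level)                      \
                    pr_info(fmt, ##__VA_ARGS__);                       \
    } while (0)
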
index e93125ebed812af0afab94e40f1fdf3bf1c9a9b2..c9838f52a7ea8a5c30802a4c2824df41b4fe48f1 100644 (file)
@@ -617,7 +617,7 @@ void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
                        SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
                        break;
                default:
-                       RT_ASSERT(false, "ERR txdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8723ae: ERR txdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
@@ -636,7 +636,7 @@ void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
                        SET_RX_DESC_EOR(pdesc, 1);
                        break;
                default:
-                       RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8723ae: ERR rxdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
@@ -656,7 +656,7 @@ u32 rtl8723e_get_desc(u8 *pdesc, bool istx, u8 desc_name)
                        ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
                        break;
                default:
-                       RT_ASSERT(false, "ERR txdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8723ae: ERR txdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
@@ -672,7 +672,7 @@ u32 rtl8723e_get_desc(u8 *pdesc, bool istx, u8 desc_name)
                        ret = GET_RX_DESC_BUFF_ADDR(pdesc);
                        break;
                default:
-                       RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8723ae: ERR rxdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
index 8c5c27ce8e059ddf8c8b31dcdb049e2cbd79fb67..c7ee9ba5e26ea15e4152e2ed4110e946f4d5b78a 100644 (file)
@@ -97,8 +97,7 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
        while (!bwrite_sucess) {
                wait_writeh2c_limmit--;
                if (wait_writeh2c_limmit == 0) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Write H2C fail because no trigger for FW INT!\n");
+                       pr_err("Write H2C failed: no trigger for FW INT!\n");
                        break;
                }
 
@@ -121,8 +120,8 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
                        box_extreg = REG_HMEBOX_EXT_3;
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "switch case %#x not processed\n", boxnum);
+                       pr_err("switch case %#x not processed\n",
+                              boxnum);
                        break;
                }
 
@@ -194,8 +193,8 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
                        }
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "switch case %#x not processed\n", cmd_len);
+                       pr_err("switch case %#x not processed\n",
+                              cmd_len);
                        break;
                }
 
@@ -224,8 +223,8 @@ void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
        u32 tmp_cmdbuf[2];
 
        if (!rtlhal->fw_ready) {
-               RT_ASSERT(false,
-                         "return H2C cmd because of Fw download fail!!!\n");
+               WARN_ONCE(true,
+                         "rtl8723be: H2C cmd dropped because FW download failed!\n");
                return;
        }
 
@@ -586,9 +585,9 @@ void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw,
                               (u8 *)p2p_ps_offload);
 }
 
-static void _rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw,
-                                          u8 c2h_cmd_id,
-                                          u8 c2h_cmd_len, u8 *tmp_buf)
+void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw,
+                                  u8 c2h_cmd_id,
+                                  u8 c2h_cmd_len, u8 *tmp_buf)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
@@ -636,5 +635,15 @@ void rtl8723be_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len)
        RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_TRACE,
                      "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len);
 
-       _rtl8723be_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf);
+       switch (c2h_cmd_id) {
+       case C2H_8723B_BT_INFO:
+       case C2H_8723B_BT_MP:
+               rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf);
+               break;
+
+       default:
+               rtl8723be_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len,
+                                             tmp_buf);
+               break;
+       }
 }
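
Two related changes meet in this hunk: rtl8723be_c2h_content_parsing() loses its static (it is wired up as the new .c2h_content_parsing hal op further down), and the BT_INFO/BT_MP events are queued with rtl_c2hcmd_enqueue() rather than parsed in the receive path, where BT-coexistence processing is unsafe. A hedged sketch of the queue-and-replay shape with assumed names; the rtlwifi core's real implementation is not part of this diff:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>

    struct c2h_cmd {
            struct list_head list;
            u8 id, len;
            u8 val[];                       /* payload copied from RX */
    };

    static void c2h_enqueue(struct list_head *q, spinlock_t *lock,
                            u8 id, u8 len, const u8 *val)
    {
            struct c2h_cmd *cmd = kmalloc(sizeof(*cmd) + len, GFP_ATOMIC);
            unsigned long flags;

            if (!cmd)
                    return;
            cmd->id = id;
            cmd->len = len;
            memcpy(cmd->val, val, len);
            spin_lock_irqsave(lock, flags);
            list_add_tail(&cmd->list, q);
            spin_unlock_irqrestore(lock, flags);
            /* a worker later dequeues each entry and calls the chip's
             * ops->c2h_content_parsing(hw, id, len, val) in process
             * context, where coexistence locks may be taken safely
             */
    }
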
index 067429669bdad6f3382136a87db11033d9d0eb6e..c652fa1339a760642a8c021c8200ba4777de49ed 100644 (file)
@@ -148,5 +148,6 @@ void rtl8723be_set_fw_media_status_rpt_cmd(struct ieee80211_hw *hw, u8 mstatus);
 void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
 void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
 void rtl8723be_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer, u8 len);
-
+void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id,
+                                  u8 c2h_cmd_len, u8 *tmp_buf);
 #endif
index aba60c3145c5f6f60b5e34cdbc7885d3042ae2f8..1acbfb86472cdab2e19a0e86a423eba7a3af41d9 100644 (file)
@@ -747,9 +747,8 @@ static bool _rtl8723be_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
                        break;
 
                if (count > POLLING_LLT_THRESHOLD) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Failed to polling write LLT done at address %d!\n",
-                                address);
+                       pr_err("Failed to poll LLT write done at address %d!\n",
+                              address);
                        status = false;
                        break;
                }
@@ -810,9 +809,8 @@ static bool _rtl8723be_llt_table_init(struct ieee80211_hw *hw)
 static void _rtl8723be_gen_refresh_led_state(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       struct rtl_led *pled0 = &(pcipriv->ledctl.sw_led0);
+       struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
 
        if (rtlpriv->rtlhal.up_first_time)
                return;
@@ -1383,7 +1381,7 @@ int rtl8723be_hw_init(struct ieee80211_hw *hw)
        }
        rtstatus = _rtl8723be_init_mac(hw);
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+               pr_err("Init MAC failed\n");
                err = 1;
                goto exit;
        }
@@ -1532,8 +1530,7 @@ static int _rtl8723be_set_media_status(struct ieee80211_hw *hw,
                         "Set Network type to AP!\n");
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Network type %d not support!\n", type);
+               pr_err("Network type %d not supported!\n", type);
                return 1;
        }
 
@@ -1631,7 +1628,7 @@ void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci)
                rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
                break;
        default:
-               RT_ASSERT(false, "invalid aci: %d !\n", aci);
+               WARN_ONCE(true, "rtl8723be: invalid aci: %d !\n", aci);
                break;
        }
 }
@@ -2022,6 +2019,37 @@ static void _rtl8723be_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
                "eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
 }
 
+static u8 _rtl8723be_read_package_type(struct ieee80211_hw *hw)
+{
+       u8 package_type;
+       u8 value;
+
+       efuse_power_switch(hw, false, true);
+       if (!efuse_one_byte_read(hw, 0x1FB, &value))
+               value = 0;
+       efuse_power_switch(hw, false, false);
+
+       switch (value & 0x7) {
+       case 0x4:
+               package_type = PACKAGE_TFBGA79;
+               break;
+       case 0x5:
+               package_type = PACKAGE_TFBGA90;
+               break;
+       case 0x6:
+               package_type = PACKAGE_QFN68;
+               break;
+       case 0x7:
+               package_type = PACKAGE_TFBGA80;
+               break;
+       default:
+               package_type = PACKAGE_DEFAULT;
+               break;
+       }
+
+       return package_type;
+}
+
 static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
                                         bool pseudo_test)
 {
@@ -2080,6 +2108,8 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
                                                 rtlefuse->autoload_failflag,
                                                 hwinfo);
 
+       rtlhal->package_type = _rtl8723be_read_package_type(hw);
+
        /* set channel plan from efuse */
        rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
 
@@ -2197,13 +2227,12 @@ exit:
 static void _rtl8723be_hal_customized_behavior(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
-       pcipriv->ledctl.led_opendrain = true;
+       rtlpriv->ledctl.led_opendrain = true;
        switch (rtlhal->oem_id) {
        case RT_CID_819X_HP:
-               pcipriv->ledctl.led_opendrain = true;
+               rtlpriv->ledctl.led_opendrain = true;
                break;
        case RT_CID_819X_LENOVO:
        case RT_CID_DEFAULT:
@@ -2247,7 +2276,7 @@ void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw)
                rtlefuse->autoload_failflag = false;
                _rtl8723be_read_adapter_info(hw, false);
        } else {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+               pr_err("Autoload ERR!!\n");
        }
        _rtl8723be_hal_customized_behavior(hw);
 }
@@ -2584,9 +2613,7 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
                                        entry_id = rtl_cam_get_free_entry(hw,
                                                                p_macaddr);
                                        if (entry_id >=  TOTAL_CAM_ENTRY) {
-                                               RT_TRACE(rtlpriv, COMP_SEC,
-                                                        DBG_EMERG,
-                                                        "Can not find free hw security cam entry\n");
+                                               pr_err("Can not find free hw security cam entry\n");
                                                return;
                                        }
                                } else {
@@ -2657,16 +2684,23 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
                value = hwinfo[EEPROM_RF_BT_SETTING_8723B];
                rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
                rtlpriv->btcoexist.btc_info.ant_num = (value & 0x1);
+               rtlpriv->btcoexist.btc_info.single_ant_path =
+                        (value & 0x40);        /*0xc3[6]*/
        } else {
                rtlpriv->btcoexist.btc_info.btcoexist = 0;
                rtlpriv->btcoexist.btc_info.bt_type = BT_RTL8723B;
                rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
+               rtlpriv->btcoexist.btc_info.single_ant_path = 0;
        }
 
        /* override ant_num / ant_path */
-       if (mod_params->ant_sel)
+       if (mod_params->ant_sel) {
                rtlpriv->btcoexist.btc_info.ant_num =
                        (mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
+
+               rtlpriv->btcoexist.btc_info.single_ant_path =
+                       (mod_params->ant_sel == 1 ? 0 : 1);
+       }
 }
 
 void rtl8723be_bt_reg_init(struct ieee80211_hw *hw)
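
Bit 6 of efuse byte 0xc3 now also supplies single_ant_path, and a non-zero ant_sel module parameter overrides both antenna fields. The override logic above, restated as a hypothetical standalone helper for clarity:

    static void apply_ant_sel(struct rtl_priv *rtlpriv, u8 ant_sel)
    {
            if (!ant_sel)
                    return;         /* 0 (default): keep efuse values */

            rtlpriv->btcoexist.btc_info.ant_num =
                    (ant_sel == 1) ? ANT_X2 : ANT_X1;
            rtlpriv->btcoexist.btc_info.single_ant_path =
                    (ant_sel == 1) ? 0 : 1;
    }
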
index 497913eb3b37bdba08af60820709db4c78c7deef..4f7890d62c21996c0632441453e995bb68420ee6 100644 (file)
@@ -57,8 +57,8 @@ void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
                rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", pled->ledpin);
+               pr_err("switch case %#x not processed\n",
+                      pled->ledpin);
                break;
        }
        pled->ledon = true;
@@ -67,7 +67,6 @@ void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
 void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        u8 ledcfg;
 
        RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
@@ -80,7 +79,7 @@ void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
                break;
        case LED_PIN_LED0:
                ledcfg &= 0xf0;
-               if (pcipriv->ledctl.led_opendrain) {
+               if (rtlpriv->ledctl.led_opendrain) {
                        ledcfg &= 0x90; /* Set to software control. */
                        rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3)));
                        ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
@@ -99,8 +98,8 @@ void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", pled->ledpin);
+               pr_err("switch case %#x not processed\n",
+                      pled->ledpin);
                break;
        }
        pled->ledon = false;
@@ -108,16 +107,18 @@ void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 
 void rtl8723be_init_sw_leds(struct ieee80211_hw *hw)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-       _rtl8723be_init_led(hw, &(pcipriv->ledctl.sw_led0), LED_PIN_LED0);
-       _rtl8723be_init_led(hw, &(pcipriv->ledctl.sw_led1), LED_PIN_LED1);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+       _rtl8723be_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+       _rtl8723be_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
 }
 
 static void _rtl8723be_sw_led_control(struct ieee80211_hw *hw,
                                      enum led_ctl_mode ledaction)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-       struct rtl_led *pled0 = &(pcipriv->ledctl.sw_led0);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
+
        switch (ledaction) {
        case LED_CTL_POWER_ON:
        case LED_CTL_LINK:
index 3cc2232f25caf6293c966a09dc86f1b063345b88..ab0f39e46e1bd0862aeb95456cbd10076d9d3957 100644 (file)
@@ -467,7 +467,7 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw)
        rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw,
                                                BASEBAND_CONFIG_PHY_REG);
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
+               pr_err("Write BB Reg Fail!!\n");
                return false;
        }
        _rtl8723be_phy_init_tx_power_by_rate(hw);
@@ -478,13 +478,13 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw)
        }
        phy_txpower_by_rate_config(hw);
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
+               pr_err("BB_PG Reg Fail!!\n");
                return false;
        }
        rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw,
                                                BASEBAND_CONFIG_AGC_TAB);
        if (!rtstatus) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+               pr_err("AGC Table Fail\n");
                return false;
        }
        rtlphy->cck_high_power = (bool)(rtl_get_bbreg(hw,
@@ -939,7 +939,7 @@ static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path,
                break;
 
        default:
-               RT_ASSERT(true, "Rate_Section is Illegal\n");
+               WARN_ONCE(true, "rtl8723be: Rate_Section is Illegal\n");
                break;
        }
 
@@ -1004,7 +1004,7 @@ static u8 _rtl8723be_get_txpower_by_rate(struct ieee80211_hw *hw,
                shift = 24;
                break;
        default:
-               RT_ASSERT(true, "Rate_Section is Illegal\n");
+               WARN_ONCE(true, "rtl8723be: Rate_Section is Illegal\n");
                break;
        }
        tx_pwr_diff = (u8)(rtlphy->tx_power_by_rate_offset[band][rfpath][tx_num]
@@ -1249,8 +1249,7 @@ void rtl8723be_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
                                                      (u8 *)&iotype);
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Unknown Scan Backup operation.\n");
+                       pr_err("Unknown Scan Backup operation.\n");
                        break;
                }
        }
@@ -1291,8 +1290,8 @@ void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
                rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_err("unknown bandwidth: %#X\n",
+                      rtlphy->current_chan_bw);
                break;
        }
 
@@ -1316,8 +1315,8 @@ void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
                               HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_err("unknown bandwidth: %#X\n",
+                      rtlphy->current_chan_bw);
                break;
        }
        rtl8723be_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
@@ -1387,8 +1386,8 @@ u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw)
                return 0;
        if (rtlphy->set_bwmode_inprogress)
                return 0;
-       RT_ASSERT((rtlphy->current_channel <= 14),
-                 "WIRELESS_MODE_G but channel>14");
+       WARN_ONCE((rtlphy->current_channel > 14),
+                 "rtl8723be: WIRELESS_MODE_G but channel>14");
        rtlphy->sw_chnl_inprogress = true;
        rtlphy->sw_chnl_stage = 0;
        rtlphy->sw_chnl_step = 0;
@@ -1438,8 +1437,8 @@ static bool _rtl8723be_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
 
        rfdependcmdcnt = 0;
 
-       RT_ASSERT((channel >= 1 && channel <= 14),
-                 "illegal channel for Zebra: %d\n", channel);
+       WARN_ONCE((channel < 1 || channel > 14),
+                 "rtl8723be: illegal channel for Zebra: %d\n", channel);
 
        rtl8723_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
                                         MAX_RFDEPENDCMD_CNT,
@@ -1462,8 +1461,8 @@ static bool _rtl8723be_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
                        currentcmd = &postcommoncmd[*step];
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Invalid 'stage' = %d, Check it!\n", *stage);
+                       pr_err("Invalid 'stage' = %d, Check it!\n",
+                              *stage);
                        return true;
                }
 
index 78f4f18d87b501ca7ce1e678d7ea806823ee0a26..48491454b8785df837f0d58475766a907fa6b2c1 100644 (file)
@@ -51,8 +51,7 @@ void rtl8723be_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
                              rtlphy->rfreg_chnlval[0]);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", bandwidth);
+               pr_err("unknown bandwidth: %#X\n", bandwidth);
                break;
        }
 }
index 847644d1f5f539ff984efb77131b57575df45b7d..92dbfa8f297f318a766506e50be2d102c312f470 100644 (file)
@@ -144,8 +144,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
                                     HSIMR_RON_INT_EN   |
                                     0);
 
-       /* for debug level */
-       rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
        /* for LPS & IPS */
        rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
@@ -179,8 +177,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
        /* for firmware buf */
        rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
        if (!rtlpriv->rtlhal.pfirmware) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Can't alloc buffer for fw.\n");
+               pr_err("Can't alloc buffer for fw.\n");
                return 1;
        }
 
@@ -190,8 +187,7 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
                                      rtlpriv->io.dev, GFP_KERNEL, hw,
                                      rtl_fw_cb);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Failed to request firmware!\n");
+               pr_err("Failed to request firmware!\n");
                return 1;
        }
        return 0;
@@ -264,6 +260,7 @@ static struct rtl_hal_ops rtl8723be_hal_ops = {
        .get_btc_status = rtl8723be_get_btc_status,
        .rx_command_packet = rtl8723be_rx_command_packet,
        .is_fw_header = is_fw_header,
+       .c2h_content_parsing = rtl8723be_c2h_content_parsing,
 };
 
 static struct rtl_mod_params rtl8723be_mod_params = {
@@ -273,7 +270,8 @@ static struct rtl_mod_params rtl8723be_mod_params = {
        .fwctrl_lps = true,
        .msi_support = false,
        .disable_watchdog = false,
-       .debug = DBG_EMERG,
+       .debug_level = 0,
+       .debug_mask = 0,
        .ant_sel = 0,
 };
 
@@ -388,7 +386,8 @@ MODULE_DESCRIPTION("Realtek 8723BE 802.11n PCI wireless");
 MODULE_FIRMWARE("rtlwifi/rtl8723befw.bin");
 
 module_param_named(swenc, rtl8723be_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl8723be_mod_params.debug_level, int, 0644);
+module_param_named(debug_mask, rtl8723be_mod_params.debug_mask, ullong, 0644);
 module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
@@ -401,7 +400,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
 MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
 MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
 MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
 MODULE_PARM_DESC(disable_watchdog,
                 "Set to 1 to disable the watchdog (default 0)\n");
 MODULE_PARM_DESC(ant_sel, "Set to 1 or 2 to force antenna number (default 0)\n");
index 2175aecbb8f4173145adf10f058cc6308ed0a4fa..6f65003a895a572448446c21a22725191f3cb24d 100644 (file)
@@ -666,8 +666,8 @@ void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
                        SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val);
                        break;
                default:
-                       RT_ASSERT(false, "ERR txdesc :%d not process\n",
-                                         desc_name);
+                       WARN_ONCE(true, "rtl8723be: ERR txdesc :%d not processed\n",
+                                 desc_name);
                        break;
                }
        } else {
@@ -685,8 +685,8 @@ void rtl8723be_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
                        SET_RX_DESC_EOR(pdesc, 1);
                        break;
                default:
-                       RT_ASSERT(false, "ERR rxdesc :%d not process\n",
-                                         desc_name);
+                       WARN_ONCE(true, "rtl8723be: ERR rxdesc :%d not processed\n",
+                                 desc_name);
                        break;
                }
        }
@@ -705,8 +705,8 @@ u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name)
                        ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
                        break;
                default:
-                       RT_ASSERT(false, "ERR txdesc :%d not process\n",
-                                         desc_name);
+                       WARN_ONCE(true, "rtl8723be: ERR txdesc :%d not processed\n",
+                                 desc_name);
                        break;
                }
        } else {
@@ -721,7 +721,7 @@ u32 rtl8723be_get_desc(u8 *pdesc, bool istx, u8 desc_name)
                        ret = GET_RX_DESC_BUFF_ADDR(pdesc);
                        break;
                default:
-                       RT_ASSERT(false, "ERR rxdesc :%d not process\n",
+                       WARN_ONCE(true, "rtl8723be: ERR rxdesc :%d not processed\n",
                                  desc_name);
                        break;
                }
index 6e518625edbe774e6b996d6b5739d563b7e8d35e..ac573d69f6d67429bdfe9785f929c40ff96c4ae0 100644 (file)
@@ -26,6 +26,7 @@
 #include "../wifi.h"
 #include "../pci.h"
 #include "../base.h"
+#include "../efuse.h"
 #include "fw_common.h"
 #include <linux/module.h>
 
@@ -53,65 +54,6 @@ void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable)
 }
 EXPORT_SYMBOL_GPL(rtl8723_enable_fw_download);
 
-void rtl8723_fw_block_write(struct ieee80211_hw *hw,
-                           const u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 blocksize = sizeof(u32);
-       u8 *bufferptr = (u8 *)buffer;
-       u32 *pu4byteptr = (u32 *)buffer;
-       u32 i, offset, blockcount, remainsize;
-
-       blockcount = size / blocksize;
-       remainsize = size % blocksize;
-
-       for (i = 0; i < blockcount; i++) {
-               offset = i * blocksize;
-               rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
-                               *(pu4byteptr + i));
-       }
-       if (remainsize) {
-               offset = blockcount * blocksize;
-               bufferptr += offset;
-               for (i = 0; i < remainsize; i++) {
-                       rtl_write_byte(rtlpriv,
-                                      (FW_8192C_START_ADDRESS + offset + i),
-                                      *(bufferptr + i));
-               }
-       }
-}
-EXPORT_SYMBOL_GPL(rtl8723_fw_block_write);
-
-void rtl8723_fw_page_write(struct ieee80211_hw *hw,
-                          u32 page, const u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 value8;
-       u8 u8page = (u8) (page & 0x07);
-
-       value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
-
-       rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
-       rtl8723_fw_block_write(hw, buffer, size);
-}
-EXPORT_SYMBOL_GPL(rtl8723_fw_page_write);
-
-void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
-{
-       u32 fwlen = *pfwlen;
-       u8 remain = (u8) (fwlen % 4);
-
-       remain = (remain == 0) ? 0 : (4 - remain);
-
-       while (remain > 0) {
-               pfwbuf[fwlen] = 0;
-               fwlen++;
-               remain--;
-       }
-       *pfwlen = fwlen;
-}
-EXPORT_SYMBOL(rtl8723_fill_dummy);
-
 void rtl8723_write_fw(struct ieee80211_hw *hw,
                      enum version_8723e version,
                      u8 *buffer, u32 size, u8 max_page)
@@ -123,26 +65,25 @@ void rtl8723_write_fw(struct ieee80211_hw *hw,
 
        RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
 
-       rtl8723_fill_dummy(bufferptr, &size);
+       rtl_fill_dummy(bufferptr, &size);
 
        page_nums = size / FW_8192C_PAGE_SIZE;
        remain_size = size % FW_8192C_PAGE_SIZE;
 
        if (page_nums > max_page) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Page numbers should not greater than %d\n", max_page);
+               pr_err("Page numbers should not be greater than %d\n",
+                      max_page);
        }
        for (page = 0; page < page_nums; page++) {
                offset = page * FW_8192C_PAGE_SIZE;
-               rtl8723_fw_page_write(hw, page, (bufferptr + offset),
-                                     FW_8192C_PAGE_SIZE);
+               rtl_fw_page_write(hw, page, (bufferptr + offset),
+                                 FW_8192C_PAGE_SIZE);
        }
 
        if (remain_size) {
                offset = page_nums * FW_8192C_PAGE_SIZE;
                page = page_nums;
-               rtl8723_fw_page_write(hw, page, (bufferptr + offset),
-                                     remain_size);
+               rtl_fw_page_write(hw, page, (bufferptr + offset), remain_size);
        }
        RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW write done.\n");
 }
@@ -209,14 +150,10 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be,
                 (!(value32 & FWDL_CHKSUM_RPT)));
 
        if (counter >= max_count) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "chksum report fail ! REG_MCUFWDL:0x%08x .\n",
-                        value32);
+               pr_err("Checksum report failed! REG_MCUFWDL:0x%08x.\n",
+                      value32);
                goto exit;
        }
-       RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
-
        value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL) | MCUFWDL_RDY;
        value32 &= ~WINTINI_RDY;
        rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
@@ -239,9 +176,8 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be,
 
        } while (counter++ < max_count);
 
-       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n",
-                value32);
+       pr_err("Polling FW ready failed!! REG_MCUFWDL:0x%08x.\n",
+              value32);
 
 exit:
        return err;
@@ -293,13 +229,8 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
        rtl8723_enable_fw_download(hw, false);
 
        err = rtl8723_fw_free_to_go(hw, is_8723be, max_count);
-       if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Firmware is not ready to run!\n");
-       } else {
-               RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
-                        "Firmware is ready to run!\n");
-       }
+       if (err)
+               pr_err("Firmware is not ready to run!\n");
        return 0;
 }
 EXPORT_SYMBOL_GPL(rtl8723_download_fw);
index 8ea372d1626e5e75432c363f1b592df04a710817..77c25a9762335a6faf8bb69443d6b271e05a9478 100644 (file)
@@ -28,7 +28,6 @@
 
 #define REG_SYS_FUNC_EN                                0x0002
 #define REG_MCUFWDL                            0x0080
-#define FW_8192C_START_ADDRESS                 0x1000
 #define FW_8192C_PAGE_SIZE                     4096
 #define FW_8723A_POLLING_TIMEOUT_COUNT         1000
 #define FW_8723B_POLLING_TIMEOUT_COUNT         6000
@@ -84,10 +83,6 @@ enum rtl8723be_cmd {
 void rtl8723ae_firmware_selfreset(struct ieee80211_hw *hw);
 void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw);
 void rtl8723_enable_fw_download(struct ieee80211_hw *hw, bool enable);
-void rtl8723_fw_block_write(struct ieee80211_hw *hw,
-                           const u8 *buffer, u32 size);
-void rtl8723_fw_page_write(struct ieee80211_hw *hw,
-                          u32 page, const u8 *buffer, u32 size);
 void rtl8723_write_fw(struct ieee80211_hw *hw,
                      enum version_8723e version,
                      u8 *buffer, u32 size, u8 max_page);
@@ -95,6 +90,5 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be, int count);
 int rtl8723_download_fw(struct ieee80211_hw *hw, bool is_8723be, int count);
 bool rtl8723_cmd_send_packet(struct ieee80211_hw *hw,
                             struct sk_buff *skb);
-void rtl8723_fill_dummy(u8 *pfwbuf, u32 *pfwlen);
 
 #endif
index 75cbd1509b52043863de0088ad18b307c412b85f..43d24e1ee5e67f5987bcf6b2951fe5dfc881d6b7 100644 (file)
@@ -99,7 +99,7 @@ u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
        offset &= 0xff;
        newoffset = offset;
        if (RT_CANNOT_IO(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
+               pr_err("return all one\n");
                return 0xFFFFFFFF;
        }
        tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
@@ -147,7 +147,7 @@ void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw,
        struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
 
        if (RT_CANNOT_IO(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
+               pr_err("stop\n");
                return;
        }
        offset &= 0xff;
@@ -283,7 +283,7 @@ bool rtl8723_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
        struct swchnlcmd *pcmd;
 
        if (cmdtable == NULL) {
-               RT_ASSERT(false, "cmdtable cannot be NULL.\n");
+               WARN_ONCE(true, "rtl8723-common: cmdtable cannot be NULL.\n");
                return false;
        }
 
index bdfd444955d25dad39ec4faa2f3d518b8a07e351..32900c51f024aa6192468fecb7c79984bd6b3f68 100644 (file)
@@ -604,8 +604,7 @@ static void rtl8821ae_dm_find_minimum_rssi(struct ieee80211_hw *hw)
        if ((mac->link_state < MAC80211_LINKED) &&
            (rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
                rtl_dm_dig->min_undec_pwdb_for_dm = 0;
-               RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
-                        "Not connected to any\n");
+               pr_debug("rtl8821ae: Not connected to any AP\n");
        }
        if (mac->link_state >= MAC80211_LINKED) {
                if (mac->opmode == NL80211_IFTYPE_AP ||
index b665446351a4f3f9497766758b6f32ef616418fa..a504dfae4ed37486ea90451bd000022c4eac56f4 100644 (file)
@@ -27,6 +27,7 @@
 #include "../pci.h"
 #include "../base.h"
 #include "../core.h"
+#include "../efuse.h"
 #include "reg.h"
 #include "def.h"
 #include "fw.h"
@@ -51,63 +52,6 @@ static void _rtl8821ae_enable_fw_download(struct ieee80211_hw *hw, bool enable)
        }
 }
 
-static void _rtl8821ae_fw_block_write(struct ieee80211_hw *hw,
-                                     const u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u32 blocksize = sizeof(u32);
-       u8 *bufferptr = (u8 *)buffer;
-       u32 *pu4byteptr = (u32 *)buffer;
-       u32 i, offset, blockcount, remainsize;
-
-       blockcount = size / blocksize;
-       remainsize = size % blocksize;
-
-       for (i = 0; i < blockcount; i++) {
-               offset = i * blocksize;
-               rtl_write_dword(rtlpriv, (FW_8821AE_START_ADDRESS + offset),
-                               *(pu4byteptr + i));
-       }
-
-       if (remainsize) {
-               offset = blockcount * blocksize;
-               bufferptr += offset;
-               for (i = 0; i < remainsize; i++) {
-                       rtl_write_byte(rtlpriv, (FW_8821AE_START_ADDRESS +
-                                       offset + i), *(bufferptr + i));
-               }
-       }
-}
-
-static void _rtl8821ae_fw_page_write(struct ieee80211_hw *hw,
-                                    u32 page, const u8 *buffer, u32 size)
-{
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-       u8 value8;
-       u8 u8page = (u8)(page & 0x07);
-
-       value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
-
-       rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
-       _rtl8821ae_fw_block_write(hw, buffer, size);
-}
-
-static void _rtl8821ae_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
-{
-       u32 fwlen = *pfwlen;
-       u8 remain = (u8)(fwlen % 4);
-
-       remain = (remain == 0) ? 0 : (4 - remain);
-
-       while (remain > 0) {
-               pfwbuf[fwlen] = 0;
-               fwlen++;
-               remain--;
-       }
-
-       *pfwlen = fwlen;
-}
-
 static void _rtl8821ae_write_fw(struct ieee80211_hw *hw,
                                enum version_8821ae version,
                                u8 *buffer, u32 size)
@@ -119,27 +63,24 @@ static void _rtl8821ae_write_fw(struct ieee80211_hw *hw,
 
        RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size);
 
-       _rtl8821ae_fill_dummy(bufferptr, &size);
+       rtl_fill_dummy(bufferptr, &size);
 
        pagenums = size / FW_8821AE_PAGE_SIZE;
        remainsize = size % FW_8821AE_PAGE_SIZE;
 
-       if (pagenums > 8) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Page numbers should not greater then 8\n");
-       }
+       if (pagenums > 8)
+               pr_err("Page numbers should not be greater than 8\n");
 
        for (page = 0; page < pagenums; page++) {
                offset = page * FW_8821AE_PAGE_SIZE;
-               _rtl8821ae_fw_page_write(hw, page, (bufferptr + offset),
-                                        FW_8821AE_PAGE_SIZE);
+               rtl_fw_page_write(hw, page, (bufferptr + offset),
+                                 FW_8821AE_PAGE_SIZE);
        }
 
        if (remainsize) {
                offset = pagenums * FW_8821AE_PAGE_SIZE;
                page = pagenums;
-               _rtl8821ae_fw_page_write(hw, page, (bufferptr + offset),
-                                        remainsize);
+               rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize);
        }
 }
 
@@ -161,10 +102,6 @@ static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw)
                          value32);
                goto exit;
        }
-
-       RT_TRACE(rtlpriv, COMP_FW, DBG_EMERG,
-                "Checksum report OK ! REG_MCUFWDL:0x%08x .\n", value32);
-
        value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
        value32 |= MCUFWDL_RDY;
        value32 &= ~WINTINI_RDY;
@@ -175,20 +112,14 @@ static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw)
        counter = 0;
        do {
                value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
-               if (value32 & WINTINI_RDY) {
-                       RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
-                                "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
-                                 value32);
-                       err = 0;
-                       goto exit;
-               }
+               if (value32 & WINTINI_RDY)
+                       return 0;
 
                udelay(FW_8821AE_POLLING_DELAY);
        } while (counter++ < FW_8821AE_POLLING_TIMEOUT_COUNT);
 
-       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                "Polling FW ready fail!! REG_MCUFWDL:0x%08x .\n",
-                value32);
+       pr_err("Polling FW ready failed!! REG_MCUFWDL:0x%08x.\n",
+              value32);
 
 exit:
        return err;
@@ -510,8 +441,8 @@ void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw,
        u32 tmp_cmdbuf[2];
 
        if (!rtlhal->fw_ready) {
-               RT_ASSERT(false,
-                         "return H2C cmd because of Fw download fail!!!\n");
+               WARN_ONCE(true,
+                         "rtl8821ae: H2C cmd dropped because FW download failed!\n");
                return;
        }
 
@@ -1809,9 +1740,9 @@ static void rtl8821ae_c2h_ra_report_handler(struct ieee80211_hw *hw,
        rtl8821ae_dm_update_init_rate(hw, rate);
 }
 
-static void _rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw,
-                                          u8 c2h_cmd_id, u8 c2h_cmd_len,
-                                          u8 *tmp_buf)
+void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw,
+                                  u8 c2h_cmd_id, u8 c2h_cmd_len,
+                                  u8 *tmp_buf)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
@@ -1853,5 +1784,15 @@ void rtl8821ae_c2h_packet_handler(struct ieee80211_hw *hw, u8 *buffer,
 
        RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD,
                      "[C2H packet], Content Hex:\n", tmp_buf, c2h_cmd_len);
-       _rtl8821ae_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf);
+
+       switch (c2h_cmd_id) {
+       case C2H_8812_BT_INFO:
+               rtl_c2hcmd_enqueue(hw, c2h_cmd_id, c2h_cmd_len, tmp_buf);
+               break;
+
+       default:
+               rtl8821ae_c2h_content_parsing(hw, c2h_cmd_id, c2h_cmd_len,
+                                             tmp_buf);
+               break;
+       }
 }
index 8f5b4aade3c91f356748dffa41318ce557c10bd5..90a98ed879f7e839b6f85a12240a673f4a18c5b6 100644 (file)
@@ -329,4 +329,7 @@ void rtl8821ae_set_fw_disconnect_decision_ctrl_cmd(struct ieee80211_hw *hw,
 void rtl8821ae_set_fw_global_info_cmd(struct ieee80211_hw *hw);
 void rtl8821ae_c2h_packet_handler(struct ieee80211_hw *hw,
                                  u8 *buffer, u8 length);
+void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw,
+                                  u8 c2h_cmd_id, u8 c2h_cmd_len,
+                                  u8 *tmp_buf);
 #endif
index 1281ebe0c30ac5c546508e26528ffe51d6f02754..363d2f28da1fccca8ef986eea90a1d607b87a4d2 100644 (file)
@@ -822,9 +822,8 @@ static bool _rtl8821ae_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
                        break;
 
                if (count > POLLING_LLT_THRESHOLD) {
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Failed to polling write LLT done at address %d!\n",
-                                address);
+                       pr_err("Failed to poll LLT write done at address %d!\n",
+                              address);
                        status = false;
                        break;
                }
@@ -891,9 +890,8 @@ static bool _rtl8821ae_llt_table_init(struct ieee80211_hw *hw)
 static void _rtl8821ae_gen_refresh_led_state(struct ieee80211_hw *hw)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
-       struct rtl_led *pled0 = &pcipriv->ledctl.sw_led0;
+       struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
        if (rtlpriv->rtlhal.up_first_time)
@@ -1128,7 +1126,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
        }
        if (0 == tmp) {
                read_addr = REG_DBI_RDATA + addr % 4;
-               ret = rtl_read_word(rtlpriv, read_addr);
+               ret = rtl_read_byte(rtlpriv, read_addr);
        }
        return ret;
 }
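
The rtl_read_word() -> rtl_read_byte() change above is a behavior fix rather than log cleanup: _rtl8821ae_dbi_read() returns a u8, and read_addr selects a single byte inside the 4-byte REG_DBI_RDATA window (addr % 4). The 16-bit read had its high byte discarded on return and could reach past the window when addr % 4 == 3; the byte read fetches exactly the byte that is returned.
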
@@ -1927,7 +1925,7 @@ int rtl8821ae_hw_init(struct ieee80211_hw *hw)
 
        rtstatus = _rtl8821ae_init_mac(hw);
        if (rtstatus != true) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
+               pr_err("Init MAC failed\n");
                err = 1;
                return err;
        }
@@ -2174,8 +2172,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
                         "Set Network type to AP!\n");
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Network type %d not support!\n", type);
+               pr_err("Network type %d not supported!\n", type);
                return 1;
        }
 
@@ -2249,7 +2246,7 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci)
                rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x2f3222);
                break;
        default:
-               RT_ASSERT(false, "invalid aci: %d !\n", aci);
+               WARN_ONCE(true, "rtl8821ae: invalid aci: %d !\n", aci);
                break;
        }
 }
@@ -2601,11 +2598,10 @@ static u8 _rtl8821ae_get_chnl_group(u8 chnl)
                        group = 12;
        else if (173 <= chnl && chnl <= 177)
                        group = 13;
-               else
-                       /*RT_TRACE(rtlpriv, COMP_EFUSE,DBG_LOUD,
-                               "5G, Channel %d in Group not found\n",chnl);*/
-                       RT_ASSERT(!COMP_EFUSE,
-                               "5G, Channel %d in Group not found\n", chnl);
+       else
+               WARN_ONCE(true,
+                         "rtl8821ae: 5G, Channel %d in Group not found\n",
+                         chnl);
        }
        return group;
 }
@@ -3101,7 +3097,6 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        int params[] = {RTL_EEPROM_ID, EEPROM_VID, EEPROM_DID,
                        EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR,
                        EEPROM_CHANNELPLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
@@ -3196,7 +3191,7 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
                "SWAS: bHwAntDiv = %x, TRxAntDivType = %x\n",
                rtlefuse->antenna_div_cfg, rtlefuse->antenna_div_type);
 
-       pcipriv->ledctl.led_opendrain = true;
+       rtlpriv->ledctl.led_opendrain = true;
 
        if (rtlhal->oem_id == RT_CID_DEFAULT) {
                switch (rtlefuse->eeprom_oemid) {
@@ -3227,10 +3222,10 @@ exit:
        struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
-       pcipriv->ledctl.led_opendrain = true;
+       rtlpriv->ledctl.led_opendrain = true;
        switch (rtlhal->oem_id) {
        case RT_CID_819X_HP:
-               pcipriv->ledctl.led_opendrain = true;
+               rtlpriv->ledctl.led_opendrain = true;
                break;
        case RT_CID_819X_LENOVO:
        case RT_CID_DEFAULT:
@@ -3276,7 +3271,7 @@ void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw)
                rtlefuse->autoload_failflag = false;
                _rtl8821ae_read_adapter_info(hw, false);
        } else {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Autoload ERR!!\n");
+               pr_err("Autoload ERR!!\n");
        }
        /*hal_ReadRFType_8812A()*/
        /* _rtl8821ae_hal_customized_behavior(hw); */
@@ -3951,8 +3946,7 @@ void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index,
                                if (mac->opmode == NL80211_IFTYPE_AP) {
                                        entry_id = rtl_cam_get_free_entry(hw, p_macaddr);
                                        if (entry_id >=  TOTAL_CAM_ENTRY) {
-                                               RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
-                                                        "Can not find free hwsecurity cam entry\n");
+                                               pr_err("Can not find free hwsecurity cam entry\n");
                                                return;
                                        }
                                } else {
@@ -4135,8 +4129,9 @@ void rtl8821ae_add_wowlan_pattern(struct ieee80211_hw *hw,
                        count++;
                } while (tmp && count < 100);
 
-               RT_ASSERT((count < 100),
-                         "Write wake up frame mask FAIL %d value!\n", tmp);
+               WARN_ONCE((count >= 100),
+                         "rtl8821ae: Write wake up frame mask FAIL %d value!\n",
+                         tmp);
        }
        /* Disable Rx packet buffer access. */
        rtl_write_byte(rtlpriv, REG_PKT_BUFF_ACCESS_CTRL,
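
The hw.c hunks above replace the driver-private RT_ASSERT()/RT_TRACE(..., COMP_ERR, DBG_EMERG, ...) wrappers with the kernel's generic WARN_ONCE() and pr_err(). Note the inverted sense: RT_ASSERT(expr, ...) warned when expr was false, while WARN_ONCE(cond, ...) fires when cond is true, which is why converted call sites such as the wowlan-pattern check negate their conditions. A minimal sketch of the two idioms, assuming nothing from the driver (function names and messages are illustrative):

    #include <linux/kernel.h>

    static int set_qos_param(int aci)
    {
    	/* WARN_ONCE() prints a backtrace at most once and returns the
    	 * condition, so the caller can still bail out cleanly. */
    	if (WARN_ONCE(aci < 0 || aci > 3, "example: invalid aci: %d\n", aci))
    		return -EINVAL;
    	return 0;
    }

    static int load_table(const void *table)
    {
    	if (!table) {
    		/* pr_err() stands in for RT_TRACE(..., DBG_EMERG, ...):
    		 * always printed, no per-driver debug machinery needed. */
    		pr_err("example: table load failed\n");
    		return -ENOENT;
    	}
    	return 0;
    }
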
index fcb3b28c6b8f0ff44b464a9e444cdd4b9940c459..405c7541b3863f37d9baadea6521c8301dd2bd16 100644 (file)
@@ -101,7 +101,6 @@ void rtl8812ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
 void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
        u8 ledcfg;
 
        RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
@@ -114,7 +113,7 @@ void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
                break;
        case LED_PIN_LED0:
                ledcfg &= 0xf0;
-               if (pcipriv->ledctl.led_opendrain) {
+               if (rtlpriv->ledctl.led_opendrain) {
                        ledcfg &= 0x90; /* Set to software control. */
                        rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg|BIT(3)));
                        ledcfg = rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG);
@@ -143,7 +142,6 @@ void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 {
        u16 ledreg = REG_LEDCFG1;
        struct rtl_priv *rtlpriv = rtl_priv(hw);
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
 
        switch (pled->ledpin) {
        case LED_PIN_LED0:
@@ -163,7 +161,7 @@ void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
                 "In SwLedOff,LedAddr:%X LEDPIN=%d\n",
                 ledreg, pled->ledpin);
        /*Open-drain arrangement for controlling the LED*/
-       if (pcipriv->ledctl.led_opendrain) {
+       if (rtlpriv->ledctl.led_opendrain) {
                u8 ledcfg = rtl_read_byte(rtlpriv, ledreg);
 
                ledreg &= 0xd0; /* Set to software control.*/
@@ -182,17 +180,17 @@ void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
 
 void rtl8821ae_init_sw_leds(struct ieee80211_hw *hw)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
 
-       _rtl8821ae_init_led(hw, &pcipriv->ledctl.sw_led0, LED_PIN_LED0);
-       _rtl8821ae_init_led(hw, &pcipriv->ledctl.sw_led1, LED_PIN_LED1);
+       _rtl8821ae_init_led(hw, &rtlpriv->ledctl.sw_led0, LED_PIN_LED0);
+       _rtl8821ae_init_led(hw, &rtlpriv->ledctl.sw_led1, LED_PIN_LED1);
 }
 
 static void _rtl8821ae_sw_led_control(struct ieee80211_hw *hw,
                                      enum led_ctl_mode ledaction)
 {
-       struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
-       struct rtl_led *pLed0 = &pcipriv->ledctl.sw_led0;
+       struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct rtl_led *pled0 = &rtlpriv->ledctl.sw_led0;
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 
        switch (ledaction) {
@@ -200,15 +198,15 @@ static void _rtl8821ae_sw_led_control(struct ieee80211_hw *hw,
        case LED_CTL_LINK:
        case LED_CTL_NO_LINK:
                if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
-                       rtl8812ae_sw_led_on(hw, pLed0);
+                       rtl8812ae_sw_led_on(hw, pled0);
                else
-                       rtl8821ae_sw_led_on(hw, pLed0);
+                       rtl8821ae_sw_led_on(hw, pled0);
                break;
        case LED_CTL_POWER_OFF:
                if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
-                       rtl8812ae_sw_led_off(hw, pLed0);
+                       rtl8812ae_sw_led_off(hw, pled0);
                else
-                       rtl8821ae_sw_led_off(hw, pLed0);
+                       rtl8821ae_sw_led_off(hw, pled0);
                break;
        default:
                break;
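
The LED hunks move ledctl out of the PCI-only struct rtl_pci_priv into the bus-agnostic struct rtl_priv, so LED state is reachable from both the PCI and USB paths (the USB header hunk further down drops its private copy for the same reason). A rough sketch of the before/after layout, trimmed to the relevant field:

    struct rtl_led_ctl {
    	bool led_opendrain;
    	/* sw_led0, sw_led1, ... */
    };

    /* Before: only reachable through rtl_pcipriv(hw). */
    struct rtl_pci_priv {
    	/* ... */
    	struct rtl_led_ctl ledctl;
    };

    /* After: reachable through rtl_priv(hw) on any bus. */
    struct rtl_priv {
    	/* ... */
    	struct rtl_led_ctl ledctl;
    };
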
index 5dad402171c2d363aa2fbacd7f2ee14c122f7a5d..8da874cbec1a19353877f5cdf4855050f333bbc6 100644 (file)
@@ -215,7 +215,6 @@ void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
 static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
                                         enum radio_path rfpath, u32 offset)
 {
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        bool is_pi_mode = false;
        u32 retvalue = 0;
@@ -223,7 +222,7 @@ static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
        /* 2009/06/17 MH We can not execute IO for power
        save or other accident mode.*/
        if (RT_CANNOT_IO(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "return all one\n");
+               pr_err("return all one\n");
                return 0xFFFFFFFF;
        }
        /* <20120809, Kordan> CCA OFF(when entering),
@@ -284,7 +283,7 @@ static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
        u32 newoffset;
 
        if (RT_CANNOT_IO(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "stop\n");
+               pr_err("stop\n");
                return;
        }
        offset &= 0xff;
@@ -989,7 +988,7 @@ static void _rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(struct ieee8
        s8 temp_pwrlmt = 0;
 
        for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
-               for (bw = 0; bw < MAX_5G_BANDWITH_NUM; ++bw) {
+               for (bw = 0; bw < MAX_5G_BANDWIDTH_NUM; ++bw) {
                        for (channel = 0; channel < CHANNEL_MAX_NUMBER_5G; ++channel) {
                                for (rate_section = 0; rate_section < MAX_RATE_SECTION_NUM; ++rate_section) {
                                        temp_pwrlmt = rtlphy->txpwr_limit_5g[regulation]
@@ -1164,7 +1163,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211
        _rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(hw);
 
        for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
-               for (bw = 0; bw < MAX_2_4G_BANDWITH_NUM; ++bw) {
+               for (bw = 0; bw < MAX_2_4G_BANDWIDTH_NUM; ++bw) {
                        for (channel = 0; channel < CHANNEL_MAX_NUMBER_2G; ++channel) {
                                for (rate_section = 0; rate_section < MAX_RATE_SECTION_NUM; ++rate_section) {
                                        /* obtain the base dBm values in 2.4G band
@@ -1220,7 +1219,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211
                }
        }
        for (regulation = 0; regulation < MAX_REGULATION_NUM; ++regulation) {
-               for (bw = 0; bw < MAX_5G_BANDWITH_NUM; ++bw) {
+               for (bw = 0; bw < MAX_5G_BANDWIDTH_NUM; ++bw) {
                        for (channel = 0; channel < CHANNEL_MAX_NUMBER_5G; ++channel) {
                                for (rate_section = 0; rate_section < MAX_RATE_SECTION_NUM; ++rate_section) {
                                        /* obtain the base dBm values in 5G band
@@ -1297,7 +1296,7 @@ static void _rtl8821ae_phy_init_txpower_limit(struct ieee80211_hw *hw)
                 "=====> _rtl8821ae_phy_init_txpower_limit()!\n");
 
        for (i = 0; i < MAX_REGULATION_NUM; ++i) {
-               for (j = 0; j < MAX_2_4G_BANDWITH_NUM; ++j)
+               for (j = 0; j < MAX_2_4G_BANDWIDTH_NUM; ++j)
                        for (k = 0; k < MAX_RATE_SECTION_NUM; ++k)
                                for (m = 0; m < CHANNEL_MAX_NUMBER_2G; ++m)
                                        for (l = 0; l < MAX_RF_PATH_NUM; ++l)
@@ -1306,7 +1305,7 @@ static void _rtl8821ae_phy_init_txpower_limit(struct ieee80211_hw *hw)
                                                        = MAX_POWER_INDEX;
        }
        for (i = 0; i < MAX_REGULATION_NUM; ++i) {
-               for (j = 0; j < MAX_5G_BANDWITH_NUM; ++j)
+               for (j = 0; j < MAX_5G_BANDWIDTH_NUM; ++j)
                        for (k = 0; k < MAX_RATE_SECTION_NUM; ++k)
                                for (m = 0; m < CHANNEL_MAX_NUMBER_5G; ++m)
                                        for (l = 0; l < MAX_RF_PATH_NUM; ++l)
@@ -1665,7 +1664,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
        rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw,
                                                       BASEBAND_CONFIG_PHY_REG);
        if (rtstatus != true) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Write BB Reg Fail!!\n");
+               pr_err("Write BB Reg Fail!!\n");
                return false;
        }
        _rtl8821ae_phy_init_tx_power_by_rate(hw);
@@ -1674,7 +1673,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
                                                    BASEBAND_CONFIG_PHY_REG);
        }
        if (rtstatus != true) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "BB_PG Reg Fail!!\n");
+               pr_err("BB_PG Reg Fail!!\n");
                return false;
        }
 
@@ -1688,7 +1687,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
                                                BASEBAND_CONFIG_AGC_TAB);
 
        if (rtstatus != true) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "AGC Table Fail\n");
+               pr_err("AGC Table Fail\n");
                return false;
        }
        rtlphy->cck_high_power = (bool)(rtl_get_bbreg(hw,
@@ -1870,8 +1869,8 @@ static u8 _rtl8821ae_get_rate_section_index(u32 regaddr)
        else if (regaddr >= 0xE20 && regaddr <= 0xE4C)
                index = (u8)((regaddr - 0xE20) / 4);
        else
-               RT_ASSERT(!COMP_INIT,
-                         "Invalid RegAddr 0x%x\n", regaddr);
+               WARN_ONCE(true,
+                         "rtl8821ae: Invalid RegAddr 0x%x\n", regaddr);
        return index;
 }
 
@@ -2064,8 +2063,7 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
                break;
        case RF90_PATH_C:
        case RF90_PATH_D:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", rfpath);
+               pr_err("switch case %#x not processed\n", rfpath);
                break;
        }
        return true;
@@ -2132,8 +2130,7 @@ bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
        case RF90_PATH_B:
        case RF90_PATH_C:
        case RF90_PATH_D:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", rfpath);
+               pr_err("switch case %#x not processed\n", rfpath);
                break;
        }
        return true;
@@ -2322,7 +2319,7 @@ static s8 _rtl8821ae_phy_get_ratesection_intxpower_byrate(u8 path, u8 rate)
                rate_section = 11;
                break;
        default:
-               RT_ASSERT(true, "Rate_Section is Illegal\n");
+               WARN_ONCE(true, "rtl8821ae: Rate_Section is Illegal\n");
                break;
        }
 
@@ -2588,7 +2585,7 @@ static s8 _rtl8821ae_phy_get_txpower_by_rate(struct ieee80211_hw *hw,
                shift = 24;
                break;
        default:
-               RT_ASSERT(true, "Rate_Section is Illegal\n");
+               WARN_ONCE(true, "rtl8821ae: Rate_Section is Illegal\n");
                break;
        }
 
@@ -3336,8 +3333,7 @@ void rtl8821ae_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
                                                      (u8 *)&iotype);
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "Unknown Scan Backup operation.\n");
+                       pr_err("Unknown Scan Backup operation.\n");
                        break;
                }
        }
@@ -3378,8 +3374,7 @@ static u8 _rtl8821ae_phy_get_secondary_chnl(struct rtl_priv *rtlpriv)
                else if (mac->cur_80_prime_sc == PRIME_CHNL_OFFSET_UPPER)
                        sc_set_40 = VHT_DATA_SC_40_UPPER_OF_80MHZ;
                else
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                               "SCMapping: Not Correct Primary40MHz Setting\n");
+                       pr_err("SCMapping: Not Correct Primary40MHz Setting\n");
 
                if ((mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER) &&
                        (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_LOWER))
@@ -3394,16 +3389,14 @@ static u8 _rtl8821ae_phy_get_secondary_chnl(struct rtl_priv *rtlpriv)
                        (mac->cur_80_prime_sc == HAL_PRIME_CHNL_OFFSET_UPPER))
                        sc_set_20 = VHT_DATA_SC_20_UPPERST_OF_80MHZ;
                else
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                               "SCMapping: Not Correct Primary40MHz Setting\n");
+                       pr_err("SCMapping: Not Correct Primary40MHz Setting\n");
        } else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
                if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_UPPER)
                        sc_set_20 = VHT_DATA_SC_20_UPPER_OF_80MHZ;
                else if (mac->cur_40_prime_sc == PRIME_CHNL_OFFSET_LOWER)
                        sc_set_20 = VHT_DATA_SC_20_LOWER_OF_80MHZ;
                else
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "SCMapping: Not Correct Primary40MHz Setting\n");
+                       pr_err("SCMapping: Not Correct Primary40MHz Setting\n");
        }
        return (sc_set_40 << 4) | sc_set_20;
 }
@@ -3479,8 +3472,8 @@ void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
 
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", rtlphy->current_chan_bw);
+               pr_err("unknown bandwidth: %#X\n",
+                      rtlphy->current_chan_bw);
                break;
        }
 
@@ -4660,8 +4653,8 @@ bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
                        postprocessing = true;
                        break;
                default:
-                       RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                                "switch case %#x not processed\n", iotype);
+                       pr_err("switch case %#x not processed\n",
+                              iotype);
                        break;
                }
        } while (false);
@@ -4704,9 +4697,8 @@ static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw)
        case IO_CMD_PAUSE_BAND1_DM_BY_SCAN:
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n",
-                        rtlphy->current_io_type);
+               pr_err("switch case %#x not processed\n",
+                      rtlphy->current_io_type);
                break;
        }
        rtlphy->set_io_inprogress = false;
@@ -4811,8 +4803,8 @@ static bool _rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
                }
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "switch case %#x not processed\n", rfpwr_state);
+               pr_err("switch case %#x not processed\n",
+                      rfpwr_state);
                bresult = false;
                break;
        }
index c6ab957023e6357f4f677f61111381d756ca12f9..95489f41f8a082b2c7af9e5e5439abf762bbed6f 100644 (file)
@@ -34,8 +34,6 @@ static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
 
 void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
 {
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
-
        switch (bandwidth) {
        case HT_CHANNEL_WIDTH_20:
                rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, BIT(11)|BIT(10), 3);
@@ -50,8 +48,7 @@ void rtl8821ae_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
                rtl_set_rfreg(hw, RF90_PATH_B, RF_CHNLBW, BIT(11)|BIT(10), 0);
                break;
        default:
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "unknown bandwidth: %#X\n", bandwidth);
+               pr_err("unknown bandwidth: %#X\n", bandwidth);
                break;
        }
 }
index 297938e0effd54c47bcc0ff27ea5c5c1552ceb52..77cf3b2cd3f1f4c7ba93437996efe55ad3d94fb5 100644 (file)
@@ -160,8 +160,6 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
        rtlpriv->psc.wo_wlan_mode = WAKE_ON_MAGIC_PACKET |
                                    WAKE_ON_PATTERN_MATCH;
 
-       /* for debug level */
-       rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
        /* for LPS & IPS */
        rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
@@ -192,14 +190,12 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
        /* for firmware buf */
        rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
        if (!rtlpriv->rtlhal.pfirmware) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Can't alloc buffer for fw.\n");
+               pr_err("Can't alloc buffer for fw.\n");
                return 1;
        }
        rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000);
        if (!rtlpriv->rtlhal.wowlan_firmware) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Can't alloc buffer for wowlan fw.\n");
+               pr_err("Can't alloc buffer for wowlan fw.\n");
                return 1;
        }
 
@@ -218,8 +214,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
                                      rtlpriv->io.dev, GFP_KERNEL, hw,
                                      rtl_fw_cb);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Failed to request normal firmware!\n");
+               pr_err("Failed to request normal firmware!\n");
                return 1;
        }
        /*load wowlan firmware*/
@@ -229,8 +224,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
                                      rtlpriv->io.dev, GFP_KERNEL, hw,
                                      rtl_wowlan_fw_cb);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Failed to request wowlan firmware!\n");
+               pr_err("Failed to request wowlan firmware!\n");
                return 1;
        }
        return 0;
@@ -303,6 +297,7 @@ static struct rtl_hal_ops rtl8821ae_hal_ops = {
        .fill_h2c_cmd = rtl8821ae_fill_h2c_cmd,
        .get_btc_status = rtl8821ae_get_btc_status,
        .rx_command_packet = rtl8821ae_rx_command_packet,
+       .c2h_content_parsing = rtl8821ae_c2h_content_parsing,
        .add_wowlan_pattern = rtl8821ae_add_wowlan_pattern,
 };
 
@@ -313,7 +308,8 @@ static struct rtl_mod_params rtl8821ae_mod_params = {
        .fwctrl_lps = true,
        .msi_support = true,
        .int_clear = true,
-       .debug = DBG_EMERG,
+       .debug_level = 0,
+       .debug_mask = 0,
        .disable_watchdog = 0,
 };
 
@@ -434,7 +430,8 @@ MODULE_DESCRIPTION("Realtek 8821ae 802.11ac PCI wireless");
 MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin");
 
 module_param_named(swenc, rtl8821ae_mod_params.sw_crypto, bool, 0444);
-module_param_named(debug, rtl8821ae_mod_params.debug, int, 0444);
+module_param_named(debug_level, rtl8821ae_mod_params.debug_level, int, 0644);
+module_param_named(debug_mask, rtl8821ae_mod_params.debug_mask, ullong, 0644);
 module_param_named(ips, rtl8821ae_mod_params.inactiveps, bool, 0444);
 module_param_named(swlps, rtl8821ae_mod_params.swctrl_lps, bool, 0444);
 module_param_named(fwlps, rtl8821ae_mod_params.fwctrl_lps, bool, 0444);
@@ -447,7 +444,8 @@ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
 MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
 MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
 MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
-MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
 MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
 MODULE_PARM_DESC(int_clear, "Set to 0 to disable interrupt clear before set (default 1)\n");
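
The module-parameter hunk splits the old single debug knob into a runtime-writable pair: an int debug_level plus a 64-bit debug_mask (hence the ullong parameter type and the 0644 mode, where the old debug was read-only 0444). A hedged sketch of how such a pair is typically consumed; drv_dbg() and COMP_EXAMPLE are illustrative names, not this driver's API:

    #include <linux/bitops.h>
    #include <linux/module.h>
    #include <linux/printk.h>

    static int debug_level;			/* 0..5 verbosity */
    static unsigned long long debug_mask;	/* per-component bits */
    module_param(debug_level, int, 0644);	/* writable at runtime */
    module_param(debug_mask, ullong, 0644);

    #define COMP_EXAMPLE	BIT_ULL(0)	/* hypothetical component bit */

    /* Hypothetical helper: print only when the component bit is set
     * and the message's level does not exceed debug_level. */
    #define drv_dbg(comp, level, fmt, ...)				\
    do {								\
    	if ((debug_mask & (comp)) && (level) <= debug_level)	\
    		pr_info(fmt, ##__VA_ARGS__);			\
    } while (0)
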
 
index 27727186ba5f0fe07d188bbd48d7055750b531bc..108098152cf3c20f67e4b147c5bdb45759835449 100644 (file)
@@ -904,8 +904,9 @@ void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
                        SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *)val);
                        break;
                default:
-                       RT_ASSERT(false,
-                                 "ERR txdesc :%d not process\n", desc_name);
+                       WARN_ONCE(true,
+                                 "rtl8821ae: ERR txdesc :%d not processed\n",
+                                 desc_name);
                        break;
                }
        } else {
@@ -923,8 +924,9 @@ void rtl8821ae_set_desc(struct ieee80211_hw *hw, u8 *pdesc,
                        SET_RX_DESC_EOR(pdesc, 1);
                        break;
                default:
-                       RT_ASSERT(false,
-                                 "ERR rxdesc :%d not process\n", desc_name);
+                       WARN_ONCE(true,
+                                 "rtl8821ae: ERR rxdesc :%d not processed\n",
+                                 desc_name);
                        break;
                }
        }
@@ -943,8 +945,9 @@ u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name)
                        ret = GET_TX_DESC_TX_BUFFER_ADDRESS(pdesc);
                        break;
                default:
-                       RT_ASSERT(false,
-                                 "ERR txdesc :%d not process\n", desc_name);
+                       WARN_ONCE(true,
+                                 "rtl8821ae: ERR txdesc :%d not processed\n",
+                                 desc_name);
                        break;
                }
        } else {
@@ -959,8 +962,9 @@ u32 rtl8821ae_get_desc(u8 *pdesc, bool istx, u8 desc_name)
                        ret = GET_RX_DESC_BUFF_ADDR(pdesc);
                        break;
                default:
-                       RT_ASSERT(false,
-                                 "ERR rxdesc :%d not process\n", desc_name);
+                       WARN_ONCE(true,
+                                 "rtl8821ae: ERR rxdesc :%d not processed\n",
+                                 desc_name);
                        break;
                }
        }
index 49015b05f3d1a048ee6e3ac00426ea76cd5bab44..4d989b8ab185681aa500b4e7de7fc34260f904a9 100644 (file)
@@ -421,14 +421,12 @@ static void _rtl_rx_completed(struct urb *urb);
 static int _rtl_prep_rx_urb(struct ieee80211_hw *hw, struct rtl_usb *rtlusb,
                              struct urb *urb, gfp_t gfp_mask)
 {
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
        void *buf;
 
        buf = usb_alloc_coherent(rtlusb->udev, rtlusb->rx_max_size, gfp_mask,
                                 &urb->transfer_dma);
        if (!buf) {
-               RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
-                        "Failed to usb_alloc_coherent!!\n");
+               pr_err("Failed to usb_alloc_coherent!!\n");
                return -ENOMEM;
        }
 
@@ -613,8 +611,6 @@ static unsigned int _rtl_rx_get_padding(struct ieee80211_hdr *hdr,
 static void _rtl_rx_completed(struct urb *_urb)
 {
        struct rtl_usb *rtlusb = (struct rtl_usb *)_urb->context;
-       struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
        int err = 0;
 
        if (unlikely(IS_USB_STOP(rtlusb)))
@@ -628,17 +624,15 @@ static void _rtl_rx_completed(struct urb *_urb)
                struct ieee80211_hdr *hdr;
 
                if (size < RTL_RX_DESC_SIZE + sizeof(struct ieee80211_hdr)) {
-                       RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
-                                "Too short packet from bulk IN! (len: %d)\n",
-                                size);
+                       pr_err("Too short packet from bulk IN! (len: %d)\n",
+                              size);
                        goto resubmit;
                }
 
                qlen = skb_queue_len(&rtlusb->rx_queue);
                if (qlen >= __RX_SKB_MAX_QUEUED) {
-                       RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
-                                "Pending RX skbuff queue full! (qlen: %d)\n",
-                                qlen);
+                       pr_err("Pending RX skbuff queue full! (qlen: %d)\n",
+                              qlen);
                        goto resubmit;
                }
 
@@ -647,8 +641,7 @@ static void _rtl_rx_completed(struct urb *_urb)
 
                skb = dev_alloc_skb(size + __RADIO_TAP_SIZE_RSV + padding);
                if (!skb) {
-                       RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
-                                "Can't allocate skb for bulk IN!\n");
+                       pr_err("Can't allocate skb for bulk IN!\n");
                        goto resubmit;
                }
 
@@ -725,7 +718,6 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw)
        struct urb *urb;
        int err;
        int i;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
 
        WARN_ON(0 == rtlusb->rx_urb_num);
@@ -740,8 +732,7 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw)
 
                err = _rtl_prep_rx_urb(hw, rtlusb, urb, GFP_KERNEL);
                if (err < 0) {
-                       RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
-                                "Failed to prep_rx_urb!!\n");
+                       pr_err("Failed to prep_rx_urb!!\n");
                        usb_free_urb(urb);
                        goto err_out;
                }
@@ -827,19 +818,36 @@ static void rtl_usb_stop(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
        struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+       struct urb *urb;
 
        /* should after adapter start and interrupt enable. */
        set_hal_stop(rtlhal);
        cancel_work_sync(&rtlpriv->works.fill_h2c_cmd);
        /* Enable software */
        SET_USB_STOP(rtlusb);
+
+       /* free pre-allocated URBs from rtl_usb_start() */
+       usb_kill_anchored_urbs(&rtlusb->rx_submitted);
+
+       tasklet_kill(&rtlusb->rx_work_tasklet);
+       cancel_work_sync(&rtlpriv->works.lps_change_work);
+
+       flush_workqueue(rtlpriv->works.rtl_wq);
+
+       skb_queue_purge(&rtlusb->rx_queue);
+
+       while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) {
+               usb_free_coherent(urb->dev, urb->transfer_buffer_length,
+                               urb->transfer_buffer, urb->transfer_dma);
+               usb_free_urb(urb);
+       }
+
        rtlpriv->cfg->ops->hw_disable(hw);
 }
 
 static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb)
 {
        int err;
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
 
        usb_anchor_urb(_urb, &rtlusb->tx_submitted);
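
The new rtl_usb_stop() body above takes over RX teardown: it kills everything still anchored on rx_submitted, drains the tasklet, work items and rx_queue, then frees the coherent buffers parked on rx_cleanup_urbs. A condensed sketch of that kill-then-reap anchor pattern as a standalone helper (error handling elided):

    #include <linux/usb.h>

    static void rx_urbs_teardown(struct usb_anchor *submitted,
    				 struct usb_anchor *cleanup)
    {
    	struct urb *urb;

    	/* Cancel everything in flight; completion handlers are
    	 * expected to park finished URBs on the cleanup anchor. */
    	usb_kill_anchored_urbs(submitted);

    	/* usb_get_from_anchor() takes a reference each time, so
    	 * free both the coherent buffer and the URB itself. */
    	while ((urb = usb_get_from_anchor(cleanup))) {
    		usb_free_coherent(urb->dev, urb->transfer_buffer_length,
    				  urb->transfer_buffer, urb->transfer_dma);
    		usb_free_urb(urb);
    	}
    }
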
@@ -847,8 +855,7 @@ static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb)
        if (err < 0) {
                struct sk_buff *skb;
 
-               RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
-                        "Failed to submit urb\n");
+               pr_err("Failed to submit urb\n");
                usb_unanchor_urb(_urb);
                skb = (struct sk_buff *)_urb->context;
                kfree_skb(skb);
@@ -859,7 +866,6 @@ static void _rtl_submit_tx_urb(struct ieee80211_hw *hw, struct urb *_urb)
 static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb,
                        struct sk_buff *skb)
 {
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
        struct ieee80211_tx_info *txinfo;
 
@@ -870,8 +876,7 @@ static int _usb_tx_post(struct ieee80211_hw *hw, struct urb *urb,
        txinfo->flags |= IEEE80211_TX_STAT_ACK;
 
        if (urb->status) {
-               RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
-                        "Urb has error status 0x%X\n", urb->status);
+               pr_err("Urb has error status 0x%X\n", urb->status);
                goto out;
        }
        /*  TODO:       statistics */
@@ -919,7 +924,6 @@ static struct urb *_rtl_usb_tx_urb_setup(struct ieee80211_hw *hw,
 static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
                       enum rtl_txq qnum)
 {
-       struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
        u32 ep_num;
        struct urb *_urb = NULL;
@@ -927,8 +931,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
 
        WARN_ON(NULL == rtlusb->usb_tx_aggregate_hdl);
        if (unlikely(IS_USB_STOP(rtlusb))) {
-               RT_TRACE(rtlpriv, COMP_USB, DBG_EMERG,
-                        "USB device is stopping...\n");
+               pr_err("USB device is stopping...\n");
                kfree_skb(skb);
                return;
        }
@@ -936,8 +939,7 @@ static void _rtl_usb_transmit(struct ieee80211_hw *hw, struct sk_buff *skb,
        _skb = skb;
        _urb = _rtl_usb_tx_urb_setup(hw, _skb, ep_num);
        if (unlikely(!_urb)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Can't allocate urb. Drop skb!\n");
+               pr_err("Can't allocate urb. Drop skb!\n");
                kfree_skb(skb);
                return;
        }
@@ -1059,7 +1061,7 @@ int rtl_usb_probe(struct usb_interface *intf,
        hw = ieee80211_alloc_hw(sizeof(struct rtl_priv) +
                                sizeof(struct rtl_usb_priv), &rtl_ops);
        if (!hw) {
-               RT_ASSERT(false, "ieee80211 alloc failed\n");
+               WARN_ONCE(true, "rtl_usb: ieee80211 alloc failed\n");
                return -ENOMEM;
        }
        rtlpriv = hw->priv;
@@ -1090,7 +1092,6 @@ int rtl_usb_probe(struct usb_interface *intf,
        rtlpriv->rtlhal.interface = INTF_USB;
        rtlpriv->cfg = rtl_hal_cfg;
        rtlpriv->intf_ops = &rtl_usb_ops;
-       rtl_dbgp_flag_init(hw);
        /* Init IO handler */
        _rtl_usb_io_handler_init(&udev->dev, hw);
        rtlpriv->cfg->ops->read_chip_version(hw);
@@ -1103,20 +1104,18 @@ int rtl_usb_probe(struct usb_interface *intf,
        /* Init mac80211 sw */
        err = rtl_init_core(hw);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Can't allocate sw for mac80211\n");
+               pr_err("Can't allocate sw for mac80211\n");
                goto error_out;
        }
        if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
+               pr_err("Can't init_sw_vars\n");
                goto error_out;
        }
        rtlpriv->cfg->ops->init_sw_leds(hw);
 
        err = ieee80211_register_hw(hw);
        if (err) {
-               RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
-                        "Can't register mac80211 hw.\n");
+               pr_err("Can't register mac80211 hw.\n");
                err = -ENODEV;
                goto error_out;
        }
index a6d43d2ecd369435b15b6d2dc7ac810afff0a44b..c91cec04bfaf64cff00b449b96f0f847e8b6b100 100644 (file)
@@ -146,8 +146,8 @@ struct rtl_usb {
 };
 
 struct rtl_usb_priv {
+       struct bt_coexist_info bt_coexist;
        struct rtl_usb dev;
-       struct rtl_led_ctl ledctl;
 };
 
 #define rtl_usbpriv(hw)         (((struct rtl_usb_priv *)(rtl_priv(hw))->priv))
index dafe486f844867a1f96637af5fff774bf3ee3afd..65ef42b376515dfac7de588ba5ea67504f8c6630 100644 (file)
@@ -157,8 +157,8 @@ enum rtl8192c_h2c_cmd {
 #define MAX_REGULATION_NUM             4
 #define MAX_RF_PATH_NUM                        4
 #define MAX_RATE_SECTION_NUM           6
-#define MAX_2_4G_BANDWITH_NUM          4
-#define MAX_5G_BANDWITH_NUM            4
+#define MAX_2_4G_BANDWIDTH_NUM         4
+#define MAX_5G_BANDWIDTH_NUM           4
 #define        MAX_RF_PATH                     4
 #define        MAX_CHNL_GROUP_24G              6
 #define        MAX_CHNL_GROUP_5G               14
@@ -925,6 +925,14 @@ enum wolpattern_type {
        UNKNOWN_TYPE = 4,
 };
 
+enum package_type {
+       PACKAGE_DEFAULT,
+       PACKAGE_QFN68,
+       PACKAGE_TFBGA90,
+       PACKAGE_TFBGA80,
+       PACKAGE_TFBGA79
+};
+
 struct octet_string {
        u8 *octet;
        u16 length;
@@ -1257,12 +1265,12 @@ struct rtl_phy {
        u8 cur_bw40_txpwridx;
 
        s8 txpwr_limit_2_4g[MAX_REGULATION_NUM]
-                          [MAX_2_4G_BANDWITH_NUM]
+                          [MAX_2_4G_BANDWIDTH_NUM]
                           [MAX_RATE_SECTION_NUM]
                           [CHANNEL_MAX_NUMBER_2G]
                           [MAX_RF_PATH_NUM];
        s8 txpwr_limit_5g[MAX_REGULATION_NUM]
-                        [MAX_5G_BANDWITH_NUM]
+                        [MAX_5G_BANDWIDTH_NUM]
                         [MAX_RATE_SECTION_NUM]
                         [CHANNEL_MAX_NUMBER_5G]
                         [MAX_RF_PATH_NUM];
@@ -1509,6 +1517,7 @@ struct rtl_hal {
        u32 version;            /*version of chip */
        u8 state;               /*stop 0, start 1 */
        u8 board_type;
+       u8 package_type;
        u8 external_pa;
 
        u8 pa_mode;
@@ -2193,6 +2202,8 @@ struct rtl_hal_ops {
                                   struct rtl_wow_pattern *rtl_pattern,
                                   u8 index);
        u16 (*get_available_desc)(struct ieee80211_hw *hw, u8 q_idx);
+       void (*c2h_content_parsing)(struct ieee80211_hw *hw, u8 tag, u8 len,
+                                   u8 *val);
 };
 
 struct rtl_intf_ops {
@@ -2221,11 +2232,13 @@ struct rtl_intf_ops {
 };
 
 struct rtl_mod_params {
+       /* default: 0,0 */
+       u64 debug_mask;
        /* default: 0 = using hardware encryption */
        bool sw_crypto;
 
        /* default: 0 = DBG_EMERG (0)*/
-       int debug;
+       int debug_level;
 
        /* default: 1 = using no linked power save */
        bool inactiveps;
@@ -2306,6 +2319,7 @@ struct rtl_locks {
        spinlock_t waitq_lock;
        spinlock_t entry_list_lock;
        spinlock_t usb_lock;
+       spinlock_t c2hcmd_lock;
 
        /*FW clock change */
        spinlock_t fw_ps_lock;
@@ -2335,6 +2349,7 @@ struct rtl_works {
        struct workqueue_struct *rtl_wq;
        struct delayed_work watchdog_wq;
        struct delayed_work ips_nic_off_wq;
+       struct delayed_work c2hcmd_wq;
 
        /* For SW LPS */
        struct delayed_work ps_work;
@@ -2345,16 +2360,6 @@ struct rtl_works {
        struct work_struct fill_h2c_cmd;
 };
 
-struct rtl_debug {
-       u32 dbgp_type[DBGP_TYPE_MAX];
-       int global_debuglevel;
-       u64 global_debugcomponents;
-
-       /* add for proc debug */
-       struct proc_dir_entry *proc_dir;
-       char proc_name[20];
-};
-
 #define MIMO_PS_STATIC                 0
 #define MIMO_PS_DYNAMIC                        1
 #define MIMO_PS_NOLIMIT                        3
@@ -2462,6 +2467,7 @@ struct rtl_btc_info {
        u8 bt_type;
        u8 btcoexist;
        u8 ant_num;
+       u8 single_ant_path;
 };
 
 struct bt_coexist_info {
@@ -2551,6 +2557,13 @@ struct proxim {
        u8  (*proxim_get_var)(struct ieee80211_hw *hw, u8 type);
 };
 
+struct rtl_c2hcmd {
+       struct list_head list;
+       u8 tag;
+       u8 len;
+       u8 *val;
+};
+
 struct rtl_priv {
        struct ieee80211_hw *hw;
        struct completion firmware_loading_complete;
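
The struct rtl_c2hcmd just added, together with the c2hcmd_list, c2hcmd_lock and c2hcmd_wq members from the neighbouring hunks and the per-HAL c2h_content_parsing hook, lets the IRQ path queue firmware C2H events and parse them later in process context. A hedged sketch of the producer half (names such as c2h_enqueue are illustrative):

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct c2h_item {
    	struct list_head list;
    	u8 tag, len;
    	u8 *val;
    };

    static LIST_HEAD(c2h_list);
    static DEFINE_SPINLOCK(c2h_lock);

    /* IRQ side: copy the raw event and defer parsing to process context. */
    static void c2h_enqueue(u8 tag, u8 len, const u8 *val)
    {
    	struct c2h_item *item = kmalloc(sizeof(*item) + len, GFP_ATOMIC);
    	unsigned long flags;

    	if (!item)
    		return;
    	item->tag = tag;
    	item->len = len;
    	item->val = (u8 *)(item + 1);	/* payload lives after the header */
    	memcpy(item->val, val, len);

    	spin_lock_irqsave(&c2h_lock, flags);
    	list_add_tail(&item->list, &c2h_list);
    	spin_unlock_irqrestore(&c2h_lock, flags);
    	/* ...then kick the c2hcmd work to drain the list and call the
    	 * HAL's c2h_content_parsing() on each entry. */
    }
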
@@ -2570,6 +2583,7 @@ struct rtl_priv {
        struct rtl_dm dm;
        struct rtl_security sec;
        struct rtl_efuse efuse;
+       struct rtl_led_ctl ledctl;
 
        struct rtl_ps_ctl psc;
        struct rate_adaptive ra;
@@ -2583,7 +2597,9 @@ struct rtl_priv {
        /* sta entry list for ap adhoc or mesh */
        struct list_head entry_list;
 
-       struct rtl_debug dbg;
+       /* c2hcmd list for kthread level access */
+       struct list_head c2hcmd_list;
+
        int max_fw_size;
 
        /*
@@ -2713,23 +2729,14 @@ enum bt_radio_shared {
        (le32_to_cpu(_val))
 
 /* Read data from memory */
-#define READEF1BYTE(_ptr)      \
+#define READEF1BYTE(_ptr)      \
        EF1BYTE(*((u8 *)(_ptr)))
 /* Read le16 data from memory and convert to host ordering */
-#define READEF2BYTE(_ptr)      \
+#define READEF2BYTE(_ptr)      \
        EF2BYTE(*(_ptr))
-#define READEF4BYTE(_ptr)      \
+#define READEF4BYTE(_ptr)      \
        EF4BYTE(*(_ptr))
 
-/* Write data to memory */
-#define WRITEEF1BYTE(_ptr, _val)       \
-       (*((u8 *)(_ptr))) = EF1BYTE(_val)
-/* Write le16 data to memory in host ordering */
-#define WRITEEF2BYTE(_ptr, _val)       \
-       (*((u16 *)(_ptr))) = EF2BYTE(_val)
-#define WRITEEF4BYTE(_ptr, _val)       \
-       (*((u32 *)(_ptr))) = EF2BYTE(_val)
-
 /* Create a bit mask
  * Examples:
  * BIT_LEN_MASK_32(0) => 0x00000000
@@ -2810,14 +2817,14 @@ value to host byte ordering.*/
  * Set subfield of little-endian 4-byte value to specified value.
  */
 #define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
-       *((u32 *)(__pstart)) = \
-       ( \
+       *((__le32 *)(__pstart)) = \
+       cpu_to_le32( \
                LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
                ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
        );
 #define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
-       *((u16 *)(__pstart)) = \
-       ( \
+       *((__le16 *)(__pstart)) = \
+       cpu_to_le16( \
                LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
                ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
        );
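
The SET_BITS_TO_LE_4BYTE/_2BYTE change is an endianness fix: the destination is now typed __le32/__le16 and the merged value is wrapped in cpu_to_le32()/cpu_to_le16(), so the store is byte-identical on big- and little-endian hosts (and sparse stops warning). A function-style equivalent of the 4-byte macro:

    #include <asm/byteorder.h>	/* cpu_to_le32(), le32_to_cpu() */
    #include <linux/types.h>

    static void set_bits_to_le32(void *p, int bitoff, int bitlen, u32 val)
    {
    	__le32 *dst = p;
    	u32 mask = (bitlen == 32) ? ~0U : ((1U << bitlen) - 1);
    	u32 cur = le32_to_cpu(*dst);	/* convert to host order */

    	cur &= ~(mask << bitoff);		/* clear the target field */
    	cur |= (val & mask) << bitoff;		/* insert the new value  */
    	*dst = cpu_to_le32(cur);		/* store little-endian   */
    }
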
index 603c90470225d92a11a1e8e3b691bfecaf9058e9..785334f7a5386e82c32cd6fe41dee6d36b37b969 100644 (file)
@@ -3187,7 +3187,7 @@ static void rndis_do_cqm(struct usbnet *usbdev, s32 rssi)
                return;
 
        priv->last_cqm_event_rssi = rssi;
-       cfg80211_cqm_rssi_notify(usbdev->net, event, GFP_KERNEL);
+       cfg80211_cqm_rssi_notify(usbdev->net, event, rssi, GFP_KERNEL);
 }
 
 #define DEVICE_POLLER_JIFFIES (HZ)
index dadaa73ab49d7f098d4056214252cf627c0cbd51..e3216473aecb7a7e27afb707a66e3b00ddd3164d 100644 (file)
@@ -877,7 +877,7 @@ static void rsi_perform_cqm(struct rsi_common *common,
 
        common->cqm_info.last_cqm_event_rssi = rssi;
        rsi_dbg(INFO_ZONE, "CQM: Notifying event: %d\n", event);
-       ieee80211_cqm_rssi_notify(adapter->vifs[0], event, GFP_KERNEL);
+       ieee80211_cqm_rssi_notify(adapter->vifs[0], event, rssi, GFP_KERNEL);
 
        return;
 }
index daf06a4f842ed2e787e41390ab92d1364569d860..a52224836a2bb65ba03fa5f7c3838c7f60346a7d 100644 (file)
@@ -1019,7 +1019,7 @@ void cw1200_event_handler(struct work_struct *work)
                                NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW :
                                NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
                        pr_debug("[CQM] RSSI event: %d.\n", rcpi_rssi);
-                       ieee80211_cqm_rssi_notify(priv->vif, cqm_evt,
+                       ieee80211_cqm_rssi_notify(priv->vif, cqm_evt, rcpi_rssi,
                                                  GFP_KERNEL);
                        break;
                }
index d0593bc1f1a929449ef0dff7956d2c8681be2e35..f5acd24d0e2b142d96ef1e24cdd94a7252d66b52 100644 (file)
@@ -150,7 +150,7 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
                                     "ROAMING_TRIGGER_LOW_RSSI_EVENT");
                        ieee80211_cqm_rssi_notify(wl->vif,
                                NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
-                               GFP_KERNEL);
+                               0, GFP_KERNEL);
                }
 
                if (vector & ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID) {
@@ -158,7 +158,7 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
                                     "ROAMING_TRIGGER_REGAINED_RSSI_EVENT");
                        ieee80211_cqm_rssi_notify(wl->vif,
                                NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
-                               GFP_KERNEL);
+                               0, GFP_KERNEL);
                }
        }
 
index 7f672f6879d046ee69982f2d38f5cbf19e2e5dc9..58e148d7bc7b66f4d819209cb42b89c998496cbb 100644 (file)
@@ -281,7 +281,7 @@ static ssize_t dynamic_ps_timeout_write(struct file *file,
        }
 
        if (value < 1 || value > 65535) {
-               wl1271_warning("dyanmic_ps_timeout is not in valid range");
+               wl1271_warning("dynamic_ps_timeout is not in valid range");
                return -ERANGE;
        }
 
index 4b59f67724dead0621174da68b2e6167506b3b95..f2e90d223d94848ba4c7ee762a5c2d3ec10a99b2 100644 (file)
@@ -129,7 +129,8 @@ void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr)
 
                vif = wl12xx_wlvif_to_vif(wlvif);
                if (event != wlvif->last_rssi_event)
-                       ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
+                       ieee80211_cqm_rssi_notify(vif, event, metric,
+                                                 GFP_KERNEL);
                wlvif->last_rssi_event = event;
        }
 }
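
The rndis, rsi, cw1200, wl1251 and wlcore call-site updates above all track one mac80211/cfg80211 API change: ieee80211_cqm_rssi_notify() and cfg80211_cqm_rssi_notify() gained an s32 rssi_level argument so userspace can see the RSSI that triggered the event; drivers without a meaningful value, like wl1251, pass 0. The updated call shape, assuming the post-change signature:

    #include <net/mac80211.h>

    static void report_low_rssi(struct ieee80211_vif *vif, s32 rssi_dbm)
    {
    	/* Third argument is new: the RSSI level itself, 0 if unknown. */
    	ieee80211_cqm_rssi_notify(vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
    				  rssi_dbm, GFP_KERNEL);
    }
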
index e536aa01b937a958684c81a8a8c228dc44975b42..a21fda910529a45aff033ccbda6bfbdd758a66c9 100644 (file)
@@ -3202,6 +3202,21 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
                        if (ret < 0)
                                goto out_sleep;
                }
+
+               /*
+                * If the interface is in AP mode and was created with the
+                * allmulticast flag, disable the firmware filters so that
+                * all multicast packets are passed. This is mandatory for
+                * MDNS-based discovery protocols.
+                */
+               if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
+                       if (*total & FIF_ALLMULTI) {
+                               ret = wl1271_acx_group_address_tbl(wl, wlvif,
+                                                       false,
+                                                       NULL, 0);
+                               if (ret < 0)
+                                       goto out_sleep;
+                       }
+               }
        }
 
        /*
index 47fe7f96a242794caf0150f367ae8300bd1d2c48..287023ef4a782f33375d337ae97ba21671b6327d 100644 (file)
@@ -81,13 +81,6 @@ static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr,
 
        sdio_claim_host(func);
 
-       if (unlikely(dump)) {
-               printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr);
-               print_hex_dump(KERN_DEBUG, "wlcore_sdio: READ ",
-                               DUMP_PREFIX_OFFSET, 16, 1,
-                               buf, len, false);
-       }
-
        if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
                ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
                dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
@@ -107,6 +100,13 @@ static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr,
        if (WARN_ON(ret))
                dev_err(child->parent, "sdio read failed (%d)\n", ret);
 
+       if (unlikely(dump)) {
+               printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr);
+               print_hex_dump(KERN_DEBUG, "wlcore_sdio: READ ",
+                              DUMP_PREFIX_OFFSET, 16, 1,
+                              buf, len, false);
+       }
+
        return ret;
 }
 
index 50fa1692d98516acc8257f7480c4d9ddc9f443ea..a2d326760a7274d24dae82b288513132615cd7d4 100644 (file)
@@ -104,7 +104,7 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
        work_done = xenvif_tx_action(queue, budget);
 
        if (work_done < budget) {
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
                xenvif_napi_schedule_or_enable_events(queue);
        }
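
Both Xen network drivers move from napi_complete() to napi_complete_done(), reporting how much of the budget was actually consumed so the core can drive interrupt moderation and busy-poll decisions. The generic poll shape:

    #include <linux/netdevice.h>

    static int example_poll(struct napi_struct *napi, int budget)
    {
    	int work_done = 0;

    	/* ... process up to budget RX packets, counting them ... */

    	if (work_done < budget) {
    		/* Pass the real count, not just "done": the core feeds
    		 * it to adaptive coalescing and busy polling. */
    		napi_complete_done(napi, work_done);
    		/* re-enable the device's RX interrupt here */
    	}
    	return work_done;
    }
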
 
index 1e4125a98291245f5e806a79247a92ca1418092b..d23703d9724998c7d3e724c489cca3a491910b6b 100644 (file)
@@ -1059,7 +1059,7 @@ err:
        if (work_done < budget) {
                int more_to_do = 0;
 
-               napi_complete(napi);
+               napi_complete_done(napi, work_done);
 
                RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
                if (more_to_do)
@@ -1081,8 +1081,8 @@ static int xennet_change_mtu(struct net_device *dev, int mtu)
        return 0;
 }
 
-static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
-                                                   struct rtnl_link_stats64 *tot)
+static void xennet_get_stats64(struct net_device *dev,
+                              struct rtnl_link_stats64 *tot)
 {
        struct netfront_info *np = netdev_priv(dev);
        int cpu;
@@ -1113,8 +1113,6 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
 
        tot->rx_errors  = dev->stats.rx_errors;
        tot->tx_dropped = dev->stats.tx_dropped;
-
-       return tot;
 }
 
 static void xennet_release_tx_bufs(struct netfront_queue *queue)
index 1ccce1cd6aca45d183fb65841c8893cbe4fcb029..63d8e18fb6b143c512f92c4c0a1817bb884bcb04 100644 (file)
@@ -1432,6 +1432,11 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
        return 0;
 }
 
+static int pci_bus_num_vf(struct device *dev)
+{
+       return pci_num_vf(to_pci_dev(dev));
+}
+
 struct bus_type pci_bus_type = {
        .name           = "pci",
        .match          = pci_bus_match,
@@ -1443,6 +1448,7 @@ struct bus_type pci_bus_type = {
        .bus_groups     = pci_bus_groups,
        .drv_groups     = pci_drv_groups,
        .pm             = PCI_PM_OPS_PTR,
+       .num_vf         = pci_bus_num_vf,
 };
 EXPORT_SYMBOL(pci_bus_type);
 
index 9c13381b69662c499d0e9a679ce3eeb71c3c6d3d..e8142803a1a74454ead1e6549a0a35b79adfca97 100644 (file)
@@ -221,18 +221,17 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
        mutex_init(&ptp->pincfg_mux);
        init_waitqueue_head(&ptp->tsev_wq);
 
+       err = ptp_populate_pin_groups(ptp);
+       if (err)
+               goto no_pin_groups;
+
        /* Create a new device in our class. */
-       ptp->dev = device_create(ptp_class, parent, ptp->devid, ptp,
-                                "ptp%d", ptp->index);
+       ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
+                                            ptp, ptp->pin_attr_groups,
+                                            "ptp%d", ptp->index);
        if (IS_ERR(ptp->dev))
                goto no_device;
 
-       dev_set_drvdata(ptp->dev, ptp);
-
-       err = ptp_populate_sysfs(ptp);
-       if (err)
-               goto no_sysfs;
-
        /* Register a new PPS source. */
        if (info->pps) {
                struct pps_source_info pps;
@@ -260,10 +259,10 @@ no_clock:
        if (ptp->pps_source)
                pps_unregister_source(ptp->pps_source);
 no_pps:
-       ptp_cleanup_sysfs(ptp);
-no_sysfs:
        device_destroy(ptp_class, ptp->devid);
 no_device:
+       ptp_cleanup_pin_groups(ptp);
+no_pin_groups:
        mutex_destroy(&ptp->tsevq_mux);
        mutex_destroy(&ptp->pincfg_mux);
        ida_simple_remove(&ptp_clocks_map, index);
@@ -282,8 +281,9 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
        /* Release the clock's resources. */
        if (ptp->pps_source)
                pps_unregister_source(ptp->pps_source);
-       ptp_cleanup_sysfs(ptp);
+
        device_destroy(ptp_class, ptp->devid);
+       ptp_cleanup_pin_groups(ptp);
 
        posix_clock_unregister(&ptp->clock);
        return 0;
index 9c5d41421b6510f55298b70020f33578e82e97e7..d95888974d0c67f1e4cf4d3c2229ba4e901a2d87 100644 (file)
@@ -54,6 +54,8 @@ struct ptp_clock {
        struct device_attribute *pin_dev_attr;
        struct attribute **pin_attr;
        struct attribute_group pin_attr_group;
+       /* 1st entry is a pointer to the real group, 2nd is NULL terminator */
+       const struct attribute_group *pin_attr_groups[2];
 };
 
 /*
@@ -94,8 +96,7 @@ uint ptp_poll(struct posix_clock *pc,
 
 extern const struct attribute_group *ptp_groups[];
 
-int ptp_cleanup_sysfs(struct ptp_clock *ptp);
-
-int ptp_populate_sysfs(struct ptp_clock *ptp);
+int ptp_populate_pin_groups(struct ptp_clock *ptp);
+void ptp_cleanup_pin_groups(struct ptp_clock *ptp);
 
 #endif
index 53d43954a9740a5520229a1e8798516805a73c19..48401dfcd999a7f9ff31f68c7846964f9bc42937 100644 (file)
@@ -46,27 +46,6 @@ PTP_SHOW_INT(n_periodic_outputs, n_per_out);
 PTP_SHOW_INT(n_programmable_pins, n_pins);
 PTP_SHOW_INT(pps_available, pps);
 
-static struct attribute *ptp_attrs[] = {
-       &dev_attr_clock_name.attr,
-       &dev_attr_max_adjustment.attr,
-       &dev_attr_n_alarms.attr,
-       &dev_attr_n_external_timestamps.attr,
-       &dev_attr_n_periodic_outputs.attr,
-       &dev_attr_n_programmable_pins.attr,
-       &dev_attr_pps_available.attr,
-       NULL,
-};
-
-static const struct attribute_group ptp_group = {
-       .attrs = ptp_attrs,
-};
-
-const struct attribute_group *ptp_groups[] = {
-       &ptp_group,
-       NULL,
-};
-
-
 static ssize_t extts_enable_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
@@ -91,6 +70,7 @@ static ssize_t extts_enable_store(struct device *dev,
 out:
        return err;
 }
+static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store);
 
 static ssize_t extts_fifo_show(struct device *dev,
                               struct device_attribute *attr, char *page)
@@ -124,6 +104,7 @@ out:
        mutex_unlock(&ptp->tsevq_mux);
        return cnt;
 }
+static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL);
 
 static ssize_t period_store(struct device *dev,
                            struct device_attribute *attr,
@@ -151,6 +132,7 @@ static ssize_t period_store(struct device *dev,
 out:
        return err;
 }
+static DEVICE_ATTR(period, 0220, NULL, period_store);
 
 static ssize_t pps_enable_store(struct device *dev,
                                struct device_attribute *attr,
@@ -177,6 +159,57 @@ static ssize_t pps_enable_store(struct device *dev,
 out:
        return err;
 }
+static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store);
+
+static struct attribute *ptp_attrs[] = {
+       &dev_attr_clock_name.attr,
+
+       &dev_attr_max_adjustment.attr,
+       &dev_attr_n_alarms.attr,
+       &dev_attr_n_external_timestamps.attr,
+       &dev_attr_n_periodic_outputs.attr,
+       &dev_attr_n_programmable_pins.attr,
+       &dev_attr_pps_available.attr,
+
+       &dev_attr_extts_enable.attr,
+       &dev_attr_fifo.attr,
+       &dev_attr_period.attr,
+       &dev_attr_pps_enable.attr,
+       NULL
+};
+
+static umode_t ptp_is_attribute_visible(struct kobject *kobj,
+                                       struct attribute *attr, int n)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct ptp_clock *ptp = dev_get_drvdata(dev);
+       struct ptp_clock_info *info = ptp->info;
+       umode_t mode = attr->mode;
+
+       if (attr == &dev_attr_extts_enable.attr ||
+           attr == &dev_attr_fifo.attr) {
+               if (!info->n_ext_ts)
+                       mode = 0;
+       } else if (attr == &dev_attr_period.attr) {
+               if (!info->n_per_out)
+                       mode = 0;
+       } else if (attr == &dev_attr_pps_enable.attr) {
+               if (!info->pps)
+                       mode = 0;
+       }
+
+       return mode;
+}
+
+static const struct attribute_group ptp_group = {
+       .is_visible     = ptp_is_attribute_visible,
+       .attrs          = ptp_attrs,
+};
+
+const struct attribute_group *ptp_groups[] = {
+       &ptp_group,
+       NULL
+};
 
 static int ptp_pin_name2index(struct ptp_clock *ptp, const char *name)
 {
@@ -235,47 +268,20 @@ static ssize_t ptp_pin_store(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store);
-static DEVICE_ATTR(fifo,         0444, extts_fifo_show, NULL);
-static DEVICE_ATTR(period,       0220, NULL, period_store);
-static DEVICE_ATTR(pps_enable,   0220, NULL, pps_enable_store);
-
-int ptp_cleanup_sysfs(struct ptp_clock *ptp)
+int ptp_populate_pin_groups(struct ptp_clock *ptp)
 {
-       struct device *dev = ptp->dev;
-       struct ptp_clock_info *info = ptp->info;
-
-       if (info->n_ext_ts) {
-               device_remove_file(dev, &dev_attr_extts_enable);
-               device_remove_file(dev, &dev_attr_fifo);
-       }
-       if (info->n_per_out)
-               device_remove_file(dev, &dev_attr_period);
-
-       if (info->pps)
-               device_remove_file(dev, &dev_attr_pps_enable);
-
-       if (info->n_pins) {
-               sysfs_remove_group(&dev->kobj, &ptp->pin_attr_group);
-               kfree(ptp->pin_attr);
-               kfree(ptp->pin_dev_attr);
-       }
-       return 0;
-}
-
-static int ptp_populate_pins(struct ptp_clock *ptp)
-{
-       struct device *dev = ptp->dev;
        struct ptp_clock_info *info = ptp->info;
        int err = -ENOMEM, i, n_pins = info->n_pins;
 
-       ptp->pin_dev_attr = kzalloc(n_pins * sizeof(*ptp->pin_dev_attr),
+       if (!n_pins)
+               return 0;
+
+       ptp->pin_dev_attr = kcalloc(n_pins, sizeof(*ptp->pin_dev_attr),
                                    GFP_KERNEL);
        if (!ptp->pin_dev_attr)
                goto no_dev_attr;
 
-       ptp->pin_attr = kzalloc((1 + n_pins) * sizeof(struct attribute *),
-                               GFP_KERNEL);
+       ptp->pin_attr = kcalloc(1 + n_pins, sizeof(*ptp->pin_attr), GFP_KERNEL);
        if (!ptp->pin_attr)
                goto no_pin_attr;
 
@@ -292,61 +298,18 @@ static int ptp_populate_pins(struct ptp_clock *ptp)
        ptp->pin_attr_group.name = "pins";
        ptp->pin_attr_group.attrs = ptp->pin_attr;
 
-       err = sysfs_create_group(&dev->kobj, &ptp->pin_attr_group);
-       if (err)
-               goto no_group;
+       ptp->pin_attr_groups[0] = &ptp->pin_attr_group;
+
        return 0;
 
-no_group:
-       kfree(ptp->pin_attr);
 no_pin_attr:
        kfree(ptp->pin_dev_attr);
 no_dev_attr:
        return err;
 }
 
-int ptp_populate_sysfs(struct ptp_clock *ptp)
+void ptp_cleanup_pin_groups(struct ptp_clock *ptp)
 {
-       struct device *dev = ptp->dev;
-       struct ptp_clock_info *info = ptp->info;
-       int err;
-
-       if (info->n_ext_ts) {
-               err = device_create_file(dev, &dev_attr_extts_enable);
-               if (err)
-                       goto out1;
-               err = device_create_file(dev, &dev_attr_fifo);
-               if (err)
-                       goto out2;
-       }
-       if (info->n_per_out) {
-               err = device_create_file(dev, &dev_attr_period);
-               if (err)
-                       goto out3;
-       }
-       if (info->pps) {
-               err = device_create_file(dev, &dev_attr_pps_enable);
-               if (err)
-                       goto out4;
-       }
-       if (info->n_pins) {
-               err = ptp_populate_pins(ptp);
-               if (err)
-                       goto out5;
-       }
-       return 0;
-out5:
-       if (info->pps)
-               device_remove_file(dev, &dev_attr_pps_enable);
-out4:
-       if (info->n_per_out)
-               device_remove_file(dev, &dev_attr_period);
-out3:
-       if (info->n_ext_ts)
-               device_remove_file(dev, &dev_attr_fifo);
-out2:
-       if (info->n_ext_ts)
-               device_remove_file(dev, &dev_attr_extts_enable);
-out1:
-       return err;
+       kfree(ptp->pin_attr);
+       kfree(ptp->pin_dev_attr);
 }
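
The PTP rework above replaces imperative device_create_file()/device_remove_file() calls with declarative attribute groups: an .is_visible() callback hides attributes the clock does not support instead of conditionally creating them, and device_create_with_groups() publishes everything atomically with the device. A minimal sketch of the pattern with one optional attribute (all names hypothetical):

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t feature_show(struct device *dev,
    			    struct device_attribute *attr, char *buf)
    {
    	return sprintf(buf, "1\n");
    }
    static DEVICE_ATTR_RO(feature);

    static struct attribute *example_attrs[] = {
    	&dev_attr_feature.attr,
    	NULL
    };

    /* Called per attribute at registration time: return 0 to hide it. */
    static umode_t example_attr_visible(struct kobject *kobj,
    				    struct attribute *attr, int n)
    {
    	struct device *dev = kobj_to_dev(kobj);
    	bool supported = dev_get_drvdata(dev) != NULL; /* placeholder test */

    	if (attr == &dev_attr_feature.attr && !supported)
    		return 0;
    	return attr->mode;
    }

    static const struct attribute_group example_group = {
    	.is_visible	= example_attr_visible,
    	.attrs		= example_attrs,
    };
    __ATTRIBUTE_GROUPS(example);	/* defines example_groups[] */

device_create_with_groups() then takes example_groups and registers the visible attributes together with the device node, which is what lets the error and unregister paths above collapse to plain kfree()-based cleanup.
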
index 6d4b68c483f3dffed55d4f8508564d6a2bf9bef2..e7addea8741b799066644052cba4e9a99f3a3335 100644 (file)
@@ -281,8 +281,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
 #define QETH_HIGH_WATERMARK_PACK 5
 #define QETH_WATERMARK_PACK_FUZZ 1
 
-#define QETH_IP_HEADER_SIZE 40
-
 /* large receive scatter gather copy break */
 #define QETH_RX_SG_CB (PAGE_SIZE >> 1)
 #define QETH_RX_PULL_LEN 256
@@ -674,8 +672,6 @@ struct qeth_card_info {
        int broadcast_capable;
        int unique_id;
        struct qeth_card_blkt blkt;
-       __u32 csum_mask;
-       __u32 tx_csum_mask;
        enum qeth_ipa_promisc_modes promisc_mode;
        __u32 diagass_support;
        __u32 hwtrap;
@@ -917,7 +913,6 @@ void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
 int qeth_core_hardsetup_card(struct qeth_card *);
 void qeth_print_status_message(struct qeth_card *);
 int qeth_init_qdio_queues(struct qeth_card *);
-int qeth_send_startlan(struct qeth_card *);
 int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
                  int (*reply_cb)
                  (struct qeth_card *, struct qeth_reply *, unsigned long),
index e33558313834dc5e118d478fb5595452f2f6c9ed..315d8a2db7c066a0b8eb3739021edc9fde698c19 100644 (file)
@@ -2944,7 +2944,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
 }
 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
 
-int qeth_send_startlan(struct qeth_card *card)
+static int qeth_send_startlan(struct qeth_card *card)
 {
        int rc;
        struct qeth_cmd_buffer *iob;
@@ -2957,7 +2957,6 @@ int qeth_send_startlan(struct qeth_card *card)
        rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
        return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_send_startlan);
 
 static int qeth_default_setadapterparms_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
@@ -5087,6 +5086,20 @@ retriable:
                goto out;
        }
 
+       rc = qeth_send_startlan(card);
+       if (rc) {
+               QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+               if (rc == IPA_RC_LAN_OFFLINE) {
+                       dev_warn(&card->gdev->dev,
+                               "The LAN is offline\n");
+                       card->lan_online = 0;
+               } else {
+                       rc = -ENODEV;
+                       goto out;
+               }
+       } else
+               card->lan_online = 1;
+
        card->options.ipa4.supported_funcs = 0;
        card->options.ipa6.supported_funcs = 0;
        card->options.adp.supported_funcs = 0;
@@ -5098,14 +5111,14 @@ retriable:
        if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
                rc = qeth_query_setadapterparms(card);
                if (rc < 0) {
-                       QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+                       QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
                        goto out;
                }
        }
        if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
                rc = qeth_query_setdiagass(card);
                if (rc < 0) {
-                       QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+                       QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
                        goto out;
                }
        }
@@ -5289,18 +5302,6 @@ int qeth_setassparms_cb(struct qeth_card *card,
                if (cmd->hdr.prot_version == QETH_PROT_IPV6)
                        card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
        }
-       if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
-           cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
-               card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
-               QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask);
-       }
-       if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM &&
-           cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
-               card->info.tx_csum_mask =
-                       cmd->data.setassparms.data.flags_32bit;
-               QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask);
-       }
-
        return 0;
 }
 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
@@ -6060,23 +6061,96 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
 }
 EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
 
+/* Callback to handle a checksum offload command reply from the OSA card.
+ * Verify that the required features have been enabled on the card.
+ * Any error is reported in hdr->return_code, which the caller checks.
+ *
+ * Always returns zero to indicate no further messages are expected from
+ * the OSA card.
+ */
+static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
+                                       struct qeth_reply *reply,
+                                       unsigned long data)
+{
+       struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+       struct qeth_checksum_cmd *chksum_cb =
+                               (struct qeth_checksum_cmd *)reply->param;
+
+       QETH_CARD_TEXT(card, 4, "chkdoccb");
+       if (cmd->hdr.return_code)
+               return 0;
+
+       memset(chksum_cb, 0, sizeof(*chksum_cb));
+       if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
+               chksum_cb->supported =
+                               cmd->data.setassparms.data.chksum.supported;
+               QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported);
+       }
+       if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) {
+               chksum_cb->supported =
+                               cmd->data.setassparms.data.chksum.supported;
+               chksum_cb->enabled =
+                               cmd->data.setassparms.data.chksum.enabled;
+               QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported);
+               QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled);
+       }
+       return 0;
+}
+
+/* Send command to OSA card and check results. */
+static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
+                                    enum qeth_ipa_funcs ipa_func,
+                                    __u16 cmd_code, long data,
+                                    struct qeth_checksum_cmd *chksum_cb)
+{
+       struct qeth_cmd_buffer *iob;
+       int rc = -ENOMEM;
+
+       QETH_CARD_TEXT(card, 4, "chkdocmd");
+       iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
+                                      sizeof(__u32), QETH_PROT_IPV4);
+       if (iob)
+               rc = qeth_send_setassparms(card, iob, sizeof(__u32), data,
+                                          qeth_ipa_checksum_run_cmd_cb,
+                                          chksum_cb);
+       return rc;
+}
+
 static int qeth_send_checksum_on(struct qeth_card *card, int cstype)
 {
-       long rxtx_arg;
+       const __u32 required_features = QETH_IPA_CHECKSUM_IP_HDR |
+                                       QETH_IPA_CHECKSUM_UDP |
+                                       QETH_IPA_CHECKSUM_TCP;
+       struct qeth_checksum_cmd chksum_cb;
        int rc;
 
-       rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_START, 0);
+       rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0,
+                                      &chksum_cb);
+       if (!rc) {
+               if ((required_features & chksum_cb.supported) !=
+                   required_features)
+                       rc = -EIO;
+               else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) &&
+                        cstype == IPA_INBOUND_CHECKSUM)
+                       dev_warn(&card->gdev->dev,
+                                "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
+                                QETH_CARD_IFNAME(card));
+       }
        if (rc) {
+               qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0);
                dev_warn(&card->gdev->dev,
                         "Starting HW checksumming for %s failed, using SW checksumming\n",
                         QETH_CARD_IFNAME(card));
                return rc;
        }
-       rxtx_arg = (cstype == IPA_OUTBOUND_CHECKSUM) ? card->info.tx_csum_mask
-                                                    : card->info.csum_mask;
-       rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_ENABLE,
-                                         rxtx_arg);
+       rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
+                                      chksum_cb.supported, &chksum_cb);
+       if (!rc) {
+               if ((required_features & chksum_cb.enabled) !=
+                   required_features)
+                       rc = -EIO;
+       }
        if (rc) {
+               qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0);
                dev_warn(&card->gdev->dev,
                         "Enabling HW checksumming for %s failed, using SW checksumming\n",
                         QETH_CARD_IFNAME(card));
@@ -6090,19 +6164,10 @@ static int qeth_send_checksum_on(struct qeth_card *card, int cstype)
 
 static int qeth_set_ipa_csum(struct qeth_card *card, int on, int cstype)
 {
-       int rc;
-
-       if (on) {
-               rc = qeth_send_checksum_on(card, cstype);
-               if (rc)
-                       return -EIO;
-       } else {
-               rc = qeth_send_simple_setassparms(card, cstype,
-                                                 IPA_CMD_ASS_STOP, 0);
-               if (rc)
-                       return -EIO;
-       }
-       return 0;
+       int rc = (on) ? qeth_send_checksum_on(card, cstype)
+                     : qeth_send_simple_setassparms(card, cstype,
+                                                    IPA_CMD_ASS_STOP, 0);
+       return rc ? -EIO : 0;
 }
 
 static int qeth_set_ipa_tso(struct qeth_card *card, int on)
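
The checksum hunks above turn offload setup into a verified two-step handshake: IPA_CMD_ASS_START returns the card's supported mask, which must contain the IP-header, UDP and TCP bits before IPA_CMD_ASS_ENABLE is sent with that mask; the enabled mask in the second reply is checked the same way, and either failure rolls back with IPA_CMD_ASS_STOP. The mask test reduces to a required-subset check; a minimal sketch:

    /* Sketch: every required bit must be present in the reply mask; extra
     * capability bits are acceptable. */
    #define REQ_MASK (QETH_IPA_CHECKSUM_IP_HDR | \
                      QETH_IPA_CHECKSUM_UDP | \
                      QETH_IPA_CHECKSUM_TCP)

    static int verify_csum_mask(u32 mask)
    {
            return (REQ_MASK & mask) == REQ_MASK ? 0 : -EIO;
    }
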
index 6cccc9a49edea429189337273e1a0c0378925362..bc69d0a338ad715316b5e1aa271969e07c8da799 100644 (file)
@@ -352,11 +352,28 @@ struct qeth_arp_query_info {
        char *udata;
 };
 
+/* IPA set assist segmentation bit definitions for receive and
+ * transmit checksum offloading.
+ */
+enum qeth_ipa_checksum_bits {
+       QETH_IPA_CHECKSUM_IP_HDR        = 0x0002,
+       QETH_IPA_CHECKSUM_UDP           = 0x0008,
+       QETH_IPA_CHECKSUM_TCP           = 0x0010,
+       QETH_IPA_CHECKSUM_LP2LP         = 0x0020
+};
+
+/* IPA Assist checksum offload reply layout. */
+struct qeth_checksum_cmd {
+       __u32 supported;
+       __u32 enabled;
+} __packed;
+
 /* SETASSPARMS IPA Command: */
 struct qeth_ipacmd_setassparms {
        struct qeth_ipacmd_setassparms_hdr hdr;
        union {
                __u32 flags_32bit;
+               struct qeth_checksum_cmd chksum;
                struct qeth_arp_cache_entry add_arp_entry;
                struct qeth_arp_query_data query_arp;
                __u8 ip[16];
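
The new enum and reply struct above give the raw flags_32bit word a typed interpretation: the card answers checksum-offload commands with two 32-bit masks, and __packed pins the layout to the wire format. A hypothetical decoder over those definitions:

    /* Sketch with an illustrative helper name; the bit tests use the enum
     * added above. */
    static void report_csum_caps(const struct qeth_checksum_cmd *c)
    {
            if (c->supported & QETH_IPA_CHECKSUM_LP2LP)
                    pr_debug("LP2LP checksumming supported\n");
            if ((c->enabled & QETH_IPA_CHECKSUM_TCP) &&
                (c->enabled & QETH_IPA_CHECKSUM_UDP))
                    pr_debug("TCP/UDP checksum offload enabled\n");
    }
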
index 9c921c2833f16ea29113bd6cc017d9d2ae01b72d..bea483307618996240cb90cc3382950ab8b38354 100644 (file)
@@ -27,9 +27,6 @@
 
 static int qeth_l2_set_offline(struct ccwgroup_device *);
 static int qeth_l2_stop(struct net_device *);
-static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
-static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
-                          enum qeth_ipa_cmds);
 static void qeth_l2_set_rx_mode(struct net_device *);
 static int qeth_l2_recover(void *);
 static void qeth_bridgeport_query_support(struct qeth_card *card);
@@ -165,13 +162,70 @@ static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
        return rc;
 }
 
+static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
+                          enum qeth_ipa_cmds ipacmd)
+{
+       struct qeth_ipa_cmd *cmd;
+       struct qeth_cmd_buffer *iob;
+
+       QETH_CARD_TEXT(card, 2, "L2sdmac");
+       iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+       if (!iob)
+               return -ENOMEM;
+       cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+       cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
+       memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
+       return qeth_setdel_makerc(card, qeth_send_ipa_cmd(card, iob,
+                                       NULL, NULL));
+}
+
+static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
+{
+       int rc;
+
+       QETH_CARD_TEXT(card, 2, "L2Setmac");
+       rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
+       if (rc == 0) {
+               card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+               memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+               dev_info(&card->gdev->dev,
+                       "MAC address %pM successfully registered on device %s\n",
+                       card->dev->dev_addr, card->dev->name);
+       } else {
+               card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+               switch (rc) {
+               case -EEXIST:
+                       dev_warn(&card->gdev->dev,
+                               "MAC address %pM already exists\n", mac);
+                       break;
+               case -EPERM:
+                       dev_warn(&card->gdev->dev,
+                               "MAC address %pM is not authorized\n", mac);
+                       break;
+               }
+       }
+       return rc;
+}
+
+static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
+{
+       int rc;
+
+       QETH_CARD_TEXT(card, 2, "L2Delmac");
+       if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
+               return 0;
+       rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC);
+       if (rc == 0)
+               card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+       return rc;
+}
+
 static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
 {
        int rc;
 
        QETH_CARD_TEXT(card, 2, "L2Sgmac");
-       rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
-                                       IPA_CMD_SETGMAC));
+       rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC);
        if (rc == -EEXIST)
                QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
                        mac, QETH_CARD_IFNAME(card));
@@ -186,8 +240,7 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
        int rc;
 
        QETH_CARD_TEXT(card, 2, "L2Dgmac");
-       rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
-                                       IPA_CMD_DELGMAC));
+       rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC);
        if (rc)
                QETH_DBF_MESSAGE(2,
                        "Could not delete group MAC %pM on %s: %d\n",
@@ -195,28 +248,27 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
        return rc;
 }
 
-static inline u32 qeth_l2_mac_hash(const u8 *addr)
+static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac)
 {
-       return get_unaligned((u32 *)(&addr[2]));
+       if (mac->is_uc) {
+               return qeth_l2_send_setdelmac(card, mac->mac_addr,
+                                               IPA_CMD_SETVMAC);
+       } else {
+               return qeth_l2_send_setgroupmac(card, mac->mac_addr);
+       }
 }
 
-static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac)
+static int qeth_l2_remove_mac(struct qeth_card *card, struct qeth_mac *mac)
 {
-
-       int rc;
-
        if (mac->is_uc) {
-               rc = qeth_setdel_makerc(card,
-                               qeth_l2_send_setdelmac(card, mac->mac_addr,
-                                               IPA_CMD_SETVMAC));
+               return qeth_l2_send_setdelmac(card, mac->mac_addr,
+                                               IPA_CMD_DELVMAC);
        } else {
-               rc = qeth_setdel_makerc(card,
-                               qeth_l2_send_setgroupmac(card, mac->mac_addr));
+               return qeth_l2_send_delgroupmac(card, mac->mac_addr);
        }
-       return rc;
 }
 
-static void qeth_l2_del_all_macs(struct qeth_card *card, int del)
+static void qeth_l2_del_all_macs(struct qeth_card *card)
 {
        struct qeth_mac *mac;
        struct hlist_node *tmp;
@@ -224,19 +276,17 @@ static void qeth_l2_del_all_macs(struct qeth_card *card, int del)
 
        spin_lock_bh(&card->mclock);
        hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
-               if (del) {
-                       if (mac->is_uc)
-                               qeth_l2_send_setdelmac(card, mac->mac_addr,
-                                               IPA_CMD_DELVMAC);
-                       else
-                               qeth_l2_send_delgroupmac(card, mac->mac_addr);
-               }
                hash_del(&mac->hnode);
                kfree(mac);
        }
        spin_unlock_bh(&card->mclock);
 }
 
+static inline u32 qeth_l2_mac_hash(const u8 *addr)
+{
+       return get_unaligned((u32 *)(&addr[2]));
+}
+
 static inline int qeth_l2_get_cast_type(struct qeth_card *card,
                        struct sk_buff *skb)
 {
@@ -425,7 +475,7 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
                card->state = CARD_STATE_SOFTSETUP;
        }
        if (card->state == CARD_STATE_SOFTSETUP) {
-               qeth_l2_del_all_macs(card, 0);
+               qeth_l2_del_all_macs(card);
                qeth_clear_ipacmd_list(card);
                card->state = CARD_STATE_HARDSETUP;
        }
@@ -577,65 +627,6 @@ out:
        return work_done;
 }
 
-static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
-                          enum qeth_ipa_cmds ipacmd)
-{
-       struct qeth_ipa_cmd *cmd;
-       struct qeth_cmd_buffer *iob;
-
-       QETH_CARD_TEXT(card, 2, "L2sdmac");
-       iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
-       if (!iob)
-               return -ENOMEM;
-       cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-       cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
-       memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
-       return qeth_send_ipa_cmd(card, iob, NULL, NULL);
-}
-
-static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
-{
-       int rc;
-
-       QETH_CARD_TEXT(card, 2, "L2Setmac");
-       rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
-                                       IPA_CMD_SETVMAC));
-       if (rc == 0) {
-               card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-               memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
-               dev_info(&card->gdev->dev,
-                       "MAC address %pM successfully registered on device %s\n",
-                       card->dev->dev_addr, card->dev->name);
-       } else {
-               card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-               switch (rc) {
-               case -EEXIST:
-                       dev_warn(&card->gdev->dev,
-                               "MAC address %pM already exists\n", mac);
-                       break;
-               case -EPERM:
-                       dev_warn(&card->gdev->dev,
-                               "MAC address %pM is not authorized\n", mac);
-                       break;
-               }
-       }
-       return rc;
-}
-
-static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
-{
-       int rc;
-
-       QETH_CARD_TEXT(card, 2, "L2Delmac");
-       if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
-               return 0;
-       rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
-                                       IPA_CMD_DELVMAC));
-       if (rc == 0)
-               card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-       return rc;
-}
-
 static int qeth_l2_request_initial_mac(struct qeth_card *card)
 {
        int rc = 0;
@@ -794,14 +785,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
 
        hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
                if (mac->disp_flag == QETH_DISP_ADDR_DELETE) {
-                       if (!mac->is_uc)
-                               rc = qeth_l2_send_delgroupmac(card,
-                                               mac->mac_addr);
-                       else {
-                               rc = qeth_l2_send_setdelmac(card, mac->mac_addr,
-                                               IPA_CMD_DELVMAC);
-                       }
-
+                       qeth_l2_remove_mac(card, mac);
                        hash_del(&mac->hnode);
                        kfree(mac);
 
@@ -1193,21 +1177,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        /* softsetup */
        QETH_DBF_TEXT(SETUP, 2, "softsetp");
 
-       rc = qeth_send_startlan(card);
-       if (rc) {
-               QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-               if (rc == 0xe080) {
-                       dev_warn(&card->gdev->dev,
-                               "The LAN is offline\n");
-                       card->lan_online = 0;
-                       goto contin;
-               }
-               rc = -ENODEV;
-               goto out_remove;
-       } else
-               card->lan_online = 1;
-
-contin:
        if ((card->info.type == QETH_CARD_TYPE_OSD) ||
            (card->info.type == QETH_CARD_TYPE_OSX)) {
                rc = qeth_l2_start_ipassists(card);
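
The qeth_l2 hunks above move the setdelmac helpers ahead of their first users (dropping the forward declarations), fold the qeth_setdel_makerc() translation into qeth_l2_send_setdelmac() itself, and route unicast/multicast add and remove through the qeth_l2_write_mac()/qeth_l2_remove_mac() pair. Translating the hardware code once at the lowest level means every caller sees plain errno values; a sketch of the pattern, with hypothetical names and purely illustrative return codes:

    /* Sketch only: hw_submit() and the numeric codes are stand-ins, not
     * the real IPA interface. */
    static int hw_submit(struct qeth_card *card, const u8 *mac, int op);

    static int hw_to_errno(int hw_rc)
    {
            switch (hw_rc) {
            case 0:
                    return 0;
            case 1:                 /* illustrative: address already set */
                    return -EEXIST;
            case 2:                 /* illustrative: not authorized */
                    return -EPERM;
            default:
                    return -EIO;
            }
    }

    static int send_setdelmac(struct qeth_card *card, const u8 *mac, int op)
    {
            return hw_to_errno(hw_submit(card, mac, op));
    }
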
index ac37d050e765bad4e96a72336c41a9ae18236313..06d0addcc058dcccd4333a8f3edcdd96fccb2de4 100644 (file)
@@ -3227,21 +3227,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        /* softsetup */
        QETH_DBF_TEXT(SETUP, 2, "softsetp");
 
-       rc = qeth_send_startlan(card);
-       if (rc) {
-               QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
-               if (rc == 0xe080) {
-                       dev_warn(&card->gdev->dev,
-                               "The LAN is offline\n");
-                       card->lan_online = 0;
-                       goto contin;
-               }
-               rc = -ENODEV;
-               goto out_remove;
-       } else
-               card->lan_online = 1;
-
-contin:
        rc = qeth_l3_setadapter_parms(card);
        if (rc)
                QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
index 0e00a5ce0f000f292ffe77584b031b97ba12f553..05e9471e3d3fc0052e7866786486a8590f7b1129 100644 (file)
@@ -250,9 +250,6 @@ static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
        if (card->info.type != QETH_CARD_TYPE_IQD)
                return -EPERM;
 
-       if (card->state == CARD_STATE_DOWN)
-               return -EPERM;
-
        memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid));
        EBCASC(tmp_hsuid, 8);
        return sprintf(buf, "%s\n", tmp_hsuid);
@@ -692,15 +689,15 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
                        enum qeth_prot_versions proto)
 {
        struct qeth_ipaddr *ipaddr;
-       struct hlist_node  *tmp;
        char addr_str[40];
+       int str_len = 0;
        int entry_len; /* length of 1 entry string, differs between v4 and v6 */
-       int i = 0;
+       int i;
 
        entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
        entry_len += 2; /* \n + terminator */
        spin_lock_bh(&card->ip_lock);
-       hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) {
+       hash_for_each(card->ip_htable, i, ipaddr, hnode) {
                if (ipaddr->proto != proto)
                        continue;
                if (ipaddr->type != QETH_IP_TYPE_VIPA)
@@ -708,16 +705,17 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
                /* String must not be longer than PAGE_SIZE. So we check if
                 * string length gets near PAGE_SIZE. Then we can safely display
                 * the next IPv6 address (worst case, compared to IPv4) */
-               if ((PAGE_SIZE - i) <= entry_len)
+               if ((PAGE_SIZE - str_len) <= entry_len)
                        break;
                qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
                        addr_str);
-               i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
+               str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
+                                   addr_str);
        }
        spin_unlock_bh(&card->ip_lock);
-       i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+       str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
 
-       return i;
+       return str_len;
 }
 
 static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
@@ -854,15 +852,15 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
                       enum qeth_prot_versions proto)
 {
        struct qeth_ipaddr *ipaddr;
-       struct hlist_node *tmp;
        char addr_str[40];
+       int str_len = 0;
        int entry_len; /* length of 1 entry string, differs between v4 and v6 */
-       int i = 0;
+       int i;
 
        entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
        entry_len += 2; /* \n + terminator */
        spin_lock_bh(&card->ip_lock);
-       hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) {
+       hash_for_each(card->ip_htable, i, ipaddr, hnode) {
                if (ipaddr->proto != proto)
                        continue;
                if (ipaddr->type != QETH_IP_TYPE_RXIP)
@@ -870,16 +868,17 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
                /* String must not be longer than PAGE_SIZE. So we check if
                 * string length gets near PAGE_SIZE. Then we can safely display
                 * the next IPv6 address (worst case, compared to IPv4) */
-               if ((PAGE_SIZE - i) <= entry_len)
+               if ((PAGE_SIZE - str_len) <= entry_len)
                        break;
                qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
                        addr_str);
-               i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
+               str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
+                                   addr_str);
        }
        spin_unlock_bh(&card->ip_lock);
-       i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+       str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
 
-       return i;
+       return str_len;
 }
 
 static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
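
The two sysfs-show fixes above untangle a variable that was doing double duty: hash_for_each() uses its integer argument as the bucket cursor and advances it itself, so reusing i as the snprintf() offset corrupted the output position whenever the walk moved to a new bucket. A dedicated str_len separates the two roles, and hash_for_each_safe() drops to plain hash_for_each() since nothing is deleted during the walk. The corrected shape in miniature:

    /* Sketch: 'bkt' belongs to the iterator, 'len' to the formatter;
     * 'table' and struct entry are illustrative. */
    int bkt;
    int len = 0;
    struct entry *e;

    hash_for_each(table, bkt, e, node) {
            len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n", e->name);
    }
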
index c639d5a02656abf9678f1ef358c4166e5db04a66..4b9e7761a97ba63505d0065f3ef7d7ad588ced33 100644 (file)
@@ -2282,7 +2282,7 @@ static int _bnx2fc_create(struct net_device *netdev,
        }
 
        /* obtain physical netdev */
-       if (netdev->priv_flags & IFF_802_1Q_VLAN)
+       if (is_vlan_dev(netdev))
                phys_dev = vlan_dev_real_dev(netdev);
 
        /* verify if the physical device is a netxtreme2 device */
@@ -2320,7 +2320,7 @@ static int _bnx2fc_create(struct net_device *netdev,
                goto ifput_err;
        }
 
-       if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+       if (is_vlan_dev(netdev)) {
                vlan_id = vlan_dev_vlan_id(netdev);
                interface->vlan_enabled = 1;
        }
@@ -2538,7 +2538,7 @@ static bool bnx2fc_match(struct net_device *netdev)
        struct net_device *phys_dev = netdev;
 
        mutex_lock(&bnx2fc_dev_lock);
-       if (netdev->priv_flags & IFF_802_1Q_VLAN)
+       if (is_vlan_dev(netdev))
                phys_dev = vlan_dev_real_dev(netdev);
 
        if (bnx2fc_hba_lookup(phys_dev)) {
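
The bnx2fc hunks above, and the cxgbi and fcoe hunks that follow, replace open-coded priv_flags & IFF_802_1Q_VLAN tests with the is_vlan_dev() helper from <linux/if_vlan.h>, which pairs naturally with vlan_dev_real_dev():

    #include <linux/if_vlan.h>

    /* Sketch: resolve the physical device behind an optional VLAN layer. */
    static struct net_device *phys_device(struct net_device *ndev)
    {
            return is_vlan_dev(ndev) ? vlan_dev_real_dev(ndev) : ndev;
    }
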
index 9167bcd9fffe9b3a5fea9005671239a96551aace..bd7d39ecbd2470246a58a8ed3a3fd11367df814f 100644 (file)
@@ -223,7 +223,7 @@ struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
        struct cxgbi_device *cdev, *tmp;
        int i;
 
-       if (ndev->priv_flags & IFF_802_1Q_VLAN) {
+       if (is_vlan_dev(ndev)) {
                vdev = ndev;
                ndev = vlan_dev_real_dev(ndev);
                log_debug(1 << CXGBI_DBG_DEV,
@@ -256,7 +256,7 @@ struct cxgbi_device *cxgbi_device_find_by_netdev_rcu(struct net_device *ndev,
        struct cxgbi_device *cdev;
        int i;
 
-       if (ndev->priv_flags & IFF_802_1Q_VLAN) {
+       if (is_vlan_dev(ndev)) {
                vdev = ndev;
                ndev = vlan_dev_real_dev(ndev);
                pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
@@ -290,7 +290,7 @@ static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
        struct cxgbi_device *cdev, *tmp;
        int i;
 
-       if (ndev->priv_flags & IFF_802_1Q_VLAN) {
+       if (is_vlan_dev(ndev)) {
                vdev = ndev;
                ndev = vlan_dev_real_dev(ndev);
                pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
index 59150cad03532e1e1ee15ad0d8fcdf35229d07c3..79160ffae483ccea874038784b4516c1a2eab281 100644 (file)
@@ -326,8 +326,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
 
        /* look for SAN MAC address, if multiple SAN MACs exist, only
         * use the first one for SPMA */
-       real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
-               vlan_dev_real_dev(netdev) : netdev;
+       real_dev = is_vlan_dev(netdev) ? vlan_dev_real_dev(netdev) : netdev;
        fcoe->realdev = real_dev;
        rcu_read_lock();
        for_each_dev_addr(real_dev, ha) {
@@ -730,7 +729,7 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
        ctlr = fcoe_to_ctlr(fcoe);
 
        /* Figure out the VLAN ID, if any */
-       if (netdev->priv_flags & IFF_802_1Q_VLAN)
+       if (is_vlan_dev(netdev))
                lport->vlan = vlan_dev_vlan_id(netdev);
        else
                lport->vlan = 0;
@@ -959,13 +958,13 @@ static inline int fcoe_em_config(struct fc_lport *lport)
         * Reuse existing offload em instance in case
         * it is already allocated on real eth device
         */
-       if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+       if (is_vlan_dev(fcoe->netdev))
                cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
        else
                cur_real_dev = fcoe->netdev;
 
        list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
-               if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+               if (is_vlan_dev(oldfcoe->netdev))
                        old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
                else
                        old_real_dev = oldfcoe->netdev;
@@ -1563,7 +1562,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
        skb->protocol = htons(ETH_P_FCOE);
        skb->priority = fcoe->priority;
 
-       if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
+       if (is_vlan_dev(fcoe->netdev) &&
            fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) {
                /* must set skb->dev before calling vlan_put_tag */
                skb->dev = fcoe->realdev;
@@ -1794,7 +1793,7 @@ fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
        struct net_device *real_dev;
 
        list_for_each_entry(fcoe, &fcoe_hostlist, list) {
-               if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+               if (is_vlan_dev(fcoe->netdev))
                        real_dev = vlan_dev_real_dev(fcoe->netdev);
                else
                        real_dev = fcoe->netdev;
index 1fbd495e5e635a66dbaa5b86baa3cadacfa33762..c7652c35be1950a1d015d7e5372e6d2efaa8363a 100644 (file)
@@ -461,7 +461,6 @@ void hostif_data_indication(struct ks_wlan_private *priv)
                        skb->protocol = eth_type_trans(skb, skb->dev);
                        priv->nstats.rx_packets++;
                        priv->nstats.rx_bytes += rx_ind_size;
-                       skb->dev->last_rx = jiffies;
                        netif_rx(skb);
                } else {
                        priv->nstats.rx_dropped++;
@@ -494,7 +493,6 @@ void hostif_data_indication(struct ks_wlan_private *priv)
                        skb->protocol = eth_type_trans(skb, skb->dev);
                        priv->nstats.rx_packets++;
                        priv->nstats.rx_bytes += rx_ind_size;
-                       skb->dev->last_rx = jiffies;
                        netif_rx(skb);
                } else {
                        priv->nstats.rx_dropped++;
index fb0928a4fb97ed9053114a7e90d876d4f5710dae..781ef623233e769eb70a649a33049a2c00cb86cc 100644 (file)
@@ -155,7 +155,6 @@ static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, int code,
                skb_reserve(skb, BYTE_OFFSET);
                skb_put(skb, length);
                skb->protocol = eth_type_trans(skb, skb->dev);
-               skb->dev->last_rx = jiffies;
                netif_rx(skb);
                /* Fill rx ring */
                skb_data = xlr_alloc_skb();
@@ -397,14 +396,6 @@ static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
                        TX_DROP_FRAME_COUNTER);
 }
 
-static struct rtnl_link_stats64 *xlr_get_stats64(struct net_device *ndev,
-                                                struct rtnl_link_stats64 *stats
-                                                )
-{
-       xlr_stats(ndev, stats);
-       return stats;
-}
-
 static const struct net_device_ops xlr_netdev_ops = {
        .ndo_open = xlr_net_open,
        .ndo_stop = xlr_net_stop,
@@ -412,7 +403,7 @@ static const struct net_device_ops xlr_netdev_ops = {
        .ndo_select_queue = xlr_net_select_queue,
        .ndo_set_mac_address = xlr_net_set_mac_addr,
        .ndo_set_rx_mode = xlr_set_rx_mode,
-       .ndo_get_stats64 = xlr_get_stats64,
+       .ndo_get_stats64 = xlr_stats,
 };
 
 /*
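
The xlr hunk above exploits the conversion of ndo_get_stats64 to a void return: the wrapper that only forwarded to xlr_stats() and returned the stats pointer is dead weight, so xlr_stats() is wired into net_device_ops directly. A conforming implementation under the new signature simply fills the caller's structure:

    /* Sketch of the void-returning ndo_get_stats64 shape. */
    static void demo_get_stats64(struct net_device *ndev,
                                 struct rtnl_link_stats64 *stats)
    {
            /* copy counters from device-private state; values illustrative */
            stats->rx_packets = 0;
            stats->tx_packets = 0;
    }
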
index f0900d1c4d7b5fe63a04a917b60b37d0dba923b5..fc849d4a1b5d201cbedb7d718fa551990367b6f6 100644 (file)
@@ -429,7 +429,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
 
        if (rx_count < budget) {
                /* No more work */
-               napi_complete(napi);
+               napi_complete_done(napi, rx_count);
                enable_irq(rx_group->irq);
        }
        return rx_count;
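
napi_complete_done(napi, work_done), used above and again in the visornic hunk further down, tells the core how much work the poll actually did, which feeds the GRO flush-timeout and busy-polling heuristics; napi_complete() is the zero-information form. The canonical poll shape:

    /* Sketch: demo_rx() is a hypothetical per-driver receive routine. */
    static int demo_poll(struct napi_struct *napi, int budget)
    {
            int work = demo_rx(napi, budget);

            if (work < budget)
                    napi_complete_done(napi, work); /* may re-enable IRQs */
            return work;
    }
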
index 6b4c20872323136ea5a69f6cc8eb98b9361edb8d..0b805320509103a3dd4669e9c16033c1ccf8f503 100644 (file)
@@ -23,6 +23,7 @@
 #endif /* CONFIG_XFRM */
 
 #include <linux/atomic.h>
+#include <net/sch_generic.h>
 
 #include <asm/octeon/octeon.h>
 
@@ -369,9 +370,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 
 #ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
-#ifdef CONFIG_NET_CLS_ACT
-       skb->tc_verd = 0;
-#endif /* CONFIG_NET_CLS_ACT */
+       skb_reset_tc(skb);
 #endif /* CONFIG_NET_SCHED */
 #endif /* REUSE_SKBUFFS_WITHOUT_FREE */
 
index e5ba7d1a809fdb30922bb1649e58eb4f426bf45f..43a77745e6fbe51a170da4f7cc921a9d44ea9f07 100644 (file)
@@ -1375,7 +1375,6 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
                ieee->LinkDetectInfo.NumRecvDataInPeriod++;
                ieee->LinkDetectInfo.NumRxOkInPeriod++;
        }
-       dev->last_rx = jiffies;
 
        /* Data frame - extract src/dst addresses */
        rtllib_rx_extract_addr(ieee, hdr, dst, src, bssid);
index 82f654305414cb9ede533b5459842a0ee7e8a5a7..b1f2fdfcb718560865874e8bbdb0d948df5b474a 100644 (file)
@@ -1103,11 +1103,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
                stats = hostap_get_stats(dev);
                from_assoc_ap = 1;
        }
-#endif
-
-       dev->last_rx = jiffies;
 
-#ifdef NOT_YET
        if ((ieee->iw_mode == IW_MODE_MASTER ||
             ieee->iw_mode == IW_MODE_REPEAT) &&
            !from_assoc_ap) {
index c1f674f5268c2421c7f925d45f150a139264980f..ca3743d273e0862e45324d155a8cc3960e3f036d 100644 (file)
@@ -1657,7 +1657,7 @@ static int visornic_poll(struct napi_struct *napi, int budget)
 
        /* If there aren't any more packets to receive stop the poll */
        if (rx_count < budget)
-               napi_complete(napi);
+               napi_complete_done(napi, rx_count);
 
        return rx_count;
 }
index 4fe037aeef12975e56919e0cc01e4e472f4fe415..6134eba5cad4693997faf8d2d77947ff7e8b0713 100644 (file)
@@ -3409,7 +3409,6 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
                        &usbin->rxfrm.desc.frame_control, hdrlen);
 
                skb->dev = wlandev->netdev;
-               skb->dev->last_rx = jiffies;
 
                /* And set the frame length properly */
                skb_trim(skb, data_len + hdrlen);
index 73fcf07254fefd8c25d37295bf3d2375d9c978af..53dbbd69e552582f84257742bc825e0078311191 100644 (file)
@@ -252,7 +252,6 @@ static int p80211_convert_to_ether(struct wlandevice *wlandev,
        }
 
        if (skb_p80211_to_ether(wlandev, wlandev->ethconv, skb) == 0) {
-               skb->dev->last_rx = jiffies;
                wlandev->netdev->stats.rx_packets++;
                wlandev->netdev->stats.rx_bytes += skb->len;
                netif_rx_ni(skb);
@@ -287,7 +286,6 @@ static void p80211netdev_rx_bh(unsigned long arg)
                                skb->ip_summed = CHECKSUM_NONE;
                                skb->pkt_type = PACKET_OTHERHOST;
                                skb->protocol = htons(ETH_P_80211_RAW);
-                               dev->last_rx = jiffies;
 
                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += skb->len;
index 450f51deb2a2ae18137ede36d4a3e8c188fd7352..eab274d17b5cbdfdf649756ede874fb16f0764ef 100644 (file)
@@ -17,6 +17,7 @@
  ******************************************************************************/
 
 #include <crypto/hash.h>
+#include <linux/module.h>
 #include <linux/string.h>
 #include <linux/kthread.h>
 #include <linux/idr.h>
index 40764ecad9ce16724535b9867bb6a1aa7758403b..cfdecea5078f4095546adcee397213cd0f2b67b1 100644 (file)
@@ -1,6 +1,6 @@
 config VHOST_NET
        tristate "Host kernel accelerator for virtio net"
-       depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP)
+       depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP)
        select VHOST
        ---help---
          This kernel module can be loaded in host kernel to accelerate
index 5dc34653274ae3655cf2ba97ab9c8dce9e8a2632..2fe35354f20e5ea82509fc9859435029a5a2dfcc 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/if_arp.h>
 #include <linux/if_tun.h>
 #include <linux/if_macvlan.h>
+#include <linux/if_tap.h>
 #include <linux/if_vlan.h>
 
 #include <net/sock.h>
@@ -351,6 +352,15 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
        return r;
 }
 
+static bool vhost_exceeds_maxpend(struct vhost_net *net)
+{
+       struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+       struct vhost_virtqueue *vq = &nvq->vq;
+
+       return (nvq->upend_idx + vq->num - VHOST_MAX_PEND) % UIO_MAXIOV
+               == nvq->done_idx;
+}
+
 /* Expects to be always run from workqueue - which acts as
  * read-size critical section for our kind of RCU. */
 static void handle_tx(struct vhost_net *net)
@@ -394,8 +404,7 @@ static void handle_tx(struct vhost_net *net)
                /* If more outstanding DMAs, queue the work.
                 * Handle upend_idx wrap around
                 */
-               if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND)
-                             % UIO_MAXIOV == nvq->done_idx))
+               if (unlikely(vhost_exceeds_maxpend(net)))
                        break;
 
                head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
@@ -454,6 +463,16 @@ static void handle_tx(struct vhost_net *net)
                        msg.msg_control = NULL;
                        ubufs = NULL;
                }
+
+               total_len += len;
+               if (total_len < VHOST_NET_WEIGHT &&
+                   !vhost_vq_avail_empty(&net->dev, vq) &&
+                   likely(!vhost_exceeds_maxpend(net))) {
+                       msg.msg_flags |= MSG_MORE;
+               } else {
+                       msg.msg_flags &= ~MSG_MORE;
+               }
+
                /* TODO: Check specific error and bomb out unless ENOBUFS? */
                err = sock->ops->sendmsg(sock, &msg, len);
                if (unlikely(err < 0)) {
@@ -472,7 +491,6 @@ static void handle_tx(struct vhost_net *net)
                        vhost_add_used_and_signal(&net->dev, vq, head, 0);
                else
                        vhost_zerocopy_signal_used(net, vq);
-               total_len += len;
                vhost_net_tx_packet(net);
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
@@ -943,7 +961,7 @@ static struct socket *get_tap_socket(int fd)
        sock = tun_get_socket(file);
        if (!IS_ERR(sock))
                return sock;
-       sock = macvtap_get_socket(file);
+       sock = tap_get_socket(file);
        if (IS_ERR(sock))
                fput(file);
        return sock;
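
The vhost_net hunks above do three things: the zerocopy back-pressure test is factored into vhost_exceeds_maxpend(); handle_tx() now sets MSG_MORE while more descriptors are already available (and neither the pend limit nor the byte weight is hit), letting the tap socket batch transmissions, and clears it on the last packet; and the macvtap socket lookup becomes tap_get_socket() after the tap core was split out of macvtap (hence the Kconfig and if_tap.h changes). The pend test is a modular comparison between two indices chasing each other around UIO_MAXIOV slots; the generic ring-distance idiom it builds on:

    /* Sketch: in-flight entries between a producer and a consumer index
     * on a ring of 'size' slots. */
    static unsigned int ring_in_flight(unsigned int upend, unsigned int done,
                                       unsigned int size)
    {
            return (upend + size - done) % size;
    }
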
index 8f99fe08de02e7b48725a99d682055c03056b82a..4269e621e254ab7acc38c81f2aafe1775e9dbd81 100644 (file)
@@ -2239,11 +2239,15 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
        __virtio16 avail_idx;
        int r;
 
+       if (vq->avail_idx != vq->last_avail_idx)
+               return false;
+
        r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
-       if (r)
+       if (unlikely(r))
                return false;
+       vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
 
-       return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx;
+       return vq->avail_idx == vq->last_avail_idx;
 }
 EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
 
index 7062bb0975a521f1a28125c092c2f0d0bbd35787..400d70b6937948cc5a8aac698efc64fcf49c1dde 100644 (file)
@@ -100,11 +100,6 @@ static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env)
                              dev->id.device, dev->id.vendor);
 }
 
-static void add_status(struct virtio_device *dev, unsigned status)
-{
-       dev->config->set_status(dev, dev->config->get_status(dev) | status);
-}
-
 void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
                                         unsigned int fbit)
 {
@@ -145,14 +140,15 @@ void virtio_config_changed(struct virtio_device *dev)
 }
 EXPORT_SYMBOL_GPL(virtio_config_changed);
 
-static void virtio_config_disable(struct virtio_device *dev)
+void virtio_config_disable(struct virtio_device *dev)
 {
        spin_lock_irq(&dev->config_lock);
        dev->config_enabled = false;
        spin_unlock_irq(&dev->config_lock);
 }
+EXPORT_SYMBOL_GPL(virtio_config_disable);
 
-static void virtio_config_enable(struct virtio_device *dev)
+void virtio_config_enable(struct virtio_device *dev)
 {
        spin_lock_irq(&dev->config_lock);
        dev->config_enabled = true;
@@ -161,8 +157,15 @@ static void virtio_config_enable(struct virtio_device *dev)
        dev->config_change_pending = false;
        spin_unlock_irq(&dev->config_lock);
 }
+EXPORT_SYMBOL_GPL(virtio_config_enable);
+
+void virtio_add_status(struct virtio_device *dev, unsigned int status)
+{
+       dev->config->set_status(dev, dev->config->get_status(dev) | status);
+}
+EXPORT_SYMBOL_GPL(virtio_add_status);
 
-static int virtio_finalize_features(struct virtio_device *dev)
+int virtio_finalize_features(struct virtio_device *dev)
 {
        int ret = dev->config->finalize_features(dev);
        unsigned status;
@@ -173,7 +176,7 @@ static int virtio_finalize_features(struct virtio_device *dev)
        if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1))
                return 0;
 
-       add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
+       virtio_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
        status = dev->config->get_status(dev);
        if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
                dev_err(&dev->dev, "virtio: device refuses features: %x\n",
@@ -182,6 +185,7 @@ static int virtio_finalize_features(struct virtio_device *dev)
        }
        return 0;
 }
+EXPORT_SYMBOL_GPL(virtio_finalize_features);
 
 static int virtio_dev_probe(struct device *_d)
 {
@@ -193,7 +197,7 @@ static int virtio_dev_probe(struct device *_d)
        u64 driver_features_legacy;
 
        /* We have a driver! */
-       add_status(dev, VIRTIO_CONFIG_S_DRIVER);
+       virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);
 
        /* Figure out what features the device supports. */
        device_features = dev->config->get_features(dev);
@@ -247,7 +251,7 @@ static int virtio_dev_probe(struct device *_d)
 
        return 0;
 err:
-       add_status(dev, VIRTIO_CONFIG_S_FAILED);
+       virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
        return err;
 
 }
@@ -265,7 +269,7 @@ static int virtio_dev_remove(struct device *_d)
        WARN_ON_ONCE(dev->config->get_status(dev));
 
        /* Acknowledge the device's existence again. */
-       add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+       virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
        return 0;
 }
 
@@ -316,7 +320,7 @@ int register_virtio_device(struct virtio_device *dev)
        dev->config->reset(dev);
 
        /* Acknowledge that we've seen the device. */
-       add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+       virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
 
        INIT_LIST_HEAD(&dev->vqs);
 
@@ -325,7 +329,7 @@ int register_virtio_device(struct virtio_device *dev)
        err = device_register(&dev->dev);
 out:
        if (err)
-               add_status(dev, VIRTIO_CONFIG_S_FAILED);
+               virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
        return err;
 }
 EXPORT_SYMBOL_GPL(register_virtio_device);
@@ -365,18 +369,18 @@ int virtio_device_restore(struct virtio_device *dev)
        dev->config->reset(dev);
 
        /* Acknowledge that we've seen the device. */
-       add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+       virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
 
        /* Maybe driver failed before freeze.
         * Restore the failed status, for debugging. */
        if (dev->failed)
-               add_status(dev, VIRTIO_CONFIG_S_FAILED);
+               virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
 
        if (!drv)
                return 0;
 
        /* We have a driver! */
-       add_status(dev, VIRTIO_CONFIG_S_DRIVER);
+       virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER);
 
        ret = virtio_finalize_features(dev);
        if (ret)
@@ -389,14 +393,14 @@ int virtio_device_restore(struct virtio_device *dev)
        }
 
        /* Finally, tell the device we're all set */
-       add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
+       virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
 
        virtio_config_enable(dev);
 
        return 0;
 
 err:
-       add_status(dev, VIRTIO_CONFIG_S_FAILED);
+       virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED);
        return ret;
 }
 EXPORT_SYMBOL_GPL(virtio_device_restore);
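
The virtio hunks above promote the private add_status() helper to the exported virtio_add_status() and export the config enable/disable and feature-finalization helpers, so transports can drive the device status protocol themselves. The status byte is cumulative, which is why every call site ORs in a milestone rather than assigning:

    /* Sketch of the normal bring-up sequence using the exported helper. */
    virtio_add_status(vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE);  /* device seen */
    virtio_add_status(vdev, VIRTIO_CONFIG_S_DRIVER);       /* driver bound */
    /* ... virtio_finalize_features(vdev) negotiates features ... */
    virtio_add_status(vdev, VIRTIO_CONFIG_S_DRIVER_OK);    /* device live */
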
index 1e9d2f84e5b53cf3a97f7863e3260adc4f8312eb..b29447e03ede0d638950fa0dd64d908004156ea6 100644 (file)
@@ -343,7 +343,7 @@ void afs_dispatch_give_up_callbacks(struct work_struct *work)
         *   had callbacks entirely, and the server will call us later to break
         *   them
         */
-       afs_fs_give_up_callbacks(server, &afs_async_call);
+       afs_fs_give_up_callbacks(server, true);
 }
 
 /*
index d764236072b192d33a0b8eedb3821d7391991067..2edbdcbf6432add190464b5a5f414592953c944a 100644 (file)
@@ -24,65 +24,86 @@ static int afs_deliver_cb_callback(struct afs_call *);
 static int afs_deliver_cb_probe_uuid(struct afs_call *);
 static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *);
 static void afs_cm_destructor(struct afs_call *);
+static void SRXAFSCB_CallBack(struct work_struct *);
+static void SRXAFSCB_InitCallBackState(struct work_struct *);
+static void SRXAFSCB_Probe(struct work_struct *);
+static void SRXAFSCB_ProbeUuid(struct work_struct *);
+static void SRXAFSCB_TellMeAboutYourself(struct work_struct *);
+
+#define CM_NAME(name) \
+       const char afs_SRXCB##name##_name[] __tracepoint_string =       \
+               "CB." #name
 
 /*
  * CB.CallBack operation type
  */
+static CM_NAME(CallBack);
 static const struct afs_call_type afs_SRXCBCallBack = {
-       .name           = "CB.CallBack",
+       .name           = afs_SRXCBCallBack_name,
        .deliver        = afs_deliver_cb_callback,
        .abort_to_error = afs_abort_to_error,
        .destructor     = afs_cm_destructor,
+       .work           = SRXAFSCB_CallBack,
 };
 
 /*
  * CB.InitCallBackState operation type
  */
+static CM_NAME(InitCallBackState);
 static const struct afs_call_type afs_SRXCBInitCallBackState = {
-       .name           = "CB.InitCallBackState",
+       .name           = afs_SRXCBInitCallBackState_name,
        .deliver        = afs_deliver_cb_init_call_back_state,
        .abort_to_error = afs_abort_to_error,
        .destructor     = afs_cm_destructor,
+       .work           = SRXAFSCB_InitCallBackState,
 };
 
 /*
  * CB.InitCallBackState3 operation type
  */
+static CM_NAME(InitCallBackState3);
 static const struct afs_call_type afs_SRXCBInitCallBackState3 = {
-       .name           = "CB.InitCallBackState3",
+       .name           = afs_SRXCBInitCallBackState3_name,
        .deliver        = afs_deliver_cb_init_call_back_state3,
        .abort_to_error = afs_abort_to_error,
        .destructor     = afs_cm_destructor,
+       .work           = SRXAFSCB_InitCallBackState,
 };
 
 /*
  * CB.Probe operation type
  */
+static CM_NAME(Probe);
 static const struct afs_call_type afs_SRXCBProbe = {
-       .name           = "CB.Probe",
+       .name           = afs_SRXCBProbe_name,
        .deliver        = afs_deliver_cb_probe,
        .abort_to_error = afs_abort_to_error,
        .destructor     = afs_cm_destructor,
+       .work           = SRXAFSCB_Probe,
 };
 
 /*
  * CB.ProbeUuid operation type
  */
+static CM_NAME(ProbeUuid);
 static const struct afs_call_type afs_SRXCBProbeUuid = {
-       .name           = "CB.ProbeUuid",
+       .name           = afs_SRXCBProbeUuid_name,
        .deliver        = afs_deliver_cb_probe_uuid,
        .abort_to_error = afs_abort_to_error,
        .destructor     = afs_cm_destructor,
+       .work           = SRXAFSCB_ProbeUuid,
 };
 
 /*
  * CB.TellMeAboutYourself operation type
  */
+static CM_NAME(TellMeAboutYourself);
 static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
-       .name           = "CB.TellMeAboutYourself",
+       .name           = afs_SRXCBTellMeAboutYourself_name,
        .deliver        = afs_deliver_cb_tell_me_about_yourself,
        .abort_to_error = afs_abort_to_error,
        .destructor     = afs_cm_destructor,
+       .work           = SRXAFSCB_TellMeAboutYourself,
 };
 
 /*
@@ -153,6 +174,7 @@ static void SRXAFSCB_CallBack(struct work_struct *work)
        afs_send_empty_reply(call);
 
        afs_break_callbacks(call->server, call->count, call->request);
+       afs_put_call(call);
        _leave("");
 }
 
@@ -274,9 +296,7 @@ static int afs_deliver_cb_callback(struct afs_call *call)
                return -ENOTCONN;
        call->server = server;
 
-       INIT_WORK(&call->work, SRXAFSCB_CallBack);
-       queue_work(afs_wq, &call->work);
-       return 0;
+       return afs_queue_call_work(call);
 }
 
 /*
@@ -290,6 +310,7 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work)
 
        afs_init_callback_state(call->server);
        afs_send_empty_reply(call);
+       afs_put_call(call);
        _leave("");
 }
 
@@ -320,9 +341,7 @@ static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
                return -ENOTCONN;
        call->server = server;
 
-       INIT_WORK(&call->work, SRXAFSCB_InitCallBackState);
-       queue_work(afs_wq, &call->work);
-       return 0;
+       return afs_queue_call_work(call);
 }
 
 /*
@@ -332,7 +351,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
 {
        struct sockaddr_rxrpc srx;
        struct afs_server *server;
-       struct afs_uuid *r;
+       struct uuid_v1 *r;
        unsigned loop;
        __be32 *b;
        int ret;
@@ -362,15 +381,15 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
                }
 
                _debug("unmarshall UUID");
-               call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
+               call->request = kmalloc(sizeof(struct uuid_v1), GFP_KERNEL);
                if (!call->request)
                        return -ENOMEM;
 
                b = call->buffer;
                r = call->request;
-               r->time_low                     = ntohl(b[0]);
-               r->time_mid                     = ntohl(b[1]);
-               r->time_hi_and_version          = ntohl(b[2]);
+               r->time_low                     = b[0];
+               r->time_mid                     = htons(ntohl(b[1]));
+               r->time_hi_and_version          = htons(ntohl(b[2]));
                r->clock_seq_hi_and_reserved    = ntohl(b[3]);
                r->clock_seq_low                = ntohl(b[4]);
 
@@ -394,9 +413,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
                return -ENOTCONN;
        call->server = server;
 
-       INIT_WORK(&call->work, SRXAFSCB_InitCallBackState);
-       queue_work(afs_wq, &call->work);
-       return 0;
+       return afs_queue_call_work(call);
 }
 
 /*
@@ -408,6 +425,7 @@ static void SRXAFSCB_Probe(struct work_struct *work)
 
        _enter("");
        afs_send_empty_reply(call);
+       afs_put_call(call);
        _leave("");
 }
 
@@ -427,9 +445,7 @@ static int afs_deliver_cb_probe(struct afs_call *call)
        /* no unmarshalling required */
        call->state = AFS_CALL_REPLYING;
 
-       INIT_WORK(&call->work, SRXAFSCB_Probe);
-       queue_work(afs_wq, &call->work);
-       return 0;
+       return afs_queue_call_work(call);
 }
 
 /*
@@ -438,7 +454,7 @@ static int afs_deliver_cb_probe(struct afs_call *call)
 static void SRXAFSCB_ProbeUuid(struct work_struct *work)
 {
        struct afs_call *call = container_of(work, struct afs_call, work);
-       struct afs_uuid *r = call->request;
+       struct uuid_v1 *r = call->request;
 
        struct {
                __be32  match;
@@ -452,6 +468,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
                reply.match = htonl(1);
 
        afs_send_simple_reply(call, &reply, sizeof(reply));
+       afs_put_call(call);
        _leave("");
 }
 
@@ -460,7 +477,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
  */
 static int afs_deliver_cb_probe_uuid(struct afs_call *call)
 {
-       struct afs_uuid *r;
+       struct uuid_v1 *r;
        unsigned loop;
        __be32 *b;
        int ret;
@@ -486,15 +503,15 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
                }
 
                _debug("unmarshall UUID");
-               call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
+               call->request = kmalloc(sizeof(struct uuid_v1), GFP_KERNEL);
                if (!call->request)
                        return -ENOMEM;
 
                b = call->buffer;
                r = call->request;
-               r->time_low                     = ntohl(b[0]);
-               r->time_mid                     = ntohl(b[1]);
-               r->time_hi_and_version          = ntohl(b[2]);
+               r->time_low                     = b[0];
+               r->time_mid                     = htons(ntohl(b[1]));
+               r->time_hi_and_version          = htons(ntohl(b[2]));
                r->clock_seq_hi_and_reserved    = ntohl(b[3]);
                r->clock_seq_low                = ntohl(b[4]);
 
@@ -510,9 +527,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
 
        call->state = AFS_CALL_REPLYING;
 
-       INIT_WORK(&call->work, SRXAFSCB_ProbeUuid);
-       queue_work(afs_wq, &call->work);
-       return 0;
+       return afs_queue_call_work(call);
 }
 
 /*
@@ -554,9 +569,9 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work)
        memset(&reply, 0, sizeof(reply));
        reply.ia.nifs = htonl(nifs);
 
-       reply.ia.uuid[0] = htonl(afs_uuid.time_low);
-       reply.ia.uuid[1] = htonl(afs_uuid.time_mid);
-       reply.ia.uuid[2] = htonl(afs_uuid.time_hi_and_version);
+       reply.ia.uuid[0] = afs_uuid.time_low;
+       reply.ia.uuid[1] = htonl(ntohs(afs_uuid.time_mid));
+       reply.ia.uuid[2] = htonl(ntohs(afs_uuid.time_hi_and_version));
        reply.ia.uuid[3] = htonl((s8) afs_uuid.clock_seq_hi_and_reserved);
        reply.ia.uuid[4] = htonl((s8) afs_uuid.clock_seq_low);
        for (loop = 0; loop < 6; loop++)
@@ -574,7 +589,7 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work)
        reply.cap.capcount = htonl(1);
        reply.cap.caps[0] = htonl(AFS_CAP_ERROR_TRANSLATION);
        afs_send_simple_reply(call, &reply, sizeof(reply));
-
+       afs_put_call(call);
        _leave("");
 }
 
@@ -594,7 +609,5 @@ static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call)
        /* no unmarshalling required */
        call->state = AFS_CALL_REPLYING;
 
-       INIT_WORK(&call->work, SRXAFSCB_TellMeAboutYourself);
-       queue_work(afs_wq, &call->work);
-       return 0;
+       return afs_queue_call_work(call);
 }
index 6344aee4ac4bff8e768fc7c344ec7ccea3670b98..ba7b71fba34bcc4cd5f8b8a305ace06a388ac607 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/pagemap.h>
 #include <linux/writeback.h>
 #include <linux/gfp.h>
+#include <linux/task_io_accounting_ops.h>
 #include "internal.h"
 
 static int afs_readpage(struct file *file, struct page *page);
@@ -101,6 +102,21 @@ int afs_release(struct inode *inode, struct file *file)
        return 0;
 }
 
+/*
+ * Dispose of a ref to a read record.
+ */
+void afs_put_read(struct afs_read *req)
+{
+       int i;
+
+       if (atomic_dec_and_test(&req->usage)) {
+               for (i = 0; i < req->nr_pages; i++)
+                       if (req->pages[i])
+                               put_page(req->pages[i]);
+               kfree(req);
+       }
+}
+
 #ifdef CONFIG_AFS_FSCACHE
 /*
  * deal with notification that a page was read from the cache
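
afs_put_read() pairs an atomic usage count with a flexible array of page
pointers that is sized at allocation time (see the kzalloc() calls of
sizeof(struct afs_read) plus n page pointers elsewhere in this patch).  A
userspace sketch of the same ownership pattern, with malloc()/free()
standing in for page refcounting (illustrative names only):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct read_req {
            atomic_int usage;
            int nr_pages;
            void *pages[];          /* flexible array member */
    };

    static struct read_req *alloc_read(int n)
    {
            struct read_req *req = calloc(1, sizeof(*req) +
                                             n * sizeof(void *));
            if (req) {
                    atomic_init(&req->usage, 1);
                    req->nr_pages = n;
            }
            return req;
    }

    static void put_read(struct read_req *req)
    {
            if (atomic_fetch_sub(&req->usage, 1) == 1) {
                    for (int i = 0; i < req->nr_pages; i++)
                            free(req->pages[i]);   /* put_page() analogue */
                    free(req);
            }
    }

    int main(void)
    {
            struct read_req *req = alloc_read(2);

            if (!req)
                    return 1;
            req->pages[0] = malloc(4096);
            req->pages[1] = malloc(4096);
            put_read(req);          /* last ref: pages and record freed */
            return 0;
    }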
@@ -126,9 +142,8 @@ int afs_page_filler(void *data, struct page *page)
 {
        struct inode *inode = page->mapping->host;
        struct afs_vnode *vnode = AFS_FS_I(inode);
+       struct afs_read *req;
        struct key *key = data;
-       size_t len;
-       off_t offset;
        int ret;
 
        _enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);
@@ -164,12 +179,23 @@ int afs_page_filler(void *data, struct page *page)
                _debug("cache said ENOBUFS");
        default:
        go_on:
-               offset = page->index << PAGE_SHIFT;
-               len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);
+               req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
+                             GFP_KERNEL);
+               if (!req)
+                       goto enomem;
+
+               atomic_set(&req->usage, 1);
+               req->pos = (loff_t)page->index << PAGE_SHIFT;
+               req->len = min_t(size_t, i_size_read(inode) - req->pos,
+                                PAGE_SIZE);
+               req->nr_pages = 1;
+               req->pages[0] = page;
+               get_page(page);
 
                /* read the contents of the file from the server into the
                 * page */
-               ret = afs_vnode_fetch_data(vnode, key, offset, len, page);
+               ret = afs_vnode_fetch_data(vnode, key, req);
+               afs_put_read(req);
                if (ret < 0) {
                        if (ret == -ENOENT) {
                                _debug("got NOENT from server"
@@ -201,6 +227,8 @@ int afs_page_filler(void *data, struct page *page)
        _leave(" = 0");
        return 0;
 
+enomem:
+       ret = -ENOMEM;
 error:
        SetPageError(page);
        unlock_page(page);
@@ -234,6 +262,131 @@ static int afs_readpage(struct file *file, struct page *page)
        return ret;
 }
 
+/*
+ * Make pages available as they're filled.
+ */
+static void afs_readpages_page_done(struct afs_call *call, struct afs_read *req)
+{
+#ifdef CONFIG_AFS_FSCACHE
+       struct afs_vnode *vnode = call->reply;
+#endif
+       struct page *page = req->pages[req->index];
+
+       req->pages[req->index] = NULL;
+       SetPageUptodate(page);
+
+       /* send the page to the cache */
+#ifdef CONFIG_AFS_FSCACHE
+       if (PageFsCache(page) &&
+           fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {
+               fscache_uncache_page(vnode->cache, page);
+               BUG_ON(PageFsCache(page));
+       }
+#endif
+       unlock_page(page);
+       put_page(page);
+}
+
+/*
+ * Read a contiguous set of pages.
+ */
+static int afs_readpages_one(struct file *file, struct address_space *mapping,
+                            struct list_head *pages)
+{
+       struct afs_vnode *vnode = AFS_FS_I(mapping->host);
+       struct afs_read *req;
+       struct list_head *p;
+       struct page *first, *page;
+       struct key *key = file->private_data;
+       pgoff_t index;
+       int ret, n, i;
+
+       /* Count the number of contiguous pages at the front of the list.  Note
+        * that the list goes prev-wards rather than next-wards.
+        */
+       first = list_entry(pages->prev, struct page, lru);
+       index = first->index + 1;
+       n = 1;
+       for (p = first->lru.prev; p != pages; p = p->prev) {
+               page = list_entry(p, struct page, lru);
+               if (page->index != index)
+                       break;
+               index++;
+               n++;
+       }
+
+       req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *) * n,
+                     GFP_NOFS);
+       if (!req)
+               return -ENOMEM;
+
+       atomic_set(&req->usage, 1);
+       req->page_done = afs_readpages_page_done;
+       req->pos = first->index;
+       req->pos <<= PAGE_SHIFT;
+
+       /* Transfer the pages to the request.  We add them in until one fails
+        * to add to the LRU and then we stop (as that'll make a hole in the
+        * contiguous run.)
+        *
+        * Note that it's possible for the file size to change whilst we're
+        * doing this, but we rely on the server returning less than we asked
+        * for if the file shrank.  We also rely on this to deal with a partial
+        * page at the end of the file.
+        */
+       do {
+               page = list_entry(pages->prev, struct page, lru);
+               list_del(&page->lru);
+               index = page->index;
+               if (add_to_page_cache_lru(page, mapping, index,
+                                         readahead_gfp_mask(mapping))) {
+#ifdef CONFIG_AFS_FSCACHE
+                       fscache_uncache_page(vnode->cache, page);
+#endif
+                       put_page(page);
+                       break;
+               }
+
+               req->pages[req->nr_pages++] = page;
+               req->len += PAGE_SIZE;
+       } while (req->nr_pages < n);
+
+       if (req->nr_pages == 0) {
+               kfree(req);
+               return 0;
+       }
+
+       ret = afs_vnode_fetch_data(vnode, key, req);
+       if (ret < 0)
+               goto error;
+
+       task_io_account_read(PAGE_SIZE * req->nr_pages);
+       afs_put_read(req);
+       return 0;
+
+error:
+       if (ret == -ENOENT) {
+               _debug("got NOENT from server"
+                      " - marking file deleted and stale");
+               set_bit(AFS_VNODE_DELETED, &vnode->flags);
+               ret = -ESTALE;
+       }
+
+       for (i = 0; i < req->nr_pages; i++) {
+               page = req->pages[i];
+               if (page) {
+#ifdef CONFIG_AFS_FSCACHE
+                       fscache_uncache_page(vnode->cache, page);
+#endif
+                       SetPageError(page);
+                       unlock_page(page);
+               }
+       }
+
+       afs_put_read(req);
+       return ret;
+}
+
 /*
  * read a set of pages
  */
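
afs_readpages_one() above walks the prev-linked page list, counts how many
leading pages have consecutive indices, and fetches that whole run with a
single request.  The core of the counting step, sketched over a plain index
array (hypothetical helper, not kernel code):

    #include <stdio.h>

    static int count_contiguous(const unsigned long *idx, int count)
    {
            int n = 1;

            while (n < count && idx[n] == idx[n - 1] + 1)
                    n++;
            return n;
    }

    int main(void)
    {
            unsigned long indices[] = { 7, 8, 9, 20, 21 };

            /* Only 7,8,9 are contiguous, so one request covers three
             * pages; 20 and 21 wait for the next iteration. */
            printf("run length = %d\n", count_contiguous(indices, 5));
            return 0;
    }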
@@ -287,8 +440,11 @@ static int afs_readpages(struct file *file, struct address_space *mapping,
                return ret;
        }
 
-       /* load the missing pages from the network */
-       ret = read_cache_pages(mapping, pages, afs_page_filler, key);
+       while (!list_empty(pages)) {
+               ret = afs_readpages_one(file, mapping, pages);
+               if (ret < 0)
+                       break;
+       }
 
        _leave(" = %d [netting]", ret);
        return ret;
index 31c616ab9b400a66dfbcd39c12dd87665f88acf9..ac8e766978dc440e8690fbf44333d41f9894f92a 100644 (file)
@@ -275,7 +275,7 @@ int afs_fs_fetch_file_status(struct afs_server *server,
                             struct key *key,
                             struct afs_vnode *vnode,
                             struct afs_volsync *volsync,
-                            const struct afs_wait_mode *wait_mode)
+                            bool async)
 {
        struct afs_call *call;
        __be32 *bp;
@@ -300,7 +300,7 @@ int afs_fs_fetch_file_status(struct afs_server *server,
        bp[2] = htonl(vnode->fid.vnode);
        bp[3] = htonl(vnode->fid.unique);
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -309,15 +309,19 @@ int afs_fs_fetch_file_status(struct afs_server *server,
 static int afs_deliver_fs_fetch_data(struct afs_call *call)
 {
        struct afs_vnode *vnode = call->reply;
+       struct afs_read *req = call->reply3;
        const __be32 *bp;
-       struct page *page;
+       unsigned int size;
        void *buffer;
        int ret;
 
-       _enter("{%u}", call->unmarshall);
+       _enter("{%u,%zu/%u;%u/%llu}",
+              call->unmarshall, call->offset, call->count,
+              req->remain, req->actual_len);
 
        switch (call->unmarshall) {
        case 0:
+               req->actual_len = 0;
                call->offset = 0;
                call->unmarshall++;
                if (call->operation_ID != FSFETCHDATA64) {
@@ -334,10 +338,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
                if (ret < 0)
                        return ret;
 
-               call->count = ntohl(call->tmp);
-               _debug("DATA length MSW: %u", call->count);
-               if (call->count > 0)
-                       return -EBADMSG;
+               req->actual_len = ntohl(call->tmp);
+               req->actual_len <<= 32;
                call->offset = 0;
                call->unmarshall++;
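
The FetchData64 reply carries the data length as two 32-bit XDR words.  The
old code insisted that the most-significant word be zero; the new code
folds both words into the 64-bit req->actual_len.  The same assembly in
userspace, with arpa/inet.h for the byte-order helpers:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t msw = htonl(1);     /* words as received off the wire */
            uint32_t lsw = htonl(0x200);
            uint64_t len;

            len = ntohl(msw);       /* most-significant 32 bits */
            len <<= 32;
            len |= ntohl(lsw);      /* least-significant 32 bits */

            printf("length = %llu\n", (unsigned long long)len);
            /* prints 4294967808, i.e. 2^32 + 512 */
            return 0;
    }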
 
@@ -349,26 +351,52 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
                if (ret < 0)
                        return ret;
 
-               call->count = ntohl(call->tmp);
-               _debug("DATA length: %u", call->count);
-               if (call->count > PAGE_SIZE)
+               req->actual_len |= ntohl(call->tmp);
+               _debug("DATA length: %llu", req->actual_len);
+               /* Check that the server didn't want to send us extra.  We
+                * might want to just discard instead, but that requires
+                * cooperation from AF_RXRPC.
+                */
+               if (req->actual_len > req->len)
                        return -EBADMSG;
-               call->offset = 0;
+
+               req->remain = req->actual_len;
+               call->offset = req->pos & (PAGE_SIZE - 1);
+               req->index = 0;
+               if (req->actual_len == 0)
+                       goto no_more_data;
                call->unmarshall++;
 
+       begin_page:
+               if (req->remain > PAGE_SIZE - call->offset)
+                       size = PAGE_SIZE - call->offset;
+               else
+                       size = req->remain;
+               call->count = call->offset + size;
+               ASSERTCMP(call->count, <=, PAGE_SIZE);
+               req->remain -= size;
+
                /* extract the returned data */
        case 3:
-               _debug("extract data");
-               if (call->count > 0) {
-                       page = call->reply3;
-                       buffer = kmap(page);
-                       ret = afs_extract_data(call, buffer,
-                                              call->count, true);
-                       kunmap(page);
-                       if (ret < 0)
-                               return ret;
+               _debug("extract data %u/%llu %zu/%u",
+                      req->remain, req->actual_len, call->offset, call->count);
+
+               buffer = kmap(req->pages[req->index]);
+               ret = afs_extract_data(call, buffer, call->count, true);
+               kunmap(req->pages[req->index]);
+               if (ret < 0)
+                       return ret;
+               if (call->offset == PAGE_SIZE) {
+                       if (req->page_done)
+                               req->page_done(call, req);
+                       if (req->remain > 0) {
+                               req->index++;
+                               call->offset = 0;
+                               goto begin_page;
+                       }
                }
 
+       no_more_data:
                call->offset = 0;
                call->unmarshall++;
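
The begin_page/case 3 loop above doles the reply out one page at a time:
the first page may start at a non-zero offset (req->pos masked by
PAGE_SIZE - 1), each page then takes min(remain, PAGE_SIZE - offset) bytes,
and the offset resets to zero for every subsequent page.  The arithmetic,
sketched in userspace with PAGE_SIZE assumed to be 4096:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
            unsigned long long pos = 6000, actual_len = 10000;
            unsigned int remain = actual_len;
            unsigned int offset = pos & (PAGE_SIZE - 1);
            int index = 0;

            while (remain > 0) {
                    unsigned int size = PAGE_SIZE - offset;

                    if (size > remain)
                            size = remain;
                    printf("page %d: fill bytes [%u..%u)\n",
                           index, offset, offset + size);
                    remain -= size;
                    offset = 0;
                    index++;
            }
            return 0;
    }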
 
@@ -393,17 +421,25 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
        }
 
        if (call->count < PAGE_SIZE) {
-               _debug("clear");
-               page = call->reply3;
-               buffer = kmap(page);
+               buffer = kmap(req->pages[req->index]);
                memset(buffer + call->count, 0, PAGE_SIZE - call->count);
-               kunmap(page);
+               kunmap(req->pages[req->index]);
+               if (req->page_done)
+                       req->page_done(call, req);
        }
 
        _leave(" = 0 [done]");
        return 0;
 }
 
+static void afs_fetch_data_destructor(struct afs_call *call)
+{
+       struct afs_read *req = call->reply3;
+
+       afs_put_read(req);
+       afs_flat_call_destructor(call);
+}
+
 /*
  * FS.FetchData operation type
  */
@@ -411,14 +447,14 @@ static const struct afs_call_type afs_RXFSFetchData = {
        .name           = "FS.FetchData",
        .deliver        = afs_deliver_fs_fetch_data,
        .abort_to_error = afs_abort_to_error,
-       .destructor     = afs_flat_call_destructor,
+       .destructor     = afs_fetch_data_destructor,
 };
 
 static const struct afs_call_type afs_RXFSFetchData64 = {
        .name           = "FS.FetchData64",
        .deliver        = afs_deliver_fs_fetch_data,
        .abort_to_error = afs_abort_to_error,
-       .destructor     = afs_flat_call_destructor,
+       .destructor     = afs_fetch_data_destructor,
 };
 
 /*
@@ -427,17 +463,14 @@ static const struct afs_call_type afs_RXFSFetchData64 = {
 static int afs_fs_fetch_data64(struct afs_server *server,
                               struct key *key,
                               struct afs_vnode *vnode,
-                              off_t offset, size_t length,
-                              struct page *buffer,
-                              const struct afs_wait_mode *wait_mode)
+                              struct afs_read *req,
+                              bool async)
 {
        struct afs_call *call;
        __be32 *bp;
 
        _enter("");
 
-       ASSERTCMP(length, <, ULONG_MAX);
-
        call = afs_alloc_flat_call(&afs_RXFSFetchData64, 32, (21 + 3 + 6) * 4);
        if (!call)
                return -ENOMEM;
@@ -445,7 +478,7 @@ static int afs_fs_fetch_data64(struct afs_server *server,
        call->key = key;
        call->reply = vnode;
        call->reply2 = NULL; /* volsync */
-       call->reply3 = buffer;
+       call->reply3 = req;
        call->service_id = FS_SERVICE;
        call->port = htons(AFS_FS_PORT);
        call->operation_ID = FSFETCHDATA64;
@@ -456,12 +489,13 @@ static int afs_fs_fetch_data64(struct afs_server *server,
        bp[1] = htonl(vnode->fid.vid);
        bp[2] = htonl(vnode->fid.vnode);
        bp[3] = htonl(vnode->fid.unique);
-       bp[4] = htonl(upper_32_bits(offset));
-       bp[5] = htonl((u32) offset);
+       bp[4] = htonl(upper_32_bits(req->pos));
+       bp[5] = htonl(lower_32_bits(req->pos));
        bp[6] = 0;
-       bp[7] = htonl((u32) length);
+       bp[7] = htonl(lower_32_bits(req->len));
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       atomic_inc(&req->usage);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -470,16 +504,16 @@ static int afs_fs_fetch_data64(struct afs_server *server,
 int afs_fs_fetch_data(struct afs_server *server,
                      struct key *key,
                      struct afs_vnode *vnode,
-                     off_t offset, size_t length,
-                     struct page *buffer,
-                     const struct afs_wait_mode *wait_mode)
+                     struct afs_read *req,
+                     bool async)
 {
        struct afs_call *call;
        __be32 *bp;
 
-       if (upper_32_bits(offset) || upper_32_bits(offset + length))
-               return afs_fs_fetch_data64(server, key, vnode, offset, length,
-                                          buffer, wait_mode);
+       if (upper_32_bits(req->pos) ||
+           upper_32_bits(req->len) ||
+           upper_32_bits(req->pos + req->len))
+               return afs_fs_fetch_data64(server, key, vnode, req, async);
 
        _enter("");
 
@@ -490,7 +524,7 @@ int afs_fs_fetch_data(struct afs_server *server,
        call->key = key;
        call->reply = vnode;
        call->reply2 = NULL; /* volsync */
-       call->reply3 = buffer;
+       call->reply3 = req;
        call->service_id = FS_SERVICE;
        call->port = htons(AFS_FS_PORT);
        call->operation_ID = FSFETCHDATA;
@@ -501,10 +535,11 @@ int afs_fs_fetch_data(struct afs_server *server,
        bp[1] = htonl(vnode->fid.vid);
        bp[2] = htonl(vnode->fid.vnode);
        bp[3] = htonl(vnode->fid.unique);
-       bp[4] = htonl(offset);
-       bp[5] = htonl(length);
+       bp[4] = htonl(lower_32_bits(req->pos));
+       bp[5] = htonl(lower_32_bits(req->len));
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       atomic_inc(&req->usage);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -533,7 +568,7 @@ static const struct afs_call_type afs_RXFSGiveUpCallBacks = {
  * - the callbacks are held in the server->cb_break ring
  */
 int afs_fs_give_up_callbacks(struct afs_server *server,
-                            const struct afs_wait_mode *wait_mode)
+                            bool async)
 {
        struct afs_call *call;
        size_t ncallbacks;
@@ -587,7 +622,7 @@ int afs_fs_give_up_callbacks(struct afs_server *server,
        ASSERT(ncallbacks > 0);
        wake_up_nr(&server->cb_break_waitq, ncallbacks);
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -638,7 +673,7 @@ int afs_fs_create(struct afs_server *server,
                  struct afs_fid *newfid,
                  struct afs_file_status *newstatus,
                  struct afs_callback *newcb,
-                 const struct afs_wait_mode *wait_mode)
+                 bool async)
 {
        struct afs_call *call;
        size_t namesz, reqsz, padsz;
@@ -683,7 +718,7 @@ int afs_fs_create(struct afs_server *server,
        *bp++ = htonl(mode & S_IALLUGO); /* unix mode */
        *bp++ = 0; /* segment size */
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -728,7 +763,7 @@ int afs_fs_remove(struct afs_server *server,
                  struct afs_vnode *vnode,
                  const char *name,
                  bool isdir,
-                 const struct afs_wait_mode *wait_mode)
+                 bool async)
 {
        struct afs_call *call;
        size_t namesz, reqsz, padsz;
@@ -763,7 +798,7 @@ int afs_fs_remove(struct afs_server *server,
                bp = (void *) bp + padsz;
        }
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -809,7 +844,7 @@ int afs_fs_link(struct afs_server *server,
                struct afs_vnode *dvnode,
                struct afs_vnode *vnode,
                const char *name,
-               const struct afs_wait_mode *wait_mode)
+               bool async)
 {
        struct afs_call *call;
        size_t namesz, reqsz, padsz;
@@ -848,7 +883,7 @@ int afs_fs_link(struct afs_server *server,
        *bp++ = htonl(vnode->fid.vnode);
        *bp++ = htonl(vnode->fid.unique);
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -897,7 +932,7 @@ int afs_fs_symlink(struct afs_server *server,
                   const char *contents,
                   struct afs_fid *newfid,
                   struct afs_file_status *newstatus,
-                  const struct afs_wait_mode *wait_mode)
+                  bool async)
 {
        struct afs_call *call;
        size_t namesz, reqsz, padsz, c_namesz, c_padsz;
@@ -952,7 +987,7 @@ int afs_fs_symlink(struct afs_server *server,
        *bp++ = htonl(S_IRWXUGO); /* unix mode */
        *bp++ = 0; /* segment size */
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1001,7 +1036,7 @@ int afs_fs_rename(struct afs_server *server,
                  const char *orig_name,
                  struct afs_vnode *new_dvnode,
                  const char *new_name,
-                 const struct afs_wait_mode *wait_mode)
+                 bool async)
 {
        struct afs_call *call;
        size_t reqsz, o_namesz, o_padsz, n_namesz, n_padsz;
@@ -1055,7 +1090,7 @@ int afs_fs_rename(struct afs_server *server,
                bp = (void *) bp + n_padsz;
        }
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1110,7 +1145,7 @@ static int afs_fs_store_data64(struct afs_server *server,
                               pgoff_t first, pgoff_t last,
                               unsigned offset, unsigned to,
                               loff_t size, loff_t pos, loff_t i_size,
-                              const struct afs_wait_mode *wait_mode)
+                              bool async)
 {
        struct afs_vnode *vnode = wb->vnode;
        struct afs_call *call;
@@ -1159,7 +1194,7 @@ static int afs_fs_store_data64(struct afs_server *server,
        *bp++ = htonl(i_size >> 32);
        *bp++ = htonl((u32) i_size);
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1168,7 +1203,7 @@ static int afs_fs_store_data64(struct afs_server *server,
 int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
                      pgoff_t first, pgoff_t last,
                      unsigned offset, unsigned to,
-                     const struct afs_wait_mode *wait_mode)
+                     bool async)
 {
        struct afs_vnode *vnode = wb->vnode;
        struct afs_call *call;
@@ -1194,7 +1229,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
 
        if (pos >> 32 || i_size >> 32 || size >> 32 || (pos + size) >> 32)
                return afs_fs_store_data64(server, wb, first, last, offset, to,
-                                          size, pos, i_size, wait_mode);
+                                          size, pos, i_size, async);
 
        call = afs_alloc_flat_call(&afs_RXFSStoreData,
                                   (4 + 6 + 3) * 4,
@@ -1233,7 +1268,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
        *bp++ = htonl(size);
        *bp++ = htonl(i_size);
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1295,7 +1330,7 @@ static const struct afs_call_type afs_RXFSStoreData64_as_Status = {
  */
 static int afs_fs_setattr_size64(struct afs_server *server, struct key *key,
                                 struct afs_vnode *vnode, struct iattr *attr,
-                                const struct afs_wait_mode *wait_mode)
+                                bool async)
 {
        struct afs_call *call;
        __be32 *bp;
@@ -1334,7 +1369,7 @@ static int afs_fs_setattr_size64(struct afs_server *server, struct key *key,
        *bp++ = htonl(attr->ia_size >> 32);     /* new file length */
        *bp++ = htonl((u32) attr->ia_size);
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1343,7 +1378,7 @@ static int afs_fs_setattr_size64(struct afs_server *server, struct key *key,
  */
 static int afs_fs_setattr_size(struct afs_server *server, struct key *key,
                               struct afs_vnode *vnode, struct iattr *attr,
-                              const struct afs_wait_mode *wait_mode)
+                              bool async)
 {
        struct afs_call *call;
        __be32 *bp;
@@ -1354,7 +1389,7 @@ static int afs_fs_setattr_size(struct afs_server *server, struct key *key,
        ASSERT(attr->ia_valid & ATTR_SIZE);
        if (attr->ia_size >> 32)
                return afs_fs_setattr_size64(server, key, vnode, attr,
-                                            wait_mode);
+                                            async);
 
        call = afs_alloc_flat_call(&afs_RXFSStoreData_as_Status,
                                   (4 + 6 + 3) * 4,
@@ -1382,7 +1417,7 @@ static int afs_fs_setattr_size(struct afs_server *server, struct key *key,
        *bp++ = 0;                              /* size of write */
        *bp++ = htonl(attr->ia_size);           /* new file length */
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1391,14 +1426,14 @@ static int afs_fs_setattr_size(struct afs_server *server, struct key *key,
  */
 int afs_fs_setattr(struct afs_server *server, struct key *key,
                   struct afs_vnode *vnode, struct iattr *attr,
-                  const struct afs_wait_mode *wait_mode)
+                  bool async)
 {
        struct afs_call *call;
        __be32 *bp;
 
        if (attr->ia_valid & ATTR_SIZE)
                return afs_fs_setattr_size(server, key, vnode, attr,
-                                          wait_mode);
+                                          async);
 
        _enter(",%x,{%x:%u},,",
               key_serial(key), vnode->fid.vid, vnode->fid.vnode);
@@ -1424,7 +1459,7 @@ int afs_fs_setattr(struct afs_server *server, struct key *key,
 
        xdr_encode_AFS_StoreStatus(&bp, attr);
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1626,7 +1661,7 @@ int afs_fs_get_volume_status(struct afs_server *server,
                             struct key *key,
                             struct afs_vnode *vnode,
                             struct afs_volume_status *vs,
-                            const struct afs_wait_mode *wait_mode)
+                            bool async)
 {
        struct afs_call *call;
        __be32 *bp;
@@ -1656,7 +1691,7 @@ int afs_fs_get_volume_status(struct afs_server *server,
        bp[0] = htonl(FSGETVOLUMESTATUS);
        bp[1] = htonl(vnode->fid.vid);
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1718,7 +1753,7 @@ int afs_fs_set_lock(struct afs_server *server,
                    struct key *key,
                    struct afs_vnode *vnode,
                    afs_lock_type_t type,
-                   const struct afs_wait_mode *wait_mode)
+                   bool async)
 {
        struct afs_call *call;
        __be32 *bp;
@@ -1742,7 +1777,7 @@ int afs_fs_set_lock(struct afs_server *server,
        *bp++ = htonl(vnode->fid.unique);
        *bp++ = htonl(type);
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1751,7 +1786,7 @@ int afs_fs_set_lock(struct afs_server *server,
 int afs_fs_extend_lock(struct afs_server *server,
                       struct key *key,
                       struct afs_vnode *vnode,
-                      const struct afs_wait_mode *wait_mode)
+                      bool async)
 {
        struct afs_call *call;
        __be32 *bp;
@@ -1774,7 +1809,7 @@ int afs_fs_extend_lock(struct afs_server *server,
        *bp++ = htonl(vnode->fid.vnode);
        *bp++ = htonl(vnode->fid.unique);
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
 
 /*
@@ -1783,7 +1818,7 @@ int afs_fs_extend_lock(struct afs_server *server,
 int afs_fs_release_lock(struct afs_server *server,
                        struct key *key,
                        struct afs_vnode *vnode,
-                       const struct afs_wait_mode *wait_mode)
+                       bool async)
 {
        struct afs_call *call;
        __be32 *bp;
@@ -1806,5 +1841,5 @@ int afs_fs_release_lock(struct afs_server *server,
        *bp++ = htonl(vnode->fid.vnode);
        *bp++ = htonl(vnode->fid.unique);
 
-       return afs_make_call(&server->addr, call, GFP_NOFS, wait_mode);
+       return afs_make_call(&server->addr, call, GFP_NOFS, async);
 }
index 535a38d2c1d06f752cd6beae54766c1a9d899527..8acf3670e75649d0c62de3dd068d56da37d5b97a 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/fscache.h>
 #include <linux/backing-dev.h>
+#include <linux/uuid.h>
 #include <net/af_rxrpc.h>
 
 #include "afs.h"
@@ -51,31 +52,22 @@ struct afs_mount_params {
        struct key              *key;           /* key to use for secure mounting */
 };
 
-/*
- * definition of how to wait for the completion of an operation
- */
-struct afs_wait_mode {
-       /* RxRPC received message notification */
-       rxrpc_notify_rx_t notify_rx;
-
-       /* synchronous call waiter and call dispatched notification */
-       int (*wait)(struct afs_call *call);
-
-       /* asynchronous call completion */
-       void (*async_complete)(void *reply, int error);
+enum afs_call_state {
+       AFS_CALL_REQUESTING,    /* request is being sent for outgoing call */
+       AFS_CALL_AWAIT_REPLY,   /* awaiting reply to outgoing call */
+       AFS_CALL_AWAIT_OP_ID,   /* awaiting op ID on incoming call */
+       AFS_CALL_AWAIT_REQUEST, /* awaiting request data on incoming call */
+       AFS_CALL_REPLYING,      /* replying to incoming call */
+       AFS_CALL_AWAIT_ACK,     /* awaiting final ACK of incoming call */
+       AFS_CALL_COMPLETE,      /* Completed or failed */
 };
-
-extern const struct afs_wait_mode afs_sync_call;
-extern const struct afs_wait_mode afs_async_call;
-
 /*
  * a record of an in-progress RxRPC call
  */
 struct afs_call {
        const struct afs_call_type *type;       /* type of call */
-       const struct afs_wait_mode *wait_mode;  /* completion wait mode */
        wait_queue_head_t       waitq;          /* processes awaiting completion */
-       struct work_struct      async_work;     /* asynchronous work processor */
+       struct work_struct      async_work;     /* async I/O processor */
        struct work_struct      work;           /* actual work processor */
        struct rxrpc_call       *rxcall;        /* RxRPC call handle */
        struct key              *key;           /* security for this call */
@@ -91,15 +83,8 @@ struct afs_call {
        pgoff_t                 first;          /* first page in mapping to deal with */
        pgoff_t                 last;           /* last page in mapping to deal with */
        size_t                  offset;         /* offset into received data store */
-       enum {                                  /* call state */
-               AFS_CALL_REQUESTING,    /* request is being sent for outgoing call */
-               AFS_CALL_AWAIT_REPLY,   /* awaiting reply to outgoing call */
-               AFS_CALL_AWAIT_OP_ID,   /* awaiting op ID on incoming call */
-               AFS_CALL_AWAIT_REQUEST, /* awaiting request data on incoming call */
-               AFS_CALL_REPLYING,      /* replying to incoming call */
-               AFS_CALL_AWAIT_ACK,     /* awaiting final ACK of incoming call */
-               AFS_CALL_COMPLETE,      /* Completed or failed */
-       }                       state;
+       atomic_t                usage;
+       enum afs_call_state     state;
        int                     error;          /* error code */
        u32                     abort_code;     /* Remote abort ID or 0 */
        unsigned                request_size;   /* size of request data */
@@ -110,6 +95,7 @@ struct afs_call {
        bool                    incoming;       /* T if incoming call */
        bool                    send_pages;     /* T if data from mapping should be sent */
        bool                    need_attention; /* T if RxRPC poked us */
+       bool                    async;          /* T if asynchronous */
        u16                     service_id;     /* RxRPC service ID to call */
        __be16                  port;           /* target UDP port */
        u32                     operation_ID;   /* operation ID for an incoming call */
@@ -131,6 +117,25 @@ struct afs_call_type {
 
        /* clean up a call */
        void (*destructor)(struct afs_call *call);
+
+       /* Work function */
+       void (*work)(struct work_struct *work);
+};
+
+/*
+ * Record of an outstanding read operation on a vnode.
+ */
+struct afs_read {
+       loff_t                  pos;            /* Where to start reading */
+       loff_t                  len;            /* How much to read */
+       loff_t                  actual_len;     /* How much we're actually getting */
+       atomic_t                usage;
+       unsigned int            remain;         /* Amount remaining */
+       unsigned int            index;          /* Which page we're reading into */
+       unsigned int            pg_offset;      /* Offset in page we're at */
+       unsigned int            nr_pages;
+       void (*page_done)(struct afs_call *, struct afs_read *);
+       struct page             *pages[];
 };
 
 /*
@@ -403,30 +408,6 @@ struct afs_interface {
        unsigned        mtu;            /* MTU of interface */
 };
 
-/*
- * UUID definition [internet draft]
- * - the timestamp is a 60-bit value, split 32/16/12, and goes in 100ns
- *   increments since midnight 15th October 1582
- *   - add AFS_UUID_TO_UNIX_TIME to convert unix time in 100ns units to UUID
- *     time
- * - the clock sequence is a 14-bit counter to avoid duplicate times
- */
-struct afs_uuid {
-       u32             time_low;                       /* low part of timestamp */
-       u16             time_mid;                       /* mid part of timestamp */
-       u16             time_hi_and_version;            /* high part of timestamp and version  */
-#define AFS_UUID_TO_UNIX_TIME  0x01b21dd213814000ULL
-#define AFS_UUID_TIMEHI_MASK   0x0fff
-#define AFS_UUID_VERSION_TIME  0x1000  /* time-based UUID */
-#define AFS_UUID_VERSION_NAME  0x3000  /* name-based UUID */
-#define AFS_UUID_VERSION_RANDOM        0x4000  /* (pseudo-)random generated UUID */
-       u8              clock_seq_hi_and_reserved;      /* clock seq hi and variant */
-#define AFS_UUID_CLOCKHI_MASK  0x3f
-#define AFS_UUID_VARIANT_STD   0x80
-       u8              clock_seq_low;                  /* clock seq low */
-       u8              node[6];                        /* spatially unique node ID (MAC addr) */
-};
-
 /*****************************************************************************/
 /*
  * cache.c
@@ -494,6 +475,7 @@ extern const struct file_operations afs_file_operations;
 extern int afs_open(struct inode *, struct file *);
 extern int afs_release(struct inode *, struct file *);
 extern int afs_page_filler(void *, struct page *);
+extern void afs_put_read(struct afs_read *);
 
 /*
  * flock.c
@@ -509,50 +491,37 @@ extern int afs_flock(struct file *, int, struct file_lock *);
  */
 extern int afs_fs_fetch_file_status(struct afs_server *, struct key *,
                                    struct afs_vnode *, struct afs_volsync *,
-                                   const struct afs_wait_mode *);
-extern int afs_fs_give_up_callbacks(struct afs_server *,
-                                   const struct afs_wait_mode *);
+                                   bool);
+extern int afs_fs_give_up_callbacks(struct afs_server *, bool);
 extern int afs_fs_fetch_data(struct afs_server *, struct key *,
-                            struct afs_vnode *, off_t, size_t, struct page *,
-                            const struct afs_wait_mode *);
+                            struct afs_vnode *, struct afs_read *, bool);
 extern int afs_fs_create(struct afs_server *, struct key *,
                         struct afs_vnode *, const char *, umode_t,
                         struct afs_fid *, struct afs_file_status *,
-                        struct afs_callback *,
-                        const struct afs_wait_mode *);
+                        struct afs_callback *, bool);
 extern int afs_fs_remove(struct afs_server *, struct key *,
-                        struct afs_vnode *, const char *, bool,
-                        const struct afs_wait_mode *);
+                        struct afs_vnode *, const char *, bool, bool);
 extern int afs_fs_link(struct afs_server *, struct key *, struct afs_vnode *,
-                      struct afs_vnode *, const char *,
-                      const struct afs_wait_mode *);
+                      struct afs_vnode *, const char *, bool);
 extern int afs_fs_symlink(struct afs_server *, struct key *,
                          struct afs_vnode *, const char *, const char *,
-                         struct afs_fid *, struct afs_file_status *,
-                         const struct afs_wait_mode *);
+                         struct afs_fid *, struct afs_file_status *, bool);
 extern int afs_fs_rename(struct afs_server *, struct key *,
                         struct afs_vnode *, const char *,
-                        struct afs_vnode *, const char *,
-                        const struct afs_wait_mode *);
+                        struct afs_vnode *, const char *, bool);
 extern int afs_fs_store_data(struct afs_server *, struct afs_writeback *,
-                            pgoff_t, pgoff_t, unsigned, unsigned,
-                            const struct afs_wait_mode *);
+                            pgoff_t, pgoff_t, unsigned, unsigned, bool);
 extern int afs_fs_setattr(struct afs_server *, struct key *,
-                         struct afs_vnode *, struct iattr *,
-                         const struct afs_wait_mode *);
+                         struct afs_vnode *, struct iattr *, bool);
 extern int afs_fs_get_volume_status(struct afs_server *, struct key *,
                                    struct afs_vnode *,
-                                   struct afs_volume_status *,
-                                   const struct afs_wait_mode *);
+                                   struct afs_volume_status *, bool);
 extern int afs_fs_set_lock(struct afs_server *, struct key *,
-                          struct afs_vnode *, afs_lock_type_t,
-                          const struct afs_wait_mode *);
+                          struct afs_vnode *, afs_lock_type_t, bool);
 extern int afs_fs_extend_lock(struct afs_server *, struct key *,
-                             struct afs_vnode *,
-                             const struct afs_wait_mode *);
+                             struct afs_vnode *, bool);
 extern int afs_fs_release_lock(struct afs_server *, struct key *,
-                              struct afs_vnode *,
-                              const struct afs_wait_mode *);
+                              struct afs_vnode *, bool);
 
 /*
  * inode.c
@@ -573,7 +542,7 @@ extern int afs_drop_inode(struct inode *);
  * main.c
  */
 extern struct workqueue_struct *afs_wq;
-extern struct afs_uuid afs_uuid;
+extern struct uuid_v1 afs_uuid;
 
 /*
  * misc.c
@@ -591,6 +560,11 @@ extern struct vfsmount *afs_d_automount(struct path *);
 extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *);
 extern void afs_mntpt_kill_timer(void);
 
+/*
+ * netdevices.c
+ */
+extern int afs_get_ipv4_interfaces(struct afs_interface *, size_t, bool);
+
 /*
  * proc.c
  */
@@ -603,11 +577,13 @@ extern void afs_proc_cell_remove(struct afs_cell *);
  * rxrpc.c
  */
 extern struct socket *afs_socket;
+extern atomic_t afs_outstanding_calls;
 
 extern int afs_open_socket(void);
 extern void afs_close_socket(void);
-extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t,
-                        const struct afs_wait_mode *);
+extern void afs_put_call(struct afs_call *);
+extern int afs_queue_call_work(struct afs_call *);
+extern int afs_make_call(struct in_addr *, struct afs_call *, gfp_t, bool);
 extern struct afs_call *afs_alloc_flat_call(const struct afs_call_type *,
                                            size_t, size_t);
 extern void afs_flat_call_destructor(struct afs_call *);
@@ -652,22 +628,15 @@ extern void __exit afs_purge_servers(void);
 extern int afs_fs_init(void);
 extern void afs_fs_exit(void);
 
-/*
- * use-rtnetlink.c
- */
-extern int afs_get_ipv4_interfaces(struct afs_interface *, size_t, bool);
-extern int afs_get_MAC_address(u8 *, size_t);
-
 /*
  * vlclient.c
  */
 extern int afs_vl_get_entry_by_name(struct in_addr *, struct key *,
                                    const char *, struct afs_cache_vlocation *,
-                                   const struct afs_wait_mode *);
+                                   bool);
 extern int afs_vl_get_entry_by_id(struct in_addr *, struct key *,
                                  afs_volid_t, afs_voltype_t,
-                                 struct afs_cache_vlocation *,
-                                 const struct afs_wait_mode *);
+                                 struct afs_cache_vlocation *, bool);
 
 /*
  * vlocation.c
@@ -699,7 +668,7 @@ extern void afs_vnode_finalise_status_update(struct afs_vnode *,
 extern int afs_vnode_fetch_status(struct afs_vnode *, struct afs_vnode *,
                                  struct key *);
 extern int afs_vnode_fetch_data(struct afs_vnode *, struct key *,
-                               off_t, size_t, struct page *);
+                               struct afs_read *);
 extern int afs_vnode_create(struct afs_vnode *, struct key *, const char *,
                            umode_t, struct afs_fid *, struct afs_file_status *,
                            struct afs_callback *, struct afs_server **);
@@ -756,6 +725,8 @@ extern int afs_fsync(struct file *, loff_t, loff_t, int);
 /*
  * debug tracing
  */
+#include <trace/events/afs.h>
+
 extern unsigned afs_debug;
 
 #define dbgprintk(FMT,...) \
index 0b187ef3b5b7a2909b6e07c2dd4cd57d9569789b..51d7d17bca5756b9e4dbbb05408689e2d936a477 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/completion.h>
 #include <linux/sched.h>
 #include <linux/random.h>
+#define CREATE_TRACE_POINTS
 #include "internal.h"
 
 MODULE_DESCRIPTION("AFS Client File System");
@@ -30,52 +31,9 @@ static char *rootcell;
 module_param(rootcell, charp, 0);
 MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list");
 
-struct afs_uuid afs_uuid;
+struct uuid_v1 afs_uuid;
 struct workqueue_struct *afs_wq;
 
-/*
- * get a client UUID
- */
-static int __init afs_get_client_UUID(void)
-{
-       struct timespec ts;
-       u64 uuidtime;
-       u16 clockseq;
-       int ret;
-
-       /* read the MAC address of one of the external interfaces and construct
-        * a UUID from it */
-       ret = afs_get_MAC_address(afs_uuid.node, sizeof(afs_uuid.node));
-       if (ret < 0)
-               return ret;
-
-       getnstimeofday(&ts);
-       uuidtime = (u64) ts.tv_sec * 1000 * 1000 * 10;
-       uuidtime += ts.tv_nsec / 100;
-       uuidtime += AFS_UUID_TO_UNIX_TIME;
-       afs_uuid.time_low = uuidtime;
-       afs_uuid.time_mid = uuidtime >> 32;
-       afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK;
-       afs_uuid.time_hi_and_version |= AFS_UUID_VERSION_TIME;
-
-       get_random_bytes(&clockseq, 2);
-       afs_uuid.clock_seq_low = clockseq;
-       afs_uuid.clock_seq_hi_and_reserved =
-               (clockseq >> 8) & AFS_UUID_CLOCKHI_MASK;
-       afs_uuid.clock_seq_hi_and_reserved |= AFS_UUID_VARIANT_STD;
-
-       _debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
-              afs_uuid.time_low,
-              afs_uuid.time_mid,
-              afs_uuid.time_hi_and_version,
-              afs_uuid.clock_seq_hi_and_reserved,
-              afs_uuid.clock_seq_low,
-              afs_uuid.node[0], afs_uuid.node[1], afs_uuid.node[2],
-              afs_uuid.node[3], afs_uuid.node[4], afs_uuid.node[5]);
-
-       return 0;
-}
-
 /*
  * initialise the AFS client FS module
  */
@@ -85,9 +43,7 @@ static int __init afs_init(void)
 
        printk(KERN_INFO "kAFS: Red Hat AFS client v0.1 registering.\n");
 
-       ret = afs_get_client_UUID();
-       if (ret < 0)
-               return ret;
+       generate_random_uuid((unsigned char *)&afs_uuid);
 
        /* create workqueue */
        ret = -ENOMEM;
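
generate_random_uuid() replaces the hand-rolled time-and-MAC (version 1)
UUID with sixteen random bytes stamped with the RFC 4122 version 4 and
variant bits.  A rough userspace approximation of what it produces, with
rand() standing in for the kernel's entropy source (not suitable for real
identifiers):

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    static void random_uuid(unsigned char uuid[16])
    {
            for (int i = 0; i < 16; i++)
                    uuid[i] = rand() & 0xff;
            uuid[6] = (uuid[6] & 0x0f) | 0x40;      /* version 4: random */
            uuid[8] = (uuid[8] & 0x3f) | 0x80;      /* variant: RFC 4122 */
    }

    int main(void)
    {
            unsigned char u[16];

            srand(time(NULL));
            random_uuid(u);
            printf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
                   "%02x%02x%02x%02x%02x%02x\n",
                   u[0], u[1], u[2], u[3], u[4], u[5], u[6], u[7],
                   u[8], u[9], u[10], u[11], u[12], u[13], u[14], u[15]);
            return 0;
    }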
index 7ad36506c256a55d54fc96dc6e3c323e82c4c230..40b2bab3e401445492f91e11661617bda1328f37 100644 (file)
 #include <net/net_namespace.h>
 #include "internal.h"
 
-/*
- * get a MAC address from a random ethernet interface that has a real one
- * - the buffer will normally be 6 bytes in size
- */
-int afs_get_MAC_address(u8 *mac, size_t maclen)
-{
-       struct net_device *dev;
-       int ret = -ENODEV;
-
-       BUG_ON(maclen != ETH_ALEN);
-
-       rtnl_lock();
-       dev = __dev_getfirstbyhwtype(&init_net, ARPHRD_ETHER);
-       if (dev) {
-               memcpy(mac, dev->dev_addr, maclen);
-               ret = 0;
-       }
-       rtnl_unlock();
-       return ret;
-}
-
 /*
  * get a list of this system's interface IPv4 addresses, netmasks and MTUs
  * - maxbufs must be at least 1
index 25f05a8d21b195fffb10f89cff990888fefd1ab2..95f42872b787ad8f857890b60a581b741f12dfc2 100644 (file)
 struct socket *afs_socket; /* my RxRPC socket */
 static struct workqueue_struct *afs_async_calls;
 static struct afs_call *afs_spare_incoming_call;
-static atomic_t afs_outstanding_calls;
+atomic_t afs_outstanding_calls;
 
-static void afs_free_call(struct afs_call *);
 static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
 static int afs_wait_for_call_to_complete(struct afs_call *);
 static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
-static int afs_dont_wait_for_call_to_complete(struct afs_call *);
 static void afs_process_async_call(struct work_struct *);
 static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
 static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
 static int afs_deliver_cm_op_id(struct afs_call *);
 
-/* synchronous call management */
-const struct afs_wait_mode afs_sync_call = {
-       .notify_rx      = afs_wake_up_call_waiter,
-       .wait           = afs_wait_for_call_to_complete,
-};
-
-/* asynchronous call management */
-const struct afs_wait_mode afs_async_call = {
-       .notify_rx      = afs_wake_up_async_call,
-       .wait           = afs_dont_wait_for_call_to_complete,
-};
-
-/* asynchronous incoming call management */
-static const struct afs_wait_mode afs_async_incoming_call = {
-       .notify_rx      = afs_wake_up_async_call,
-};
-
 /* asynchronous incoming call initial processing */
 static const struct afs_call_type afs_RXCMxxxx = {
        .name           = "CB.xxxx",
@@ -130,9 +111,11 @@ void afs_close_socket(void)
 {
        _enter("");
 
+       kernel_listen(afs_socket, 0);
+       flush_workqueue(afs_async_calls);
+
        if (afs_spare_incoming_call) {
-               atomic_inc(&afs_outstanding_calls);
-               afs_free_call(afs_spare_incoming_call);
+               afs_put_call(afs_spare_incoming_call);
                afs_spare_incoming_call = NULL;
        }
 
@@ -141,7 +124,6 @@ void afs_close_socket(void)
                         TASK_UNINTERRUPTIBLE);
        _debug("no outstanding calls");
 
-       flush_workqueue(afs_async_calls);
        kernel_sock_shutdown(afs_socket, SHUT_RDWR);
        flush_workqueue(afs_async_calls);
        sock_release(afs_socket);
@@ -152,44 +134,79 @@ void afs_close_socket(void)
 }
 
 /*
- * free a call
+ * Allocate a call.
  */
-static void afs_free_call(struct afs_call *call)
+static struct afs_call *afs_alloc_call(const struct afs_call_type *type,
+                                      gfp_t gfp)
 {
-       _debug("DONE %p{%s} [%d]",
-              call, call->type->name, atomic_read(&afs_outstanding_calls));
+       struct afs_call *call;
+       int o;
 
-       ASSERTCMP(call->rxcall, ==, NULL);
-       ASSERT(!work_pending(&call->async_work));
-       ASSERT(call->type->name != NULL);
+       call = kzalloc(sizeof(*call), gfp);
+       if (!call)
+               return NULL;
 
-       kfree(call->request);
-       kfree(call);
+       call->type = type;
+       atomic_set(&call->usage, 1);
+       INIT_WORK(&call->async_work, afs_process_async_call);
+       init_waitqueue_head(&call->waitq);
 
-       if (atomic_dec_and_test(&afs_outstanding_calls))
-               wake_up_atomic_t(&afs_outstanding_calls);
+       o = atomic_inc_return(&afs_outstanding_calls);
+       trace_afs_call(call, afs_call_trace_alloc, 1, o,
+                      __builtin_return_address(0));
+       return call;
 }
 
 /*
- * End a call but do not free it
+ * Dispose of a reference on a call.
  */
-static void afs_end_call_nofree(struct afs_call *call)
+void afs_put_call(struct afs_call *call)
 {
-       if (call->rxcall) {
-               rxrpc_kernel_end_call(afs_socket, call->rxcall);
-               call->rxcall = NULL;
+       int n = atomic_dec_return(&call->usage);
+       int o = atomic_read(&afs_outstanding_calls);
+
+       trace_afs_call(call, afs_call_trace_put, n + 1, o,
+                      __builtin_return_address(0));
+
+       ASSERTCMP(n, >=, 0);
+       if (n == 0) {
+               ASSERT(!work_pending(&call->async_work));
+               ASSERT(call->type->name != NULL);
+
+               if (call->rxcall) {
+                       rxrpc_kernel_end_call(afs_socket, call->rxcall);
+                       call->rxcall = NULL;
+               }
+               if (call->type->destructor)
+                       call->type->destructor(call);
+
+               kfree(call->request);
+               kfree(call);
+
+               o = atomic_dec_return(&afs_outstanding_calls);
+               trace_afs_call(call, afs_call_trace_free, 0, o,
+                              __builtin_return_address(0));
+               if (o == 0)
+                       wake_up_atomic_t(&afs_outstanding_calls);
        }
-       if (call->type->destructor)
-               call->type->destructor(call);
 }
 
 /*
- * End a call and free it
+ * Queue the call for actual work.  Returns 0 unconditionally for convenience.
  */
-static void afs_end_call(struct afs_call *call)
+int afs_queue_call_work(struct afs_call *call)
 {
-       afs_end_call_nofree(call);
-       afs_free_call(call);
+       int u = atomic_inc_return(&call->usage);
+
+       trace_afs_call(call, afs_call_trace_work, u,
+                      atomic_read(&afs_outstanding_calls),
+                      __builtin_return_address(0));
+
+       INIT_WORK(&call->work, call->type->work);
+
+       if (!queue_work(afs_wq, &call->work))
+               afs_put_call(call);
+       return 0;
 }
 
 /*
@@ -200,25 +217,19 @@ struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
 {
        struct afs_call *call;
 
-       call = kzalloc(sizeof(*call), GFP_NOFS);
+       call = afs_alloc_call(type, GFP_NOFS);
        if (!call)
                goto nomem_call;
 
-       _debug("CALL %p{%s} [%d]",
-              call, type->name, atomic_read(&afs_outstanding_calls));
-       atomic_inc(&afs_outstanding_calls);
-
-       call->type = type;
-       call->request_size = request_size;
-       call->reply_max = reply_max;
-
        if (request_size) {
+               call->request_size = request_size;
                call->request = kmalloc(request_size, GFP_NOFS);
                if (!call->request)
                        goto nomem_free;
        }
 
        if (reply_max) {
+               call->reply_max = reply_max;
                call->buffer = kmalloc(reply_max, GFP_NOFS);
                if (!call->buffer)
                        goto nomem_free;
@@ -228,7 +239,7 @@ struct afs_call *afs_alloc_flat_call(const struct afs_call_type *type,
        return call;
 
 nomem_free:
-       afs_free_call(call);
+       afs_put_call(call);
 nomem_call:
        return NULL;
 }
@@ -315,7 +326,7 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg,
  * initiate a call
  */
 int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
-                 const struct afs_wait_mode *wait_mode)
+                 bool async)
 {
        struct sockaddr_rxrpc srx;
        struct rxrpc_call *rxcall;
@@ -332,8 +343,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
               call, call->type->name, key_serial(call->key),
               atomic_read(&afs_outstanding_calls));
 
-       call->wait_mode = wait_mode;
-       INIT_WORK(&call->async_work, afs_process_async_call);
+       call->async = async;
 
        memset(&srx, 0, sizeof(srx));
        srx.srx_family = AF_RXRPC;
@@ -347,7 +357,9 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
        /* create a call */
        rxcall = rxrpc_kernel_begin_call(afs_socket, &srx, call->key,
                                         (unsigned long) call, gfp,
-                                        wait_mode->notify_rx);
+                                        (async ?
+                                         afs_wake_up_async_call :
+                                         afs_wake_up_call_waiter));
        call->key = NULL;
        if (IS_ERR(rxcall)) {
                ret = PTR_ERR(rxcall);
@@ -386,12 +398,15 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
 
        /* at this point, an async call may no longer exist as it may have
         * already completed */
-       return wait_mode->wait(call);
+       if (call->async)
+               return -EINPROGRESS;
+
+       return afs_wait_for_call_to_complete(call);
 
 error_do_abort:
        rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD");
 error_kill_call:
-       afs_end_call(call);
+       afs_put_call(call);
        _leave(" = %d", ret);
        return ret;
 }
@@ -416,6 +431,8 @@ static void afs_deliver_to_call(struct afs_call *call)
                        ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall,
                                                     NULL, 0, &offset, false,
                                                     &call->abort_code);
+                       trace_afs_recv_data(call, 0, offset, false, ret);
+
                        if (ret == -EINPROGRESS || ret == -EAGAIN)
                                return;
                        if (ret == 1 || ret < 0) {
@@ -459,7 +476,7 @@ static void afs_deliver_to_call(struct afs_call *call)
 
 done:
        if (call->state == AFS_CALL_COMPLETE && call->incoming)
-               afs_end_call(call);
+               afs_put_call(call);
 out:
        _leave("");
        return;
@@ -516,7 +533,7 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
        }
 
        _debug("call complete");
-       afs_end_call(call);
+       afs_put_call(call);
        _leave(" = %d", ret);
        return ret;
 }
@@ -540,24 +557,25 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
                                   unsigned long call_user_ID)
 {
        struct afs_call *call = (struct afs_call *)call_user_ID;
+       int u;
 
+       trace_afs_notify_call(rxcall, call);
        call->need_attention = true;
-       queue_work(afs_async_calls, &call->async_work);
-}
 
-/*
- * put a call into asynchronous mode
- * - mustn't touch the call descriptor as the call may have completed by the
- *   time we get here
- */
-static int afs_dont_wait_for_call_to_complete(struct afs_call *call)
-{
-       _enter("");
-       return -EINPROGRESS;
+       u = __atomic_add_unless(&call->usage, 1, 0);
+       if (u != 0) {
+               trace_afs_call(call, afs_call_trace_wake, u,
+                              atomic_read(&afs_outstanding_calls),
+                              __builtin_return_address(0));
+
+               if (!queue_work(afs_async_calls, &call->async_work))
+                       afs_put_call(call);
+       }
 }
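
Note: __atomic_add_unless(&call->usage, 1, 0) takes a reference only while the count is still non-zero, so a call that has already begun teardown is never resurrected from the rxrpc notification path. A standalone C11 sketch of the same idiom (illustrative only, not the kernel helper):

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Take a reference only if the object is still live; returns false
	 * once the count has already dropped to zero (object being freed). */
	static bool get_ref_unless_zero(atomic_int *usage)
	{
		int old = atomic_load(usage);

		while (old != 0)
			if (atomic_compare_exchange_weak(usage, &old, old + 1))
				return true;
		return false;
	}
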
 
 /*
- * delete an asynchronous call
+ * Delete an asynchronous call.  The work item carries a ref to the call struct
+ * that we need to release.
  */
 static void afs_delete_async_call(struct work_struct *work)
 {
@@ -565,13 +583,14 @@ static void afs_delete_async_call(struct work_struct *work)
 
        _enter("");
 
-       afs_free_call(call);
+       afs_put_call(call);
 
        _leave("");
 }
 
 /*
- * perform processing on an asynchronous call
+ * Perform I/O processing on an asynchronous call.  The work item carries a ref
+ * to the call struct that we either need to release or to pass on.
  */
 static void afs_process_async_call(struct work_struct *work)
 {
@@ -584,21 +603,19 @@ static void afs_process_async_call(struct work_struct *work)
                afs_deliver_to_call(call);
        }
 
-       if (call->state == AFS_CALL_COMPLETE && call->wait_mode) {
-               if (call->wait_mode->async_complete)
-                       call->wait_mode->async_complete(call->reply,
-                                                       call->error);
+       if (call->state == AFS_CALL_COMPLETE) {
                call->reply = NULL;
 
-               /* kill the call */
-               afs_end_call_nofree(call);
-
-               /* we can't just delete the call because the work item may be
-                * queued */
+               /* We have two refs to release - one from the alloc and one
+                * queued with the work item - and we can't just deallocate the
+                * call because the work item may be queued again.
+                */
                call->async_work.func = afs_delete_async_call;
-               queue_work(afs_async_calls, &call->async_work);
+               if (!queue_work(afs_async_calls, &call->async_work))
+                       afs_put_call(call);
        }
 
+       afs_put_call(call);
        _leave("");
 }
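
Note: two references are live when an async call completes - the allocation's own ref and the one carried by the queued work item. Repointing async_work.func at afs_delete_async_call and requeueing defers the final put to a fresh work execution; if queue_work() fails, an instance is already pending and already owns that ref, so it is dropped directly. Distilled to its generic form (hypothetical names):

	obj->work.func = final_free_fn;       /* repoint the already-initialised item */
	if (!queue_work(wq, &obj->work))
		put_obj(obj);                 /* a pending instance already owns the ref */
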
 
@@ -618,15 +635,13 @@ static void afs_charge_preallocation(struct work_struct *work)
 
        for (;;) {
                if (!call) {
-                       call = kzalloc(sizeof(struct afs_call), GFP_KERNEL);
+                       call = afs_alloc_call(&afs_RXCMxxxx, GFP_KERNEL);
                        if (!call)
                                break;
 
-                       INIT_WORK(&call->async_work, afs_process_async_call);
-                       call->wait_mode = &afs_async_incoming_call;
-                       call->type = &afs_RXCMxxxx;
-                       init_waitqueue_head(&call->waitq);
+                       call->async = true;
                        call->state = AFS_CALL_AWAIT_OP_ID;
+                       init_waitqueue_head(&call->waitq);
                }
 
                if (rxrpc_kernel_charge_accept(afs_socket,
@@ -648,9 +663,8 @@ static void afs_rx_discard_new_call(struct rxrpc_call *rxcall,
 {
        struct afs_call *call = (struct afs_call *)user_call_ID;
 
-       atomic_inc(&afs_outstanding_calls);
        call->rxcall = NULL;
-       afs_free_call(call);
+       afs_put_call(call);
 }
 
 /*
@@ -659,7 +673,6 @@ static void afs_rx_discard_new_call(struct rxrpc_call *rxcall,
 static void afs_rx_new_call(struct sock *sk, struct rxrpc_call *rxcall,
                            unsigned long user_call_ID)
 {
-       atomic_inc(&afs_outstanding_calls);
        queue_work(afs_wq, &afs_charge_preallocation_work);
 }
 
@@ -689,6 +702,8 @@ static int afs_deliver_cm_op_id(struct afs_call *call)
        if (!afs_cm_incoming_call(call))
                return -ENOTSUPP;
 
+       trace_afs_cb_call(call);
+
        /* pass responsibility for the remainder of this message off to the
         * cache manager op */
        return call->type->deliver(call);
@@ -721,7 +736,6 @@ void afs_send_empty_reply(struct afs_call *call)
                rxrpc_kernel_abort_call(afs_socket, call->rxcall,
                                        RX_USER_ABORT, ENOMEM, "KOO");
        default:
-               afs_end_call(call);
                _leave(" [error]");
                return;
        }
@@ -760,7 +774,6 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
                rxrpc_kernel_abort_call(afs_socket, call->rxcall,
                                        RX_USER_ABORT, ENOMEM, "KOO");
        }
-       afs_end_call(call);
        _leave(" [error]");
 }
 
@@ -780,6 +793,7 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count,
        ret = rxrpc_kernel_recv_data(afs_socket, call->rxcall,
                                     buf, count, &call->offset,
                                     want_more, &call->abort_code);
+       trace_afs_recv_data(call, count, call->offset, want_more, ret);
        if (ret == 0 || ret == -EAGAIN)
                return ret;
 
index 94bcd97d22b86789e5e192b38d450eb784eaedaa..a5e4cc561b6cd52172c3a3f4a9480b2cbae225ab 100644 (file)
@@ -147,7 +147,7 @@ int afs_vl_get_entry_by_name(struct in_addr *addr,
                             struct key *key,
                             const char *volname,
                             struct afs_cache_vlocation *entry,
-                            const struct afs_wait_mode *wait_mode)
+                            bool async)
 {
        struct afs_call *call;
        size_t volnamesz, reqsz, padsz;
@@ -177,7 +177,7 @@ int afs_vl_get_entry_by_name(struct in_addr *addr,
                memset((void *) bp + volnamesz, 0, padsz);
 
        /* initiate the call */
-       return afs_make_call(addr, call, GFP_KERNEL, wait_mode);
+       return afs_make_call(addr, call, GFP_KERNEL, async);
 }
 
 /*
@@ -188,7 +188,7 @@ int afs_vl_get_entry_by_id(struct in_addr *addr,
                           afs_volid_t volid,
                           afs_voltype_t voltype,
                           struct afs_cache_vlocation *entry,
-                          const struct afs_wait_mode *wait_mode)
+                          bool async)
 {
        struct afs_call *call;
        __be32 *bp;
@@ -211,5 +211,5 @@ int afs_vl_get_entry_by_id(struct in_addr *addr,
        *bp   = htonl(voltype);
 
        /* initiate the call */
-       return afs_make_call(addr, call, GFP_KERNEL, wait_mode);
+       return afs_make_call(addr, call, GFP_KERNEL, async);
 }
index 45a86396fd2db350f097bc291b1bbffee70b621e..d7d8dd8c0b3187e6fe7eaed8e6300cb06826ff81 100644 (file)
@@ -53,7 +53,7 @@ static int afs_vlocation_access_vl_by_name(struct afs_vlocation *vl,
 
                /* attempt to access the VL server */
                ret = afs_vl_get_entry_by_name(&addr, key, vl->vldb.name, vldb,
-                                              &afs_sync_call);
+                                              false);
                switch (ret) {
                case 0:
                        goto out;
@@ -111,7 +111,7 @@ static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vl,
 
                /* attempt to access the VL server */
                ret = afs_vl_get_entry_by_id(&addr, key, volid, voltype, vldb,
-                                            &afs_sync_call);
+                                            false);
                switch (ret) {
                case 0:
                        goto out;
index 25cf4c3f4ff7ded56a2b0f0183b00584668104c7..dcb956143c864c09da845a6fe27129de3e0e9f14 100644 (file)
@@ -358,7 +358,7 @@ get_anyway:
                       server, ntohl(server->addr.s_addr));
 
                ret = afs_fs_fetch_file_status(server, key, vnode, NULL,
-                                              &afs_sync_call);
+                                              false);
 
        } while (!afs_volume_release_fileserver(vnode, server, ret));
 
@@ -393,7 +393,7 @@ no_server:
  * - TODO implement caching
  */
 int afs_vnode_fetch_data(struct afs_vnode *vnode, struct key *key,
-                        off_t offset, size_t length, struct page *page)
+                        struct afs_read *desc)
 {
        struct afs_server *server;
        int ret;
@@ -420,8 +420,8 @@ int afs_vnode_fetch_data(struct afs_vnode *vnode, struct key *key,
 
                _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
 
-               ret = afs_fs_fetch_data(server, key, vnode, offset, length,
-                                       page, &afs_sync_call);
+               ret = afs_fs_fetch_data(server, key, vnode, desc,
+                                       false);
 
        } while (!afs_volume_release_fileserver(vnode, server, ret));
 
@@ -477,7 +477,7 @@ int afs_vnode_create(struct afs_vnode *vnode, struct key *key,
                _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
 
                ret = afs_fs_create(server, key, vnode, name, mode, newfid,
-                                   newstatus, newcb, &afs_sync_call);
+                                   newstatus, newcb, false);
 
        } while (!afs_volume_release_fileserver(vnode, server, ret));
 
@@ -533,7 +533,7 @@ int afs_vnode_remove(struct afs_vnode *vnode, struct key *key, const char *name,
                _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
 
                ret = afs_fs_remove(server, key, vnode, name, isdir,
-                                   &afs_sync_call);
+                                   false);
 
        } while (!afs_volume_release_fileserver(vnode, server, ret));
 
@@ -595,7 +595,7 @@ int afs_vnode_link(struct afs_vnode *dvnode, struct afs_vnode *vnode,
                _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
 
                ret = afs_fs_link(server, key, dvnode, vnode, name,
-                                 &afs_sync_call);
+                                 false);
 
        } while (!afs_volume_release_fileserver(dvnode, server, ret));
 
@@ -659,7 +659,7 @@ int afs_vnode_symlink(struct afs_vnode *vnode, struct key *key,
                _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
 
                ret = afs_fs_symlink(server, key, vnode, name, content,
-                                    newfid, newstatus, &afs_sync_call);
+                                    newfid, newstatus, false);
 
        } while (!afs_volume_release_fileserver(vnode, server, ret));
 
@@ -729,7 +729,7 @@ int afs_vnode_rename(struct afs_vnode *orig_dvnode,
                _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
 
                ret = afs_fs_rename(server, key, orig_dvnode, orig_name,
-                                   new_dvnode, new_name, &afs_sync_call);
+                                   new_dvnode, new_name, false);
 
        } while (!afs_volume_release_fileserver(orig_dvnode, server, ret));
 
@@ -795,7 +795,7 @@ int afs_vnode_store_data(struct afs_writeback *wb, pgoff_t first, pgoff_t last,
                _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
 
                ret = afs_fs_store_data(server, wb, first, last, offset, to,
-                                       &afs_sync_call);
+                                       false);
 
        } while (!afs_volume_release_fileserver(vnode, server, ret));
 
@@ -847,7 +847,7 @@ int afs_vnode_setattr(struct afs_vnode *vnode, struct key *key,
 
                _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
 
-               ret = afs_fs_setattr(server, key, vnode, attr, &afs_sync_call);
+               ret = afs_fs_setattr(server, key, vnode, attr, false);
 
        } while (!afs_volume_release_fileserver(vnode, server, ret));
 
@@ -894,7 +894,7 @@ int afs_vnode_get_volume_status(struct afs_vnode *vnode, struct key *key,
 
                _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
 
-               ret = afs_fs_get_volume_status(server, key, vnode, vs, &afs_sync_call);
+               ret = afs_fs_get_volume_status(server, key, vnode, vs, false);
 
        } while (!afs_volume_release_fileserver(vnode, server, ret));
 
@@ -933,7 +933,7 @@ int afs_vnode_set_lock(struct afs_vnode *vnode, struct key *key,
 
                _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
 
-               ret = afs_fs_set_lock(server, key, vnode, type, &afs_sync_call);
+               ret = afs_fs_set_lock(server, key, vnode, type, false);
 
        } while (!afs_volume_release_fileserver(vnode, server, ret));
 
@@ -971,7 +971,7 @@ int afs_vnode_extend_lock(struct afs_vnode *vnode, struct key *key)
 
                _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
 
-               ret = afs_fs_extend_lock(server, key, vnode, &afs_sync_call);
+               ret = afs_fs_extend_lock(server, key, vnode, false);
 
        } while (!afs_volume_release_fileserver(vnode, server, ret));
 
@@ -1009,7 +1009,7 @@ int afs_vnode_release_lock(struct afs_vnode *vnode, struct key *key)
 
                _debug("USING SERVER: %08x\n", ntohl(server->addr.s_addr));
 
-               ret = afs_fs_release_lock(server, key, vnode, &afs_sync_call);
+               ret = afs_fs_release_lock(server, key, vnode, false);
 
        } while (!afs_volume_release_fileserver(vnode, server, ret));
 
index d142a2449e65a9962437ce38918b0b47d9124e58..546f9d01710b5b66706331968edc557306ed848e 100644 (file)
@@ -106,6 +106,7 @@ struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
        volume->cell            = params->cell;
        volume->vid             = vlocation->vldb.vid[params->type];
 
+       volume->bdi.ra_pages    = VM_MAX_READAHEAD*1024/PAGE_SIZE;
        ret = bdi_setup_and_register(&volume->bdi, "afs");
        if (ret)
                goto error_bdi;
index f865c3f05bea5074a91a547e4682523e8aab88be..c83c1a0e851fb34051c026bcea8e2a561299cf95 100644 (file)
@@ -86,19 +86,30 @@ void afs_put_writeback(struct afs_writeback *wb)
 static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
                         loff_t pos, struct page *page)
 {
+       struct afs_read *req;
        loff_t i_size;
        int ret;
-       int len;
 
        _enter(",,%llu", (unsigned long long)pos);
 
+       req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
+                     GFP_KERNEL);
+       if (!req)
+               return -ENOMEM;
+
+       atomic_set(&req->usage, 1);
+       req->pos = pos;
+       req->nr_pages = 1;
+       req->pages[0] = page;
+
        i_size = i_size_read(&vnode->vfs_inode);
        if (pos + PAGE_SIZE > i_size)
-               len = i_size - pos;
+               req->len = i_size - pos;
        else
-               len = PAGE_SIZE;
+               req->len = PAGE_SIZE;
 
-       ret = afs_vnode_fetch_data(vnode, key, pos, len, page);
+       ret = afs_vnode_fetch_data(vnode, key, req);
+       afs_put_read(req);
        if (ret < 0) {
                if (ret == -ENOENT) {
                        _debug("got NOENT from server"
index 94f50cac91c617b03025d4d596d8d9d65d2c185b..70e94170af85eb67e553d5fb3ce76e7c343bb384 100644 (file)
@@ -1420,26 +1420,32 @@ static struct shrinker glock_shrinker = {
  * @sdp: the filesystem
  * @bucket: the bucket
  *
+ * Note that the examiner function may be called multiple times on the
+ * same object, so it must be prepared to cope with that.
  */
 
 static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
 {
        struct gfs2_glock *gl;
-       struct rhash_head *pos;
-       const struct bucket_table *tbl;
-       int i;
+       struct rhashtable_iter iter;
 
-       rcu_read_lock();
-       tbl = rht_dereference_rcu(gl_hash_table.tbl, &gl_hash_table);
-       for (i = 0; i < tbl->size; i++) {
-               rht_for_each_entry_rcu(gl, pos, tbl, i, gl_node) {
+       rhashtable_walk_enter(&gl_hash_table, &iter);
+
+       do {
+               gl = ERR_PTR(rhashtable_walk_start(&iter));
+               if (gl)
+                       continue;
+
+               while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
                        if ((gl->gl_name.ln_sbd == sdp) &&
                            lockref_get_not_dead(&gl->gl_lockref))
                                examiner(gl);
-               }
-       }
-       rcu_read_unlock();
-       cond_resched();
+
+               rhashtable_walk_stop(&iter);
+       } while (cond_resched(), gl == ERR_PTR(-EAGAIN));
+
+       rhashtable_walk_exit(&iter);
 }
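
Note: rhashtable_walk_next() hands back ERR_PTR(-EAGAIN) when the table was resized mid-walk, which is why the loop above restarts (after a cond_resched()) rather than treating it as an error. For context, a hedged sketch of how entries reach such a table in the first place (hypothetical object; the rhashtable calls themselves are the stock API):

	struct myobj {				/* hypothetical entry type */
		u64 key;
		struct rhash_head node;
	};

	static const struct rhashtable_params myobj_params = {
		.key_len     = sizeof(u64),
		.key_offset  = offsetof(struct myobj, key),
		.head_offset = offsetof(struct myobj, node),
	};

	/* err = rhashtable_init(&table, &myobj_params);
	 * err = rhashtable_insert_fast(&table, &obj->node, myobj_params); */
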
 
 /**
diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h
new file mode 100644 (file)
index 0000000..697161f
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Device Tree constants for Microsemi VSC8531 PHY
+ *
+ * Author: Nagaraju Lakkaraju
+ *
+ * License: Dual MIT/GPL
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _DT_BINDINGS_MSCC_VSC8531_H
+#define _DT_BINDINGS_MSCC_VSC8531_H
+
+/* PHY LED Modes */
+#define VSC8531_LINK_ACTIVITY           0
+#define VSC8531_LINK_1000_ACTIVITY      1
+#define VSC8531_LINK_100_ACTIVITY       2
+#define VSC8531_LINK_10_ACTIVITY        3
+#define VSC8531_LINK_100_1000_ACTIVITY  4
+#define VSC8531_LINK_10_1000_ACTIVITY   5
+#define VSC8531_LINK_10_100_ACTIVITY    6
+#define VSC8531_DUPLEX_COLLISION        8
+#define VSC8531_COLLISION               9
+#define VSC8531_ACTIVITY                10
+#define VSC8531_AUTONEG_FAULT           12
+#define VSC8531_SERIAL_MODE             13
+#define VSC8531_FORCE_LED_OFF           14
+#define VSC8531_FORCE_LED_ON            15
+
+#endif
index f6505d83069d4b0969688ee2e05e30bd08227fe7..8b9d6fff002db113cb2233a2d80b3186ff041ef0 100644 (file)
                                              (1ULL << __bf_shf(_mask))); \
        })
 
+/**
+ * FIELD_FIT() - check if value fits in the field
+ * @_mask: shifted mask defining the field's length and position
+ * @_val:  value to test against the field
+ *
+ * Return: true if @_val can fit inside @_mask, false if @_val is too big.
+ */
+#define FIELD_FIT(_mask, _val)                                         \
+       ({                                                              \
+               __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: ");     \
+               !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
+       })
+
 /**
  * FIELD_PREP() - prepare a bitfield element
  * @_mask: shifted mask defining the field's length and position
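
Note: FIELD_FIT() reduces to checking that the value, shifted into position, sets no bits outside the mask. A self-contained illustration with an assumed 8-bit field at bit 8 (plain C; __builtin_ctz is the GCC/Clang builtin that mirrors what __bf_shf() computes):

	#include <stdio.h>
	#include <stdint.h>

	#define MASK 0x0000ff00u

	int main(void)
	{
		uint32_t val = 0x1ff;                 /* 9 bits: one too many */
		int shift = __builtin_ctz(MASK);      /* lowest set bit, i.e. 8 */
		int fits = !(((uint64_t)val << shift) & ~(uint64_t)MASK);

		printf("fits=%d\n", fits);            /* prints fits=0 */
		return 0;
	}
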
index 3ed1f3b1d594b67f1fab991492e5d73e10b81606..909fc033173a7c893ffe7113f0e32568392b76ae 100644 (file)
@@ -8,10 +8,12 @@
 #define _LINUX_BPF_H 1
 
 #include <uapi/linux/bpf.h>
+
 #include <linux/workqueue.h>
 #include <linux/file.h>
 #include <linux/percpu.h>
 #include <linux/err.h>
+#include <linux/rbtree_latch.h>
 
 struct perf_event;
 struct bpf_map;
@@ -69,14 +71,14 @@ enum bpf_arg_type {
        /* the following constraints used to prototype bpf_memcmp() and other
         * functions that access data on eBPF program stack
         */
-       ARG_PTR_TO_STACK,       /* any pointer to eBPF program stack */
-       ARG_PTR_TO_RAW_STACK,   /* any pointer to eBPF program stack, area does not
-                                * need to be initialized, helper function must fill
-                                * all bytes or clear them in error case.
+       ARG_PTR_TO_MEM,         /* pointer to valid memory (stack, packet, map value) */
+       ARG_PTR_TO_UNINIT_MEM,  /* pointer to memory does not need to be initialized,
+                                * helper function must fill all bytes or clear
+                                * them in error case.
                                 */
 
-       ARG_CONST_STACK_SIZE,   /* number of bytes accessed from stack */
-       ARG_CONST_STACK_SIZE_OR_ZERO, /* number of bytes accessed from stack or 0 */
+       ARG_CONST_SIZE,         /* number of bytes accessed from memory */
+       ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */
 
        ARG_PTR_TO_CTX,         /* pointer to context */
        ARG_ANYTHING,           /* any (initialized) argument is ok */
@@ -161,9 +163,10 @@ struct bpf_verifier_ops {
                                enum bpf_reg_type *reg_type);
        int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
                            const struct bpf_prog *prog);
-       u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
-                                 int src_reg, int ctx_off,
-                                 struct bpf_insn *insn, struct bpf_prog *prog);
+       u32 (*convert_ctx_access)(enum bpf_access_type type,
+                                 const struct bpf_insn *src,
+                                 struct bpf_insn *dst,
+                                 struct bpf_prog *prog);
 };
 
 struct bpf_prog_type_list {
@@ -176,6 +179,8 @@ struct bpf_prog_aux {
        atomic_t refcnt;
        u32 used_map_cnt;
        u32 max_ctx_offset;
+       struct latch_tree_node ksym_tnode;
+       struct list_head ksym_lnode;
        const struct bpf_verifier_ops *ops;
        struct bpf_map **used_maps;
        struct bpf_prog *prog;
diff --git a/include/linux/bpf_trace.h b/include/linux/bpf_trace.h
new file mode 100644 (file)
index 0000000..b22efbd
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef __LINUX_BPF_TRACE_H__
+#define __LINUX_BPF_TRACE_H__
+
+#include <trace/events/bpf.h>
+#include <trace/events/xdp.h>
+
+#endif /* __LINUX_BPF_TRACE_H__ */
index 4f7d8be9ddbf8de2fc0e657764e9dfe515852e3e..55e517130311980b36ad628054043130f88626af 100644 (file)
@@ -17,6 +17,7 @@
 #define PHY_ID_BCM5482                 0x0143bcb0
 #define PHY_ID_BCM5411                 0x00206070
 #define PHY_ID_BCM5421                 0x002060e0
+#define PHY_ID_BCM54210E               0x600d84a0
 #define PHY_ID_BCM5464                 0x002060b0
 #define PHY_ID_BCM5461                 0x002060c0
 #define PHY_ID_BCM54612E               0x03625e60
@@ -24,6 +25,7 @@
 #define PHY_ID_BCM57780                        0x03625d90
 
 #define PHY_ID_BCM7250                 0xae025280
+#define PHY_ID_BCM7278                 0xae0251a0
 #define PHY_ID_BCM7364                 0xae025260
 #define PHY_ID_BCM7366                 0x600d8490
 #define PHY_ID_BCM7346                 0x600d8650
@@ -31,6 +33,7 @@
 #define PHY_ID_BCM7425                 0x600d86b0
 #define PHY_ID_BCM7429                 0x600d8730
 #define PHY_ID_BCM7435                 0x600d8750
+#define PHY_ID_BCM74371                        0xae0252e0
 #define PHY_ID_BCM7439                 0x600d8480
 #define PHY_ID_BCM7439_2               0xae025080
 #define PHY_ID_BCM7445                 0x600d8510
 /*
  * AUXILIARY CONTROL SHADOW ACCESS REGISTERS.  (PHY REG 0x18)
  */
-#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL      0x0000
+#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL      0x00
 #define MII_BCM54XX_AUXCTL_ACTL_TX_6DB         0x0400
 #define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA      0x0800
 
-#define MII_BCM54XX_AUXCTL_MISC_WREN   0x8000
-#define MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW   0x0100
-#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX    0x0200
-#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC     0x7000
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC        0x0007
-#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT  12
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN  (1 << 8)
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN   (1 << 4)
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC                        0x07
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN   0x0010
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN  0x0100
+#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX            0x0200
+#define MII_BCM54XX_AUXCTL_MISC_WREN                   0x8000
 
+#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT  12
 #define MII_BCM54XX_AUXCTL_SHDWSEL_MASK        0x0007
 
 /*
index 5f5270941ba02ae78a81129dfff17d647ef3099c..141b05aade81ddb5974c18e837bd6ea1befe4fe5 100644 (file)
@@ -38,6 +38,13 @@ struct can_priv {
        struct can_bittiming bittiming, data_bittiming;
        const struct can_bittiming_const *bittiming_const,
                *data_bittiming_const;
+       const u16 *termination_const;
+       unsigned int termination_const_cnt;
+       u16 termination;
+       const u32 *bitrate_const;
+       unsigned int bitrate_const_cnt;
+       const u32 *data_bitrate_const;
+       unsigned int data_bitrate_const_cnt;
        struct can_clock clock;
 
        enum can_state state;
@@ -53,6 +60,7 @@ struct can_priv {
        int (*do_set_bittiming)(struct net_device *dev);
        int (*do_set_data_bittiming)(struct net_device *dev);
        int (*do_set_mode)(struct net_device *dev, enum can_mode mode);
+       int (*do_set_termination)(struct net_device *dev, u16 term);
        int (*do_get_state)(const struct net_device *dev,
                            enum can_state *state);
        int (*do_get_berr_counter)(const struct net_device *dev,
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
new file mode 100644 (file)
index 0000000..cb31683
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * linux/can/rx-offload.h
+ *
+ * Copyright (c) 2014 David Jander, Protonic Holland
+ * Copyright (c) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CAN_RX_OFFLOAD_H
+#define _CAN_RX_OFFLOAD_H
+
+#include <linux/netdevice.h>
+#include <linux/can.h>
+
+struct can_rx_offload {
+       struct net_device *dev;
+
+       unsigned int (*mailbox_read)(struct can_rx_offload *offload, struct can_frame *cf,
+                                    u32 *timestamp, unsigned int mb);
+
+       struct sk_buff_head skb_queue;
+       u32 skb_queue_len_max;
+
+       unsigned int mb_first;
+       unsigned int mb_last;
+
+       struct napi_struct napi;
+
+       bool inc;
+};
+
+int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload);
+int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
+int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
+int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
+int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb);
+void can_rx_offload_reset(struct can_rx_offload *offload);
+void can_rx_offload_del(struct can_rx_offload *offload);
+void can_rx_offload_enable(struct can_rx_offload *offload);
+
+static inline void can_rx_offload_schedule(struct can_rx_offload *offload)
+{
+       napi_schedule(&offload->napi);
+}
+
+static inline void can_rx_offload_disable(struct can_rx_offload *offload)
+{
+       napi_disable(&offload->napi);
+}
+
+#endif /* !_CAN_RX_OFFLOAD_H */
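
Note: a hedged sketch of how a CAN driver might wire this up (the mydrv_* names are hypothetical; the can_rx_offload_* call is one of those declared above). The interrupt handler reads the pending-mailbox bits and lets the offload core pull frames out in timestamp order via NAPI:

	static irqreturn_t mydrv_irq(int irq, void *dev_id)
	{
		struct mydrv_priv *priv = dev_id;          /* hypothetical priv */
		u64 pending = mydrv_read_pending(priv);    /* hypothetical register read */

		can_rx_offload_irq_offload_timestamp(&priv->offload, pending);
		return IRQ_HANDLED;
	}
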
index 491b4c0ca6333a217e90cc8841f8b2c5f5bc1dc4..bd684fc8ec1d83b80f393e9b2e5dc5cfb0776ad6 100644 (file)
@@ -88,6 +88,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
  *
  * @suspend:   Called when a device on this bus wants to go to sleep mode.
  * @resume:    Called to bring a device on this bus out of sleep mode.
+ * @num_vf:    Called to find out how many virtual functions a device on this
+ *             bus supports.
  * @pm:                Power management operations of this bus, callback the specific
  *             device driver's pm-ops.
  * @iommu_ops:  IOMMU specific operations for this bus, used to attach IOMMU
@@ -127,6 +129,8 @@ struct bus_type {
        int (*suspend)(struct device *dev, pm_message_t state);
        int (*resume)(struct device *dev);
 
+       int (*num_vf)(struct device *dev);
+
        const struct dev_pm_ops *pm;
 
        const struct iommu_ops *iommu_ops;
@@ -1140,6 +1144,13 @@ extern int device_online(struct device *dev);
 extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
 extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
 
+static inline int dev_num_vf(struct device *dev)
+{
+       if (dev->bus && dev->bus->num_vf)
+               return dev->bus->num_vf(dev);
+       return 0;
+}
+
 /*
  * Root device objects for grouping under /sys/devices
  */
index 6fec9e81bd70d9e0646a240e09b2395f74fd6f79..c62b709b1ce087b7891f5d9c76aa2940b7f4a9a9 100644 (file)
@@ -54,6 +54,11 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
 #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
 
+struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
+                                          unsigned int txqs,
+                                          unsigned int rxqs);
+#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
+
 struct sk_buff **eth_gro_receive(struct sk_buff **head,
                                 struct sk_buff *skb);
 int eth_gro_complete(struct sk_buff *skb, int nhoff);
@@ -396,6 +401,66 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2,
        return true;
 }
 
+/**
+ * ether_addr_to_u64 - Convert an Ethernet address into a u64 value.
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Return a u64 value of the address
+ */
+static inline u64 ether_addr_to_u64(const u8 *addr)
+{
+       u64 u = 0;
+       int i;
+
+       for (i = 0; i < ETH_ALEN; i++)
+               u = u << 8 | addr[i];
+
+       return u;
+}
+
+/**
+ * u64_to_ether_addr - Convert a u64 to an Ethernet address.
+ * @u: u64 to convert to an Ethernet MAC address
+ * @addr: Pointer to a six-byte array to contain the Ethernet address
+ */
+static inline void u64_to_ether_addr(u64 u, u8 *addr)
+{
+       int i;
+
+       for (i = ETH_ALEN - 1; i >= 0; i--) {
+               addr[i] = u & 0xff;
+               u = u >> 8;
+       }
+}
+
+/**
+ * eth_addr_dec - Decrement the given MAC address
+ *
+ * @addr: Pointer to a six-byte array containing the Ethernet address to decrement
+ */
+static inline void eth_addr_dec(u8 *addr)
+{
+       u64 u = ether_addr_to_u64(addr);
+
+       u--;
+       u64_to_ether_addr(u, addr);
+}
+
+/**
+ * ether_addr_greater - Compare two Ethernet addresses
+ * @addr1: Pointer to a six-byte array containing the Ethernet address
+ * @addr2: Pointer other six-byte array containing the Ethernet address
+ *
+ * Compare two Ethernet addresses, returns true addr1 is greater than addr2
+ */
+static inline bool ether_addr_greater(const u8 *addr1, const u8 *addr2)
+{
+       u64 u1 = ether_addr_to_u64(addr1);
+       u64 u2 = ether_addr_to_u64(addr2);
+
+       return u1 > u2;
+}
+
 /**
  * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
  * @dev: Pointer to a device structure
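
Note: these helpers make MAC-address arithmetic and ordering trivial by round-tripping through u64. A standalone check of the widening (plain C; ETH_ALEN is 6):

	#include <stdio.h>
	#include <stdint.h>

	#define ETH_ALEN 6

	static uint64_t ether_addr_to_u64(const uint8_t *addr)
	{
		uint64_t u = 0;

		for (int i = 0; i < ETH_ALEN; i++)
			u = u << 8 | addr[i];          /* big-endian byte order */
		return u;
	}

	int main(void)
	{
		uint8_t a[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
		uint8_t b[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x54 };

		printf("a > b: %d\n", ether_addr_to_u64(a) > ether_addr_to_u64(b));
		return 0;                              /* prints: a > b: 1 */
	}
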
index e4eb2546339afbf2764f5ba335e1e66947fc5af3..0c1cc9143cb230316d54b1e1277c5e1ab037cb93 100644 (file)
@@ -54,6 +54,12 @@ struct bpf_prog_aux;
 #define BPF_REG_AX             MAX_BPF_REG
 #define MAX_BPF_JIT_REG                (MAX_BPF_REG + 1)
 
+/* As per nm, we expose JITed images as a text (code) section for
+ * kallsyms. That way, tools like perf can find it to match
+ * addresses.
+ */
+#define BPF_SYM_ELF_TYPE       't'
+
 /* BPF program can access up to 512 bytes of stack space. */
 #define MAX_BPF_STACK  512
 
@@ -555,6 +561,11 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
        set_memory_rw((unsigned long)fp, fp->pages);
 }
+
+static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
+{
+       set_memory_rw((unsigned long)hdr, hdr->pages);
+}
 #else
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
@@ -563,8 +574,21 @@ static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
 }
+
+static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
+{
+}
 #endif /* CONFIG_DEBUG_SET_MODULE_RONX */
 
+static inline struct bpf_binary_header *
+bpf_jit_binary_hdr(const struct bpf_prog *fp)
+{
+       unsigned long real_start = (unsigned long)fp->bpf_func;
+       unsigned long addr = real_start & PAGE_MASK;
+
+       return (void *)addr;
+}
+
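
Note: bpf_jit_binary_hdr() relies on the JIT image's header sitting at the start of its page-aligned allocation, so masking any address inside the image with PAGE_MASK recovers the header. A small userspace illustration of the mask (4 KiB pages assumed):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_MASK (~(uintptr_t)0xfff)          /* 4 KiB pages assumed */

	int main(void)
	{
		uintptr_t bpf_func = 0x7f0000001040;   /* hypothetical image address */
		uintptr_t hdr = bpf_func & PAGE_MASK;

		printf("header at %#lx\n", (unsigned long)hdr); /* ...001000 */
		return 0;
	}
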
 int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
 static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 {
@@ -607,6 +631,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
+void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_helper_changes_pkt_data(void *func);
 
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
@@ -616,6 +641,7 @@ void bpf_warn_invalid_xdp_action(u32 act);
 #ifdef CONFIG_BPF_JIT
 extern int bpf_jit_enable;
 extern int bpf_jit_harden;
+extern int bpf_jit_kallsyms;
 
 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
 
@@ -625,7 +651,6 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
                     bpf_jit_fill_hole_t bpf_fill_ill_insns);
 void bpf_jit_binary_free(struct bpf_binary_header *hdr);
 
-void bpf_jit_compile(struct bpf_prog *fp);
 void bpf_jit_free(struct bpf_prog *fp);
 
 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
@@ -651,6 +676,11 @@ static inline bool bpf_jit_is_ebpf(void)
 # endif
 }
 
+static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
+{
+       return fp->jited && bpf_jit_is_ebpf();
+}
+
 static inline bool bpf_jit_blinding_enabled(void)
 {
        /* These are the prerequisites, should someone ever have the
@@ -668,15 +698,91 @@ static inline bool bpf_jit_blinding_enabled(void)
 
        return true;
 }
-#else
-static inline void bpf_jit_compile(struct bpf_prog *fp)
+
+static inline bool bpf_jit_kallsyms_enabled(void)
+{
+       /* There are a couple of corner cases where kallsyms should
+        * not be enabled, e.g. when JIT hardening is on.
+        */
+       if (bpf_jit_harden)
+               return false;
+       if (!bpf_jit_kallsyms)
+               return false;
+       if (bpf_jit_kallsyms == 1)
+               return true;
+
+       return false;
+}
+
+const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
+                                unsigned long *off, char *sym);
+bool is_bpf_text_address(unsigned long addr);
+int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+                   char *sym);
+
+static inline const char *
+bpf_address_lookup(unsigned long addr, unsigned long *size,
+                  unsigned long *off, char **modname, char *sym)
 {
+       const char *ret = __bpf_address_lookup(addr, size, off, sym);
+
+       if (ret && modname)
+               *modname = NULL;
+       return ret;
+}
+
+void bpf_prog_kallsyms_add(struct bpf_prog *fp);
+void bpf_prog_kallsyms_del(struct bpf_prog *fp);
+
+#else /* CONFIG_BPF_JIT */
+
+static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
+{
+       return false;
 }
 
 static inline void bpf_jit_free(struct bpf_prog *fp)
 {
        bpf_prog_unlock_free(fp);
 }
+
+static inline bool bpf_jit_kallsyms_enabled(void)
+{
+       return false;
+}
+
+static inline const char *
+__bpf_address_lookup(unsigned long addr, unsigned long *size,
+                    unsigned long *off, char *sym)
+{
+       return NULL;
+}
+
+static inline bool is_bpf_text_address(unsigned long addr)
+{
+       return false;
+}
+
+static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
+                                 char *type, char *sym)
+{
+       return -ERANGE;
+}
+
+static inline const char *
+bpf_address_lookup(unsigned long addr, unsigned long *size,
+                  unsigned long *off, char **modname, char *sym)
+{
+       return NULL;
+}
+
+static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
+{
+}
+
+static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
+{
+}
 #endif /* CONFIG_BPF_JIT */
 
 #define BPF_ANC                BIT(15)
index fe849329511a7b4e46ddb6d61441b8675131eb1b..0dd9498c694f95c166508e40896c94ccc724075c 100644 (file)
@@ -185,6 +185,8 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
 
 /* number of user priorities 802.11 uses */
 #define IEEE80211_NUM_UPS              8
+/* number of ACs */
+#define IEEE80211_NUM_ACS              4
 
 #define IEEE80211_QOS_CTL_LEN          2
 /* 1d tag mask */
@@ -1041,8 +1043,9 @@ struct ieee80211_mgmt {
        } u;
 } __packed __aligned(2);
 
-/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */
+/* Supported rates membership selectors */
 #define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
+#define BSS_MEMBERSHIP_SELECTOR_VHT_PHY        126
 
 /* mgmt header + 1 byte category code */
 #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
@@ -2322,31 +2325,33 @@ enum ieee80211_sa_query_action {
 };
 
 
+#define SUITE(oui, id) (((oui) << 8) | (id))
+
 /* cipher suite selectors */
-#define WLAN_CIPHER_SUITE_USE_GROUP    0x000FAC00
-#define WLAN_CIPHER_SUITE_WEP40                0x000FAC01
-#define WLAN_CIPHER_SUITE_TKIP         0x000FAC02
-/* reserved:                           0x000FAC03 */
-#define WLAN_CIPHER_SUITE_CCMP         0x000FAC04
-#define WLAN_CIPHER_SUITE_WEP104       0x000FAC05
-#define WLAN_CIPHER_SUITE_AES_CMAC     0x000FAC06
-#define WLAN_CIPHER_SUITE_GCMP         0x000FAC08
-#define WLAN_CIPHER_SUITE_GCMP_256     0x000FAC09
-#define WLAN_CIPHER_SUITE_CCMP_256     0x000FAC0A
-#define WLAN_CIPHER_SUITE_BIP_GMAC_128 0x000FAC0B
-#define WLAN_CIPHER_SUITE_BIP_GMAC_256 0x000FAC0C
-#define WLAN_CIPHER_SUITE_BIP_CMAC_256 0x000FAC0D
-
-#define WLAN_CIPHER_SUITE_SMS4         0x00147201
+#define WLAN_CIPHER_SUITE_USE_GROUP    SUITE(0x000FAC, 0)
+#define WLAN_CIPHER_SUITE_WEP40                SUITE(0x000FAC, 1)
+#define WLAN_CIPHER_SUITE_TKIP         SUITE(0x000FAC, 2)
+/* reserved:                           SUITE(0x000FAC, 3) */
+#define WLAN_CIPHER_SUITE_CCMP         SUITE(0x000FAC, 4)
+#define WLAN_CIPHER_SUITE_WEP104       SUITE(0x000FAC, 5)
+#define WLAN_CIPHER_SUITE_AES_CMAC     SUITE(0x000FAC, 6)
+#define WLAN_CIPHER_SUITE_GCMP         SUITE(0x000FAC, 8)
+#define WLAN_CIPHER_SUITE_GCMP_256     SUITE(0x000FAC, 9)
+#define WLAN_CIPHER_SUITE_CCMP_256     SUITE(0x000FAC, 10)
+#define WLAN_CIPHER_SUITE_BIP_GMAC_128 SUITE(0x000FAC, 11)
+#define WLAN_CIPHER_SUITE_BIP_GMAC_256 SUITE(0x000FAC, 12)
+#define WLAN_CIPHER_SUITE_BIP_CMAC_256 SUITE(0x000FAC, 13)
+
+#define WLAN_CIPHER_SUITE_SMS4         SUITE(0x001472, 1)
 
 /* AKM suite selectors */
-#define WLAN_AKM_SUITE_8021X           0x000FAC01
-#define WLAN_AKM_SUITE_PSK             0x000FAC02
-#define WLAN_AKM_SUITE_8021X_SHA256    0x000FAC05
-#define WLAN_AKM_SUITE_PSK_SHA256      0x000FAC06
-#define WLAN_AKM_SUITE_TDLS            0x000FAC07
-#define WLAN_AKM_SUITE_SAE             0x000FAC08
-#define WLAN_AKM_SUITE_FT_OVER_SAE     0x000FAC09
+#define WLAN_AKM_SUITE_8021X           SUITE(0x000FAC, 1)
+#define WLAN_AKM_SUITE_PSK             SUITE(0x000FAC, 2)
+#define WLAN_AKM_SUITE_8021X_SHA256    SUITE(0x000FAC, 5)
+#define WLAN_AKM_SUITE_PSK_SHA256      SUITE(0x000FAC, 6)
+#define WLAN_AKM_SUITE_TDLS            SUITE(0x000FAC, 7)
+#define WLAN_AKM_SUITE_SAE             SUITE(0x000FAC, 8)
+#define WLAN_AKM_SUITE_FT_OVER_SAE     SUITE(0x000FAC, 9)
 
 #define WLAN_MAX_KEY_LEN               32
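
Note: the SUITE() macro packs the 24-bit OUI above an 8-bit selector, so the new spellings expand to exactly the old literals. A quick standalone check:

	#include <stdio.h>

	#define SUITE(oui, id) (((oui) << 8) | (id))

	int main(void)
	{
		/* must match the former literal 0x000FAC04 (CCMP) */
		printf("%#010x\n", SUITE(0x000FAC, 4));
		return 0;
	}
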
 
index c6587c01d9514b0074e3a63e40ce951f93178d4b..c5847dc75a937a5ceed4a762ffcc9f2644b499bc 100644 (file)
@@ -46,6 +46,8 @@ struct br_ip_list {
 #define BR_LEARNING_SYNC       BIT(9)
 #define BR_PROXYARP_WIFI       BIT(10)
 #define BR_MCAST_FLOOD         BIT(11)
+#define BR_MULTICAST_TO_UNICAST        BIT(12)
+#define BR_VLAN_TUNNEL         BIT(13)
 
 #define BR_DEFAULT_AGEING_TIME (300 * HZ)
 
index 4316aa173dde363f136a082ef453ced40d65ae4e..46df7e565d6f88f4b99137b4353bd5046f4f2076 100644 (file)
@@ -66,8 +66,6 @@ struct dlci_local
 
 struct frad_local
 {
-   struct net_device_stats stats;
-
    /* devices which this FRAD is slaved to */
    struct net_device     *master[CONFIG_DLCI_MAX];
    short             dlci[CONFIG_DLCI_MAX];
index a4ccc3122f9389ce93dda50d388057cfcd1b1bbb..c9ec1343d1879cc463ffe50afcee143623350b5d 100644 (file)
@@ -9,19 +9,6 @@
 #include <net/netlink.h>
 #include <linux/u64_stats_sync.h>
 
-#if IS_ENABLED(CONFIG_MACVTAP)
-struct socket *macvtap_get_socket(struct file *);
-#else
-#include <linux/err.h>
-#include <linux/errno.h>
-struct file;
-struct socket;
-static inline struct socket *macvtap_get_socket(struct file *f)
-{
-       return ERR_PTR(-EINVAL);
-}
-#endif /* CONFIG_MACVTAP */
-
 struct macvlan_port;
 struct macvtap_queue;
 
@@ -29,7 +16,7 @@ struct macvtap_queue;
  * Maximum times a macvtap device can be opened. This can be used to
  * configure the number of receive queues, e.g. for multiqueue virtio.
  */
-#define MAX_MACVTAP_QUEUES     256
+#define MAX_TAP_QUEUES 256
 
 #define MACVLAN_MC_FILTER_BITS 8
 #define MACVLAN_MC_FILTER_SZ   (1 << MACVLAN_MC_FILTER_BITS)
@@ -49,7 +36,7 @@ struct macvlan_dev {
        enum macvlan_mode       mode;
        u16                     flags;
        /* This array tracks active taps. */
-       struct macvtap_queue    __rcu *taps[MAX_MACVTAP_QUEUES];
+       struct tap_queue        __rcu *taps[MAX_TAP_QUEUES];
        /* This list tracks all taps (both enabled and disabled) */
        struct list_head        queue_list;
        int                     numvtaps;
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
new file mode 100644 (file)
index 0000000..3482c3c
--- /dev/null
@@ -0,0 +1,75 @@
+#ifndef _LINUX_IF_TAP_H_
+#define _LINUX_IF_TAP_H_
+
+#if IS_ENABLED(CONFIG_TAP)
+struct socket *tap_get_socket(struct file *);
+#else
+#include <linux/err.h>
+#include <linux/errno.h>
+struct file;
+struct socket;
+static inline struct socket *tap_get_socket(struct file *f)
+{
+       return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_TAP */
+
+#include <net/sock.h>
+#include <linux/skb_array.h>
+
+#define MAX_TAP_QUEUES 256
+
+struct tap_queue;
+
+struct tap_dev {
+       struct net_device       *dev;
+       u16                     flags;
+       /* This array tracks active taps. */
+       struct tap_queue    __rcu *taps[MAX_TAP_QUEUES];
+       /* This list tracks all taps (both enabled and disabled) */
+       struct list_head        queue_list;
+       int                     numvtaps;
+       int                     numqueues;
+       netdev_features_t       tap_features;
+       int                     minor;
+
+       void (*update_features)(struct tap_dev *tap, netdev_features_t features);
+       void (*count_tx_dropped)(struct tap_dev *tap);
+       void (*count_rx_dropped)(struct tap_dev *tap);
+};
+
+/*
+ * A tap queue is the central object of the tap module; it connects
+ * an open character device to a virtual interface. There can be
+ * multiple queues on one interface, which map back to queues
+ * implemented in hardware on the underlying device.
+ *
+ * tap_proto is used to allocate queues through the sock allocation
+ * mechanism.
+ *
+ */
+
+struct tap_queue {
+       struct sock sk;
+       struct socket sock;
+       struct socket_wq wq;
+       int vnet_hdr_sz;
+       struct tap_dev __rcu *tap;
+       struct file *file;
+       unsigned int flags;
+       u16 queue_index;
+       bool enabled;
+       struct list_head next;
+       struct skb_array skb_array;
+};
+
+rx_handler_result_t tap_handle_frame(struct sk_buff **pskb);
+void tap_del_queues(struct tap_dev *tap);
+int tap_get_minor(dev_t major, struct tap_dev *tap);
+void tap_free_minor(dev_t major, struct tap_dev *tap);
+int tap_queue_resize(struct tap_dev *tap);
+int tap_create_cdev(struct cdev *tap_cdev,
+                   dev_t *tap_major, const char *device_name);
+void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
+
+#endif /*_LINUX_IF_TAP_H_*/
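
Note: a hedged sketch of how a consumer such as macvtap is expected to wire this in (an assumption drawn from the declarations above, not from this hunk): the owner embeds a struct tap_dev and registers tap_handle_frame as the net device's rx handler so received skbs are steered into the tap queues.

	/* hypothetical consumer; tap_handle_frame is declared above */
	struct mydrv {
		struct net_device *dev;
		struct tap_dev tap;
	};

	/* err = netdev_rx_handler_register(mydrv->dev, tap_handle_frame,
	 *                                  &mydrv->tap); */
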
index 671d014e642937e5a8d9c553668c5376488d6457..71be5b330d21305af23f8f7e1779988930755ed8 100644 (file)
@@ -69,6 +69,7 @@ struct ipv6_devconf {
        __s32           seg6_require_hmac;
 #endif
        __u32           enhanced_dad;
+       __u32           addr_gen_mode;
 
        struct ctl_table_header *sysctl_header;
 };
index d1039ecaf94fdbea21a25c0fa4a5ea351683dd91..ae537fa462160efff72574107dd34a8b1e29375c 100644 (file)
@@ -526,6 +526,19 @@ static inline void list_splice_tail_init(struct list_head *list,
        for (; &pos->member != (head);                                  \
             pos = list_next_entry(pos, member))
 
+/**
+ * list_for_each_entry_from_reverse - iterate backwards over list of given type
+ *                                    from the current point
+ * @pos:       the type * to use as a loop cursor.
+ * @head:      the head for your list.
+ * @member:    the name of the list_head within the struct.
+ *
+ * Iterate backwards over list of given type, continuing from current position.
+ */
+#define list_for_each_entry_from_reverse(pos, head, member)            \
+       for (; &pos->member != (head);                                  \
+            pos = list_prev_entry(pos, member))
+
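
Note: list_for_each_entry_from_reverse() complements list_for_each_entry_continue_reverse() by starting at the current cursor itself rather than at the previous entry. A short usage sketch (hypothetical list and rollback helper), e.g. undoing entries already processed when a forward pass fails mid-list:

	/* pos still points at the entry where the forward pass stopped */
	list_for_each_entry_from_reverse(pos, &head, list)
		undo(pos);                     /* hypothetical rollback helper */
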
 /**
  * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
  * @pos:       the type * to use as a loop cursor.
index a57f0dfb6db7f2c0e0e268397b261fe8b45efa89..4055cf8cc978cce05a0c07b3f181f18ce39f1b3f 100644 (file)
 #define MARVELL_PHY_ID_88E1116R                0x01410e40
 #define MARVELL_PHY_ID_88E1510         0x01410dd0
 #define MARVELL_PHY_ID_88E1540         0x01410eb0
+#define MARVELL_PHY_ID_88E1545         0x01410ea0
 #define MARVELL_PHY_ID_88E3016         0x01410e60
 
+/* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do
+ * not have a model ID. So the switch driver traps reads to the ID2
+ * register and returns the switch family ID.
+ */
+#define MARVELL_PHY_ID_88E6390         0x01410f90
+
 /* struct phy_device dev_flags definitions */
 #define MARVELL_PHY_M1145_FLAGS_RESISTANCE     0x00000001
 #define MARVELL_PHY_M1118_DNS323_LEDS          0x00000002
index bf9d1d75069353b84d9f2ff53c49bd7c6d67bdfa..ca08ab16ecdc9b78e36e039a381035cafecc660c 100644 (file)
@@ -10,6 +10,7 @@
 #define __LINUX_MDIO_H__
 
 #include <uapi/linux/mdio.h>
+#include <linux/mod_devicetable.h>
 
 struct mii_bus;
 
@@ -29,6 +30,7 @@ struct mdio_device {
 
        const struct dev_pm_ops *pm_ops;
        struct mii_bus *bus;
+       char modalias[MDIO_NAME_SIZE];
 
        int (*bus_match)(struct device *dev, struct device_driver *drv);
        void (*device_free)(struct mdio_device *mdiodev);
@@ -71,6 +73,7 @@ int mdio_device_register(struct mdio_device *mdiodev);
 void mdio_device_remove(struct mdio_device *mdiodev);
 int mdio_driver_register(struct mdio_driver *drv);
 void mdio_driver_unregister(struct mdio_driver *drv);
+int mdio_device_bus_match(struct device *dev, struct device_driver *drv);
 
 static inline bool mdio_phy_id_is_c45(int phy_id)
 {
@@ -130,6 +133,10 @@ extern int mdio45_nway_restart(const struct mdio_if_info *mdio);
 extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
                                      struct ethtool_cmd *ecmd,
                                      u32 npage_adv, u32 npage_lpa);
+extern void
+mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio,
+                                  struct ethtool_link_ksettings *cmd,
+                                  u32 npage_adv, u32 npage_lpa);
 
 /**
  * mdio45_ethtool_gset - get settings for ETHTOOL_GSET
@@ -147,6 +154,23 @@ static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
        mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0);
 }
 
+/**
+ * mdio45_ethtool_ksettings_get - get settings for ETHTOOL_GLINKSETTINGS
+ * @mdio: MDIO interface
+ * @cmd: Ethtool request structure
+ *
+ * Since the CSRs for auto-negotiation using next pages are not fully
+ * standardised, this function does not attempt to decode them.  Use
+ * mdio45_ethtool_ksettings_get_npage() to specify advertisement bits
+ * from next pages.
+ */
+static inline void
+mdio45_ethtool_ksettings_get(const struct mdio_if_info *mdio,
+                            struct ethtool_link_ksettings *cmd)
+{
+       mdio45_ethtool_ksettings_get_npage(mdio, cmd, 0, 0);
+}
+
 extern int mdio_mii_ioctl(const struct mdio_if_info *mdio,
                          struct mii_ioctl_data *mii_data, int cmd);
 
@@ -244,7 +268,7 @@ bool mdiobus_is_registered_device(struct mii_bus *bus, int addr);
 struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr);
 
 /**
- * module_mdio_driver() - Helper macro for registering mdio drivers
+ * mdio_module_driver() - Helper macro for registering mdio drivers
  *
  * Helper macro for MDIO drivers which do not do anything special in module
  * init/exit. Each module may only use this macro once, and calling it
index 6533c16e27ad7fb03926286ec94055d26b26f615..7e66e4f62858f395cd000226e9580785b03a4cf1 100644 (file)
@@ -1374,6 +1374,7 @@ int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
 int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
                          u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
+int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu);
 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
                           u8 promisc);
 int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
@@ -1539,8 +1540,13 @@ enum mlx4_ptys_proto {
        MLX4_PTYS_EN = 1<<2,
 };
 
+enum mlx4_ptys_flags {
+       MLX4_PTYS_AN_DISABLE_CAP   = 1 << 5,
+       MLX4_PTYS_AN_DISABLE_ADMIN = 1 << 6,
+};
+
 struct mlx4_ptys_reg {
-       u8 resrvd1;
+       u8 flags;
        u8 local_port;
        u8 resrvd2;
        u8 proto_mask;
index 7c3c0d3aca37631b3afb1c69c628adb0ef298f29..95898847c7d4420c70773893c260f0c2cbeef450 100644 (file)
@@ -42,13 +42,13 @@ struct mlx5_core_cq {
        int                     cqe_sz;
        __be32                 *set_ci_db;
        __be32                 *arm_db;
+       struct mlx5_uars_page  *uar;
        atomic_t                refcount;
        struct completion       free;
        unsigned                vector;
        unsigned int            irqn;
        void (*comp)            (struct mlx5_core_cq *);
        void (*event)           (struct mlx5_core_cq *, enum mlx5_event);
-       struct mlx5_uar        *uar;
        u32                     cons_index;
        unsigned                arm_sn;
        struct mlx5_rsc_debug   *dbg;
@@ -144,7 +144,6 @@ enum {
 
 static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
                               void __iomem *uar_page,
-                              spinlock_t *doorbell_lock,
                               u32 cons_index)
 {
        __be32 doorbell[2];
@@ -164,7 +163,7 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
        doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
        doorbell[1] = cpu_to_be32(cq->cqn);
 
-       mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
+       mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL);
 }
 
 int mlx5_init_cq_table(struct mlx5_core_dev *dev);
index 52b437431c6a642b04d33da532cfcd722c69fa0f..dd9a263ed368d5476b06a3464bbfada0436cf6e2 100644 (file)
 
 /* insert a value to a struct */
 #define MLX5_SET(typ, p, fld, v) do { \
+       u32 _v = v; \
        BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
        *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
        cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
-                    (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
+                    (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \
                     << __mlx5_dw_bit_off(typ, fld))); \
 } while (0)
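
Note: the new u32 _v local pins the macro argument to a fixed 32-bit width (and evaluates it exactly once) before the mask-and-shift, so callers passing wider expressions get a well-defined truncation. A plain-C illustration of the width pinning (assumed values):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t big = 0x1ffffffffULL;     /* wider than the field */
		uint32_t _v = big;                 /* explicit one-time truncation */

		printf("%#x\n", _v & 0xffff);      /* prints 0xffff */
		return 0;
	}
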
 
@@ -212,10 +213,20 @@ enum {
 };
 
 enum {
-       MLX5_BF_REGS_PER_PAGE           = 4,
-       MLX5_MAX_UAR_PAGES              = 1 << 8,
-       MLX5_NON_FP_BF_REGS_PER_PAGE    = 2,
-       MLX5_MAX_UUARS  = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
+       MLX5_ADAPTER_PAGE_SHIFT         = 12,
+       MLX5_ADAPTER_PAGE_SIZE          = 1 << MLX5_ADAPTER_PAGE_SHIFT,
+};
+
+enum {
+       MLX5_BFREGS_PER_UAR             = 4,
+       MLX5_MAX_UARS                   = 1 << 8,
+       MLX5_NON_FP_BFREGS_PER_UAR      = 2,
+       MLX5_FP_BFREGS_PER_UAR          = MLX5_BFREGS_PER_UAR -
+                                         MLX5_NON_FP_BFREGS_PER_UAR,
+       MLX5_MAX_BFREGS                 = MLX5_MAX_UARS *
+                                         MLX5_NON_FP_BFREGS_PER_UAR,
+       MLX5_UARS_IN_PAGE               = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
+       MLX5_NON_FP_BFREGS_IN_PAGE      = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
 };
 
 enum {
@@ -279,6 +290,7 @@ enum mlx5_event {
        MLX5_EVENT_TYPE_GPIO_EVENT         = 0x15,
        MLX5_EVENT_TYPE_PORT_MODULE_EVENT  = 0x16,
        MLX5_EVENT_TYPE_REMOTE_CONFIG      = 0x19,
+       MLX5_EVENT_TYPE_PPS_EVENT          = 0x25,
 
        MLX5_EVENT_TYPE_DB_BF_CONGESTION   = 0x1a,
        MLX5_EVENT_TYPE_STALL_EVENT        = 0x1b,
@@ -388,11 +400,6 @@ enum {
        MLX5_MAX_PAGE_SHIFT             = 31
 };
 
-enum {
-       MLX5_ADAPTER_PAGE_SHIFT         = 12,
-       MLX5_ADAPTER_PAGE_SIZE          = 1 << MLX5_ADAPTER_PAGE_SHIFT,
-};
-
 enum {
        MLX5_CAP_OFF_CMDIF_CSUM         = 46,
 };
@@ -534,7 +541,9 @@ struct mlx5_eqe_page_fault {
                        __be16  wqe_index;
                        u16     reserved2;
                        __be16  packet_length;
-                       u8      reserved3[12];
+                       __be32  token;
+                       u8      reserved4[8];
+                       __be32  pftype_wq;
                } __packed wqe;
                struct {
                        __be32  r_key;
@@ -542,9 +551,9 @@ struct mlx5_eqe_page_fault {
                        __be16  packet_length;
                        __be32  rdma_op_len;
                        __be64  rdma_va;
+                       __be32  pftype_token;
                } __packed rdma;
        } __packed;
-       __be32 flags_qpn;
 } __packed;
 
 struct mlx5_eqe_vport_change {
@@ -562,6 +571,22 @@ struct mlx5_eqe_port_module {
        u8        error_type;
 } __packed;
 
+struct mlx5_eqe_pps {
+       u8              rsvd0[3];
+       u8              pin;
+       u8              rsvd1[4];
+       union {
+               struct {
+                       __be32          time_sec;
+                       __be32          time_nsec;
+               };
+               struct {
+                       __be64          time_stamp;
+               };
+       };
+       u8              rsvd2[12];
+} __packed;
+
 union ev_data {
        __be32                          raw[7];
        struct mlx5_eqe_cmd             cmd;
@@ -576,6 +601,7 @@ union ev_data {
        struct mlx5_eqe_page_fault      page_fault;
        struct mlx5_eqe_vport_change    vport_change;
        struct mlx5_eqe_port_module     port_module;
+       struct mlx5_eqe_pps             pps;
 } __packed;
 
 struct mlx5_eqe {
@@ -945,38 +971,54 @@ enum mlx5_cap_type {
        MLX5_CAP_NUM
 };
 
+enum mlx5_pcam_reg_groups {
+       MLX5_PCAM_REGS_5000_TO_507F                 = 0x0,
+};
+
+enum mlx5_pcam_feature_groups {
+       MLX5_PCAM_FEATURE_ENHANCED_FEATURES         = 0x0,
+};
+
+enum mlx5_mcam_reg_groups {
+       MLX5_MCAM_REGS_FIRST_128                    = 0x0,
+};
+
+enum mlx5_mcam_feature_groups {
+       MLX5_MCAM_FEATURE_ENHANCED_FEATURES         = 0x0,
+};
+
 /* GET Dev Caps macros */
 #define MLX5_CAP_GEN(mdev, cap) \
-       MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
+       MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
 
 #define MLX5_CAP_GEN_MAX(mdev, cap) \
-       MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
+       MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
 
 #define MLX5_CAP_ETH(mdev, cap) \
        MLX5_GET(per_protocol_networking_offload_caps,\
-                mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+                mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
 
 #define MLX5_CAP_ETH_MAX(mdev, cap) \
        MLX5_GET(per_protocol_networking_offload_caps,\
-                mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+                mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
 
 #define MLX5_CAP_ROCE(mdev, cap) \
-       MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
+       MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
 
 #define MLX5_CAP_ROCE_MAX(mdev, cap) \
-       MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
+       MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)
 
 #define MLX5_CAP_ATOMIC(mdev, cap) \
-       MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
+       MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)
 
 #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
-       MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
+       MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)
 
 #define MLX5_CAP_FLOWTABLE(mdev, cap) \
-       MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+       MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
 
 #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
-       MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+       MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
 
 #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
        MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
@@ -998,11 +1040,11 @@ enum mlx5_cap_type {
 
 #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
        MLX5_GET(flow_table_eswitch_cap, \
-                mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+                mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
 
 #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
        MLX5_GET(flow_table_eswitch_cap, \
-                mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+                mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
 
 #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
        MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
@@ -1024,21 +1066,27 @@ enum mlx5_cap_type {
 
 #define MLX5_CAP_ESW(mdev, cap) \
        MLX5_GET(e_switch_cap, \
-                mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
+                mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
 
 #define MLX5_CAP_ESW_MAX(mdev, cap) \
        MLX5_GET(e_switch_cap, \
-                mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
+                mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
 
 #define MLX5_CAP_ODP(mdev, cap)\
-       MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
+       MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
 
 #define MLX5_CAP_VECTOR_CALC(mdev, cap) \
        MLX5_GET(vector_calc_cap, \
-                mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap)
+                mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
 
 #define MLX5_CAP_QOS(mdev, cap)\
-       MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap)
+       MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
+
+#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
+       MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
+
+#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
+       MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
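
Usage sketch for the new CAM helpers (illustrative; the flags are made up): gate optional counter groups on the firmware advertising them through the feature bits defined further down in mlx5_ifc.h:

	if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
		use_statistical_ppcnt = true;	/* hypothetical flag */
	if (MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		use_mpcnt = true;		/* hypothetical flag */
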
 
 enum {
        MLX5_CMD_STAT_OK                        = 0x0,
@@ -1068,9 +1116,14 @@ enum {
        MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
        MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
        MLX5_PHYSICAL_LAYER_COUNTERS_GROUP    = 0x12,
+       MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
        MLX5_INFINIBAND_PORT_COUNTERS_GROUP   = 0x20,
 };
 
+enum {
+       MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP       = 0x0,
+};
+
 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
 {
        if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
index afc78a3f4462e3f2eb2e350abc010ae39a181e7f..0787de28f2fcd6372226d7e28c8920c1d7a8d6dc 100644 (file)
@@ -68,10 +68,12 @@ static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
 {
        unsigned long flags;
 
-       spin_lock_irqsave(doorbell_lock, flags);
+       if (doorbell_lock)
+               spin_lock_irqsave(doorbell_lock, flags);
        __raw_writel((__force u32) val[0], dest);
        __raw_writel((__force u32) val[1], dest + 4);
-       spin_unlock_irqrestore(doorbell_lock, flags);
+       if (doorbell_lock)
+               spin_unlock_irqrestore(doorbell_lock, flags);
 }
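
With the lock made optional, a caller that already serializes its 64-bit doorbell writes may now pass a NULL lock; sketch, names hypothetical:

	mlx5_write64(ctrl_dwords, bfreg->map + bfreg->offset, NULL);
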
 
 #endif
index 735b36335f297e8babe3f1d2089acd11a735bc43..1bc4641734da943e80af43af5c82e4eda0a62319 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/vmalloc.h>
 #include <linux/radix-tree.h>
 #include <linux/workqueue.h>
+#include <linux/mempool.h>
 #include <linux/interrupt.h>
 
 #include <linux/mlx5/device.h>
@@ -83,6 +84,7 @@ enum {
        MLX5_EQ_VEC_PAGES        = 0,
        MLX5_EQ_VEC_CMD          = 1,
        MLX5_EQ_VEC_ASYNC        = 2,
+       MLX5_EQ_VEC_PFAULT       = 3,
        MLX5_EQ_VEC_COMP_BASE,
 };
 
@@ -119,10 +121,15 @@ enum {
        MLX5_REG_PVLC            = 0x500f,
        MLX5_REG_PCMR            = 0x5041,
        MLX5_REG_PMLP            = 0x5002,
+       MLX5_REG_PCAM            = 0x507f,
        MLX5_REG_NODE_DESC       = 0x6001,
        MLX5_REG_HOST_ENDIANNESS = 0x7004,
        MLX5_REG_MCIA            = 0x9014,
        MLX5_REG_MLCR            = 0x902b,
+       MLX5_REG_MPCNT           = 0x9051,
+       MLX5_REG_MTPPS           = 0x9053,
+       MLX5_REG_MTPPSE          = 0x9054,
+       MLX5_REG_MCAM            = 0x907f,
 };
 
 enum mlx5_dcbx_oper_mode {
@@ -170,6 +177,7 @@ enum mlx5_dev_event {
        MLX5_DEV_EVENT_PKEY_CHANGE,
        MLX5_DEV_EVENT_GUID_CHANGE,
        MLX5_DEV_EVENT_CLIENT_REREG,
+       MLX5_DEV_EVENT_PPS,
 };
 
 enum mlx5_port_status {
@@ -177,36 +185,26 @@ enum mlx5_port_status {
        MLX5_PORT_DOWN      = 2,
 };
 
-struct mlx5_uuar_info {
-       struct mlx5_uar        *uars;
-       int                     num_uars;
-       int                     num_low_latency_uuars;
-       unsigned long          *bitmap;
+enum mlx5_eq_type {
+       MLX5_EQ_TYPE_COMP,
+       MLX5_EQ_TYPE_ASYNC,
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       MLX5_EQ_TYPE_PF,
+#endif
+};
+
+struct mlx5_bfreg_info {
+       u32                    *sys_pages;
+       int                     num_low_latency_bfregs;
        unsigned int           *count;
-       struct mlx5_bf         *bfs;
 
        /*
-        * protect uuar allocation data structs
+        * protect bfreg allocation data structs
         */
        struct mutex            lock;
        u32                     ver;
-};
-
-struct mlx5_bf {
-       void __iomem           *reg;
-       void __iomem           *regreg;
-       int                     buf_size;
-       struct mlx5_uar        *uar;
-       unsigned long           offset;
-       int                     need_lock;
-       /* protect blue flame buffer selection when needed
-        */
-       spinlock_t              lock;
-
-       /* serialize 64 bit writes when done as two 32 bit accesses
-        */
-       spinlock_t              lock32;
-       int                     uuarn;
+       bool                    lib_uar_4k;
+       u32                     num_sys_pages;
 };
 
 struct mlx5_cmd_first {
@@ -332,6 +330,14 @@ struct mlx5_eq_tasklet {
        spinlock_t lock;
 };
 
+struct mlx5_eq_pagefault {
+       struct work_struct       work;
+       /* Pagefaults lock */
+       spinlock_t               lock;
+       struct workqueue_struct *wq;
+       mempool_t               *pool;
+};
+
 struct mlx5_eq {
        struct mlx5_core_dev   *dev;
        __be32 __iomem         *doorbell;
@@ -345,7 +351,13 @@ struct mlx5_eq {
        struct list_head        list;
        int                     index;
        struct mlx5_rsc_debug   *dbg;
-       struct mlx5_eq_tasklet  tasklet_ctx;
+       enum mlx5_eq_type       type;
+       union {
+               struct mlx5_eq_tasklet   tasklet_ctx;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+               struct mlx5_eq_pagefault pf_ctx;
+#endif
+       };
 };
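
Because the tasklet and page-fault contexts now overlay in a union, handlers must dispatch on the new eq->type before touching either member; schematic sketch (pfault and the tasklet_ctx.task member are assumed from the driver side):

	switch (eq->type) {
	case MLX5_EQ_TYPE_COMP:
		tasklet_schedule(&eq->tasklet_ctx.task);
		break;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	case MLX5_EQ_TYPE_PF:
		queue_work(eq->pf_ctx.wq, &pfault->work);
		break;
#endif
	default:
		break;
	}
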
 
 struct mlx5_core_psv {
@@ -369,13 +381,21 @@ struct mlx5_core_sig_ctx {
        u32                     sigerr_count;
 };
 
+enum {
+       MLX5_MKEY_MR = 1,
+       MLX5_MKEY_MW,
+};
+
 struct mlx5_core_mkey {
        u64                     iova;
        u64                     size;
        u32                     key;
        u32                     pd;
+       u32                     type;
 };
 
+#define MLX5_24BIT_MASK                ((1 << 24) - 1)
+
 enum mlx5_res_type {
        MLX5_RES_QP     = MLX5_EVENT_QUEUE_TYPE_QP,
        MLX5_RES_RQ     = MLX5_EVENT_QUEUE_TYPE_RQ,
@@ -410,20 +430,47 @@ struct mlx5_eq_table {
        struct mlx5_eq          pages_eq;
        struct mlx5_eq          async_eq;
        struct mlx5_eq          cmd_eq;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       struct mlx5_eq          pfault_eq;
+#endif
        int                     num_comp_vectors;
        /* protect EQs list
         */
        spinlock_t              lock;
 };
 
-struct mlx5_uar {
-       u32                     index;
-       struct list_head        bf_list;
-       unsigned                free_bf_bmap;
-       void __iomem           *bf_map;
+struct mlx5_uars_page {
        void __iomem           *map;
+       bool                    wc;
+       u32                     index;
+       struct list_head        list;
+       unsigned int            bfregs;
+       unsigned long          *reg_bitmap; /* for non fast path bf regs */
+       unsigned long          *fp_bitmap;
+       unsigned int            reg_avail;
+       unsigned int            fp_avail;
+       struct kref             ref_count;
+       struct mlx5_core_dev   *mdev;
+};
+
+struct mlx5_bfreg_head {
+       /* protect blue flame registers allocations */
+       struct mutex            lock;
+       struct list_head        list;
+};
+
+struct mlx5_bfreg_data {
+       struct mlx5_bfreg_head  reg_head;
+       struct mlx5_bfreg_head  wc_head;
 };
 
+struct mlx5_sq_bfreg {
+       void __iomem           *map;
+       struct mlx5_uars_page  *up;
+       bool                    wc;
+       u32                     index;
+       unsigned int            offset;
+};
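
The mlx5_alloc_bfreg()/mlx5_free_bfreg() pair declared later in this header replaces the removed UAR/BF allocators; minimal consumer sketch:

	struct mlx5_sq_bfreg bfreg;
	int err;

	err = mlx5_alloc_bfreg(mdev, &bfreg, true /* map_wc */, false /* fast_path */);
	if (err)
		return err;
	/* ring doorbells through bfreg.map at bfreg.offset ... */
	mlx5_free_bfreg(mdev, &bfreg);
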
 
 struct mlx5_core_health {
        struct health_buffer __iomem   *health;
@@ -496,6 +543,7 @@ struct mlx5_fc_stats {
 
 struct mlx5_eswitch;
 struct mlx5_lag;
+struct mlx5_pagefault;
 
 struct mlx5_rl_entry {
        u32                     rate;
@@ -542,8 +590,6 @@ struct mlx5_priv {
        struct mlx5_eq_table    eq_table;
        struct msix_entry       *msix_arr;
        struct mlx5_irq_info    *irq_info;
-       struct mlx5_uuar_info   uuari;
-       MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock);
 
        /* pages stuff */
        struct workqueue_struct *pg_wq;
@@ -600,6 +646,16 @@ struct mlx5_priv {
        struct mlx5_rl_table            rl_table;
 
        struct mlx5_port_module_event_stats  pme_stats;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       void                  (*pfault)(struct mlx5_core_dev *dev,
+                                       void *context,
+                                       struct mlx5_pagefault *pfault);
+       void                   *pfault_ctx;
+       struct srcu_struct      pfault_srcu;
+#endif
+       struct mlx5_bfreg_data          bfregs;
+       struct mlx5_uars_page          *uar;
 };
 
 enum mlx5_device_state {
@@ -618,13 +674,56 @@ enum mlx5_pci_status {
        MLX5_PCI_STATUS_ENABLED,
 };
 
+enum mlx5_pagefault_type_flags {
+       MLX5_PFAULT_REQUESTOR = 1 << 0,
+       MLX5_PFAULT_WRITE     = 1 << 1,
+       MLX5_PFAULT_RDMA      = 1 << 2,
+};
+
+/* Contains the details of a pagefault. */
+struct mlx5_pagefault {
+       u32                     bytes_committed;
+       u32                     token;
+       u8                      event_subtype;
+       u8                      type;
+       union {
+               /* Initiator or send message responder pagefault details. */
+               struct {
+                       /* Received packet size, only valid for responders. */
+                       u32     packet_size;
+                       /*
+                        * Number of resource holding WQE, depends on type.
+                        */
+                       u32     wq_num;
+                       /*
+                        * WQE index. Refers to either the send queue or
+                        * receive queue, according to event_subtype.
+                        */
+                       u16     wqe_index;
+               } wqe;
+               /* RDMA responder pagefault details */
+               struct {
+                       u32     r_key;
+                       /*
+                        * Received packet size, minimal size page fault
+                        * resolution required for forward progress.
+                        */
+                       u32     packet_size;
+                       u32     rdma_op_len;
+                       u64     rdma_va;
+               } rdma;
+       };
+
+       struct mlx5_eq         *eq;
+       struct work_struct      work;
+};
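
Sketch of completing a fault (illustrative): once resolved or failed, the handler resumes the queue with the token, WQ number and type carried here, through the mlx5_core_page_fault_resume() prototype added below:

	err = mlx5_core_page_fault_resume(dev, pfault->token,
					  pfault->wqe.wq_num,
					  pfault->type, error);
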
+
 struct mlx5_td {
        struct list_head tirs_list;
        u32              tdn;
 };
 
 struct mlx5e_resources {
-       struct mlx5_uar            cq_uar;
        u32                        pdn;
        struct mlx5_td             td;
        struct mlx5_core_mkey      mkey;
@@ -639,8 +738,12 @@ struct mlx5_core_dev {
        char                    board_id[MLX5_BOARD_ID_LEN];
        struct mlx5_cmd         cmd;
        struct mlx5_port_caps   port_caps[MLX5_MAX_PORTS];
-       u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
-       u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+       struct {
+               u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+               u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+               u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
+               u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+       } caps;
        phys_addr_t             iseg_base;
        struct mlx5_init_seg __iomem *iseg;
        enum mlx5_device_state  state;
@@ -814,11 +917,6 @@ void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
 int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
 int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
-int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
-int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari);
-int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar,
-                      bool map_wc);
-void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar);
 void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
@@ -878,15 +976,13 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
 void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
-#endif
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
 struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
 void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
 int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
-                      int nent, u64 mask, const char *name, struct mlx5_uar *uar);
+                      int nent, u64 mask, const char *name,
+                      enum mlx5_eq_type type);
 int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 int mlx5_start_eqs(struct mlx5_core_dev *dev);
 int mlx5_stop_eqs(struct mlx5_core_dev *dev);
@@ -925,12 +1021,19 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
                        struct mlx5_odp_caps *odp_caps);
 int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
                             u8 port_num, void *out, size_t sz);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
+                               u32 wq_num, u8 type, int error);
+#endif
 
 int mlx5_init_rl_table(struct mlx5_core_dev *dev);
 void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
 int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
 void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
 bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
+                    bool map_wc, bool fast_path);
+void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
 
 static inline int fw_initializing(struct mlx5_core_dev *dev)
 {
@@ -958,7 +1061,7 @@ enum {
 };
 
 enum {
-       MAX_MR_CACHE_ENTRIES    = 16,
+       MAX_MR_CACHE_ENTRIES    = 21,
 };
 
 enum {
@@ -973,6 +1076,9 @@ struct mlx5_interface {
        void                    (*detach)(struct mlx5_core_dev *dev, void *context);
        void                    (*event)(struct mlx5_core_dev *dev, void *context,
                                         enum mlx5_dev_event event, unsigned long param);
+       void                    (*pfault)(struct mlx5_core_dev *dev,
+                                         void *context,
+                                         struct mlx5_pagefault *pfault);
        void *                  (*get_dev)(void *context);
        int                     protocol;
        struct list_head        list;
@@ -987,6 +1093,8 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
 int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
 bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
 struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
+void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
 
 struct mlx5_profile {
        u64     mask;
index a852e9db6f0d5ec809cfc0e5245d1ee3fff7c36b..afcd4736d8df7b57450e6d94c1b67bea7f55e561 100644 (file)
@@ -328,7 +328,7 @@ struct mlx5_ifc_odp_per_transport_service_cap_bits {
        u8         receive[0x1];
        u8         write[0x1];
        u8         read[0x1];
-       u8         reserved_at_4[0x1];
+       u8         atomic[0x1];
        u8         srq_receive[0x1];
        u8         reserved_at_6[0x1a];
 };
@@ -365,8 +365,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
        u8         ip_protocol[0x8];
        u8         ip_dscp[0x6];
        u8         ip_ecn[0x2];
-       u8         vlan_tag[0x1];
-       u8         reserved_at_91[0x1];
+       u8         cvlan_tag[0x1];
+       u8         svlan_tag[0x1];
        u8         frag[0x1];
        u8         reserved_at_93[0x4];
        u8         tcp_flags[0x9];
@@ -398,9 +398,11 @@ struct mlx5_ifc_fte_match_set_misc_bits {
        u8         inner_second_cfi[0x1];
        u8         inner_second_vid[0xc];
 
-       u8         outer_second_vlan_tag[0x1];
-       u8         inner_second_vlan_tag[0x1];
-       u8         reserved_at_62[0xe];
+       u8         outer_second_cvlan_tag[0x1];
+       u8         inner_second_cvlan_tag[0x1];
+       u8         outer_second_svlan_tag[0x1];
+       u8         inner_second_svlan_tag[0x1];
+       u8         reserved_at_64[0xc];
        u8         gre_protocol[0x10];
 
        u8         gre_key_h[0x18];
@@ -545,7 +547,9 @@ struct mlx5_ifc_e_switch_cap_bits {
 struct mlx5_ifc_qos_cap_bits {
        u8         packet_pacing[0x1];
        u8         esw_scheduling[0x1];
-       u8         reserved_at_2[0x1e];
+       u8         esw_bw_share[0x1];
+       u8         esw_rate_limit[0x1];
+       u8         reserved_at_4[0x1c];
 
        u8         reserved_at_20[0x20];
 
@@ -573,7 +577,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
        u8         lro_cap[0x1];
        u8         lro_psh_flag[0x1];
        u8         lro_time_stamp[0x1];
-       u8         reserved_at_5[0x3];
+       u8         reserved_at_5[0x2];
+       u8         wqe_vlan_insert[0x1];
        u8         self_lb_en_modifiable[0x1];
        u8         reserved_at_9[0x2];
        u8         max_lso_cap[0x5];
@@ -782,11 +787,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         log_max_eq[0x4];
 
        u8         max_indirection[0x8];
-       u8         reserved_at_108[0x1];
+       u8         fixed_buffer_size[0x1];
        u8         log_max_mrw_sz[0x7];
        u8         reserved_at_110[0x2];
        u8         log_max_bsf_list_size[0x6];
-       u8         reserved_at_118[0x2];
+       u8         umr_extended_translation_offset[0x1];
+       u8         null_mkey[0x1];
        u8         log_max_klm_list_size[0x6];
 
        u8         reserved_at_120[0xa];
@@ -799,10 +805,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         reserved_at_150[0xa];
        u8         log_max_ra_res_qp[0x6];
 
-       u8         pad_cap[0x1];
+       u8         end_pad[0x1];
        u8         cc_query_allowed[0x1];
        u8         cc_modify_allowed[0x1];
-       u8         reserved_at_163[0xd];
+       u8         start_pad[0x1];
+       u8         cache_line_128byte[0x1];
+       u8         reserved_at_163[0xb];
        u8         gid_table_size[0x10];
 
        u8         out_of_seq_cnt[0x1];
@@ -823,18 +831,21 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         nic_flow_table[0x1];
        u8         eswitch_flow_table[0x1];
        u8         early_vf_enable[0x1];
-       u8         reserved_at_1a9[0x2];
+       u8         mcam_reg[0x1];
+       u8         pcam_reg[0x1];
        u8         local_ca_ack_delay[0x5];
        u8         port_module_event[0x1];
-       u8         reserved_at_1b0[0x1];
+       u8         reserved_at_1b1[0x1];
        u8         ports_check[0x1];
-       u8         reserved_at_1b2[0x1];
+       u8         reserved_at_1b3[0x1];
        u8         disable_link_up[0x1];
        u8         beacon_led[0x1];
        u8         port_type[0x2];
        u8         num_ports[0x8];
 
-       u8         reserved_at_1c0[0x3];
+       u8         reserved_at_1c0[0x1];
+       u8         pps[0x1];
+       u8         pps_modify[0x1];
        u8         log_max_msg[0x5];
        u8         reserved_at_1c8[0x4];
        u8         max_tc[0x4];
@@ -858,7 +869,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 
        u8         compact_address_vector[0x1];
        u8         striding_rq[0x1];
-       u8         reserved_at_201[0x2];
+       u8         reserved_at_202[0x2];
        u8         ipoib_basic_offloads[0x1];
        u8         reserved_at_205[0xa];
        u8         drain_sigerr[0x1];
@@ -904,7 +915,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         uc[0x1];
        u8         rc[0x1];
 
-       u8         reserved_at_240[0xa];
+       u8         uar_4k[0x1];
+       u8         reserved_at_241[0x9];
        u8         uar_sz[0x6];
        u8         reserved_at_250[0x8];
        u8         log_pg_sz[0x8];
@@ -996,7 +1008,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         device_frequency_mhz[0x20];
        u8         device_frequency_khz[0x20];
 
-       u8         reserved_at_500[0x80];
+       u8         reserved_at_500[0x20];
+       u8         num_of_uars_per_page[0x20];
+       u8         reserved_at_540[0x40];
 
        u8         reserved_at_580[0x3f];
        u8         cqe_compression[0x1];
@@ -1009,10 +1023,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         rndv_offload_rc[0x1];
        u8         rndv_offload_dc[0x1];
        u8         log_tag_matching_list_sz[0x5];
-       u8         reserved_at_5e8[0x3];
+       u8         reserved_at_5f8[0x3];
        u8         log_max_xrq[0x5];
 
-       u8         reserved_at_5f0[0x200];
+       u8         reserved_at_600[0x200];
 };
 
 enum mlx5_flow_destination_type {
@@ -1375,6 +1389,42 @@ struct mlx5_ifc_phys_layer_cntrs_bits {
        u8         reserved_at_640[0x180];
 };
 
+struct mlx5_ifc_phys_layer_statistical_cntrs_bits {
+       u8         time_since_last_clear_high[0x20];
+
+       u8         time_since_last_clear_low[0x20];
+
+       u8         phy_received_bits_high[0x20];
+
+       u8         phy_received_bits_low[0x20];
+
+       u8         phy_symbol_errors_high[0x20];
+
+       u8         phy_symbol_errors_low[0x20];
+
+       u8         phy_corrected_bits_high[0x20];
+
+       u8         phy_corrected_bits_low[0x20];
+
+       u8         phy_corrected_bits_lane0_high[0x20];
+
+       u8         phy_corrected_bits_lane0_low[0x20];
+
+       u8         phy_corrected_bits_lane1_high[0x20];
+
+       u8         phy_corrected_bits_lane1_low[0x20];
+
+       u8         phy_corrected_bits_lane2_high[0x20];
+
+       u8         phy_corrected_bits_lane2_low[0x20];
+
+       u8         phy_corrected_bits_lane3_high[0x20];
+
+       u8         phy_corrected_bits_lane3_low[0x20];
+
+       u8         reserved_at_200[0x5c0];
+};
+
 struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
        u8         symbol_error_counter[0x10];
 
@@ -1757,6 +1807,30 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
        u8         reserved_at_4c0[0x300];
 };
 
+struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
+       u8         life_time_counter_high[0x20];
+
+       u8         life_time_counter_low[0x20];
+
+       u8         rx_errors[0x20];
+
+       u8         tx_errors[0x20];
+
+       u8         l0_to_recovery_eieos[0x20];
+
+       u8         l0_to_recovery_ts[0x20];
+
+       u8         l0_to_recovery_framing[0x20];
+
+       u8         l0_to_recovery_retrain[0x20];
+
+       u8         crc_error_dllp[0x20];
+
+       u8         crc_error_tlp[0x20];
+
+       u8         reserved_at_140[0x680];
+};
+
 struct mlx5_ifc_cmd_inter_comp_event_bits {
        u8         command_completion_vector[0x20];
 
@@ -2495,6 +2569,7 @@ enum {
        MLX5_MKC_ACCESS_MODE_PA    = 0x0,
        MLX5_MKC_ACCESS_MODE_MTT   = 0x1,
        MLX5_MKC_ACCESS_MODE_KLMS  = 0x2,
+       MLX5_MKC_ACCESS_MODE_KSM   = 0x3,
 };
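
The new KSM mode slots into the usual mkey context setup; illustrative only:

	void *mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_KSM);
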
 
 struct mlx5_ifc_mkc_bits {
@@ -2918,6 +2993,12 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
        struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
        struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
        struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+       struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs;
+       u8         reserved_at_0[0x7c0];
+};
+
+union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
+       struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
        u8         reserved_at_0[0x7c0];
 };
 
@@ -3597,6 +3678,10 @@ struct mlx5_ifc_query_special_contexts_out_bits {
        u8         dump_fill_mkey[0x20];
 
        u8         resd_lkey[0x20];
+
+       u8         null_mkey[0x20];
+
+       u8         reserved_at_a0[0x60];
 };
 
 struct mlx5_ifc_query_special_contexts_in_bits {
@@ -4689,12 +4774,11 @@ struct mlx5_ifc_page_fault_resume_in_bits {
 
        u8         error[0x1];
        u8         reserved_at_41[0x4];
-       u8         rdma[0x1];
-       u8         read_write[0x1];
-       u8         req_res[0x1];
-       u8         qpn[0x18];
+       u8         page_fault_type[0x3];
+       u8         wq_number[0x18];
 
-       u8         reserved_at_60[0x20];
+       u8         reserved_at_60[0x8];
+       u8         token[0x18];
 };
 
 struct mlx5_ifc_nop_out_bits {
@@ -7240,6 +7324,18 @@ struct mlx5_ifc_ppcnt_reg_bits {
        union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
 };
 
+struct mlx5_ifc_mpcnt_reg_bits {
+       u8         reserved_at_0[0x8];
+       u8         pcie_index[0x8];
+       u8         reserved_at_10[0xa];
+       u8         grp[0x6];
+
+       u8         clr[0x1];
+       u8         reserved_at_21[0x1f];
+
+       union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set;
+};
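
Reading the new PCIe counter group goes through the MPCNT id added to the register list earlier; sketch, assuming the generic mlx5_core_access_reg() accessor:

	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)]  = {0};
	u32 out[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);

	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
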
+
 struct mlx5_ifc_ppad_reg_bits {
        u8         reserved_at_0[0x3];
        u8         single_mac[0x1];
@@ -7469,6 +7565,63 @@ struct mlx5_ifc_peir_reg_bits {
        u8         error_type[0x8];
 };
 
+struct mlx5_ifc_pcam_enhanced_features_bits {
+       u8         reserved_at_0[0x7e];
+
+       u8         ppcnt_discard_group[0x1];
+       u8         ppcnt_statistical_group[0x1];
+};
+
+struct mlx5_ifc_pcam_reg_bits {
+       u8         reserved_at_0[0x8];
+       u8         feature_group[0x8];
+       u8         reserved_at_10[0x8];
+       u8         access_reg_group[0x8];
+
+       u8         reserved_at_20[0x20];
+
+       union {
+               u8         reserved_at_0[0x80];
+       } port_access_reg_cap_mask;
+
+       u8         reserved_at_c0[0x80];
+
+       union {
+               struct mlx5_ifc_pcam_enhanced_features_bits enhanced_features;
+               u8         reserved_at_0[0x80];
+       } feature_cap_mask;
+
+       u8         reserved_at_1c0[0xc0];
+};
+
+struct mlx5_ifc_mcam_enhanced_features_bits {
+       u8         reserved_at_0[0x7f];
+
+       u8         pcie_performance_group[0x1];
+};
+
+struct mlx5_ifc_mcam_reg_bits {
+       u8         reserved_at_0[0x8];
+       u8         feature_group[0x8];
+       u8         reserved_at_10[0x8];
+       u8         access_reg_group[0x8];
+
+       u8         reserved_at_20[0x20];
+
+       union {
+               u8         reserved_at_0[0x80];
+       } mng_access_reg_cap_mask;
+
+       u8         reserved_at_c0[0x80];
+
+       union {
+               struct mlx5_ifc_mcam_enhanced_features_bits enhanced_features;
+               u8         reserved_at_0[0x80];
+       } mng_feature_cap_mask;
+
+       u8         reserved_at_1c0[0x80];
+};
+
 struct mlx5_ifc_pcap_reg_bits {
        u8         reserved_at_0[0x8];
        u8         local_port[0x8];
@@ -7813,6 +7966,60 @@ struct mlx5_ifc_initial_seg_bits {
        u8         reserved_at_80a0[0x17fc0];
 };
 
+struct mlx5_ifc_mtpps_reg_bits {
+       u8         reserved_at_0[0xc];
+       u8         cap_number_of_pps_pins[0x4];
+       u8         reserved_at_10[0x4];
+       u8         cap_max_num_of_pps_in_pins[0x4];
+       u8         reserved_at_18[0x4];
+       u8         cap_max_num_of_pps_out_pins[0x4];
+
+       u8         reserved_at_20[0x24];
+       u8         cap_pin_3_mode[0x4];
+       u8         reserved_at_48[0x4];
+       u8         cap_pin_2_mode[0x4];
+       u8         reserved_at_50[0x4];
+       u8         cap_pin_1_mode[0x4];
+       u8         reserved_at_58[0x4];
+       u8         cap_pin_0_mode[0x4];
+
+       u8         reserved_at_60[0x4];
+       u8         cap_pin_7_mode[0x4];
+       u8         reserved_at_68[0x4];
+       u8         cap_pin_6_mode[0x4];
+       u8         reserved_at_70[0x4];
+       u8         cap_pin_5_mode[0x4];
+       u8         reserved_at_78[0x4];
+       u8         cap_pin_4_mode[0x4];
+
+       u8         reserved_at_80[0x80];
+
+       u8         enable[0x1];
+       u8         reserved_at_101[0xb];
+       u8         pattern[0x4];
+       u8         reserved_at_110[0x4];
+       u8         pin_mode[0x4];
+       u8         pin[0x8];
+
+       u8         reserved_at_120[0x20];
+
+       u8         time_stamp[0x40];
+
+       u8         out_pulse_duration[0x10];
+       u8         out_periodic_adjustment[0x10];
+
+       u8         reserved_at_1a0[0x60];
+};
+
+struct mlx5_ifc_mtppse_reg_bits {
+       u8         reserved_at_0[0x18];
+       u8         pin[0x8];
+       u8         event_arm[0x1];
+       u8         reserved_at_21[0x1b];
+       u8         event_generation_mode[0x4];
+       u8         reserved_at_40[0x40];
+};
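
Arming a pin for event generation would write MTPPSE; sketch, again assuming mlx5_core_access_reg() with write set:

	u32 in[MLX5_ST_SZ_DW(mtppse_reg)]  = {0};
	u32 out[MLX5_ST_SZ_DW(mtppse_reg)] = {0};

	MLX5_SET(mtppse_reg, in, pin, pin);
	MLX5_SET(mtppse_reg, in, event_arm, 1);
	err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
				   MLX5_REG_MTPPSE, 0, 1);
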
+
 union mlx5_ifc_ports_control_registers_document_bits {
        struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
        struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -7845,6 +8052,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
        struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
        struct mlx5_ifc_ppad_reg_bits ppad_reg;
        struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+       struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
        struct mlx5_ifc_pplm_reg_bits pplm_reg;
        struct mlx5_ifc_pplr_reg_bits pplr_reg;
        struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
@@ -7857,6 +8065,8 @@ union mlx5_ifc_ports_control_registers_document_bits {
        struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
        struct mlx5_ifc_slrg_reg_bits slrg_reg;
        struct mlx5_ifc_sltp_reg_bits sltp_reg;
+       struct mlx5_ifc_mtpps_reg_bits mtpps_reg;
+       struct mlx5_ifc_mtppse_reg_bits mtppse_reg;
        u8         reserved_at_0[0x60e0];
 };
 
index 0aacb2a7480d8aaf80d6230603a2eff1b164d3f3..3096370fe8319ec76f823b920ded745a10ebb6fd 100644 (file)
@@ -50,9 +50,6 @@
 #define MLX5_BSF_APPTAG_ESCAPE 0x1
 #define MLX5_BSF_APPREF_ESCAPE 0x2
 
-#define MLX5_QPN_BITS          24
-#define MLX5_QPN_MASK          ((1 << MLX5_QPN_BITS) - 1)
-
 enum mlx5_qp_optpar {
        MLX5_QP_OPTPAR_ALT_ADDR_PATH            = 1 << 0,
        MLX5_QP_OPTPAR_RRE                      = 1 << 1,
@@ -215,6 +212,7 @@ struct mlx5_wqe_ctrl_seg {
 #define MLX5_WQE_CTRL_OPCODE_MASK 0xff
 #define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
 #define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
+#define MLX5_WQE_AV_EXT 0x80000000
 
 enum {
        MLX5_ETH_WQE_L3_INNER_CSUM      = 1 << 4,
@@ -223,14 +221,26 @@ enum {
        MLX5_ETH_WQE_L4_CSUM            = 1 << 7,
 };
 
+enum {
+       MLX5_ETH_WQE_INSERT_VLAN        = 1 << 15,
+};
+
 struct mlx5_wqe_eth_seg {
        u8              rsvd0[4];
        u8              cs_flags;
        u8              rsvd1;
        __be16          mss;
        __be32          rsvd2;
-       __be16          inline_hdr_sz;
-       u8              inline_hdr_start[2];
+       union {
+               struct {
+                       __be16 sz;
+                       u8     start[2];
+               } inline_hdr;
+               struct {
+                       __be16 type;
+                       __be16 vlan_tci;
+               } insert;
+       };
 };
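
A transmit path using the new layout fills the insert view when offloading VLAN tag insertion instead of copying inline headers; schematic sketch:

	if (skb_vlan_tag_present(skb)) {
		eseg->insert.type     = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
		eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
	}
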
 
 struct mlx5_wqe_xrc_seg {
@@ -245,6 +255,23 @@ struct mlx5_wqe_masked_atomic_seg {
        __be64                  compare_mask;
 };
 
+struct mlx5_base_av {
+       union {
+               struct {
+                       __be32  qkey;
+                       __be32  reserved;
+               } qkey;
+               __be64  dc_key;
+       } key;
+       __be32  dqp_dct;
+       u8      stat_rate_sl;
+       u8      fl_mlid;
+       union {
+               __be16  rlid;
+               __be16  udp_sport;
+       };
+};
+
 struct mlx5_av {
        union {
                struct {
@@ -292,10 +319,14 @@ struct mlx5_wqe_data_seg {
 struct mlx5_wqe_umr_ctrl_seg {
        u8              flags;
        u8              rsvd0[3];
-       __be16          klm_octowords;
-       __be16          bsf_octowords;
+       __be16          xlt_octowords;
+       union {
+               __be16  xlt_offset;
+               __be16  bsf_octowords;
+       };
        __be64          mkey_mask;
-       u8              rsvd1[32];
+       __be32          xlt_offset_47_16;
+       u8              rsvd1[28];
 };
 
 struct mlx5_seg_set_psv {
@@ -389,6 +420,10 @@ struct mlx5_bsf {
        struct mlx5_bsf_inl     m_inl;
 };
 
+struct mlx5_mtt {
+       __be64          ptag;
+};
+
 struct mlx5_klm {
        __be32          bcount;
        __be32          key;
@@ -410,46 +445,9 @@ struct mlx5_stride_block_ctrl_seg {
        __be16          num_entries;
 };
 
-enum mlx5_pagefault_flags {
-       MLX5_PFAULT_REQUESTOR = 1 << 0,
-       MLX5_PFAULT_WRITE     = 1 << 1,
-       MLX5_PFAULT_RDMA      = 1 << 2,
-};
-
-/* Contains the details of a pagefault. */
-struct mlx5_pagefault {
-       u32                     bytes_committed;
-       u8                      event_subtype;
-       enum mlx5_pagefault_flags flags;
-       union {
-               /* Initiator or send message responder pagefault details. */
-               struct {
-                       /* Received packet size, only valid for responders. */
-                       u32     packet_size;
-                       /*
-                        * WQE index. Refers to either the send queue or
-                        * receive queue, according to event_subtype.
-                        */
-                       u16     wqe_index;
-               } wqe;
-               /* RDMA responder pagefault details */
-               struct {
-                       u32     r_key;
-                       /*
-                        * Received packet size, minimal size page fault
-                        * resolution required for forward progress.
-                        */
-                       u32     packet_size;
-                       u32     rdma_op_len;
-                       u64     rdma_va;
-               } rdma;
-       };
-};
-
 struct mlx5_core_qp {
        struct mlx5_core_rsc_common     common; /* must be first */
        void (*event)           (struct mlx5_core_qp *, int);
-       void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
        int                     qpn;
        struct mlx5_rsc_debug   *dbg;
        int                     pid;
@@ -549,10 +547,6 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev);
 void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
 int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
 void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
-                               u8 context, int error);
-#endif
 int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
                                struct mlx5_core_qp *rq);
 void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
index ec35157ea7252c587894ff4f94d406f50b7aeb66..656c70b65dd270a137f43106d993f1113fef73ab 100644 (file)
@@ -51,6 +51,7 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
                                     u16 vport, u8 *addr);
 int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
                                    u16 vport, u8 *min_inline);
+void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);
 int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
                                     u16 vport, u8 min_inline);
 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
index d43ef96bf0753ce5d11c615d91b81a8298afc69a..71b113e1223fb1dfb7692e51ec736067c56e97ae 100644 (file)
@@ -36,6 +36,7 @@
 #define SDIO_DEVICE_ID_BROADCOM_43362          0xa962
 #define SDIO_DEVICE_ID_BROADCOM_43430          0xa9a6
 #define SDIO_DEVICE_ID_BROADCOM_4345           0x4345
+#define SDIO_DEVICE_ID_BROADCOM_43455          0xa9bf
 #define SDIO_DEVICE_ID_BROADCOM_4354           0x4354
 #define SDIO_DEVICE_ID_BROADCOM_4356           0x4356
 
index 8a57f0b1242d741ef0ebab33cab628773a3e289f..8850fcaf50dba05d9440b958c5d95cd9330e727a 100644 (file)
@@ -501,6 +501,7 @@ struct platform_device_id {
        kernel_ulong_t driver_data;
 };
 
+#define MDIO_NAME_SIZE         32
 #define MDIO_MODULE_PREFIX     "mdio:"
 
 #define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
index e5fb81376e92ff2e1feac6130f07e7af7d9ca003..d7f63339ef0b48e7321954ea2fd083d3da2478d6 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/in.h>
 #include <linux/pim.h>
+#include <linux/rhashtable.h>
 #include <net/sock.h>
 #include <uapi/linux/mroute.h>
 
@@ -60,7 +61,6 @@ struct vif_device {
 #define VIFF_STATIC 0x8000
 
 #define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
-#define MFC_LINES 64
 
 struct mr_table {
        struct list_head        list;
@@ -69,8 +69,9 @@ struct mr_table {
        struct sock __rcu       *mroute_sk;
        struct timer_list       ipmr_expire_timer;
        struct list_head        mfc_unres_queue;
-       struct list_head        mfc_cache_array[MFC_LINES];
        struct vif_device       vif_table[MAXVIFS];
+       struct rhltable         mfc_hash;
+       struct list_head        mfc_cache_list;
        int                     maxvif;
        atomic_t                cache_resolve_queue_len;
        bool                    mroute_do_assert;
@@ -85,17 +86,48 @@ enum {
        MFC_STATIC = BIT(0),
 };
 
+struct mfc_cache_cmp_arg {
+       __be32 mfc_mcastgrp;
+       __be32 mfc_origin;
+};
+
+/**
+ * struct mfc_cache - multicast routing entries
+ * @mnode: rhashtable list
+ * @mfc_mcastgrp: destination multicast group address
+ * @mfc_origin: source address
+ * @cmparg: used for rhashtable comparisons
+ * @mfc_parent: source interface (iif)
+ * @mfc_flags: entry flags
+ * @expires: unresolved entry expire time
+ * @unresolved: unresolved cached skbs
+ * @last_assert: time of last assert
+ * @minvif: minimum VIF id
+ * @maxvif: maximum VIF id
+ * @bytes: bytes that have passed for this entry
+ * @pkt: packets that have passed for this entry
+ * @wrong_if: number of wrong source interface hits
+ * @lastuse: time of last use of the group (traffic or update)
+ * @ttls: OIF TTL threshold array
+ * @list: global entry list
+ * @rcu: used for entry destruction
+ */
 struct mfc_cache {
-       struct list_head list;
-       __be32 mfc_mcastgrp;                    /* Group the entry belongs to   */
-       __be32 mfc_origin;                      /* Source of packet             */
-       vifi_t mfc_parent;                      /* Source interface             */
-       int mfc_flags;                          /* Flags on line                */
+       struct rhlist_head mnode;
+       union {
+               struct {
+                       __be32 mfc_mcastgrp;
+                       __be32 mfc_origin;
+               };
+               struct mfc_cache_cmp_arg cmparg;
+       };
+       vifi_t mfc_parent;
+       int mfc_flags;
 
        union {
                struct {
                        unsigned long expires;
-                       struct sk_buff_head unresolved; /* Unresolved buffers           */
+                       struct sk_buff_head unresolved;
                } unres;
                struct {
                        unsigned long last_assert;
@@ -105,20 +137,15 @@ struct mfc_cache {
                        unsigned long pkt;
                        unsigned long wrong_if;
                        unsigned long lastuse;
-                       unsigned char ttls[MAXVIFS];    /* TTL thresholds               */
+                       unsigned char ttls[MAXVIFS];
                } res;
        } mfc_un;
+       struct list_head list;
        struct rcu_head rcu;
 };
 
-#ifdef __BIG_ENDIAN
-#define MFC_HASH(a,b)  (((((__force u32)(__be32)a)>>24)^(((__force u32)(__be32)b)>>26))&(MFC_LINES-1))
-#else
-#define MFC_HASH(a,b)  ((((__force u32)(__be32)a)^(((__force u32)(__be32)b)>>2))&(MFC_LINES-1))
-#endif
-
 struct rtmsg;
 int ipmr_get_route(struct net *net, struct sk_buff *skb,
                   __be32 saddr, __be32 daddr,
-                  struct rtmsg *rtm, int nowait, u32 portid);
+                  struct rtmsg *rtm, u32 portid);
 #endif
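
The cmparg/mnode pairing above is shaped for rhashtable lookups; the table would be initialized with parameters along these lines (sketch; the exact tuning lives in ipmr.c):

	static const struct rhashtable_params ipmr_rht_params = {
		.head_offset         = offsetof(struct mfc_cache, mnode),
		.key_offset          = offsetof(struct mfc_cache, cmparg),
		.key_len             = sizeof(struct mfc_cache_cmp_arg),
		.automatic_shrinking = true,
	};

	err = rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
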
index 19a1c0c2993b908a9f9a897f44b7d035c0e5fc26..ce44e3e96d2763cf766058c88680a15215a37084 100644 (file)
@@ -116,7 +116,7 @@ struct mfc6_cache {
 
 struct rtmsg;
 extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
-                          struct rtmsg *rtm, int nowait, u32 portid);
+                          struct rtmsg *rtm, u32 portid);
 
 #ifdef CONFIG_IPV6_MROUTE
 extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
index 9c6c8ef2e9e704513cc4272b0a3ee2fec6809d46..9a0419594e842ca00a5ecfca53823b38bad207bb 100644 (file)
@@ -71,7 +71,6 @@ enum {
        NETIF_F_HW_VLAN_STAG_RX_BIT,    /* Receive VLAN STAG HW acceleration */
        NETIF_F_HW_VLAN_STAG_FILTER_BIT,/* Receive filtering on VLAN STAGs */
        NETIF_F_HW_L2FW_DOFFLOAD_BIT,   /* Allow L2 Forwarding in Hardware */
-       NETIF_F_BUSY_POLL_BIT,          /* Busy poll */
 
        NETIF_F_HW_TC_BIT,              /* Offload TC infrastructure */
 
@@ -134,7 +133,6 @@ enum {
 #define NETIF_F_HW_VLAN_STAG_RX        __NETIF_F(HW_VLAN_STAG_RX)
 #define NETIF_F_HW_VLAN_STAG_TX        __NETIF_F(HW_VLAN_STAG_TX)
 #define NETIF_F_HW_L2FW_DOFFLOAD       __NETIF_F(HW_L2FW_DOFFLOAD)
-#define NETIF_F_BUSY_POLL      __NETIF_F(BUSY_POLL)
 #define NETIF_F_HW_TC          __NETIF_F(HW_TC)
 
 #define for_each_netdev_feature(mask_addr, bit)        \
index 27914672602d9d573e6a3da271cec33ccef51b16..f40f0ab3847a8caaf46bd4d5f224c65014f501cc 100644 (file)
@@ -352,6 +352,7 @@ enum gro_result {
        GRO_HELD,
        GRO_NORMAL,
        GRO_DROP,
+       GRO_CONSUMED,
 };
 typedef enum gro_result gro_result_t;
 
@@ -463,7 +464,6 @@ static inline bool napi_reschedule(struct napi_struct *napi)
        return false;
 }
 
-bool __napi_complete(struct napi_struct *n);
 bool napi_complete_done(struct napi_struct *n, int work_done);
 /**
  *     napi_complete - NAPI processing complete
@@ -917,8 +917,8 @@ struct netdev_xdp {
  *     Callback used when the transmitter has not made any progress
  *     for dev->watchdog ticks.
  *
- * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
- *                      struct rtnl_link_stats64 *storage);
+ * void (*ndo_get_stats64)(struct net_device *dev,
+ *                         struct rtnl_link_stats64 *storage);
  * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
  *     Called when a user wants to get the network device usage
  *     statistics. Drivers must do one of the following:
@@ -968,11 +968,12 @@ struct netdev_xdp {
  *      with PF and querying it may introduce a theoretical security risk.
  * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
  * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
- * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
- *     Called to setup 'tc' number of traffic classes in the net device. This
- *     is always called from the stack with the rtnl lock held and netif tx
- *     queues stopped. This allows the netdevice to perform queue management
- *     safely.
+ * int (*ndo_setup_tc)(struct net_device *dev, u32 handle,
+ *                    __be16 protocol, struct tc_to_netdev *tc);
+ *     Called to setup any 'tc' scheduler, classifier or action on @dev.
+ *     This is always called from the stack with the rtnl lock held and netif
+ *     tx queues stopped. This allows the netdevice to perform queue
+ *     management safely.
  *
  *     Fiber Channel over Ethernet (FCoE) offload functions.
  * int (*ndo_fcoe_enable)(struct net_device *dev);
@@ -1166,8 +1167,8 @@ struct net_device_ops {
                                                   struct neigh_parms *);
        void                    (*ndo_tx_timeout) (struct net_device *dev);
 
-       struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
-                                                    struct rtnl_link_stats64 *storage);
+       void                    (*ndo_get_stats64)(struct net_device *dev,
+                                                  struct rtnl_link_stats64 *storage);
        bool                    (*ndo_has_offload_stats)(const struct net_device *dev, int attr_id);
        int                     (*ndo_get_offload_stats)(int attr_id,
                                                         const struct net_device *dev,
@@ -1183,9 +1184,6 @@ struct net_device_ops {
        int                     (*ndo_netpoll_setup)(struct net_device *dev,
                                                     struct netpoll_info *info);
        void                    (*ndo_netpoll_cleanup)(struct net_device *dev);
-#endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       int                     (*ndo_busy_poll)(struct napi_struct *dev);
 #endif
        int                     (*ndo_set_vf_mac)(struct net_device *dev,
                                                  int queue, u8 *mac);
@@ -1553,7 +1551,6 @@ enum netdev_priv_flags {
  *     @ax25_ptr:      AX.25 specific data
  *     @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
  *
- *     @last_rx:       Time of last Rx
  *     @dev_addr:      Hw address (before bcast,
  *                     because most packets are unicast)
  *
@@ -1780,8 +1777,6 @@ struct net_device {
 /*
  * Cache lines mostly used on receive path (including eth_type_trans())
  */
-       unsigned long           last_rx;
-
        /* Interface address info used in eth_type_trans() */
        unsigned char           *dev_addr;
 
@@ -1871,8 +1866,12 @@ struct net_device {
                struct pcpu_vstats __percpu             *vstats;
        };
 
+#if IS_ENABLED(CONFIG_GARP)
        struct garp_port __rcu  *garp_port;
+#endif
+#if IS_ENABLED(CONFIG_MRP)
        struct mrp_port __rcu   *mrp_port;
+#endif
 
        struct device           dev;
        const struct attribute_group *sysfs_groups[4];
@@ -2669,6 +2668,19 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
        remcsum_unadjust((__sum16 *)ptr, grc->delta);
 }
 
+#ifdef CONFIG_XFRM_OFFLOAD
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+{
+       if (PTR_ERR(pp) != -EINPROGRESS)
+               NAPI_GRO_CB(skb)->flush |= flush;
+}
+#else
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+{
+       NAPI_GRO_CB(skb)->flush |= flush;
+}
+#endif
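
Call sites replace the open-coded flush update at the tail of a typical gro_receive handler; schematic sketch:

	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
	skb_gro_flush_final(skb, pp, flush);
	return pp;
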
+
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                                  unsigned short type,
                                  const void *daddr, const void *saddr,
@@ -3111,7 +3123,19 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev,
        return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
 }
 
-void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
+/**
+ *     netif_wake_subqueue - allow sending packets on subqueue
+ *     @dev: network device
+ *     @queue_index: sub queue index
+ *
+ * Resume individual transmit queue of a device with multiple transmit queues.
+ */
+static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
+{
+       struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+
+       netif_tx_wake_queue(txq);
+}
 
 #ifdef CONFIG_XPS
 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
@@ -3805,6 +3829,10 @@ void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 extern int             netdev_max_backlog;
 extern int             netdev_tstamp_prequeue;
 extern int             weight_p;
+extern int             dev_weight_rx_bias;
+extern int             dev_weight_tx_bias;
+extern int             dev_rx_weight;
+extern int             dev_tx_weight;
 
 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
@@ -3882,10 +3910,6 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
                                   struct net_device *lower_dev);
 void netdev_lower_state_changed(struct net_device *lower_dev,
                                void *lower_state_info);
-int netdev_default_l2upper_neigh_construct(struct net_device *dev,
-                                          struct neighbour *n);
-void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
-                                         struct neighbour *n);
 
 /* RSS keys are 40 or 52 bytes long */
 #define NETDEV_RSS_KEY_LEN 52
@@ -4338,6 +4362,15 @@ do {                                                             \
 })
 #endif
 
+/* if @cond then downgrade to debug, else print at @level */
+#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...)     \
+       do {                                                              \
+               if (cond)                                                 \
+                       netif_dbg(priv, type, netdev, fmt, ##args);       \
+               else                                                      \
+                       netif_ ## level(priv, type, netdev, fmt, ##args); \
+       } while (0)
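
Usage sketch: demote an expected failure to debug while keeping the unexpected case at warn:

	netif_cond_dbg(priv, tx_err, netdev, err == -ENOMEM, warn,
		       "tx dma map failed, err %d\n", err);
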
+
 #if defined(VERBOSE_DEBUG)
 #define netif_vdbg     netif_dbg
 #else
index 1d82dd5e9a08ada113978fc9752585d62dfcba98..1b49209dd5c7cd74644624007ce57df107c3744b 100644 (file)
@@ -28,6 +28,7 @@ struct nfnetlink_subsystem {
        const struct nfnl_callback *cb; /* callback for individual types */
        int (*commit)(struct net *net, struct sk_buff *skb);
        int (*abort)(struct net *net, struct sk_buff *skb);
+       bool (*valid_genid)(struct net *net, u32 genid);
 };
 
 int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
index 5117e4d2ddfa0739317ca468db162d1c8d8829a6..be378cf47fcc93fa2c89c6cd870f0a9fecd65211 100644 (file)
@@ -167,6 +167,7 @@ struct xt_match {
 
        const char *table;
        unsigned int matchsize;
+       unsigned int usersize;
 #ifdef CONFIG_COMPAT
        unsigned int compatsize;
 #endif
@@ -207,6 +208,7 @@ struct xt_target {
 
        const char *table;
        unsigned int targetsize;
+       unsigned int usersize;
 #ifdef CONFIG_COMPAT
        unsigned int compatsize;
 #endif
@@ -287,6 +289,13 @@ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
 int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
                    bool inv_proto);
 
+int xt_match_to_user(const struct xt_entry_match *m,
+                    struct xt_entry_match __user *u);
+int xt_target_to_user(const struct xt_entry_target *t,
+                     struct xt_entry_target __user *u);
+int xt_data_to_user(void __user *dst, const void *src,
+                   int usersize, int size);
+
 void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
                                 struct xt_counters_info *info, bool compat);
 
diff --git a/include/linux/parman.h b/include/linux/parman.h
new file mode 100644 (file)
index 0000000..3c8cccc
--- /dev/null
+++ b/include/linux/parman.h
@@ -0,0 +1,76 @@
+/*
+ * include/linux/parman.h - Manager for linear priority array areas
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PARMAN_H
+#define _PARMAN_H
+
+#include <linux/list.h>
+
+enum parman_algo_type {
+       PARMAN_ALGO_TYPE_LSORT,
+};
+
+struct parman_item {
+       struct list_head list;
+       unsigned long index;
+};
+
+struct parman_prio {
+       struct list_head list;
+       struct list_head item_list;
+       unsigned long priority;
+};
+
+struct parman_ops {
+       unsigned long base_count;
+       unsigned long resize_step;
+       int (*resize)(void *priv, unsigned long new_count);
+       void (*move)(void *priv, unsigned long from_index,
+                    unsigned long to_index, unsigned long count);
+       enum parman_algo_type algo;
+};
+
+struct parman;
+
+struct parman *parman_create(const struct parman_ops *ops, void *priv);
+void parman_destroy(struct parman *parman);
+void parman_prio_init(struct parman *parman, struct parman_prio *prio,
+                     unsigned long priority);
+void parman_prio_fini(struct parman_prio *prio);
+int parman_item_add(struct parman *parman, struct parman_prio *prio,
+                   struct parman_item *item);
+void parman_item_remove(struct parman *parman, struct parman_prio *prio,
+                       struct parman_item *item);
+
+#endif
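
The parman API above manages a priority-ordered linear array on behalf of a driver (for example a TCAM-like region): parman decides which slot each item occupies and calls back into the driver to grow the array and to move blocks of entries when a new priority has to be squeezed in. A consumer sketch under assumed names (struct my_region and the my_hw_*() helpers are hypothetical):

	static int my_resize(void *priv, unsigned long new_count)
	{
		struct my_region *r = priv;

		return my_hw_resize(r, new_count);	/* grow backing table */
	}

	static void my_move(void *priv, unsigned long from_index,
			    unsigned long to_index, unsigned long count)
	{
		struct my_region *r = priv;

		my_hw_move(r, from_index, to_index, count); /* shift entries */
	}

	static const struct parman_ops my_parman_ops = {
		.base_count	= 16,
		.resize_step	= 16,
		.resize		= my_resize,
		.move		= my_move,
		.algo		= PARMAN_ALGO_TYPE_LSORT,
	};

After parman_create(&my_parman_ops, region), each parman_item_add() on a parman_prio assigns item->index, the packed slot the driver should program; parman keeps items grouped by priority by issuing move() callbacks as needed.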
index e2d1a124216a9c36b580e68a6c79607cd6686215..adbc859fe7c4c0abd2cea97a312d515e83695af9 100644 (file)
@@ -885,7 +885,6 @@ void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
 void pci_sort_breadthfirst(void);
 #define dev_is_pci(d) ((d)->bus == &pci_bus_type)
 #define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
-#define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0))
 
 /* Generic PCI functions exported to card drivers */
 
@@ -1630,7 +1629,6 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
 
 #define dev_is_pci(d) (false)
 #define dev_is_pf(d) (false)
-#define dev_num_vf(d) (0)
 #endif /* CONFIG_PCI */
 
 /* Include architecture-dependent settings and functions */
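
With the dev_num_vf() macro removed (its remaining users apparently converted), code that still needs the VF count can open-code the same check; illustrative replacement:

	int num_vf = dev_is_pci(dev) ? pci_num_vf(to_pci_dev(dev)) : 0;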
index 7fc1105605bfd2334dab7fd6e96f25a2c33ed566..d9bdf53e05141f9625df423533fa7c6cef926e53 100644 (file)
@@ -81,6 +81,9 @@ typedef enum {
        PHY_INTERFACE_MODE_MOCA,
        PHY_INTERFACE_MODE_QSGMII,
        PHY_INTERFACE_MODE_TRGMII,
+       PHY_INTERFACE_MODE_1000BASEX,
+       PHY_INTERFACE_MODE_2500BASEX,
+       PHY_INTERFACE_MODE_RXAUI,
        PHY_INTERFACE_MODE_MAX,
 } phy_interface_t;
 
@@ -141,6 +144,12 @@ static inline const char *phy_modes(phy_interface_t interface)
                return "qsgmii";
        case PHY_INTERFACE_MODE_TRGMII:
                return "trgmii";
+       case PHY_INTERFACE_MODE_1000BASEX:
+               return "1000base-x";
+       case PHY_INTERFACE_MODE_2500BASEX:
+               return "2500base-x";
+       case PHY_INTERFACE_MODE_RXAUI:
+               return "rxaui";
        default:
                return "unknown";
        }
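
The three new interface modes slot into the existing name mapping, so generic code that reports modes by string keeps working; illustrative use of the lookup:

	/* prints "PHY interface: 2500base-x" */
	pr_info("PHY interface: %s\n",
		phy_modes(PHY_INTERFACE_MODE_2500BASEX));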
@@ -157,11 +166,7 @@ static inline const char *phy_modes(phy_interface_t interface)
 /* Used when trying to connect to a specific phy (mii bus id:phy device id) */
 #define PHY_ID_FMT "%s:%02x"
 
-/*
- * Need to be a little smaller than phydev->dev.bus_id to leave room
- * for the ":%02x"
- */
-#define MII_BUS_ID_SIZE        (20 - 3)
+#define MII_BUS_ID_SIZE        61
 
 /* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
    IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */
@@ -631,7 +636,7 @@ struct phy_driver {
 /* A Structure for boards to register fixups with the PHY Lib */
 struct phy_fixup {
        struct list_head list;
-       char bus_id[20];
+       char bus_id[MII_BUS_ID_SIZE + 3];
        u32 phy_uid;
        u32 phy_uid_mask;
        int (*run)(struct phy_device *phydev);
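
The fixup buffer is sized MII_BUS_ID_SIZE + 3 for the same reason the old definition was (20 - 3): PHY_ID_FMT appends ":%02x" (three characters) after a bus id of at most MII_BUS_ID_SIZE - 1 characters plus its NUL. Illustrative formatting, assuming a struct mii_bus *bus and an int addr in scope:

	char phy_id[MII_BUS_ID_SIZE + 3];

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, bus->id, addr);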
@@ -881,6 +886,25 @@ void mdio_bus_exit(void);
 
 extern struct bus_type mdio_bus_type;
 
+struct mdio_board_info {
+       const char      *bus_id;
+       char            modalias[MDIO_NAME_SIZE];
+       int             mdio_addr;
+       const void      *platform_data;
+};
+
+#if IS_ENABLED(CONFIG_PHYLIB)
+int mdiobus_register_board_info(const struct mdio_board_info *info,
+                               unsigned int n);
+#else
+static inline int mdiobus_register_board_info(const struct mdio_board_info *i,
+                                             unsigned int n)
+{
+       return 0;
+}
+#endif
+
+
 /**
  * module_phy_driver() - Helper macro for registering PHY drivers
  * @__phy_drivers: array of PHY drivers to register
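
mdio_board_info lets board code attach platform data to a device at a known address on a named MDIO bus before that bus is probed; the !CONFIG_PHYLIB stub returns 0 so callers need no #ifdefs. A hypothetical registration (bus id, modalias and pdata below are made up):

	static struct my_phy_pdata {
		int led_mode;			/* hypothetical pdata */
	} my_phy_pdata = { .led_mode = 1 };

	static const struct mdio_board_info my_mdio_info[] = {
		{
			.bus_id		= "mdio-bus.0",	/* must match mii_bus id */
			.modalias	= "my-phy-driver",
			.mdio_addr	= 4,
			.platform_data	= &my_phy_pdata,
		},
	};

	/* from board/machine init code: */
	mdiobus_register_board_info(my_mdio_info, ARRAY_SIZE(my_mdio_info));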
index 734deb09461894c4df468e92c9168aff79444c4f..c33080baf38c64fbe1177a69aba8b84aa08a4f48 100644 (file)
@@ -1,10 +1,35 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2016  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
+
 #ifndef _COMMON_HSI_H
 #define _COMMON_HSI_H
 #include <linux/types.h>
index 1aa0727c413628f5188160b2695b7c9a6757f978..4b402fb0eaad5fdf7bd29221d95596f092c0e945 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef __ETH_COMMON__
index 8f64b1223c2f84022b805ee9c96ce2ff9dc5a36b..4c5747babcf63ff32b8db664146df40545d986d2 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef __ISCSI_COMMON__
index 37dfba101c6cd8e5f05ca4fda6db321900dd9ccc..5cd7a4608c9b7699ee649aeafda7f5953434c24d 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_CHAIN_H
index 7a52f7c58c37ca1942ab29388ad9110282fd5480..4cd1f0ccfa367a5215dd121ca22efa6267e7c51e 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_ETH_IF_H
@@ -53,7 +77,7 @@ struct qed_dev_eth_info {
 };
 
 struct qed_update_vport_rss_params {
-       u16     rss_ind_table[128];
+       void    *rss_ind_table[128];
        u32     rss_key[10];
        u8      rss_caps;
 };
@@ -72,6 +96,7 @@ struct qed_update_vport_params {
 
 struct qed_start_vport_params {
        bool remove_inner_vlan;
+       bool handle_ptp_pkts;
        bool gro_enable;
        bool drop_ttl0;
        u8 vport_id;
@@ -135,6 +160,15 @@ struct qed_eth_cb_ops {
        void (*force_mac) (void *dev, u8 *mac, bool forced);
 };
 
+#define QED_MAX_PHC_DRIFT_PPB   291666666
+
+enum qed_ptp_filter_type {
+       QED_PTP_FILTER_L2,
+       QED_PTP_FILTER_IPV4,
+       QED_PTP_FILTER_IPV4_IPV6,
+       QED_PTP_FILTER_L2_IPV4_IPV6
+};
+
 #ifdef CONFIG_DCB
 /* Prototype declaration of qed_eth_dcbnl_ops should match with the declaration
  * of dcbnl_rtnl_ops structure.
@@ -194,6 +228,17 @@ struct qed_eth_dcbnl_ops {
 };
 #endif
 
+struct qed_eth_ptp_ops {
+       int (*hwtstamp_tx_on)(struct qed_dev *);
+       int (*cfg_rx_filters)(struct qed_dev *, enum qed_ptp_filter_type);
+       int (*read_rx_ts)(struct qed_dev *, u64 *);
+       int (*read_tx_ts)(struct qed_dev *, u64 *);
+       int (*read_cc)(struct qed_dev *, u64 *);
+       int (*disable)(struct qed_dev *);
+       int (*adjfreq)(struct qed_dev *, s32);
+       int (*enable)(struct qed_dev *);
+};
+
 struct qed_eth_ops {
        const struct qed_common_ops *common;
 #ifdef CONFIG_QED_SRIOV
@@ -202,6 +247,7 @@ struct qed_eth_ops {
 #ifdef CONFIG_DCB
        const struct qed_eth_dcbnl_ops *dcb;
 #endif
+       const struct qed_eth_ptp_ops *ptp;
 
        int (*fill_dev_info)(struct qed_dev *cdev,
                             struct qed_dev_eth_info *info);
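
The new ptp ops give the L2 driver a single entry point to the PHC behind the qed core; a hedged sketch of how a PTP clock callback might be wired through them (struct my_dev and its fields are assumptions, not the in-tree qede code):

	static int my_phc_adjfreq(struct ptp_clock_info *info, s32 ppb)
	{
		struct my_dev *edev = container_of(info, struct my_dev,
						   ptp_info);

		if (ppb > QED_MAX_PHC_DRIFT_PPB ||
		    ppb < -QED_MAX_PHC_DRIFT_PPB)
			return -ERANGE;

		return edev->ops->ptp->adjfreq(edev->cdev, ppb);
	}

QED_MAX_PHC_DRIFT_PPB bounds the requested frequency drift before it reaches hardware.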
index 4b454f4f5b2511b71ba0c5815da51a20dfbef48a..d1576a2bcfc921128b24b4b76ecd02a9aed509a6 100644 (file)
@@ -1,10 +1,33 @@
 /* QLogic qed NIC Driver
- *
- * Copyright (c) 2015 QLogic Corporation
- *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * Copyright (c) 2015-2017  QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_IF_H
index 5a4f8d0899e9d94f9cf87e527103b739e3bb7391..ac2e6a3199a36c88eca3e8126bab85500e37fd98 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_IOV_IF_H
@@ -29,6 +53,8 @@ struct qed_iov_hv_ops {
 
        int (*set_rate) (struct qed_dev *cdev, int vfid,
                         u32 min_rate, u32 max_rate);
+
+       int (*set_trust) (struct qed_dev *cdev, int vfid, bool trust);
 };
 
 #endif
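
set_trust lines up with the ndo_set_vf_trust netdev op; an illustrative pass-through (my_priv and my_netdev_priv are assumed names):

	static int my_ndo_set_vf_trust(struct net_device *dev, int vfid,
				       bool trust)
	{
		struct my_priv *priv = my_netdev_priv(dev);

		return priv->iov_ops->set_trust(priv->cdev, vfid, trust);
	}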
index d27912480cb3f7a30272845b29a7fb9515b51661..f70bb81b8b6acfda1701ffcd073163db05a82770 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_ISCSI_IF_H
index fd75c265dba3c4df87616bed15b23549a20eee48..4fb4666ea879c6e8f8b9da42cce9f8217e1681d9 100644 (file)
@@ -1,10 +1,33 @@
 /* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * Copyright (c) 2015 QLogic Corporation
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef _QED_LL2_IF_H
index 53047d3fa6781ea71a0bcad7dcc089537ecb5a8e..f742d4312c9d96f1498554edee09654e065e9e74 100644 (file)
@@ -1,5 +1,5 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015-2016  QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index f48d64b0e2fb943a492981f30f3851ed77103711..3b8dd551a98caf7185ed2ab9b797088a9bdcba02 100644 (file)
@@ -1,5 +1,5 @@
 /* QLogic qedr NIC Driver
- * Copyright (c) 2015-2016  QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 7663725faa94c496eb9f648859d5efa1aa58361e..f773aa5e746ff47bb886aa19f568a24108dd0d1e 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef __RDMA_COMMON__
index 2eeaf3dc66464838cef8ee376be4b557e5bc1e6e..bad02df213dfccd11cd25fa1b1e0decaf17f92c6 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef __ROCE_COMMON__
index 3b8e1efd9bc2c47efb2158ebae5d21575b372b20..03f3e37ab059d5e4b48aa2b7f80366016b27fdf5 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef __STORAGE_COMMON__
index dc3889d1bbe6f0cf2add58b82241bd8529f89109..46fe7856f1b22c828474257ceedf03c182958ec5 100644 (file)
@@ -1,9 +1,33 @@
 /* QLogic qed NIC Driver
- * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2015-2017  QLogic Corporation
  *
- * This software is available under the terms of the GNU General Public License
- * (GPL) Version 2, available from the file COPYING in the main directory of
- * this source tree.
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and /or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 
 #ifndef __TCP_COMMON__
diff --git a/include/linux/rfkill-regulator.h b/include/linux/rfkill-regulator.h
deleted file mode 100644 (file)
index aca36bc..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * rfkill-regulator.c - Regulator consumer driver for rfkill
- *
- * Copyright (C) 2009  Guiming Zhuo <gmzhuo@gmail.com>
- * Copyright (C) 2011  Antonio Ospite <ospite@studenti.unina.it>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifndef __LINUX_RFKILL_REGULATOR_H
-#define __LINUX_RFKILL_REGULATOR_H
-
-/*
- * Use "vrfkill" as supply id when declaring the regulator consumer:
- *
- * static struct regulator_consumer_supply pcap_regulator_V6_consumers [] = {
- *     { .dev_name = "rfkill-regulator.0", .supply = "vrfkill" },
- * };
- *
- * If you have several regulator driven rfkill, you can append a numerical id to
- * .dev_name as done above, and use the same id when declaring the platform
- * device:
- *
- * static struct rfkill_regulator_platform_data ezx_rfkill_bt_data = {
- *     .name  = "ezx-bluetooth",
- *     .type  = RFKILL_TYPE_BLUETOOTH,
- * };
- *
- * static struct platform_device a910_rfkill = {
- *     .name  = "rfkill-regulator",
- *     .id    = 0,
- *     .dev   = {
- *             .platform_data = &ezx_rfkill_bt_data,
- *     },
- * };
- */
-
-#include <linux/rfkill.h>
-
-struct rfkill_regulator_platform_data {
-       char *name;             /* the name for the rfkill switch */
-       enum rfkill_type type;  /* the type as specified in rfkill.h */
-};
-
-#endif /* __LINUX_RFKILL_REGULATOR_H */
index 5c132d3188be8f2ead11a6c71c75e8bde6f4d27c..f2e12a8459100e5d62ff634a2984e3b15612cf4e 100644 (file)
@@ -61,6 +61,7 @@ struct rhlist_head {
 /**
  * struct bucket_table - Table of hash buckets
  * @size: Number of hash buckets
+ * @nest: Number of bits of first-level nested table.
  * @rehash: Current bucket being rehashed
  * @hash_rnd: Random seed to fold into hash
  * @locks_mask: Mask to apply before accessing locks[]
@@ -68,10 +69,12 @@ struct rhlist_head {
  * @walkers: List of active walkers
  * @rcu: RCU structure for freeing the table
  * @future_tbl: Table under construction during rehashing
+ * @ntbl: Nested table used when out of memory.
  * @buckets: size * hash buckets
  */
 struct bucket_table {
        unsigned int            size;
+       unsigned int            nest;
        unsigned int            rehash;
        u32                     hash_rnd;
        unsigned int            locks_mask;
@@ -81,7 +84,7 @@ struct bucket_table {
 
        struct bucket_table __rcu *future_tbl;
 
-       struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
+       struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
 };
 
 /**
@@ -374,6 +377,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                                 void *arg);
 void rhashtable_destroy(struct rhashtable *ht);
 
+struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+                                           unsigned int hash);
+struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+                                                  struct bucket_table *tbl,
+                                                  unsigned int hash);
+
 #define rht_dereference(p, ht) \
        rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
 
@@ -389,6 +398,27 @@ void rhashtable_destroy(struct rhashtable *ht);
 #define rht_entry(tpos, pos, member) \
        ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
 
+static inline struct rhash_head __rcu *const *rht_bucket(
+       const struct bucket_table *tbl, unsigned int hash)
+{
+       return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+                                    &tbl->buckets[hash];
+}
+
+static inline struct rhash_head __rcu **rht_bucket_var(
+       struct bucket_table *tbl, unsigned int hash)
+{
+       return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+                                    &tbl->buckets[hash];
+}
+
+static inline struct rhash_head __rcu **rht_bucket_insert(
+       struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
+{
+       return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
+                                    &tbl->buckets[hash];
+}
+
 /**
  * rht_for_each_continue - continue iterating over hash chain
  * @pos:       the &struct rhash_head to use as a loop cursor.
@@ -408,7 +438,7 @@ void rhashtable_destroy(struct rhashtable *ht);
  * @hash:      the hash value / bucket index
  */
 #define rht_for_each(pos, tbl, hash) \
-       rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
+       rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
 
 /**
  * rht_for_each_entry_continue - continue iterating over hash chain
@@ -433,7 +463,7 @@ void rhashtable_destroy(struct rhashtable *ht);
  * @member:    name of the &struct rhash_head within the hashable struct.
  */
 #define rht_for_each_entry(tpos, pos, tbl, hash, member)               \
-       rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash],    \
+       rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash),  \
                                    tbl, hash, member)
 
 /**
@@ -448,13 +478,13 @@ void rhashtable_destroy(struct rhashtable *ht);
  * This hash chain list-traversal primitive allows for the looped code to
  * remove the loop cursor from the list.
  */
-#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)        \
-       for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
-            next = !rht_is_a_nulls(pos) ?                                  \
-                      rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
-            (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);        \
-            pos = next,                                                    \
-            next = !rht_is_a_nulls(pos) ?                                  \
+#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)          \
+       for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
+            next = !rht_is_a_nulls(pos) ?                                    \
+                      rht_dereference_bucket(pos->next, tbl, hash) : NULL;   \
+            (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);          \
+            pos = next,                                                      \
+            next = !rht_is_a_nulls(pos) ?                                    \
                       rht_dereference_bucket(pos->next, tbl, hash) : NULL)
 
 /**
@@ -485,7 +515,7 @@ void rhashtable_destroy(struct rhashtable *ht);
  * traversal is guarded by rcu_read_lock().
  */
 #define rht_for_each_rcu(pos, tbl, hash)                               \
-       rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
+       rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
 
 /**
  * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
@@ -518,8 +548,8 @@ void rhashtable_destroy(struct rhashtable *ht);
  * the _rcu mutation primitives such as rhashtable_insert() as long as the
  * traversal is guarded by rcu_read_lock().
  */
-#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)           \
-       rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
+#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)              \
+       rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
                                        tbl, hash, member)
 
 /**
@@ -565,7 +595,7 @@ static inline struct rhash_head *__rhashtable_lookup(
                .ht = ht,
                .key = key,
        };
-       const struct bucket_table *tbl;
+       struct bucket_table *tbl;
        struct rhash_head *he;
        unsigned int hash;
 
@@ -697,8 +727,12 @@ slow_path:
        }
 
        elasticity = ht->elasticity;
-       pprev = &tbl->buckets[hash];
-       rht_for_each(head, tbl, hash) {
+       pprev = rht_bucket_insert(ht, tbl, hash);
+       data = ERR_PTR(-ENOMEM);
+       if (!pprev)
+               goto out;
+
+       rht_for_each_continue(head, *pprev, tbl, hash) {
                struct rhlist_head *plist;
                struct rhlist_head *list;
 
@@ -736,7 +770,7 @@ slow_path:
        if (unlikely(rht_grow_above_100(ht, tbl)))
                goto slow_path;
 
-       head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+       head = rht_dereference_bucket(*pprev, tbl, hash);
 
        RCU_INIT_POINTER(obj->next, head);
        if (rhlist) {
@@ -746,7 +780,7 @@ slow_path:
                RCU_INIT_POINTER(list->next, NULL);
        }
 
-       rcu_assign_pointer(tbl->buckets[hash], obj);
+       rcu_assign_pointer(*pprev, obj);
 
        atomic_inc(&ht->nelems);
        if (rht_grow_above_75(ht, tbl))
@@ -955,8 +989,8 @@ static inline int __rhashtable_remove_fast_one(
 
        spin_lock_bh(lock);
 
-       pprev = &tbl->buckets[hash];
-       rht_for_each(he, tbl, hash) {
+       pprev = rht_bucket_var(tbl, hash);
+       rht_for_each_continue(he, *pprev, tbl, hash) {
                struct rhlist_head *list;
 
                list = container_of(he, struct rhlist_head, rhead);
@@ -1107,8 +1141,8 @@ static inline int __rhashtable_replace_fast(
 
        spin_lock_bh(lock);
 
-       pprev = &tbl->buckets[hash];
-       rht_for_each(he, tbl, hash) {
+       pprev = rht_bucket_var(tbl, hash);
+       rht_for_each_continue(he, *pprev, tbl, hash) {
                if (he != obj_old) {
                        pprev = &he->next;
                        continue;
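
All of the conversions in this file follow one pattern: walks that used to start from &tbl->buckets[hash] now fetch the bucket head through rht_bucket()/rht_bucket_var()/rht_bucket_insert(), so the same loops work whether the table is flat or nested (per the new doc comments, tbl->nest becomes nonzero only as an out-of-memory fallback, and unlikely() keeps the flat case cheap). The change is invisible to rhashtable users; a minimal user-side refresher, with the struct and params as assumptions:

	struct my_obj {
		u32 key;
		struct rhash_head node;
	};

	static const struct rhashtable_params my_params = {
		.key_len	= sizeof(u32),
		.key_offset	= offsetof(struct my_obj, key),
		.head_offset	= offsetof(struct my_obj, node),
	};

	/* err = rhashtable_init(&ht, &my_params);
	 * obj = rhashtable_lookup_fast(&ht, &key, my_params);
	 */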
index fcb4c364617329f10257c5cfe6d86d75d42c7132..b055788de0cff799653697dd7c3a3af9a32c6bdf 100644 (file)
@@ -62,7 +62,7 @@ typedef struct sctphdr {
        __be16 dest;
        __be32 vtag;
        __le32 checksum;
-} __packed sctp_sctphdr_t;
+} sctp_sctphdr_t;
 
 static inline struct sctphdr *sctp_hdr(const struct sk_buff *skb)
 {
@@ -74,7 +74,7 @@ typedef struct sctp_chunkhdr {
        __u8 type;
        __u8 flags;
        __be16 length;
-} __packed sctp_chunkhdr_t;
+} sctp_chunkhdr_t;
 
 
 /* Section 3.2.  Chunk Type Values.
@@ -108,6 +108,7 @@ typedef enum {
        /* Use hex, as defined in ADDIP sec. 3.1 */
        SCTP_CID_ASCONF                 = 0xC1,
        SCTP_CID_ASCONF_ACK             = 0x80,
+       SCTP_CID_RECONF                 = 0x82,
 } sctp_cid_t; /* enum */
 
 
@@ -164,7 +165,7 @@ enum { SCTP_CHUNK_FLAG_T = 0x01 };
 typedef struct sctp_paramhdr {
        __be16 type;
        __be16 length;
-} __packed sctp_paramhdr_t;
+} sctp_paramhdr_t;
 
 typedef enum {
 
@@ -199,6 +200,13 @@ typedef enum {
        SCTP_PARAM_SUCCESS_REPORT       = cpu_to_be16(0xc005),
        SCTP_PARAM_ADAPTATION_LAYER_IND = cpu_to_be16(0xc006),
 
+       /* RE-CONFIG. Section 4 */
+       SCTP_PARAM_RESET_OUT_REQUEST            = cpu_to_be16(0x000d),
+       SCTP_PARAM_RESET_IN_REQUEST             = cpu_to_be16(0x000e),
+       SCTP_PARAM_RESET_TSN_REQUEST            = cpu_to_be16(0x000f),
+       SCTP_PARAM_RESET_RESPONSE               = cpu_to_be16(0x0010),
+       SCTP_PARAM_RESET_ADD_OUT_STREAMS        = cpu_to_be16(0x0011),
+       SCTP_PARAM_RESET_ADD_IN_STREAMS         = cpu_to_be16(0x0012),
 } sctp_param_t; /* enum */
 
 
@@ -225,12 +233,12 @@ typedef struct sctp_datahdr {
        __be16 ssn;
        __be32 ppid;
        __u8  payload[0];
-} __packed sctp_datahdr_t;
+} sctp_datahdr_t;
 
 typedef struct sctp_data_chunk {
         sctp_chunkhdr_t chunk_hdr;
         sctp_datahdr_t  data_hdr;
-} __packed sctp_data_chunk_t;
+} sctp_data_chunk_t;
 
 /* DATA Chuck Specific Flags */
 enum {
@@ -256,78 +264,78 @@ typedef struct sctp_inithdr {
        __be16 num_inbound_streams;
        __be32 initial_tsn;
        __u8  params[0];
-} __packed sctp_inithdr_t;
+} sctp_inithdr_t;
 
 typedef struct sctp_init_chunk {
        sctp_chunkhdr_t chunk_hdr;
        sctp_inithdr_t init_hdr;
-} __packed sctp_init_chunk_t;
+} sctp_init_chunk_t;
 
 
 /* Section 3.3.2.1. IPv4 Address Parameter (5) */
 typedef struct sctp_ipv4addr_param {
        sctp_paramhdr_t param_hdr;
        struct in_addr  addr;
-} __packed sctp_ipv4addr_param_t;
+} sctp_ipv4addr_param_t;
 
 /* Section 3.3.2.1. IPv6 Address Parameter (6) */
 typedef struct sctp_ipv6addr_param {
        sctp_paramhdr_t param_hdr;
        struct in6_addr addr;
-} __packed sctp_ipv6addr_param_t;
+} sctp_ipv6addr_param_t;
 
 /* Section 3.3.2.1 Cookie Preservative (9) */
 typedef struct sctp_cookie_preserve_param {
        sctp_paramhdr_t param_hdr;
        __be32          lifespan_increment;
-} __packed sctp_cookie_preserve_param_t;
+} sctp_cookie_preserve_param_t;
 
 /* Section 3.3.2.1 Host Name Address (11) */
 typedef struct sctp_hostname_param {
        sctp_paramhdr_t param_hdr;
        uint8_t hostname[0];
-} __packed sctp_hostname_param_t;
+} sctp_hostname_param_t;
 
 /* Section 3.3.2.1 Supported Address Types (12) */
 typedef struct sctp_supported_addrs_param {
        sctp_paramhdr_t param_hdr;
        __be16 types[0];
-} __packed sctp_supported_addrs_param_t;
+} sctp_supported_addrs_param_t;
 
 /* Appendix A. ECN Capable (32768) */
 typedef struct sctp_ecn_capable_param {
        sctp_paramhdr_t param_hdr;
-} __packed sctp_ecn_capable_param_t;
+} sctp_ecn_capable_param_t;
 
 /* ADDIP Section 3.2.6 Adaptation Layer Indication */
 typedef struct sctp_adaptation_ind_param {
        struct sctp_paramhdr param_hdr;
        __be32 adaptation_ind;
-} __packed sctp_adaptation_ind_param_t;
+} sctp_adaptation_ind_param_t;
 
 /* ADDIP Section 4.2.7 Supported Extensions Parameter */
 typedef struct sctp_supported_ext_param {
        struct sctp_paramhdr param_hdr;
        __u8 chunks[0];
-} __packed sctp_supported_ext_param_t;
+} sctp_supported_ext_param_t;
 
 /* AUTH Section 3.1 Random */
 typedef struct sctp_random_param {
        sctp_paramhdr_t param_hdr;
        __u8 random_val[0];
-} __packed sctp_random_param_t;
+} sctp_random_param_t;
 
 /* AUTH Section 3.2 Chunk List */
 typedef struct sctp_chunks_param {
        sctp_paramhdr_t param_hdr;
        __u8 chunks[0];
-} __packed sctp_chunks_param_t;
+} sctp_chunks_param_t;
 
 /* AUTH Section 3.3 HMAC Algorithm */
 typedef struct sctp_hmac_algo_param {
        sctp_paramhdr_t param_hdr;
        __be16 hmac_ids[0];
-} __packed sctp_hmac_algo_param_t;
+} sctp_hmac_algo_param_t;
 
 /* RFC 2960.  Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2):
  *   The INIT ACK chunk is used to acknowledge the initiation of an SCTP
@@ -339,13 +347,13 @@ typedef sctp_init_chunk_t sctp_initack_chunk_t;
 typedef struct sctp_cookie_param {
        sctp_paramhdr_t p;
        __u8 body[0];
-} __packed sctp_cookie_param_t;
+} sctp_cookie_param_t;
 
 /* Section 3.3.3.1 Unrecognized Parameters (8) */
 typedef struct sctp_unrecognized_param {
        sctp_paramhdr_t param_hdr;
        sctp_paramhdr_t unrecognized;
-} __packed sctp_unrecognized_param_t;
+} sctp_unrecognized_param_t;
 
 
 
@@ -360,7 +368,7 @@ typedef struct sctp_unrecognized_param {
 typedef struct sctp_gap_ack_block {
        __be16 start;
        __be16 end;
-} __packed sctp_gap_ack_block_t;
+} sctp_gap_ack_block_t;
 
 typedef __be32 sctp_dup_tsn_t;
 
@@ -375,12 +383,12 @@ typedef struct sctp_sackhdr {
        __be16 num_gap_ack_blocks;
        __be16 num_dup_tsns;
        sctp_sack_variable_t variable[0];
-} __packed sctp_sackhdr_t;
+} sctp_sackhdr_t;
 
 typedef struct sctp_sack_chunk {
        sctp_chunkhdr_t chunk_hdr;
        sctp_sackhdr_t sack_hdr;
-} __packed sctp_sack_chunk_t;
+} sctp_sack_chunk_t;
 
 
 /* RFC 2960.  Section 3.3.5 Heartbeat Request (HEARTBEAT) (4):
@@ -392,12 +400,12 @@ typedef struct sctp_sack_chunk {
 
 typedef struct sctp_heartbeathdr {
        sctp_paramhdr_t info;
-} __packed sctp_heartbeathdr_t;
+} sctp_heartbeathdr_t;
 
 typedef struct sctp_heartbeat_chunk {
        sctp_chunkhdr_t chunk_hdr;
        sctp_heartbeathdr_t hb_hdr;
-} __packed sctp_heartbeat_chunk_t;
+} sctp_heartbeat_chunk_t;
 
 
 /* For the abort and shutdown ACK we must carry the init tag in the
@@ -406,7 +414,7 @@ typedef struct sctp_heartbeat_chunk {
  */
 typedef struct sctp_abort_chunk {
         sctp_chunkhdr_t uh;
-} __packed sctp_abort_chunk_t;
+} sctp_abort_chunk_t;
 
 
 /* For the graceful shutdown we must carry the tag (in common header)
@@ -414,12 +422,12 @@ typedef struct sctp_abort_chunk {
  */
 typedef struct sctp_shutdownhdr {
        __be32 cum_tsn_ack;
-} __packed sctp_shutdownhdr_t;
+} sctp_shutdownhdr_t;
 
 struct sctp_shutdown_chunk_t {
         sctp_chunkhdr_t    chunk_hdr;
         sctp_shutdownhdr_t shutdown_hdr;
-} __packed;
+};
 
 /* RFC 2960.  Section 3.3.10 Operation Error (ERROR) (9) */
 
@@ -427,12 +435,12 @@ typedef struct sctp_errhdr {
        __be16 cause;
        __be16 length;
        __u8  variable[0];
-} __packed sctp_errhdr_t;
+} sctp_errhdr_t;
 
 typedef struct sctp_operr_chunk {
         sctp_chunkhdr_t chunk_hdr;
        sctp_errhdr_t   err_hdr;
-} __packed sctp_operr_chunk_t;
+} sctp_operr_chunk_t;
 
 /* RFC 2960 3.3.10 - Operation Error
  *
@@ -522,7 +530,7 @@ typedef struct sctp_ecnehdr {
 typedef struct sctp_ecne_chunk {
        sctp_chunkhdr_t chunk_hdr;
        sctp_ecnehdr_t ence_hdr;
-} __packed sctp_ecne_chunk_t;
+} sctp_ecne_chunk_t;
 
 /* RFC 2960.  Appendix A.  Explicit Congestion Notification.
  *   Congestion Window Reduced (CWR) (13)
@@ -534,7 +542,7 @@ typedef struct sctp_cwrhdr {
 typedef struct sctp_cwr_chunk {
        sctp_chunkhdr_t chunk_hdr;
        sctp_cwrhdr_t cwr_hdr;
-} __packed sctp_cwr_chunk_t;
+} sctp_cwr_chunk_t;
 
 /* PR-SCTP
  * 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN)
@@ -585,17 +593,17 @@ typedef struct sctp_cwr_chunk {
 struct sctp_fwdtsn_skip {
        __be16 stream;
        __be16 ssn;
-} __packed;
+};
 
 struct sctp_fwdtsn_hdr {
        __be32 new_cum_tsn;
        struct sctp_fwdtsn_skip skip[0];
-} __packed;
+};
 
 struct sctp_fwdtsn_chunk {
        struct sctp_chunkhdr chunk_hdr;
        struct sctp_fwdtsn_hdr fwdtsn_hdr;
-} __packed;
+};
 
 
 /* ADDIP
@@ -633,17 +641,17 @@ struct sctp_fwdtsn_chunk {
 typedef struct sctp_addip_param {
        sctp_paramhdr_t param_hdr;
        __be32          crr_id;
-} __packed sctp_addip_param_t;
+} sctp_addip_param_t;
 
 typedef struct sctp_addiphdr {
        __be32  serial;
        __u8    params[0];
-} __packed sctp_addiphdr_t;
+} sctp_addiphdr_t;
 
 typedef struct sctp_addip_chunk {
        sctp_chunkhdr_t chunk_hdr;
        sctp_addiphdr_t addip_hdr;
-} __packed sctp_addip_chunk_t;
+} sctp_addip_chunk_t;
 
 /* AUTH
  * Section 4.1  Authentication Chunk (AUTH)
@@ -698,16 +706,47 @@ typedef struct sctp_authhdr {
        __be16 shkey_id;
        __be16 hmac_id;
        __u8   hmac[0];
-} __packed sctp_authhdr_t;
+} sctp_authhdr_t;
 
 typedef struct sctp_auth_chunk {
        sctp_chunkhdr_t chunk_hdr;
        sctp_authhdr_t auth_hdr;
-} __packed sctp_auth_chunk_t;
+} sctp_auth_chunk_t;
 
 struct sctp_infox {
        struct sctp_info *sctpinfo;
        struct sctp_association *asoc;
 };
 
+struct sctp_reconf_chunk {
+       sctp_chunkhdr_t chunk_hdr;
+       __u8 params[0];
+};
+
+struct sctp_strreset_outreq {
+       sctp_paramhdr_t param_hdr;
+       __u32 request_seq;
+       __u32 response_seq;
+       __u32 send_reset_at_tsn;
+       __u16 list_of_streams[0];
+};
+
+struct sctp_strreset_inreq {
+       sctp_paramhdr_t param_hdr;
+       __u32 request_seq;
+       __u16 list_of_streams[0];
+};
+
+struct sctp_strreset_tsnreq {
+       sctp_paramhdr_t param_hdr;
+       __u32 request_seq;
+};
+
+struct sctp_strreset_addstrm {
+       sctp_paramhdr_t param_hdr;
+       __u32 request_seq;
+       __u16 number_of_streams;
+       __u16 reserved;
+};
+
 #endif /* __LINUX_SCTP_H__ */
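
Dropping __packed across these definitions should not change any wire layout: every field is a __u8/__be16/__be32-sized member placed on its natural alignment, so the packed and unpacked layouts coincide, and the compiler is now free to emit ordinary aligned accesses. Illustrative compile-time checks of that assumption (not in-tree code):

	static void sctp_layout_checks(void)
	{
		BUILD_BUG_ON(sizeof(struct sctphdr) != 12);
		BUILD_BUG_ON(sizeof(struct sctp_chunkhdr) != 4);
		BUILD_BUG_ON(sizeof(struct sctp_paramhdr) != 4);
	}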
diff --git a/include/linux/siphash.h b/include/linux/siphash.h
new file mode 100644 (file)
index 0000000..fa7a6b9
--- /dev/null
@@ -0,0 +1,140 @@
+/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#ifndef _LINUX_SIPHASH_H
+#define _LINUX_SIPHASH_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#define SIPHASH_ALIGNMENT __alignof__(u64)
+typedef struct {
+       u64 key[2];
+} siphash_key_t;
+
+u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
+#endif
+
+u64 siphash_1u64(const u64 a, const siphash_key_t *key);
+u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
+u64 siphash_3u64(const u64 a, const u64 b, const u64 c,
+                const siphash_key_t *key);
+u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d,
+                const siphash_key_t *key);
+u64 siphash_1u32(const u32 a, const siphash_key_t *key);
+u64 siphash_3u32(const u32 a, const u32 b, const u32 c,
+                const siphash_key_t *key);
+
+static inline u64 siphash_2u32(const u32 a, const u32 b,
+                              const siphash_key_t *key)
+{
+       return siphash_1u64((u64)b << 32 | a, key);
+}
+static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c,
+                              const u32 d, const siphash_key_t *key)
+{
+       return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key);
+}
+
+
+static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
+                                    const siphash_key_t *key)
+{
+       if (__builtin_constant_p(len) && len == 4)
+               return siphash_1u32(le32_to_cpup((const __le32 *)data), key);
+       if (__builtin_constant_p(len) && len == 8)
+               return siphash_1u64(le64_to_cpu(data[0]), key);
+       if (__builtin_constant_p(len) && len == 16)
+               return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+                                   key);
+       if (__builtin_constant_p(len) && len == 24)
+               return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+                                   le64_to_cpu(data[2]), key);
+       if (__builtin_constant_p(len) && len == 32)
+               return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
+                                   le64_to_cpu(data[2]), le64_to_cpu(data[3]),
+                                   key);
+       return __siphash_aligned(data, len, key);
+}
+
+/**
+ * siphash - compute 64-bit siphash PRF value
+ * @data: buffer to hash
+ * @len: size of @data
+ * @key: the siphash key
+ */
+static inline u64 siphash(const void *data, size_t len,
+                         const siphash_key_t *key)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+               return __siphash_unaligned(data, len, key);
+#endif
+       return ___siphash_aligned(data, len, key);
+}
+
+#define HSIPHASH_ALIGNMENT __alignof__(unsigned long)
+typedef struct {
+       unsigned long key[2];
+} hsiphash_key_t;
+
+u32 __hsiphash_aligned(const void *data, size_t len,
+                      const hsiphash_key_t *key);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+                        const hsiphash_key_t *key);
+#endif
+
+u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
+u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
+u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c,
+                 const hsiphash_key_t *key);
+u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d,
+                 const hsiphash_key_t *key);
+
+static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
+                                     const hsiphash_key_t *key)
+{
+       if (__builtin_constant_p(len) && len == 4)
+               return hsiphash_1u32(le32_to_cpu(data[0]), key);
+       if (__builtin_constant_p(len) && len == 8)
+               return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+                                    key);
+       if (__builtin_constant_p(len) && len == 12)
+               return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+                                    le32_to_cpu(data[2]), key);
+       if (__builtin_constant_p(len) && len == 16)
+               return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
+                                    le32_to_cpu(data[2]), le32_to_cpu(data[3]),
+                                    key);
+       return __hsiphash_aligned(data, len, key);
+}
+
+/**
+ * hsiphash - compute 32-bit hsiphash PRF value
+ * @data: buffer to hash
+ * @len: size of @data
+ * @key: the hsiphash key
+ */
+static inline u32 hsiphash(const void *data, size_t len,
+                          const hsiphash_key_t *key)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+       if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+               return __hsiphash_unaligned(data, len, key);
+#endif
+       return ___hsiphash_aligned(data, len, key);
+}
+
+#endif /* _LINUX_SIPHASH_H */
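
Typical use keys the hash once with random bytes and then hashes short buffers; as ___siphash_aligned() shows, constant small lengths are dispatched at compile time to the fixed siphash_Nu64() forms. A minimal sketch:

	#include <linux/siphash.h>
	#include <linux/random.h>

	static siphash_key_t my_key;

	static void my_hash_init(void)
	{
		get_random_bytes(&my_key, sizeof(my_key));  /* key once */
	}

	static u64 my_hash64(const void *buf, size_t len)
	{
		return siphash(buf, len, &my_key);  /* secure 64-bit PRF */
	}

hsiphash() is the same shape with a hsiphash_key_t and a 32-bit result, for hashtables where the faster, non-cryptographic variant the header describes is acceptable.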
index a410715bbef8889d148a6b3f8dbd6afc4ae6f4d0..69ccd26369112031a9c47b246da6e94d576c1ebd 100644 (file)
@@ -585,20 +585,22 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
  *     @cloned: Head may be cloned (check refcnt to be sure)
  *     @ip_summed: Driver fed us an IP checksum
  *     @nohdr: Payload reference only, must not modify header
- *     @nfctinfo: Relationship of this skb to the connection
  *     @pkt_type: Packet class
  *     @fclone: skbuff clone status
  *     @ipvs_property: skbuff is owned by ipvs
+ *     @tc_skip_classify: do not classify packet. set by IFB device
+ *     @tc_at_ingress: used within tc_classify to distinguish in/egress
+ *     @tc_redirected: packet was redirected by a tc action
+ *     @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
  *     @peeked: this packet has been seen already, so stats have been
  *             done for it, don't do them again
  *     @nf_trace: netfilter packet trace flag
  *     @protocol: Packet protocol from driver
  *     @destructor: Destruct function
- *     @nfct: Associated connection, if any
+ *     @_nfct: Associated connection, if any (with nfctinfo bits)
  *     @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
  *     @skb_iif: ifindex of device we arrived on
  *     @tc_index: Traffic control index
- *     @tc_verd: traffic control verdict
  *     @hash: the packet hash
  *     @queue_mapping: Queue mapping for multiqueue devices
  *     @xmit_more: More SKBs are pending for this queue
@@ -610,6 +612,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
  *     @wifi_acked_valid: wifi_acked was set
  *     @wifi_acked: whether frame was acked on wifi or not
  *     @no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
+ *     @dst_pending_confirm: need to confirm neighbour
   *    @napi_id: id of the NAPI struct this skb came from
  *     @secmark: security marking
  *     @mark: Generic packet mark
@@ -668,7 +671,7 @@ struct sk_buff {
        struct  sec_path        *sp;
 #endif
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-       struct nf_conntrack     *nfct;
+       unsigned long            _nfct;
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        struct nf_bridge_info   *nf_bridge;
@@ -721,7 +724,6 @@ struct sk_buff {
        __u8                    pkt_type:3;
        __u8                    pfmemalloc:1;
        __u8                    ignore_df:1;
-       __u8                    nfctinfo:3;
 
        __u8                    nf_trace:1;
        __u8                    ip_summed:2;
@@ -740,6 +742,7 @@ struct sk_buff {
        __u8                    csum_level:2;
        __u8                    csum_bad:1;
 
+       __u8                    dst_pending_confirm:1;
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
        __u8                    ndisc_nodetype:2;
 #endif
@@ -749,13 +752,15 @@ struct sk_buff {
 #ifdef CONFIG_NET_SWITCHDEV
        __u8                    offload_fwd_mark:1;
 #endif
-       /* 2, 4 or 5 bit hole */
+#ifdef CONFIG_NET_CLS_ACT
+       __u8                    tc_skip_classify:1;
+       __u8                    tc_at_ingress:1;
+       __u8                    tc_redirected:1;
+       __u8                    tc_from_ingress:1;
+#endif
 
 #ifdef CONFIG_NET_SCHED
        __u16                   tc_index;       /* traffic control index */
-#ifdef CONFIG_NET_CLS_ACT
-       __u16                   tc_verd;        /* traffic control verdict */
-#endif
 #endif
 
        union {
@@ -836,6 +841,7 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb)
 #define SKB_DST_NOREF  1UL
 #define SKB_DST_PTRMASK        ~(SKB_DST_NOREF)
 
+#define SKB_NFCT_PTRMASK       ~(7UL)
 /**
  * skb_dst - returns skb dst_entry
  * @skb: buffer
@@ -2178,6 +2184,11 @@ static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
        return skb->head + skb->mac_header;
 }
 
+static inline int skb_mac_offset(const struct sk_buff *skb)
+{
+       return skb_mac_header(skb) - skb->data;
+}
+
 static inline int skb_mac_header_was_set(const struct sk_buff *skb)
 {
        return skb->mac_header != (typeof(skb->mac_header))~0U;
@@ -3553,6 +3564,15 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
        skb->csum = csum_add(skb->csum, delta);
 }
 
+static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+       return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
+#else
+       return NULL;
+#endif
+}
+
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 void nf_conntrack_destroy(struct nf_conntrack *nfct);
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)
@@ -3581,8 +3601,8 @@ static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
 static inline void nf_reset(struct sk_buff *skb)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-       nf_conntrack_put(skb->nfct);
-       skb->nfct = NULL;
+       nf_conntrack_put(skb_nfct(skb));
+       skb->_nfct = 0;
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        nf_bridge_put(skb->nf_bridge);
@@ -3602,10 +3622,8 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
                             bool copy)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-       dst->nfct = src->nfct;
-       nf_conntrack_get(src->nfct);
-       if (copy)
-               dst->nfctinfo = src->nfctinfo;
+       dst->_nfct = src->_nfct;
+       nf_conntrack_get(skb_nfct(src));
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        dst->nf_bridge  = src->nf_bridge;
@@ -3620,7 +3638,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-       nf_conntrack_put(dst->nfct);
+       nf_conntrack_put(skb_nfct(dst));
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        nf_bridge_put(dst->nf_bridge);
@@ -3652,9 +3670,7 @@ static inline bool skb_irq_freeable(const struct sk_buff *skb)
 #if IS_ENABLED(CONFIG_XFRM)
                !skb->sp &&
 #endif
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-               !skb->nfct &&
-#endif
+               !skb_nfct(skb) &&
                !skb->_skb_refdst &&
                !skb_has_frag_list(skb);
 }
@@ -3689,6 +3705,16 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
        return skb->queue_mapping != 0;
 }
 
+static inline void skb_set_dst_pending_confirm(struct sk_buff *skb, u32 val)
+{
+       skb->dst_pending_confirm = val;
+}
+
+static inline bool skb_get_dst_pending_confirm(const struct sk_buff *skb)
+{
+       return skb->dst_pending_confirm != 0;
+}
+
 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
 {
 #ifdef CONFIG_XFRM
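
The _nfct rework above folds the conntrack pointer and the 3-bit nfctinfo field into a single unsigned long, which is why SKB_NFCT_PTRMASK is ~(7UL) and why skb_nfct() masks before casting. A small runnable sketch of the packing, assuming (as conntrack entries do) at least 8-byte object alignment; the ctinfo value here is arbitrary:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define PTRMASK (~7UL)  /* mirrors SKB_NFCT_PTRMASK */

int main(void)
{
        /* 8-byte alignment leaves the low three bits of the pointer free
         * to carry the 3-bit ctinfo; the kernel keeps the combined word
         * in an unsigned long, as in the struct above */
        void *ct = aligned_alloc(8, 32);
        unsigned long info = 5;                  /* arbitrary ctinfo for the demo */
        unsigned long _nfct = (unsigned long)ct | info;

        assert((void *)(_nfct & PTRMASK) == ct); /* skb_nfct() equivalent */
        assert((_nfct & 7UL) == info);           /* old nfctinfo equivalent */
        printf("ptr=%p info=%lu\n", (void *)(_nfct & PTRMASK), _nfct & 7UL);
        free(ct);
        return 0;
}
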
index 7b88697929e9ef6f729ce9fd1d9e512d352f8b94..b8478ee7a71f2757195a0aa135b0eeb38c766d9c 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef __QCOM_SMEM_STATE__
 #define __QCOM_SMEM_STATE__
 
-#include <linux/errno.h>
+#include <linux/err.h>
 
 struct device_node;
 struct qcom_smem_state;
index 35cb9264e0d50bb8b8cce8f569d950de2d6694b2..2b7882666ef6950d4b2c24033849722a7839e943 100644 (file)
@@ -41,6 +41,8 @@
 #define KNAV_DMA_DESC_RETQ_SHIFT               0
 #define KNAV_DMA_DESC_RETQ_MASK                        MASK(14)
 #define KNAV_DMA_DESC_BUF_LEN_MASK             MASK(22)
+#define KNAV_DMA_DESC_EFLAGS_MASK              MASK(4)
+#define KNAV_DMA_DESC_EFLAGS_SHIFT             20
 
 #define KNAV_DMA_NUM_EPIB_WORDS                        4
 #define KNAV_DMA_NUM_PS_WORDS                  16
index b5cc5a6d7011a06e0310d5bf0136dd42569bda03..082027457825a9403d52e8bbdf7a5c0f47dbd479 100644 (file)
@@ -92,9 +92,9 @@ struct cmsghdr {
 
 #define CMSG_ALIGN(len) ( ((len)+sizeof(long)-1) & ~(sizeof(long)-1) )
 
-#define CMSG_DATA(cmsg)        ((void *)((char *)(cmsg) + CMSG_ALIGN(sizeof(struct cmsghdr))))
-#define CMSG_SPACE(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + CMSG_ALIGN(len))
-#define CMSG_LEN(len) (CMSG_ALIGN(sizeof(struct cmsghdr)) + (len))
+#define CMSG_DATA(cmsg)        ((void *)((char *)(cmsg) + sizeof(struct cmsghdr)))
+#define CMSG_SPACE(len) (sizeof(struct cmsghdr) + CMSG_ALIGN(len))
+#define CMSG_LEN(len) (sizeof(struct cmsghdr) + (len))
 
 #define __CMSG_FIRSTHDR(ctl,len) ((len) >= sizeof(struct cmsghdr) ? \
                                  (struct cmsghdr *)(ctl) : \
@@ -202,8 +202,12 @@ struct ucred {
 #define AF_VSOCK       40      /* vSockets                     */
 #define AF_KCM         41      /* Kernel Connection Multiplexor*/
 #define AF_QIPCRTR     42      /* Qualcomm IPC Router          */
+#define AF_SMC         43      /* smc sockets: reserve number for
+                                * PF_SMC protocol family that
+                                * reuses AF_INET address family
+                                */
 
-#define AF_MAX         43      /* For now.. */
+#define AF_MAX         44      /* For now.. */
 
 /* Protocol families, same as address families. */
 #define PF_UNSPEC      AF_UNSPEC
@@ -251,6 +255,7 @@ struct ucred {
 #define PF_VSOCK       AF_VSOCK
 #define PF_KCM         AF_KCM
 #define PF_QIPCRTR     AF_QIPCRTR
+#define PF_SMC         AF_SMC
 #define PF_MAX         AF_MAX
 
 /* Maximum queue length specifiable by listen.  */
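
Dropping CMSG_ALIGN() around sizeof(struct cmsghdr) is safe because the header size is already a multiple of sizeof(long) on supported ABIs, so the macro was a no-op there. A quick userspace check of the resulting arithmetic via the libc macros (exact numbers depend on the ABI; on LP64 the header is 16 bytes):

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
        /* on LP64 glibc: hdr=16 (already long-aligned), so
         * CMSG_LEN(4)=20 and CMSG_SPACE(4)=24 */
        printf("hdr=%zu len(4)=%zu space(4)=%zu\n",
               sizeof(struct cmsghdr),
               (size_t)CMSG_LEN(4), (size_t)CMSG_SPACE(4));
        return 0;
}
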
index 266dab9ad78240efbecaa992dba735a7f82d6981..fc273e9d5f67625b9ddf611746d1a09ff555e4ab 100644 (file)
@@ -103,7 +103,6 @@ struct stmmac_axi {
        u32 axi_wr_osr_lmt;
        u32 axi_rd_osr_lmt;
        bool axi_kbbe;
-       bool axi_axi_all;
        u32 axi_blen[AXI_BLEN];
        bool axi_fb;
        bool axi_mb;
@@ -135,13 +134,18 @@ struct plat_stmmacenet_data {
        int tx_fifo_size;
        int rx_fifo_size;
        void (*fix_mac_speed)(void *priv, unsigned int speed);
-       void (*bus_setup)(void __iomem *ioaddr);
        int (*init)(struct platform_device *pdev, void *priv);
        void (*exit)(struct platform_device *pdev, void *priv);
        void *bsp_priv;
+       struct clk *stmmac_clk;
+       struct clk *pclk;
+       struct clk *clk_ptp_ref;
+       unsigned int clk_ptp_rate;
+       struct reset_control *stmmac_rst;
        struct stmmac_axi *axi;
        int has_gmac4;
        bool tso_en;
        int mac_port_sel_speed;
+       bool en_tx_lpi_clockgating;
 };
 #endif
index c93f4b3a59cb7a9f578ec18dd2f6c56014195a90..cfc2d9506ce8077af1ec92eb7086fd52ce4fe1ac 100644 (file)
@@ -212,6 +212,8 @@ struct tcp_sock {
        /* Information of the most recently (s)acked skb */
        struct tcp_rack {
                struct skb_mstamp mstamp; /* (Re)sent time of the skb */
+               u32 rtt_us;  /* Associated RTT */
+               u32 end_seq; /* Ending TCP sequence of the skb */
                u8 advanced; /* mstamp advanced since last lost marking */
                u8 reord;    /* reordering detected */
        } rack;
@@ -220,15 +222,15 @@ struct tcp_sock {
        u32     chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
        u8      chrono_type:2,  /* current chronograph type */
                rate_app_limited:1,  /* rate_{delivered,interval_us} limited? */
-               unused:5;
+               fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
+               unused:4;
        u8      nonagle     : 4,/* Disable Nagle algorithm?             */
                thin_lto    : 1,/* Use linear timeouts for thin streams */
-               thin_dupack : 1,/* Fast retransmit on first dupack      */
+               unused1     : 1,
                repair      : 1,
                frto        : 1;/* F-RTO (RFC5682) activated in CA_Loss */
        u8      repair_queue;
-       u8      do_early_retrans:1,/* Enable RFC5827 early-retransmit  */
-               syn_data:1,     /* SYN includes data */
+       u8      syn_data:1,     /* SYN includes data */
                syn_fastopen:1, /* SYN includes Fast Open option */
                syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
                syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
@@ -310,7 +312,6 @@ struct tcp_sock {
                                         */
 
        int     lost_cnt_hint;
-       u32     retransmit_high;        /* L-bits may be on up to this seqno */
 
        u32     prior_ssthresh; /* ssthresh saved at recovery start     */
        u32     high_seq;       /* snd_nxt at onset of congestion       */
@@ -444,4 +445,13 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
 
 struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk);
 
+static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
+{
+       /* We use READ_ONCE() here because socket might not be locked.
+        * This happens for listeners.
+        */
+       u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
+
+       return (user_mss && user_mss < mss) ? user_mss : mss;
+}
 #endif /* _LINUX_TCP_H */
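
tcp_mss_clamp() centralizes a clamp that callers previously open-coded: a user_mss of 0 means "no user limit", otherwise the smaller of the two values wins, and READ_ONCE() covers the unlocked listener case. A trivial userspace rendering of the same logic:

#include <stdint.h>
#include <stdio.h>

/* same logic as tcp_mss_clamp(): user_mss of 0 means "no user limit" */
static uint16_t mss_clamp(uint16_t user_mss, uint16_t mss)
{
        return (user_mss && user_mss < mss) ? user_mss : mss;
}

int main(void)
{
        printf("%u\n", (unsigned)mss_clamp(0, 1460));    /* 1460: unrestricted */
        printf("%u\n", (unsigned)mss_clamp(1200, 1460)); /* 1200: user limit wins */
        printf("%u\n", (unsigned)mss_clamp(1500, 1460)); /* 1460: path value is smaller */
        return 0;
}
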
index be007610ceb08aaa7cad1073effe73b24adff66d..0f165507495c6e649c1942c982efa0d1be20befa 100644 (file)
@@ -33,7 +33,8 @@ const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
                                    unsigned int bitmask_size);
 
 const char *trace_print_hex_seq(struct trace_seq *p,
-                               const unsigned char *buf, int len);
+                               const unsigned char *buf, int len,
+                               bool concatenate);
 
 const char *trace_print_array_seq(struct trace_seq *p,
                                   const void *buf, int count,
index 2d095fc6020452fa69e5e3b60571ce8cf34ea82f..4dff73a8975836dab9e49ce57ddcb1d6a2e79782 100644 (file)
 
 #include <uapi/linux/uuid.h>
 
+/*
+ * V1 (time-based) UUID definition [RFC 4122].
+ * - the timestamp is a 60-bit value, split 32/16/12, and goes in 100ns
+ *   increments since midnight 15th October 1582
+ *   - add UUID_TO_UNIX_TIME to convert unix time in 100ns units to UUID
+ *     time
+ * - the clock sequence is a 14-bit counter to avoid duplicate times
+ */
+struct uuid_v1 {
+       __be32          time_low;                       /* low part of timestamp */
+       __be16          time_mid;                       /* mid part of timestamp */
+       __be16          time_hi_and_version;            /* high part of timestamp and version  */
+#define UUID_TO_UNIX_TIME      0x01b21dd213814000ULL
+#define UUID_TIMEHI_MASK       0x0fff
+#define UUID_VERSION_TIME      0x1000  /* time-based UUID */
+#define UUID_VERSION_NAME      0x3000  /* name-based UUID */
+#define UUID_VERSION_RANDOM    0x4000  /* (pseudo-)random generated UUID */
+       u8              clock_seq_hi_and_reserved;      /* clock seq hi and variant */
+#define UUID_CLOCKHI_MASK      0x3f
+#define UUID_VARIANT_STD       0x80
+       u8              clock_seq_low;                  /* clock seq low */
+       u8              node[6];                        /* spatially unique node ID (MAC addr) */
+};
+
 /*
  * The length of a UUID string ("aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee")
  * not including trailing NUL.
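
UUID_TO_UNIX_TIME is the count of 100ns ticks between the v1 UUID epoch (15 October 1582) and the Unix epoch, so converting between the two timescales is a single offset. A hedged userspace sketch going from Unix time to a v1 UUID timestamp:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define UUID_TO_UNIX_TIME 0x01b21dd213814000ULL /* 100ns ticks, 1582-10-15 .. 1970-01-01 */

int main(void)
{
        /* a v1 UUID timestamp is 100ns units since 15 October 1582, so
         * current unix time maps to it with one multiply and one offset */
        uint64_t now = (uint64_t)time(NULL);
        uint64_t uuid_ts = now * 10000000ULL + UUID_TO_UNIX_TIME;

        printf("unix=%llu uuid_ts=%llu\n",
               (unsigned long long)now, (unsigned long long)uuid_ts);
        return 0;
}
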
index d5eb5479a4254356a39e76c805743d35458dd07b..04b0d3f95043c66856c6a6f4cab078c4791d35e8 100644 (file)
@@ -132,12 +132,16 @@ static inline struct virtio_device *dev_to_virtio(struct device *_dev)
        return container_of(_dev, struct virtio_device, dev);
 }
 
+void virtio_add_status(struct virtio_device *dev, unsigned int status);
 int register_virtio_device(struct virtio_device *dev);
 void unregister_virtio_device(struct virtio_device *dev);
 
 void virtio_break_device(struct virtio_device *dev);
 
 void virtio_config_changed(struct virtio_device *dev);
+void virtio_config_disable(struct virtio_device *dev);
+void virtio_config_enable(struct virtio_device *dev);
+int virtio_finalize_features(struct virtio_device *dev);
 #ifdef CONFIG_PM_SLEEP
 int virtio_device_freeze(struct virtio_device *dev);
 int virtio_device_restore(struct virtio_device *dev);
index 1d716449209e4753a297c61a287077a1eb96e6d8..cfa2ae33da9a19969cb3951b2a2278f61a7fd93f 100644 (file)
@@ -41,6 +41,7 @@ struct tc_action {
        struct rcu_head                 tcfa_rcu;
        struct gnet_stats_basic_cpu __percpu *cpu_bstats;
        struct gnet_stats_queue __percpu *cpu_qstats;
+       struct tc_cookie        *act_cookie;
 };
 #define tcf_head       common.tcfa_head
 #define tcf_index      common.tcfa_index
index 8f998afc138434f672ab28883287e463f60f4733..17c6fd84e287808eca08503b87a56a5d6fc0cc5c 100644 (file)
@@ -88,9 +88,7 @@ int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
                      u32 banned_flags);
 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
                    u32 banned_flags);
-int ipv4_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
-                        bool match_wildcard);
-int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
+int inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
                         bool match_wildcard);
 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
index 5e0f891d476c299d91fde14ce9cdeedc9a7e26c6..65619a2de6f44178d037823c3ad30c0f83f12456 100644 (file)
@@ -35,6 +35,22 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32
        return n;
 }
 
+static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key)
+{
+       struct neighbour *n;
+
+       rcu_read_lock_bh();
+       n = __ipv4_neigh_lookup_noref(dev, key);
+       if (n) {
+               unsigned long now = jiffies;
+
+               /* avoid dirtying neighbour */
+               if (n->confirmed != now)
+                       n->confirmed = now;
+       }
+       rcu_read_unlock_bh();
+}
+
 void arp_init(void);
 int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg);
 void arp_send(int type, int ptype, __be32 dest_ip,
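
The conditional store in __ipv4_confirm_neigh() is a deliberate micro-optimization: n->confirmed sits on a hot, shared cacheline, so it is only written when the jiffies value has actually changed. The idiom in isolation, as a toy single-threaded sketch:

#include <stdio.h>

static unsigned long confirmed; /* stands in for n->confirmed */

static void confirm(unsigned long now)
{
        /* compare before storing: in the common case the value is already
         * current and the shared cacheline stays clean */
        if (confirmed != now)
                confirmed = now;
}

int main(void)
{
        confirm(100);   /* first call writes */
        confirm(100);   /* second call is a read-only no-op */
        printf("confirmed=%lu\n", confirmed);
        return 0;
}
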
index d73b849e29a6869e282103f329c3a02f4e1a6882..b8d637225a07ddd2c0183b75a42cd5c9c5a69851 100644 (file)
@@ -33,10 +33,6 @@ struct napi_struct;
 extern unsigned int sysctl_net_busy_read __read_mostly;
 extern unsigned int sysctl_net_busy_poll __read_mostly;
 
-/* return values from ndo_ll_poll */
-#define LL_FLUSH_FAILED                -1
-#define LL_FLUSH_BUSY          -2
-
 static inline bool net_busy_loop_on(void)
 {
        return sysctl_net_busy_poll;
index 814be4b4200c253da028555613a67eead223933b..c92dc03c852825fd3f92df6bbae0e7e77409c6d4 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright 2015-2016 Intel Deutschland GmbH
+ * Copyright 2015-2017 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -311,6 +311,34 @@ struct ieee80211_supported_band {
        struct ieee80211_sta_vht_cap vht_cap;
 };
 
+/**
+ * wiphy_read_of_freq_limits - read frequency limits from device tree
+ *
+ * @wiphy: the wireless device to get extra limits for
+ *
+ * Some devices may have extra limitations specified in DT. This may be useful
+ * for chipsets that normally support more bands but are limited due to board
+ * design (e.g. by antennas or external power amplifier).
+ *
+ * This function reads info from DT and uses it to *modify* channels (disable
+ * unavailable ones). It's usually a *bad* idea to use it in drivers with
+ * shared channel data as DT limitations are device specific. You should make
+ * sure to call it only if channels in wiphy are copied and can be modified
+ * without affecting other devices.
+ *
+ * As this function accesses the device node, it has to be called after
+ * set_wiphy_dev. It also modifies channels, so they have to be set first.
+ * If using this helper, call it before wiphy_register().
+ */
+#ifdef CONFIG_OF
+void wiphy_read_of_freq_limits(struct wiphy *wiphy);
+#else /* CONFIG_OF */
+static inline void wiphy_read_of_freq_limits(struct wiphy *wiphy)
+{
+}
+#endif /* !CONFIG_OF */
+
+
 /*
  * Wireless hardware/device configuration structures and methods
  */
@@ -720,6 +748,10 @@ struct cfg80211_bitrate_mask {
  * @pbss: If set, start as a PCP instead of AP. Relevant for DMG
  *     networks.
  * @beacon_rate: bitrate to be used for beacons
+ * @ht_cap: HT capabilities (or %NULL if HT isn't enabled)
+ * @vht_cap: VHT capabilities (or %NULL if VHT isn't enabled)
+ * @ht_required: stations must support HT
+ * @vht_required: stations must support VHT
  */
 struct cfg80211_ap_settings {
        struct cfg80211_chan_def chandef;
@@ -740,6 +772,10 @@ struct cfg80211_ap_settings {
        const struct cfg80211_acl_data *acl;
        bool pbss;
        struct cfg80211_bitrate_mask beacon_rate;
+
+       const struct ieee80211_ht_cap *ht_cap;
+       const struct ieee80211_vht_cap *vht_cap;
+       bool ht_required, vht_required;
 };
 
 /**
@@ -1591,6 +1627,17 @@ struct cfg80211_sched_scan_plan {
        u32 iterations;
 };
 
+/**
+ * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment.
+ *
+ * @band: band of BSS which should match for RSSI level adjustment.
+ * @delta: value of RSSI level adjustment.
+ */
+struct cfg80211_bss_select_adjust {
+       enum nl80211_band band;
+       s8 delta;
+};
+
 /**
  * struct cfg80211_sched_scan_request - scheduled scan request description
  *
@@ -1626,6 +1673,16 @@ struct cfg80211_sched_scan_plan {
  *     cycle.  The driver may ignore this parameter and start
  *     immediately (or at any other time), if this feature is not
  *     supported.
+ * @relative_rssi_set: Indicates whether @relative_rssi is set or not.
+ * @relative_rssi: Relative RSSI threshold in dB to restrict scan result
+ *     reporting in connected state to cases where a matching BSS is determined
+ *     to have better or slightly worse RSSI than the current connected BSS.
+ *     The relative RSSI threshold values are ignored in disconnected state.
+ * @rssi_adjust: delta dB of RSSI preference to be given to the BSSs that belong
+ *     to the specified band while deciding whether a better BSS is reported
+ *     using @relative_rssi. If delta is a negative number, the BSSs that
+ *     belong to the specified band will be penalized by delta dB in relative
+ *     comparisons.
  */
 struct cfg80211_sched_scan_request {
        struct cfg80211_ssid *ssids;
@@ -1645,6 +1702,10 @@ struct cfg80211_sched_scan_request {
        u8 mac_addr[ETH_ALEN] __aligned(2);
        u8 mac_addr_mask[ETH_ALEN] __aligned(2);
 
+       bool relative_rssi_set;
+       s8 relative_rssi;
+       struct cfg80211_bss_select_adjust rssi_adjust;
+
        /* internal */
        struct wiphy *wiphy;
        struct net_device *dev;
@@ -1952,17 +2013,6 @@ struct cfg80211_ibss_params {
        struct ieee80211_ht_cap ht_capa_mask;
 };
 
-/**
- * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment.
- *
- * @band: band of BSS which should match for RSSI level adjustment.
- * @delta: value of RSSI level adjustment.
- */
-struct cfg80211_bss_select_adjust {
-       enum nl80211_band band;
-       s8 delta;
-};
-
 /**
  * struct cfg80211_bss_selection - connection parameters for BSS selection.
  *
@@ -2366,11 +2416,13 @@ struct cfg80211_qos_map {
  * This struct defines NAN configuration parameters
  *
  * @master_pref: master preference (1 - 255)
- * @dual: dual band operation mode, see &enum nl80211_nan_dual_band_conf
+ * @bands: operating bands, a bitmap of &enum nl80211_band values.
+ *     For instance, for NL80211_BAND_2GHZ, bit 0 would be set
+ *     (i.e. BIT(NL80211_BAND_2GHZ)).
  */
 struct cfg80211_nan_conf {
        u8 master_pref;
-       u8 dual;
+       u8 bands;
 };
 
 /**
@@ -2378,11 +2430,11 @@ struct cfg80211_nan_conf {
  * configuration
  *
  * @CFG80211_NAN_CONF_CHANGED_PREF: master preference
- * @CFG80211_NAN_CONF_CHANGED_DUAL: dual band operation
+ * @CFG80211_NAN_CONF_CHANGED_BANDS: operating bands
  */
 enum cfg80211_nan_conf_changes {
        CFG80211_NAN_CONF_CHANGED_PREF = BIT(0),
-       CFG80211_NAN_CONF_CHANGED_DUAL = BIT(1),
+       CFG80211_NAN_CONF_CHANGED_BANDS = BIT(1),
 };
 
 /**
@@ -3136,22 +3188,6 @@ struct ieee80211_iface_limit {
 
 /**
  * struct ieee80211_iface_combination - possible interface combination
- * @limits: limits for the given interface types
- * @n_limits: number of limitations
- * @num_different_channels: can use up to this many different channels
- * @max_interfaces: maximum number of interfaces in total allowed in this
- *     group
- * @beacon_int_infra_match: In this combination, the beacon intervals
- *     between infrastructure and AP types must match. This is required
- *     only in special cases.
- * @radar_detect_widths: bitmap of channel widths supported for radar detection
- * @radar_detect_regions: bitmap of regions supported for radar detection
- * @beacon_int_min_gcd: This interface combination supports different
- *     beacon intervals.
- *     = 0 - all beacon intervals for different interface must be same.
- *     > 0 - any beacon interval for the interface part of this combination AND
- *           *GCD* of all beacon intervals from beaconing interfaces of this
- *           combination must be greater or equal to this value.
  *
  * With this structure the driver can describe which interface
  * combinations it supports concurrently.
@@ -3210,13 +3246,60 @@ struct ieee80211_iface_limit {
  *
  */
 struct ieee80211_iface_combination {
+       /**
+        * @limits:
+        * limits for the given interface types
+        */
        const struct ieee80211_iface_limit *limits;
+
+       /**
+        * @num_different_channels:
+        * can use up to this many different channels
+        */
        u32 num_different_channels;
+
+       /**
+        * @max_interfaces:
+        * maximum number of interfaces in total allowed in this group
+        */
        u16 max_interfaces;
+
+       /**
+        * @n_limits:
+        * number of limitations
+        */
        u8 n_limits;
+
+       /**
+        * @beacon_int_infra_match:
+        * In this combination, the beacon intervals between infrastructure
+        * and AP types must match. This is required only in special cases.
+        */
        bool beacon_int_infra_match;
+
+       /**
+        * @radar_detect_widths:
+        * bitmap of channel widths supported for radar detection
+        */
        u8 radar_detect_widths;
+
+       /**
+        * @radar_detect_regions:
+        * bitmap of regions supported for radar detection
+        */
        u8 radar_detect_regions;
+
+       /**
+        * @beacon_int_min_gcd:
+        * This interface combination supports different beacon intervals.
+        *
+        * = 0
+        *   all beacon intervals for different interfaces must be the same.
+        * > 0
+        *   any beacon interval of an interface that is part of this
+        *   combination, AND the GCD of all beacon intervals from beaconing
+        *   interfaces of this combination, must be greater than or equal to
+        *   this value.
+        */
        u32 beacon_int_min_gcd;
 };
 
@@ -3515,6 +3598,10 @@ struct wiphy_iftype_ext_capab {
  *     attribute indices defined in &enum nl80211_bss_select_attr.
  *
  * @cookie_counter: unique generic cookie counter, used to identify objects.
+ * @nan_supported_bands: bands supported by the device in NAN mode, a
+ *     bitmap of &enum nl80211_band values.  For instance, for
+ *     NL80211_BAND_2GHZ, bit 0 would be set
+ *     (i.e. BIT(NL80211_BAND_2GHZ)).
  */
 struct wiphy {
        /* assign these fields before you register the wiphy */
@@ -3646,6 +3733,8 @@ struct wiphy {
 
        u64 cookie_counter;
 
+       u8 nan_supported_bands;
+
        char priv[0] __aligned(NETDEV_ALIGN);
 };
 
@@ -3837,6 +3926,9 @@ struct cfg80211_cached_keys;
  * @conn: (private) cfg80211 software SME connection state machine data
  * @connect_keys: (private) keys to set after connection is established
  * @conn_bss_type: connecting/connected BSS type
+ * @conn_owner_nlportid: (private) connection owner socket port ID
+ * @disconnect_wk: (private) auto-disconnect work
+ * @disconnect_bssid: (private) the BSSID to use for auto-disconnect
  * @ibss_fixed: (private) IBSS is using fixed BSSID
  * @ibss_dfs_possible: (private) IBSS may change to a DFS channel
  * @event_list: (private) list for internal event processing
@@ -3868,6 +3960,10 @@ struct wireless_dev {
        struct cfg80211_conn *conn;
        struct cfg80211_cached_keys *connect_keys;
        enum ieee80211_bss_type conn_bss_type;
+       u32 conn_owner_nlportid;
+
+       struct work_struct disconnect_wk;
+       u8 disconnect_bssid[ETH_ALEN];
 
        struct list_head event_list;
        spinlock_t event_lock;
@@ -3955,26 +4051,15 @@ int ieee80211_channel_to_frequency(int chan, enum nl80211_band band);
  */
 int ieee80211_frequency_to_channel(int freq);
 
-/*
- * Name indirection necessary because the ieee80211 code also has
- * a function named "ieee80211_get_channel", so if you include
- * cfg80211's header file you get cfg80211's version, if you try
- * to include both header files you'll (rightfully!) get a symbol
- * clash.
- */
-struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
-                                                 int freq);
 /**
  * ieee80211_get_channel - get channel struct from wiphy for specified frequency
+ *
  * @wiphy: the struct wiphy to get the channel for
  * @freq: the center frequency of the channel
+ *
  * Return: The channel struct from @wiphy at @freq.
  */
-static inline struct ieee80211_channel *
-ieee80211_get_channel(struct wiphy *wiphy, int freq)
-{
-       return __ieee80211_get_channel(wiphy, freq);
-}
+struct ieee80211_channel *ieee80211_get_channel(struct wiphy *wiphy, int freq);
 
 /**
  * ieee80211_get_response_rate - get basic rate for a given rate
@@ -5048,20 +5133,32 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
  * @req_ie_len: association request IEs length
  * @resp_ie: association response IEs (may be %NULL)
  * @resp_ie_len: assoc response IEs length
- * @status: status code, 0 for successful connection, use
- *      %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
- *      the real status code for failures.
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use
+ *     %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ *     the real status code for failures. If this call is used to report a
+ *     failure due to a timeout (e.g., not receiving an Authentication frame
+ *     from the AP) instead of an explicit rejection by the AP, -1 is used to
+ *     indicate that this is a failure, but without a status code.
+ *     @timeout_reason is used to report the reason for the timeout in that
+ *     case.
  * @gfp: allocation flags
- *
- * It should be called by the underlying driver whenever connect() has
- * succeeded. This is similar to cfg80211_connect_result(), but with the
- * option of identifying the exact bss entry for the connection. Only one of
- * these functions should be called.
+ * @timeout_reason: reason for connection timeout. This is used when the
+ *     connection fails due to a timeout instead of an explicit rejection from
+ *     the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is
+ *     not known. This value is used only if @status < 0 to indicate that the
+ *     failure is due to a timeout and not due to explicit rejection by the AP.
+ *     This value is ignored in other cases (@status >= 0).
+ *
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_result(), but with the option of identifying the exact bss
+ * entry for the connection. Only one of these functions should be called.
  */
 void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
                          struct cfg80211_bss *bss, const u8 *req_ie,
                          size_t req_ie_len, const u8 *resp_ie,
-                         size_t resp_ie_len, int status, gfp_t gfp);
+                         size_t resp_ie_len, int status, gfp_t gfp,
+                         enum nl80211_timeout_reason timeout_reason);
 
 /**
  * cfg80211_connect_result - notify cfg80211 of connection result
@@ -5072,13 +5169,15 @@ void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
  * @req_ie_len: association request IEs length
  * @resp_ie: association response IEs (may be %NULL)
  * @resp_ie_len: assoc response IEs length
- * @status: status code, 0 for successful connection, use
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use
  *     %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
  *     the real status code for failures.
  * @gfp: allocation flags
  *
- * It should be called by the underlying driver whenever connect() has
- * succeeded.
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_bss() which allows the exact bss entry to be specified. Only
+ * one of these functions should be called.
  */
 static inline void
 cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
@@ -5087,7 +5186,8 @@ cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                        u16 status, gfp_t gfp)
 {
        cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, resp_ie,
-                            resp_ie_len, status, gfp);
+                            resp_ie_len, status, gfp,
+                            NL80211_TIMEOUT_UNSPECIFIED);
 }
 
 /**
@@ -5098,6 +5198,7 @@ cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
  * @req_ie: association request IEs (maybe be %NULL)
  * @req_ie_len: association request IEs length
  * @gfp: allocation flags
+ * @timeout_reason: reason for connection timeout.
  *
  * It should be called by the underlying driver whenever connect() has failed
  * in a sequence where no explicit authentication/association rejection was
@@ -5107,10 +5208,11 @@ cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
  */
 static inline void
 cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
-                        const u8 *req_ie, size_t req_ie_len, gfp_t gfp)
+                        const u8 *req_ie, size_t req_ie_len, gfp_t gfp,
+                        enum nl80211_timeout_reason timeout_reason)
 {
        cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, NULL, 0, -1,
-                            gfp);
+                            gfp, timeout_reason);
 }
 
 /**
@@ -5296,6 +5398,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
  * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
  * @dev: network device
  * @rssi_event: the triggered RSSI event
+ * @rssi_level: new RSSI level value or 0 if not available
  * @gfp: context flags
  *
  * This function is called when a configured connection quality monitoring
@@ -5303,7 +5406,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
  */
 void cfg80211_cqm_rssi_notify(struct net_device *dev,
                              enum nl80211_cqm_rssi_threshold_event rssi_event,
-                             gfp_t gfp);
+                             s32 rssi_level, gfp_t gfp);
 
 /**
  * cfg80211_cqm_pktloss_notify - notify userspace about packetloss to peer
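
The new connect-result convention in the hunks above distinguishes three outcomes: 0 (%WLAN_STATUS_SUCCESS), a positive 802.11 status code from the AP, and -1 for a timeout, in which case only the separate timeout reason is meaningful. A small sketch of that convention with illustrative enum names (the real values live in enum nl80211_timeout_reason, e.g. NL80211_TIMEOUT_UNSPECIFIED):

#include <stdio.h>

/* illustrative stand-ins for enum nl80211_timeout_reason */
enum timeout_reason { TIMEOUT_UNSPECIFIED, TIMEOUT_SCAN, TIMEOUT_AUTH, TIMEOUT_ASSOC };

static void report_connect(int status, enum timeout_reason reason)
{
        if (status == 0)
                printf("connected\n");                        /* WLAN_STATUS_SUCCESS */
        else if (status > 0)
                printf("rejected, status code %d\n", status); /* explicit AP status */
        else
                printf("timed out, reason %d\n", reason);     /* only now is reason valid */
}

int main(void)
{
        report_connect(0, TIMEOUT_UNSPECIFIED);
        report_connect(17, TIMEOUT_UNSPECIFIED); /* e.g. AP cannot handle more STAs */
        report_connect(-1, TIMEOUT_AUTH);
        return 0;
}
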
index 35d0fabd2782618603bcd051c34750db53b9742f..aef2b2bb6603fa5c308c7aca4c1ef9d47d6496f9 100644 (file)
@@ -179,7 +179,7 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
 
 static inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
 {
-       *psum = csum_fold(csum_sub(delta, *psum));
+       *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
 }
 
 #endif
index b122196d5a1f50a629520ce00b1cafde9d440cda..4e13e695f0251d5c762c3089065f4eeb429033eb 100644 (file)
 #ifndef __LINUX_NET_DSA_H
 #define __LINUX_NET_DSA_H
 
+#include <linux/if.h>
 #include <linux/if_ether.h>
 #include <linux/list.h>
+#include <linux/notifier.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/of.h>
-#include <linux/phy.h>
-#include <linux/phy_fixed.h>
 #include <linux/ethtool.h>
 
+struct tc_action;
+struct phy_device;
+struct fixed_phy_status;
+
 enum dsa_tag_protocol {
        DSA_TAG_PROTO_NONE = 0,
        DSA_TAG_PROTO_DSA,
@@ -42,6 +46,11 @@ struct dsa_chip_data {
        struct device   *host_dev;
        int             sw_addr;
 
+       /*
+        * Reference to network devices
+        */
+       struct device   *netdev[DSA_MAX_PORTS];
+
        /* set to size of eeprom if supported by the switch */
        int             eeprom_len;
 
@@ -90,6 +99,9 @@ struct packet_type;
 struct dsa_switch_tree {
        struct list_head        list;
 
+       /* Notifier chain for switch-wide events */
+       struct raw_notifier_head        nh;
+
        /* Tree identifier */
        u32 tree;
 
@@ -124,7 +136,7 @@ struct dsa_switch_tree {
        /*
         * The switch and port to which the CPU is attached.
         */
-       s8                      cpu_switch;
+       struct dsa_switch       *cpu_switch;
        s8                      cpu_port;
 
        /*
@@ -139,11 +151,37 @@ struct dsa_switch_tree {
        const struct dsa_device_ops *tag_ops;
 };
 
+/* TC matchall action types, only mirroring for now */
+enum dsa_port_mall_action_type {
+       DSA_PORT_MALL_MIRROR,
+};
+
+/* TC mirroring entry */
+struct dsa_mall_mirror_tc_entry {
+       u8 to_local_port;
+       bool ingress;
+};
+
+/* TC matchall entry */
+struct dsa_mall_tc_entry {
+       struct list_head list;
+       unsigned long cookie;
+       enum dsa_port_mall_action_type type;
+       union {
+               struct dsa_mall_mirror_tc_entry mirror;
+       };
+};
+
+
 struct dsa_port {
+       struct dsa_switch       *ds;
+       unsigned int            index;
+       const char              *name;
        struct net_device       *netdev;
        struct device_node      *dn;
        unsigned int            ageing_time;
        u8                      stp_state;
+       struct net_device       *bridge_dev;
 };
 
 struct dsa_switch {
@@ -155,6 +193,9 @@ struct dsa_switch {
        struct dsa_switch_tree  *dst;
        int                     index;
 
+       /* Listener for switch fabric events */
+       struct notifier_block   nb;
+
        /*
         * Give the switch driver somewhere to hang its private data
         * structure.
@@ -169,7 +210,7 @@ struct dsa_switch {
        /*
         * The switch operations.
         */
-       struct dsa_switch_ops   *ops;
+       const struct dsa_switch_ops     *ops;
 
        /*
         * An array of which element [a] indicates which port on this
@@ -178,14 +219,6 @@ struct dsa_switch {
         */
        s8              rtable[DSA_MAX_SWITCHES];
 
-#ifdef CONFIG_NET_DSA_HWMON
-       /*
-        * Hardware monitoring information
-        */
-       char                    hwmon_name[IFNAMSIZ + 8];
-       struct device           *hwmon_dev;
-#endif
-
        /*
         * The lower device this switch uses to talk to the host
         */
@@ -198,13 +231,16 @@ struct dsa_switch {
        u32                     cpu_port_mask;
        u32                     enabled_port_mask;
        u32                     phys_mii_mask;
-       struct dsa_port         ports[DSA_MAX_PORTS];
        struct mii_bus          *slave_mii_bus;
+
+       /* Dynamically allocated ports, keep last */
+       size_t num_ports;
+       struct dsa_port ports[];
 };
 
 static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
 {
-       return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
+       return !!(ds == ds->dst->cpu_switch && p == ds->dst->cpu_port);
 }
 
 static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
@@ -227,10 +263,10 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
         * Else return the (DSA) port number that connects to the
         * switch that is one hop closer to the cpu.
         */
-       if (dst->cpu_switch == ds->index)
+       if (dst->cpu_switch == ds)
                return dst->cpu_port;
        else
-               return ds->rtable[dst->cpu_switch];
+               return ds->rtable[dst->cpu_switch->index];
 }
 
 struct switchdev_trans;
@@ -239,9 +275,17 @@ struct switchdev_obj_port_fdb;
 struct switchdev_obj_port_mdb;
 struct switchdev_obj_port_vlan;
 
-struct dsa_switch_ops {
-       struct list_head        list;
+#define DSA_NOTIFIER_BRIDGE_JOIN               1
+#define DSA_NOTIFIER_BRIDGE_LEAVE              2
+
+/* DSA_NOTIFIER_BRIDGE_* */
+struct dsa_notifier_bridge_info {
+       struct net_device *br;
+       int sw_index;
+       int port;
+};
 
+struct dsa_switch_ops {
        /*
         * Probing and setup.
         */
@@ -309,14 +353,6 @@ struct dsa_switch_ops {
        int     (*get_eee)(struct dsa_switch *ds, int port,
                           struct ethtool_eee *e);
 
-#ifdef CONFIG_NET_DSA_HWMON
-       /* Hardware monitoring */
-       int     (*get_temp)(struct dsa_switch *ds, int *temp);
-       int     (*get_temp_limit)(struct dsa_switch *ds, int *temp);
-       int     (*set_temp_limit)(struct dsa_switch *ds, int temp);
-       int     (*get_temp_alarm)(struct dsa_switch *ds, bool *alarm);
-#endif
-
        /* EEPROM access */
        int     (*get_eeprom_len)(struct dsa_switch *ds);
        int     (*get_eeprom)(struct dsa_switch *ds,
@@ -337,7 +373,8 @@ struct dsa_switch_ops {
        int     (*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs);
        int     (*port_bridge_join)(struct dsa_switch *ds, int port,
                                    struct net_device *bridge);
-       void    (*port_bridge_leave)(struct dsa_switch *ds, int port);
+       void    (*port_bridge_leave)(struct dsa_switch *ds, int port,
+                                    struct net_device *bridge);
        void    (*port_stp_state_set)(struct dsa_switch *ds, int port,
                                      u8 state);
        void    (*port_fast_age)(struct dsa_switch *ds, int port);
@@ -388,19 +425,43 @@ struct dsa_switch_ops {
        int     (*port_mdb_dump)(struct dsa_switch *ds, int port,
                                 struct switchdev_obj_port_mdb *mdb,
                                 int (*cb)(struct switchdev_obj *obj));
+
+       /*
+        * RXNFC
+        */
+       int     (*get_rxnfc)(struct dsa_switch *ds, int port,
+                            struct ethtool_rxnfc *nfc, u32 *rule_locs);
+       int     (*set_rxnfc)(struct dsa_switch *ds, int port,
+                            struct ethtool_rxnfc *nfc);
+
+       /*
+        * TC integration
+        */
+       int     (*port_mirror_add)(struct dsa_switch *ds, int port,
+                                  struct dsa_mall_mirror_tc_entry *mirror,
+                                  bool ingress);
+       void    (*port_mirror_del)(struct dsa_switch *ds, int port,
+                                  struct dsa_mall_mirror_tc_entry *mirror);
+};
+
+struct dsa_switch_driver {
+       struct list_head        list;
+       const struct dsa_switch_ops *ops;
 };
 
-void register_switch_driver(struct dsa_switch_ops *type);
-void unregister_switch_driver(struct dsa_switch_ops *type);
+void register_switch_driver(struct dsa_switch_driver *type);
+void unregister_switch_driver(struct dsa_switch_driver *type);
 struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
+struct net_device *dsa_dev_to_net_device(struct device *dev);
 
 static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
 {
        return dst->rcv != NULL;
 }
 
+struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n);
 void dsa_unregister_switch(struct dsa_switch *ds);
-int dsa_register_switch(struct dsa_switch *ds, struct device_node *np);
+int dsa_register_switch(struct dsa_switch *ds, struct device *dev);
 #ifdef CONFIG_PM_SLEEP
 int dsa_switch_suspend(struct dsa_switch *ds);
 int dsa_switch_resume(struct dsa_switch *ds);
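
dsa_switch_alloc() sizes the switch and its ports in one allocation, which is why the new ports[] array must stay last in struct dsa_switch. A runnable sketch of the flexible-array-member pattern, using hypothetical stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct port {                /* hypothetical, stands in for struct dsa_port */
        unsigned int index;
};

struct sw {                  /* hypothetical, stands in for struct dsa_switch */
        size_t num_ports;
        struct port ports[]; /* flexible array member, must stay last */
};

static struct sw *sw_alloc(size_t n)
{
        /* one allocation covers the switch and all n trailing port slots */
        struct sw *s = calloc(1, sizeof(*s) + n * sizeof(struct port));

        if (!s)
                return NULL;
        s->num_ports = n;
        for (size_t i = 0; i < n; i++)
                s->ports[i].index = i;
        return s;
}

int main(void)
{
        struct sw *s = sw_alloc(4);

        if (!s)
                return 1;
        printf("ports=%zu last=%u\n", s->num_ports, s->ports[3].index);
        free(s);
        return 0;
}
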
index 6835d224d47b502fa3e396a02a71fed6130d6650..049af33da3b6c95897d544670cea65c542317673 100644 (file)
@@ -59,8 +59,6 @@ struct dst_entry {
 #define DST_XFRM_QUEUE         0x0100
 #define DST_METADATA           0x0200
 
-       unsigned short          pending_confirm;
-
        short                   error;
 
        /* A non-zero value of dst->obsolete forces by-hand validation
@@ -78,6 +76,8 @@ struct dst_entry {
 #define DST_OBSOLETE_KILL      -2
        unsigned short          header_len;     /* more space at head required */
        unsigned short          trailer_len;    /* space to reserve at tail */
+       unsigned short          __pad3;
+
 #ifdef CONFIG_IP_ROUTE_CLASSID
        __u32                   tclassid;
 #else
@@ -440,28 +440,6 @@ static inline void dst_rcu_free(struct rcu_head *head)
 
 static inline void dst_confirm(struct dst_entry *dst)
 {
-       dst->pending_confirm = 1;
-}
-
-static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
-                                  struct sk_buff *skb)
-{
-       const struct hh_cache *hh;
-
-       if (dst->pending_confirm) {
-               unsigned long now = jiffies;
-
-               dst->pending_confirm = 0;
-               /* avoid dirtying neighbour */
-               if (n->confirmed != now)
-                       n->confirmed = now;
-       }
-
-       hh = &n->hh;
-       if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
-               return neigh_hh_output(hh, skb);
-       else
-               return n->output(n, skb);
 }
 
 static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
@@ -477,6 +455,13 @@ static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst
        return IS_ERR(n) ? NULL : n;
 }
 
+static inline void dst_confirm_neigh(const struct dst_entry *dst,
+                                    const void *daddr)
+{
+       if (dst->ops->confirm_neigh)
+               dst->ops->confirm_neigh(dst, daddr);
+}
+
 static inline void dst_link_failure(struct sk_buff *skb)
 {
        struct dst_entry *dst = skb_dst(skb);
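
With dst->pending_confirm gone, confirmation is routed per packet instead of per route: the skb carries dst_pending_confirm, and the output path calls dst_confirm_neigh(), which forwards through a per-protocol callback in dst->ops. A runnable userspace sketch of that indirection, with deliberately simplified types:

#include <stdio.h>

struct dst; /* forward declaration for the ops table */

struct dst_ops {
        void (*confirm_neigh)(const struct dst *dst, const void *daddr);
};

struct dst {
        const struct dst_ops *ops;
};

static void ipv4_confirm(const struct dst *dst, const void *daddr)
{
        (void)dst;
        printf("confirm neighbour for %s\n", (const char *)daddr);
}

/* same shape as the new dst_confirm_neigh(): a guarded indirect call */
static void dst_confirm_neigh(const struct dst *dst, const void *daddr)
{
        if (dst->ops->confirm_neigh)
                dst->ops->confirm_neigh(dst, daddr);
}

int main(void)
{
        static const struct dst_ops ops = { .confirm_neigh = ipv4_confirm };
        struct dst d = { .ops = &ops };

        dst_confirm_neigh(&d, "192.0.2.1"); /* TEST-NET address for the demo */
        return 0;
}
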
index a0d443ca16fcca26b4925d477265935f5bdaf5bc..c84b3287e38b9e80961516b594f0e3b7950ee5e1 100644 (file)
@@ -33,6 +33,8 @@ struct dst_ops {
        struct neighbour *      (*neigh_lookup)(const struct dst_entry *dst,
                                                struct sk_buff *skb,
                                                const void *daddr);
+       void                    (*confirm_neigh)(const struct dst_entry *dst,
+                                                const void *daddr);
 
        struct kmem_cache       *kmem_cachep;
 
@@ -46,19 +48,12 @@ static inline int dst_entries_get_fast(struct dst_ops *dst)
 
 static inline int dst_entries_get_slow(struct dst_ops *dst)
 {
-       int res;
-
-       local_bh_disable();
-       res = percpu_counter_sum_positive(&dst->pcpuc_entries);
-       local_bh_enable();
-       return res;
+       return percpu_counter_sum_positive(&dst->pcpuc_entries);
 }
 
 static inline void dst_entries_add(struct dst_ops *dst, int val)
 {
-       local_bh_disable();
        percpu_counter_add(&dst->pcpuc_entries, val);
-       local_bh_enable();
 }
 
 static inline int dst_entries_init(struct dst_ops *dst)
index d896a33e00d4d1b5a81b85dd5766b78d09647d78..ac9703018a3a63bd392aa37f31b1afdf54d69aa0 100644 (file)
@@ -88,6 +88,24 @@ struct flow_dissector_key_addrs {
        };
 };
 
+/**
+ * flow_dissector_key_arp:
+ *     Operation, source and target addresses for an ARP header
+ *     for Ethernet hardware addresses and IPv4 protocol addresses
+ *             sip: Sender IP address
+ *             tip: Target IP address
+ *             op:  Operation
+ *             sha: Sender hardware address
+ *             tha: Target hardware address
+ */
+struct flow_dissector_key_arp {
+       __u32 sip;
+       __u32 tip;
+       __u8 op;
+       unsigned char sha[ETH_ALEN];
+       unsigned char tha[ETH_ALEN];
+};
+
 /**
  * flow_dissector_key_tp_ports:
  *     @ports: port numbers of Transport header
@@ -141,6 +159,7 @@ enum flow_dissector_key_id {
        FLOW_DISSECTOR_KEY_ICMP, /* struct flow_dissector_key_icmp */
        FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
        FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */
+       FLOW_DISSECTOR_KEY_ARP, /* struct flow_dissector_key_arp */
        FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_flow_vlan */
        FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */
        FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
index 2a1abbf8da74368cd01adc40cef6c0644e059ef2..fcaf8f47913054543e97d606518f78eabf0659e1 100644 (file)
@@ -5,92 +5,14 @@
 #include <linux/slab.h>
 #include <linux/netdevice.h>
 
-struct gro_cell {
-       struct sk_buff_head     napi_skbs;
-       struct napi_struct      napi;
-};
+struct gro_cell;
 
 struct gro_cells {
        struct gro_cell __percpu        *cells;
 };
 
-static inline int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
-{
-       struct gro_cell *cell;
-       struct net_device *dev = skb->dev;
-
-       if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO))
-               return netif_rx(skb);
-
-       cell = this_cpu_ptr(gcells->cells);
-
-       if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
-               atomic_long_inc(&dev->rx_dropped);
-               kfree_skb(skb);
-               return NET_RX_DROP;
-       }
-
-       __skb_queue_tail(&cell->napi_skbs, skb);
-       if (skb_queue_len(&cell->napi_skbs) == 1)
-               napi_schedule(&cell->napi);
-       return NET_RX_SUCCESS;
-}
-
-/* called under BH context */
-static inline int gro_cell_poll(struct napi_struct *napi, int budget)
-{
-       struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
-       struct sk_buff *skb;
-       int work_done = 0;
-
-       while (work_done < budget) {
-               skb = __skb_dequeue(&cell->napi_skbs);
-               if (!skb)
-                       break;
-               napi_gro_receive(napi, skb);
-               work_done++;
-       }
-
-       if (work_done < budget)
-               napi_complete_done(napi, work_done);
-       return work_done;
-}
-
-static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
-{
-       int i;
-
-       gcells->cells = alloc_percpu(struct gro_cell);
-       if (!gcells->cells)
-               return -ENOMEM;
-
-       for_each_possible_cpu(i) {
-               struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
-
-               __skb_queue_head_init(&cell->napi_skbs);
-
-               set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);
-
-               netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
-               napi_enable(&cell->napi);
-       }
-       return 0;
-}
-
-static inline void gro_cells_destroy(struct gro_cells *gcells)
-{
-       int i;
-
-       if (!gcells->cells)
-               return;
-       for_each_possible_cpu(i) {
-               struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
-
-               netif_napi_del(&cell->napi);
-               __skb_queue_purge(&cell->napi_skbs);
-       }
-       free_percpu(gcells->cells);
-       gcells->cells = NULL;
-}
+int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb);
+int gro_cells_init(struct gro_cells *gcells, struct net_device *dev);
+void gro_cells_destroy(struct gro_cells *gcells);
 
 #endif
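
The gro_cells helpers move out of line here, but the scheduling idea in the removed inline code is worth keeping in mind: the NAPI poller is only kicked when the per-cpu queue transitions from empty to non-empty, and it takes itself off the schedule when a poll drains the queue within budget. A toy single-threaded rendering of that edge-triggered pattern:

#include <stdbool.h>
#include <stdio.h>

static unsigned int qlen;  /* stands in for the per-cpu napi_skbs queue */
static bool poller_scheduled;

static void napi_schedule(void)
{
        poller_scheduled = true;
        printf("poller kicked\n");
}

static void enqueue(void)
{
        qlen++;
        if (qlen == 1)          /* only the empty->non-empty edge schedules */
                napi_schedule();
}

static unsigned int poll(unsigned int budget)
{
        unsigned int done = 0;

        while (done < budget && qlen) {
                qlen--;         /* one "packet" handed to GRO */
                done++;
        }
        if (done < budget)      /* drained within budget: poller goes idle */
                poller_scheduled = false;
        return done;
}

int main(void)
{
        enqueue();
        enqueue();
        enqueue();              /* only the first enqueue prints */
        printf("polled=%u rescheduled=%d\n", poll(64), poller_scheduled);
        return 0;
}
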
index d0e7e3f8e67ae208de3195f4af9cd740972dee0b..d91f9e7f4d71486f9af01c4b17b074d653d144d1 100644 (file)
 /*
- * Copyright (c) 2003, 2004 David Young.  All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. The name of David Young may not be used to endorse or promote
- *    products derived from this software without specific prior
- *    written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY DAVID YOUNG ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL DAVID
- * YOUNG BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
- * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
- * OF SUCH DAMAGE.
- */
-
-/*
- * Modifications to fit into the linux IEEE 802.11 stack,
- * Mike Kershaw (dragorn@kismetwireless.net)
+ * Copyright (c) 2017          Intel Deutschland GmbH
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
+#ifndef __RADIOTAP_H
+#define __RADIOTAP_H
 
-#ifndef IEEE80211RADIOTAP_H
-#define IEEE80211RADIOTAP_H
-
-#include <linux/if_ether.h>
 #include <linux/kernel.h>
 #include <asm/unaligned.h>
 
-/* Base version of the radiotap packet header data */
-#define PKTHDR_RADIOTAP_VERSION                0
-
-/* A generic radio capture format is desirable. There is one for
- * Linux, but it is neither rigidly defined (there were not even
- * units given for some fields) nor easily extensible.
- *
- * I suggest the following extensible radio capture format. It is
- * based on a bitmap indicating which fields are present.
- *
- * I am trying to describe precisely what the application programmer
- * should expect in the following, and for that reason I tell the
- * units and origin of each measurement (where it applies), or else I
- * use sufficiently weaselly language ("is a monotonically nondecreasing
- * function of...") that I cannot set false expectations for lawyerly
- * readers.
- */
-
-/*
- * The radio capture header precedes the 802.11 header.
- * All data in the header is little endian on all platforms.
+/**
+ * struct ieee80211_radiotap_header - base radiotap header
  */
 struct ieee80211_radiotap_header {
-       u8 it_version;          /* Version 0. Only increases
-                                * for drastic changes,
-                                * introduction of compatible
-                                * new fields does not count.
-                                */
-       u8 it_pad;
-       __le16 it_len;          /* length of the whole
-                                * header in bytes, including
-                                * it_version, it_pad,
-                                * it_len, and data fields.
-                                */
-       __le32 it_present;      /* A bitmap telling which
-                                * fields are present. Set bit 31
-                                * (0x80000000) to extend the
-                                * bitmap by another 32 bits.
-                                * Additional extensions are made
-                                * by setting bit 31.
-                                */
+       /**
+        * @it_version: radiotap version, always 0
+        */
+       uint8_t it_version;
+
+       /**
+        * @it_pad: padding (or alignment)
+        */
+       uint8_t it_pad;
+
+       /**
+        * @it_len: overall radiotap header length
+        */
+       __le16 it_len;
+
+       /**
+        * @it_present: (first) present word
+        */
+       __le32 it_present;
 } __packed;
 
-/* Name                                 Data type    Units
- * ----                                 ---------    -----
- *
- * IEEE80211_RADIOTAP_TSFT              __le64       microseconds
- *
- *      Value in microseconds of the MAC's 64-bit 802.11 Time
- *      Synchronization Function timer when the first bit of the
- *      MPDU arrived at the MAC. For received frames, only.
- *
- * IEEE80211_RADIOTAP_CHANNEL           2 x __le16   MHz, bitmap
- *
- *      Tx/Rx frequency in MHz, followed by flags (see below).
- *
- * IEEE80211_RADIOTAP_FHSS              __le16       see below
- *
- *      For frequency-hopping radios, the hop set (first byte)
- *      and pattern (second byte).
- *
- * IEEE80211_RADIOTAP_RATE              u8           500kb/s
- *
- *      Tx/Rx data rate
- *
- * IEEE80211_RADIOTAP_DBM_ANTSIGNAL     s8           decibels from
- *                                                   one milliwatt (dBm)
- *
- *      RF signal power at the antenna, decibel difference from
- *      one milliwatt.
- *
- * IEEE80211_RADIOTAP_DBM_ANTNOISE      s8           decibels from
- *                                                   one milliwatt (dBm)
- *
- *      RF noise power at the antenna, decibel difference from one
- *      milliwatt.
- *
- * IEEE80211_RADIOTAP_DB_ANTSIGNAL      u8           decibel (dB)
- *
- *      RF signal power at the antenna, decibel difference from an
- *      arbitrary, fixed reference.
- *
- * IEEE80211_RADIOTAP_DB_ANTNOISE       u8           decibel (dB)
- *
- *      RF noise power at the antenna, decibel difference from an
- *      arbitrary, fixed reference point.
- *
- * IEEE80211_RADIOTAP_LOCK_QUALITY      __le16       unitless
- *
- *      Quality of Barker code lock. Unitless. Monotonically
- *      nondecreasing with "better" lock strength. Called "Signal
- *      Quality" in datasheets.  (Is there a standard way to measure
- *      this?)
- *
- * IEEE80211_RADIOTAP_TX_ATTENUATION    __le16       unitless
- *
- *      Transmit power expressed as unitless distance from max
- *      power set at factory calibration.  0 is max power.
- *      Monotonically nondecreasing with lower power levels.
- *
- * IEEE80211_RADIOTAP_DB_TX_ATTENUATION __le16       decibels (dB)
- *
- *      Transmit power expressed as decibel distance from max power
- *      set at factory calibration.  0 is max power.  Monotonically
- *      nondecreasing with lower power levels.
- *
- * IEEE80211_RADIOTAP_DBM_TX_POWER      s8           decibels from
- *                                                   one milliwatt (dBm)
- *
- *      Transmit power expressed as dBm (decibels from a 1 milliwatt
- *      reference). This is the absolute power level measured at
- *      the antenna port.
- *
- * IEEE80211_RADIOTAP_FLAGS             u8           bitmap
- *
- *      Properties of transmitted and received frames. See flags
- *      defined below.
- *
- * IEEE80211_RADIOTAP_ANTENNA           u8           antenna index
- *
- *      Unitless indication of the Rx/Tx antenna for this packet.
- *      The first antenna is antenna 0.
- *
- * IEEE80211_RADIOTAP_RX_FLAGS          __le16       bitmap
- *
- *     Properties of received frames. See flags defined below.
- *
- * IEEE80211_RADIOTAP_TX_FLAGS          __le16       bitmap
- *
- *     Properties of transmitted frames. See flags defined below.
- *
- * IEEE80211_RADIOTAP_RTS_RETRIES       u8           data
- *
- *     Number of rts retries a transmitted frame used.
- *
- * IEEE80211_RADIOTAP_DATA_RETRIES      u8           data
- *
- *     Number of unicast retries a transmitted frame used.
- *
- * IEEE80211_RADIOTAP_MCS      u8, u8, u8              unitless
- *
- *     Contains a bitmap of known fields/flags, the flags, and
- *     the MCS index.
- *
- * IEEE80211_RADIOTAP_AMPDU_STATUS     u32, u16, u8, u8        unitless
- *
- *     Contains the AMPDU information for the subframe.
- *
- * IEEE80211_RADIOTAP_VHT      u16, u8, u8, u8[4], u8, u8, u16
- *
- *     Contains VHT information about this frame.
- *
- * IEEE80211_RADIOTAP_TIMESTAMP                u64, u16, u8, u8        variable
- *
- *     Contains timestamp information for this frame.
- */
-enum ieee80211_radiotap_type {
+/* version is always 0 */
+#define PKTHDR_RADIOTAP_VERSION        0
+
+/* see the radiotap website for the descriptions */
+enum ieee80211_radiotap_presence {
        IEEE80211_RADIOTAP_TSFT = 0,
        IEEE80211_RADIOTAP_FLAGS = 1,
        IEEE80211_RADIOTAP_RATE = 2,
@@ -214,7 +67,7 @@ enum ieee80211_radiotap_type {
        IEEE80211_RADIOTAP_TX_FLAGS = 15,
        IEEE80211_RADIOTAP_RTS_RETRIES = 16,
        IEEE80211_RADIOTAP_DATA_RETRIES = 17,
-
+       /* 18 is XChannel, but it's not defined yet */
        IEEE80211_RADIOTAP_MCS = 19,
        IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
        IEEE80211_RADIOTAP_VHT = 21,
@@ -226,129 +79,135 @@ enum ieee80211_radiotap_type {
        IEEE80211_RADIOTAP_EXT = 31
 };
 
-/* Channel flags. */
-#define        IEEE80211_CHAN_TURBO    0x0010  /* Turbo channel */
-#define        IEEE80211_CHAN_CCK      0x0020  /* CCK channel */
-#define        IEEE80211_CHAN_OFDM     0x0040  /* OFDM channel */
-#define        IEEE80211_CHAN_2GHZ     0x0080  /* 2 GHz spectrum channel. */
-#define        IEEE80211_CHAN_5GHZ     0x0100  /* 5 GHz spectrum channel */
-#define        IEEE80211_CHAN_PASSIVE  0x0200  /* Only passive scan allowed */
-#define        IEEE80211_CHAN_DYN      0x0400  /* Dynamic CCK-OFDM channel */
-#define        IEEE80211_CHAN_GFSK     0x0800  /* GFSK channel (FHSS PHY) */
-#define        IEEE80211_CHAN_GSM      0x1000  /* GSM (900 MHz) */
-#define        IEEE80211_CHAN_STURBO   0x2000  /* Static Turbo */
-#define        IEEE80211_CHAN_HALF     0x4000  /* Half channel (10 MHz wide) */
-#define        IEEE80211_CHAN_QUARTER  0x8000  /* Quarter channel (5 MHz wide) */
-
-/* For IEEE80211_RADIOTAP_FLAGS */
-#define        IEEE80211_RADIOTAP_F_CFP        0x01    /* sent/received
-                                                * during CFP
-                                                */
-#define        IEEE80211_RADIOTAP_F_SHORTPRE   0x02    /* sent/received
-                                                * with short
-                                                * preamble
-                                                */
-#define        IEEE80211_RADIOTAP_F_WEP        0x04    /* sent/received
-                                                * with WEP encryption
-                                                */
-#define        IEEE80211_RADIOTAP_F_FRAG       0x08    /* sent/received
-                                                * with fragmentation
-                                                */
-#define        IEEE80211_RADIOTAP_F_FCS        0x10    /* frame includes FCS */
-#define        IEEE80211_RADIOTAP_F_DATAPAD    0x20    /* frame has padding between
-                                                * 802.11 header and payload
-                                                * (to 32-bit boundary)
-                                                */
-#define IEEE80211_RADIOTAP_F_BADFCS    0x40    /* bad FCS */
-
-/* For IEEE80211_RADIOTAP_RX_FLAGS */
-#define IEEE80211_RADIOTAP_F_RX_BADPLCP        0x0002  /* frame has bad PLCP */
+/* for IEEE80211_RADIOTAP_FLAGS */
+enum ieee80211_radiotap_flags {
+       IEEE80211_RADIOTAP_F_CFP = 0x01,
+       IEEE80211_RADIOTAP_F_SHORTPRE = 0x02,
+       IEEE80211_RADIOTAP_F_WEP = 0x04,
+       IEEE80211_RADIOTAP_F_FRAG = 0x08,
+       IEEE80211_RADIOTAP_F_FCS = 0x10,
+       IEEE80211_RADIOTAP_F_DATAPAD = 0x20,
+       IEEE80211_RADIOTAP_F_BADFCS = 0x40,
+};
 
-/* For IEEE80211_RADIOTAP_TX_FLAGS */
-#define IEEE80211_RADIOTAP_F_TX_FAIL   0x0001  /* failed due to excessive
-                                                * retries */
-#define IEEE80211_RADIOTAP_F_TX_CTS    0x0002  /* used cts 'protection' */
-#define IEEE80211_RADIOTAP_F_TX_RTS    0x0004  /* used rts/cts handshake */
-#define IEEE80211_RADIOTAP_F_TX_NOACK  0x0008  /* don't expect an ack */
+/* for IEEE80211_RADIOTAP_CHANNEL */
+enum ieee80211_radiotap_channel_flags {
+       IEEE80211_CHAN_CCK = 0x0020,
+       IEEE80211_CHAN_OFDM = 0x0040,
+       IEEE80211_CHAN_2GHZ = 0x0080,
+       IEEE80211_CHAN_5GHZ = 0x0100,
+       IEEE80211_CHAN_DYN = 0x0400,
+       IEEE80211_CHAN_HALF = 0x4000,
+       IEEE80211_CHAN_QUARTER = 0x8000,
+};
 
+/* for IEEE80211_RADIOTAP_RX_FLAGS */
+enum ieee80211_radiotap_rx_flags {
+       IEEE80211_RADIOTAP_F_RX_BADPLCP = 0x0002,
+};
 
-/* For IEEE80211_RADIOTAP_MCS */
-#define IEEE80211_RADIOTAP_MCS_HAVE_BW         0x01
-#define IEEE80211_RADIOTAP_MCS_HAVE_MCS                0x02
-#define IEEE80211_RADIOTAP_MCS_HAVE_GI         0x04
-#define IEEE80211_RADIOTAP_MCS_HAVE_FMT                0x08
-#define IEEE80211_RADIOTAP_MCS_HAVE_FEC                0x10
-#define IEEE80211_RADIOTAP_MCS_HAVE_STBC       0x20
+/* for IEEE80211_RADIOTAP_TX_FLAGS */
+enum ieee80211_radiotap_tx_flags {
+       IEEE80211_RADIOTAP_F_TX_FAIL = 0x0001,
+       IEEE80211_RADIOTAP_F_TX_CTS = 0x0002,
+       IEEE80211_RADIOTAP_F_TX_RTS = 0x0004,
+       IEEE80211_RADIOTAP_F_TX_NOACK = 0x0008,
+};
 
-#define IEEE80211_RADIOTAP_MCS_BW_MASK         0x03
-#define                IEEE80211_RADIOTAP_MCS_BW_20    0
-#define                IEEE80211_RADIOTAP_MCS_BW_40    1
-#define                IEEE80211_RADIOTAP_MCS_BW_20L   2
-#define                IEEE80211_RADIOTAP_MCS_BW_20U   3
-#define IEEE80211_RADIOTAP_MCS_SGI             0x04
-#define IEEE80211_RADIOTAP_MCS_FMT_GF          0x08
-#define IEEE80211_RADIOTAP_MCS_FEC_LDPC                0x10
-#define IEEE80211_RADIOTAP_MCS_STBC_MASK       0x60
-#define                IEEE80211_RADIOTAP_MCS_STBC_1   1
-#define                IEEE80211_RADIOTAP_MCS_STBC_2   2
-#define                IEEE80211_RADIOTAP_MCS_STBC_3   3
+/* for IEEE80211_RADIOTAP_MCS "have" flags */
+enum ieee80211_radiotap_mcs_have {
+       IEEE80211_RADIOTAP_MCS_HAVE_BW = 0x01,
+       IEEE80211_RADIOTAP_MCS_HAVE_MCS = 0x02,
+       IEEE80211_RADIOTAP_MCS_HAVE_GI = 0x04,
+       IEEE80211_RADIOTAP_MCS_HAVE_FMT = 0x08,
+       IEEE80211_RADIOTAP_MCS_HAVE_FEC = 0x10,
+       IEEE80211_RADIOTAP_MCS_HAVE_STBC = 0x20,
+};
 
-#define IEEE80211_RADIOTAP_MCS_STBC_SHIFT      5
+enum ieee80211_radiotap_mcs_flags {
+       IEEE80211_RADIOTAP_MCS_BW_MASK = 0x03,
+       IEEE80211_RADIOTAP_MCS_BW_20 = 0,
+       IEEE80211_RADIOTAP_MCS_BW_40 = 1,
+       IEEE80211_RADIOTAP_MCS_BW_20L = 2,
+       IEEE80211_RADIOTAP_MCS_BW_20U = 3,
+
+       IEEE80211_RADIOTAP_MCS_SGI = 0x04,
+       IEEE80211_RADIOTAP_MCS_FMT_GF = 0x08,
+       IEEE80211_RADIOTAP_MCS_FEC_LDPC = 0x10,
+       IEEE80211_RADIOTAP_MCS_STBC_MASK = 0x60,
+       IEEE80211_RADIOTAP_MCS_STBC_1 = 1,
+       IEEE80211_RADIOTAP_MCS_STBC_2 = 2,
+       IEEE80211_RADIOTAP_MCS_STBC_3 = 3,
+       IEEE80211_RADIOTAP_MCS_STBC_SHIFT = 5,
+};
 
-/* For IEEE80211_RADIOTAP_AMPDU_STATUS */
-#define IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN                0x0001
-#define IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN            0x0002
-#define IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN            0x0004
-#define IEEE80211_RADIOTAP_AMPDU_IS_LAST               0x0008
-#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR         0x0010
-#define IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN       0x0020
+/* for IEEE80211_RADIOTAP_AMPDU_STATUS */
+enum ieee80211_radiotap_ampdu_flags {
+       IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN = 0x0001,
+       IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN = 0x0002,
+       IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN = 0x0004,
+       IEEE80211_RADIOTAP_AMPDU_IS_LAST = 0x0008,
+       IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR = 0x0010,
+       IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN = 0x0020,
+};
 
-/* For IEEE80211_RADIOTAP_VHT */
-#define IEEE80211_RADIOTAP_VHT_KNOWN_STBC                      0x0001
-#define IEEE80211_RADIOTAP_VHT_KNOWN_TXOP_PS_NA                        0x0002
-#define IEEE80211_RADIOTAP_VHT_KNOWN_GI                                0x0004
-#define IEEE80211_RADIOTAP_VHT_KNOWN_SGI_NSYM_DIS              0x0008
-#define IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM       0x0010
-#define IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED                        0x0020
-#define IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH                 0x0040
-#define IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID                  0x0080
-#define IEEE80211_RADIOTAP_VHT_KNOWN_PARTIAL_AID               0x0100
+/* for IEEE80211_RADIOTAP_VHT */
+enum ieee80211_radiotap_vht_known {
+       IEEE80211_RADIOTAP_VHT_KNOWN_STBC = 0x0001,
+       IEEE80211_RADIOTAP_VHT_KNOWN_TXOP_PS_NA = 0x0002,
+       IEEE80211_RADIOTAP_VHT_KNOWN_GI = 0x0004,
+       IEEE80211_RADIOTAP_VHT_KNOWN_SGI_NSYM_DIS = 0x0008,
+       IEEE80211_RADIOTAP_VHT_KNOWN_LDPC_EXTRA_OFDM_SYM = 0x0010,
+       IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED = 0x0020,
+       IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH = 0x0040,
+       IEEE80211_RADIOTAP_VHT_KNOWN_GROUP_ID = 0x0080,
+       IEEE80211_RADIOTAP_VHT_KNOWN_PARTIAL_AID = 0x0100,
+};
 
-#define IEEE80211_RADIOTAP_VHT_FLAG_STBC                       0x01
-#define IEEE80211_RADIOTAP_VHT_FLAG_TXOP_PS_NA                 0x02
-#define IEEE80211_RADIOTAP_VHT_FLAG_SGI                                0x04
-#define IEEE80211_RADIOTAP_VHT_FLAG_SGI_NSYM_M10_9             0x08
-#define IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM                0x10
-#define IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED                 0x20
+enum ieee80211_radiotap_vht_flags {
+       IEEE80211_RADIOTAP_VHT_FLAG_STBC = 0x01,
+       IEEE80211_RADIOTAP_VHT_FLAG_TXOP_PS_NA = 0x02,
+       IEEE80211_RADIOTAP_VHT_FLAG_SGI = 0x04,
+       IEEE80211_RADIOTAP_VHT_FLAG_SGI_NSYM_M10_9 = 0x08,
+       IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM = 0x10,
+       IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED = 0x20,
+};
 
-#define IEEE80211_RADIOTAP_CODING_LDPC_USER0                   0x01
-#define IEEE80211_RADIOTAP_CODING_LDPC_USER1                   0x02
-#define IEEE80211_RADIOTAP_CODING_LDPC_USER2                   0x04
-#define IEEE80211_RADIOTAP_CODING_LDPC_USER3                   0x08
+enum ieee80211_radiotap_vht_coding {
+       IEEE80211_RADIOTAP_CODING_LDPC_USER0 = 0x01,
+       IEEE80211_RADIOTAP_CODING_LDPC_USER1 = 0x02,
+       IEEE80211_RADIOTAP_CODING_LDPC_USER2 = 0x04,
+       IEEE80211_RADIOTAP_CODING_LDPC_USER3 = 0x08,
+};
 
-/* For IEEE80211_RADIOTAP_TIMESTAMP */
-#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MASK                 0x000F
-#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MS                   0x0000
-#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US                   0x0001
-#define IEEE80211_RADIOTAP_TIMESTAMP_UNIT_NS                   0x0003
-#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_MASK                 0x00F0
-#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_BEGIN_MDPU           0x0000
-#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ         0x0010
-#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_PPDU              0x0020
-#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU              0x0030
-#define IEEE80211_RADIOTAP_TIMESTAMP_SPOS_UNKNOWN              0x00F0
+/* for IEEE80211_RADIOTAP_TIMESTAMP */
+enum ieee80211_radiotap_timestamp_unit_spos {
+       IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MASK = 0x000F,
+       IEEE80211_RADIOTAP_TIMESTAMP_UNIT_MS = 0x0000,
+       IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US = 0x0001,
+       IEEE80211_RADIOTAP_TIMESTAMP_UNIT_NS = 0x0003,
+       IEEE80211_RADIOTAP_TIMESTAMP_SPOS_MASK = 0x00F0,
+       IEEE80211_RADIOTAP_TIMESTAMP_SPOS_BEGIN_MDPU = 0x0000,
+       IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ = 0x0010,
+       IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_PPDU = 0x0020,
+       IEEE80211_RADIOTAP_TIMESTAMP_SPOS_EO_MPDU = 0x0030,
+       IEEE80211_RADIOTAP_TIMESTAMP_SPOS_UNKNOWN = 0x00F0,
+};
 
-#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT                        0x00
-#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT                        0x01
-#define IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY             0x02
+enum ieee80211_radiotap_timestamp_flags {
+       IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT = 0x00,
+       IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT = 0x01,
+       IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY = 0x02,
+};
 
-/* helpers */
-static inline int ieee80211_get_radiotap_len(unsigned char *data)
+/**
+ * ieee80211_get_radiotap_len - get radiotap header length
+ * @data: pointer to the start of the radiotap header
+ */
+static inline u16 ieee80211_get_radiotap_len(const char *data)
 {
-       struct ieee80211_radiotap_header *hdr =
-               (struct ieee80211_radiotap_header *)data;
+       struct ieee80211_radiotap_header *hdr = (void *)data;
 
        return get_unaligned_le16(&hdr->it_len);
 }
 
-#endif                         /* IEEE80211_RADIOTAP_H */
+#endif /* __RADIOTAP_H */
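
As a reading aid (not part of the patch), here is a minimal, self-contained
userspace sketch of how a consumer would use the header laid out above:
verify it_version is 0, take the little-endian it_len as the offset of the
802.11 frame, and walk any extended it_present words signalled by bit 31
(IEEE80211_RADIOTAP_EXT). The le16()/le32() helpers stand in for the
kernel's get_unaligned_le*() used in ieee80211_get_radiotap_len().

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | p[1] << 8);
}

static uint32_t le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* Return the offset of the 802.11 frame, or 0 on a malformed header. */
static size_t radiotap_skip(const uint8_t *buf, size_t buflen)
{
	size_t off = 4;			/* first it_present word */
	uint16_t it_len;

	if (buflen < 8 || buf[0] != 0)	/* it_version must be 0 */
		return 0;
	it_len = le16(buf + 2);
	if (it_len < 8 || it_len > buflen)
		return 0;
	/* bit 31 (IEEE80211_RADIOTAP_EXT) chains another present word */
	while (le32(buf + off) & 0x80000000u) {
		off += 4;
		if (off + 4 > it_len)	/* chain runs past the header */
			return 0;
	}
	return it_len;	/* field data sits between off + 4 and it_len */
}

int main(void)
{
	/* version 0, pad 0, it_len = 8, one present word without the EXT bit */
	const uint8_t frame[] = { 0, 0, 8, 0, 0x02, 0, 0, 0, 0xde };

	printf("802.11 data starts at offset %zu\n",
	       radiotap_skip(frame, sizeof(frame)));
	return 0;
}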
index 0fa4c324b71391bd5c50fbe355e23ff4e59a691a..f656f9051acafa8026d391f32b026562e24d8c4e 100644 (file)
@@ -205,7 +205,6 @@ struct inet6_dev {
        __s32                   rs_interval;    /* in jiffies */
        __u8                    rs_probes;
 
-       __u8                    addr_gen_mode;
        unsigned long           tstamp; /* ipv6InterfaceTable update timestamp */
        struct rcu_head         rcu;
 };
diff --git a/include/net/ife.h b/include/net/ife.h
new file mode 100644 (file)
index 0000000..2d87d68
--- /dev/null
@@ -0,0 +1,51 @@
+#ifndef __NET_IFE_H
+#define __NET_IFE_H
+
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/module.h>
+#include <uapi/linux/ife.h>
+
+#if IS_ENABLED(CONFIG_NET_IFE)
+
+void *ife_encode(struct sk_buff *skb, u16 metalen);
+void *ife_decode(struct sk_buff *skb, u16 *metalen);
+
+void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen);
+int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
+                       const void *dval);
+
+void *ife_tlv_meta_next(void *skbdata);
+
+#else
+
+static inline void *ife_encode(struct sk_buff *skb, u16 metalen)
+{
+       return NULL;
+}
+
+static inline void *ife_decode(struct sk_buff *skb, u16 *metalen)
+{
+       return NULL;
+}
+
+static inline void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen,
+                                       u16 *totlen)
+{
+       return NULL;
+}
+
+static inline int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
+                       const void *dval)
+{
+       return 0;
+}
+
+static inline void *ife_tlv_meta_next(void *skbdata)
+{
+       return NULL;
+}
+
+#endif
+
+#endif /* __NET_IFE_H */
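
To make the TLV shape of this API concrete, here is a standalone sketch of
the encode/decode/next walk that ife_tlv_meta_encode(), ife_tlv_meta_decode()
and ife_tlv_meta_next() imply. This is an illustrative analog, not the
kernel implementation: the real on-wire IFE metadata layout is defined by
uapi/linux/ife.h, and the 4-byte type/length header below is an assumption
made only for this demo.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct tlv {
	uint16_t type;
	uint16_t totlen;	/* header + value, in bytes */
};

/* Write one TLV at p; return where the next TLV would start. */
static void *tlv_encode(void *p, uint16_t type, const void *val, uint16_t dlen)
{
	struct tlv h = { type, (uint16_t)(sizeof(h) + dlen) };

	memcpy(p, &h, sizeof(h));
	memcpy((uint8_t *)p + sizeof(h), val, dlen);
	return (uint8_t *)p + h.totlen;
}

/* Read one TLV at p; return a pointer to its value. */
static void *tlv_decode(void *p, uint16_t *type, uint16_t *dlen)
{
	struct tlv h;

	memcpy(&h, p, sizeof(h));
	*type = h.type;
	*dlen = (uint16_t)(h.totlen - sizeof(h));
	return (uint8_t *)p + sizeof(h);
}

int main(void)
{
	uint8_t buf[64];
	uint8_t *next;
	uint16_t type, dlen;
	uint32_t mark = 7;	/* hypothetical metadatum */

	next = tlv_encode(buf, 1, &mark, sizeof(mark));
	tlv_decode(buf, &type, &dlen);
	printf("tlv type %u, %u value bytes, %td bytes consumed\n",
	       type, dlen, next - buf);
	return 0;
}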
index 3212b39b5bfcb543fff0f1cd174f2124e3b9cc45..8ec87b62257b3189bd3c9308266b92df1a2fb6dc 100644 (file)
 
 #include <linux/types.h>
 
-struct inet_bind_bucket;
 struct request_sock;
 struct sk_buff;
 struct sock;
 struct sockaddr;
 
-int inet6_csk_bind_conflict(const struct sock *sk,
-                           const struct inet_bind_bucket *tb, bool relax,
-                           bool soreuseport_ok);
-
 struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6,
                                      const struct request_sock *req, u8 proto);
 
index 5d683428fced65af56d83ab584bbbee1c089e8f0..b7952d55b9c00039a9eca46544997c10722682b6 100644 (file)
@@ -17,7 +17,7 @@ int inet_release(struct socket *sock);
 int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                        int addr_len, int flags);
 int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
-                         int addr_len, int flags);
+                         int addr_len, int flags, int is_sendmsg);
 int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
                       int addr_len, int flags);
 int inet_accept(struct socket *sock, struct socket *newsock, int flags);
index 85ee3879499ebc4ebd63a59b2c425918858154c6..826f198374f809a4b7ca23ada4a46433b972ef35 100644 (file)
@@ -62,9 +62,6 @@ struct inet_connection_sock_af_ops {
                                char __user *optval, int __user *optlen);
 #endif
        void        (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
-       int         (*bind_conflict)(const struct sock *sk,
-                                    const struct inet_bind_bucket *tb,
-                                    bool relax, bool soreuseport_ok);
        void        (*mtu_reduced)(struct sock *sk);
 };
 
@@ -144,6 +141,7 @@ struct inet_connection_sock {
 #define ICSK_TIME_PROBE0       3       /* Zero window probe timer */
 #define ICSK_TIME_EARLY_RETRANS 4      /* Early retransmit timer */
 #define ICSK_TIME_LOSS_PROBE   5       /* Tail loss probe timer */
+#define ICSK_TIME_REO_TIMEOUT  6       /* Reordering timer */
 
 static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
 {
@@ -234,7 +232,8 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
        }
 
        if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
-           what == ICSK_TIME_EARLY_RETRANS || what ==  ICSK_TIME_LOSS_PROBE) {
+           what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE ||
+           what == ICSK_TIME_REO_TIMEOUT) {
                icsk->icsk_pending = what;
                icsk->icsk_timeout = jiffies + when;
                sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
@@ -261,9 +260,6 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
 
 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
 
-int inet_csk_bind_conflict(const struct sock *sk,
-                          const struct inet_bind_bucket *tb, bool relax,
-                          bool soreuseport_ok);
 int inet_csk_get_port(struct sock *sk, unsigned short snum);
 
 struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
index 909972aa3acd7e4e3d865800f45056235efc7bef..5894730ec82a9fba3b9bca160de6e0db3a73024e 100644 (file)
@@ -164,13 +164,7 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
 
 static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
 {
-       unsigned int res;
-
-       local_bh_disable();
-       res = percpu_counter_sum_positive(&nf->mem);
-       local_bh_enable();
-
-       return res;
+       return percpu_counter_sum_positive(&nf->mem);
 }
 
 /* RFC 3168 support :
index 0574493e38993e7a0675b0bf6e3b9b58b5552f18..1178931288cbfc4d32ef5868ec11d98b2ba8df7d 100644 (file)
@@ -74,13 +74,21 @@ struct inet_ehash_bucket {
  * users logged onto your box, isn't it nice to know that new data
  * ports are created in O(1) time?  I thought so. ;-)  -DaveM
  */
+#define FASTREUSEPORT_ANY      1
+#define FASTREUSEPORT_STRICT   2
+
 struct inet_bind_bucket {
        possible_net_t          ib_net;
        unsigned short          port;
        signed char             fastreuse;
        signed char             fastreuseport;
        kuid_t                  fastuid;
-       int                     num_owners;
+#if IS_ENABLED(CONFIG_IPV6)
+       struct in6_addr         fast_v6_rcv_saddr;
+#endif
+       __be32                  fast_rcv_saddr;
+       unsigned short          fast_sk_family;
+       bool                    fast_ipv6_only;
        struct hlist_node       node;
        struct hlist_head       owners;
 };
@@ -203,10 +211,7 @@ void inet_hashinfo_init(struct inet_hashinfo *h);
 
 bool inet_ehash_insert(struct sock *sk, struct sock *osk);
 bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
-int __inet_hash(struct sock *sk, struct sock *osk,
-               int (*saddr_same)(const struct sock *sk1,
-                                 const struct sock *sk2,
-                                 bool match_wildcard));
+int __inet_hash(struct sock *sk, struct sock *osk);
 int inet_hash(struct sock *sk);
 void inet_unhash(struct sock *sk);
 
index c9cff977a7fb2cc9d8446715cc09a51e5090aff9..aa95053dfc78d35d04aef276e2a5dce7343f72a0 100644 (file)
@@ -206,7 +206,11 @@ struct inet_sock {
                                transparent:1,
                                mc_all:1,
                                nodefrag:1;
-       __u8                    bind_address_no_port:1;
+       __u8                    bind_address_no_port:1,
+                               defer_connect:1; /* Indicates that fastopen_connect is set
+                                                 * and a cookie exists, so we defer connect
+                                                 * until the first data frame is written
+                                                 */
        __u8                    rcv_tos;
        __u8                    convert_csum;
        int                     uc_index;
index c9b3eb70f340d48ffe60105622bee367c7ea848f..6a75d67a30fd80d15e40e86b59d6216da5e94989 100644 (file)
 
 #include <linux/atomic.h>
 
-struct inet_hashinfo;
-
-struct inet_timewait_death_row {
-       atomic_t                tw_count;
-
-       struct inet_hashinfo    *hashinfo ____cacheline_aligned_in_smp;
-       int                     sysctl_tw_recycle;
-       int                     sysctl_max_tw_buckets;
-};
-
 struct inet_bind_bucket;
 
 /*
@@ -125,8 +115,7 @@ static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo
 
 void inet_twsk_deschedule_put(struct inet_timewait_sock *tw);
 
-void inet_twsk_purge(struct inet_hashinfo *hashinfo,
-                    struct inet_timewait_death_row *twdr, int family);
+void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family);
 
 static inline
 struct net *twsk_net(const struct inet_timewait_sock *twsk)
index ab6761a7c883a756583f570dc861af1e1d30e67f..bf264a8db1ce3b1b29b9a5cc9732df323154e7e5 100644 (file)
@@ -263,11 +263,21 @@ static inline bool sysctl_dev_name_is_allowed(const char *name)
        return strcmp(name, "default") != 0  && strcmp(name, "all") != 0;
 }
 
+static inline int inet_prot_sock(struct net *net)
+{
+       return net->ipv4.sysctl_ip_prot_sock;
+}
+
 #else
 static inline int inet_is_local_reserved_port(struct net *net, int port)
 {
        return 0;
 }
+
+static inline int inet_prot_sock(struct net *net)
+{
+       return PROT_SOCK;
+}
 #endif
 
 __be32 inet_current_timestamp(void);
index a74e2aa40ef42d6e7edb917890164cce9f0fa835..c979c878df1c0923e803e73ad426a90fbe5d0668 100644 (file)
@@ -37,7 +37,9 @@ struct fib6_config {
        int             fc_ifindex;
        u32             fc_flags;
        u32             fc_protocol;
-       u32             fc_type;        /* only 8 bits are used */
+       u16             fc_type;        /* only 8 bits are used */
+       u16             fc_delete_all_nh : 1,
+                       __unused : 15;
 
        struct in6_addr fc_dst;
        struct in6_addr fc_src;
index 5f376af377c79bae8fdc02775938ddec38bd5f46..368bb4024b78c411d02a842f340f9fa11a9b5f7e 100644 (file)
@@ -211,14 +211,22 @@ struct fib_entry_notifier_info {
        u8 tos;
        u8 type;
        u32 tb_id;
-       u32 nlflags;
+};
+
+struct fib_nh_notifier_info {
+       struct fib_notifier_info info; /* must be first */
+       struct fib_nh *fib_nh;
 };
 
 enum fib_event_type {
+       FIB_EVENT_ENTRY_REPLACE,
+       FIB_EVENT_ENTRY_APPEND,
        FIB_EVENT_ENTRY_ADD,
        FIB_EVENT_ENTRY_DEL,
        FIB_EVENT_RULE_ADD,
        FIB_EVENT_RULE_DEL,
+       FIB_EVENT_NH_ADD,
+       FIB_EVENT_NH_DEL,
 };
 
 int register_fib_notifier(struct notifier_block *nb,
@@ -344,7 +352,6 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb);
 int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
                        u8 tos, int oif, struct net_device *dev,
                        struct in_device *idev, u32 *itag);
-void fib_select_default(const struct flowi4 *flp, struct fib_result *res);
 #ifdef CONFIG_IP_ROUTE_CLASSID
 static inline int fib_num_tclassid_users(struct net *net)
 {
index e893fe43dd139d827cd587c814c1cb0cd0a9c8fe..95056796657cf9b11c24ae1497f1d5c01a9fd178 100644 (file)
@@ -58,6 +58,7 @@ struct ip_tunnel_key {
 /* Flags for ip_tunnel_info mode. */
 #define IP_TUNNEL_INFO_TX      0x01    /* represents tx tunnel parameters */
 #define IP_TUNNEL_INFO_IPV6    0x02    /* key contains IPv6 addresses */
+#define IP_TUNNEL_INFO_BRIDGE  0x04    /* represents a bridged tunnel id */
 
 /* Maximum tunnel options length. */
 #define IP_TUNNEL_OPTS_MAX                                     \
@@ -261,8 +262,8 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
 int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
 int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
 
-struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
-                                               struct rtnl_link_stats64 *tot);
+void ip_tunnel_get_stats64(struct net_device *dev,
+                          struct rtnl_link_stats64 *tot);
 struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
                                   int link, __be16 flags,
                                   __be32 remote, __be32 local,
index cd6018a9ee2467cec1fbe4d621fc294bc843a05e..7bdfa7d783639d8b65c18bd7f5a6ea5fa4fbb7da 100644 (file)
@@ -1421,7 +1421,7 @@ static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
 
 static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
 {
-       if (atomic_dec_return(&dest->refcnt) < 0)
+       if (atomic_dec_and_test(&dest->refcnt))
                kfree(dest);
 }
 
@@ -1554,10 +1554,12 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
        if (!ct || !nf_ct_is_untracked(ct)) {
-               nf_conntrack_put(skb->nfct);
-               skb->nfct = &nf_ct_untracked_get()->ct_general;
-               skb->nfctinfo = IP_CT_NEW;
-               nf_conntrack_get(skb->nfct);
+               struct nf_conn *untracked;
+
+               nf_conntrack_put(&ct->ct_general);
+               untracked = nf_ct_untracked_get();
+               nf_conntrack_get(&untracked->ct_general);
+               nf_ct_set(skb, untracked, IP_CT_NEW);
        }
 #endif
 }
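
The ip_vs_dest_put_and_free() change above is worth a second look: freeing
on atomic_dec_return() < 0 runs one release too late, after the counter has
already gone negative, while atomic_dec_and_test() frees exactly when the
count reaches zero. Below is a standalone C11 sketch of that dec-and-test
refcount idiom; the names are hypothetical, not the ipvs code.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dest {
	atomic_int refcnt;
	int payload;
};

static void dest_put(struct dest *d)
{
	/* fetch_sub returns the old value: 1 means we dropped the last ref */
	if (atomic_fetch_sub(&d->refcnt, 1) == 1) {
		printf("last ref dropped, freeing\n");
		free(d);
	}
}

int main(void)
{
	struct dest *d = malloc(sizeof(*d));

	atomic_init(&d->refcnt, 2);	/* two owners */
	d->payload = 42;
	dest_put(d);	/* 2 -> 1, still owned elsewhere */
	dest_put(d);	/* 1 -> 0, frees */
	return 0;
}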
index e0f4109e64c6fca9ba87d768c2c7b1220a6557f4..2509728650bd79f8caabe6de6342c04eae0dd490 100644 (file)
@@ -505,25 +505,8 @@ static inline int iwe_stream_event_len_adjust(struct iw_request_info *info,
 /*
 * Wrapper to add a Wireless Event to a stream of events.
  */
-static inline char *
-iwe_stream_add_event(struct iw_request_info *info, char *stream, char *ends,
-                    struct iw_event *iwe, int event_len)
-{
-       int lcp_len = iwe_stream_lcp_len(info);
-
-       event_len = iwe_stream_event_len_adjust(info, event_len);
-
-       /* Check if it's possible */
-       if(likely((stream + event_len) < ends)) {
-               iwe->len = event_len;
-               /* Beware of alignement issues on 64 bits */
-               memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN);
-               memcpy(stream + lcp_len, &iwe->u,
-                      event_len - lcp_len);
-               stream += event_len;
-       }
-       return stream;
-}
+char *iwe_stream_add_event(struct iw_request_info *info, char *stream,
+                          char *ends, struct iw_event *iwe, int event_len);
 
 static inline char *
 iwe_stream_add_event_check(struct iw_request_info *info, char *stream,
@@ -541,26 +524,8 @@ iwe_stream_add_event_check(struct iw_request_info *info, char *stream,
 * Wrapper to add a short Wireless Event containing a pointer to a
  * stream of events.
  */
-static inline char *
-iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends,
-                    struct iw_event *iwe, char *extra)
-{
-       int event_len = iwe_stream_point_len(info) + iwe->u.data.length;
-       int point_len = iwe_stream_point_len(info);
-       int lcp_len   = iwe_stream_lcp_len(info);
-
-       /* Check if it's possible */
-       if(likely((stream + event_len) < ends)) {
-               iwe->len = event_len;
-               memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN);
-               memcpy(stream + lcp_len,
-                      ((char *) &iwe->u) + IW_EV_POINT_OFF,
-                      IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
-               memcpy(stream + point_len, extra, iwe->u.data.length);
-               stream += event_len;
-       }
-       return stream;
-}
+char *iwe_stream_add_point(struct iw_request_info *info, char *stream,
+                          char *ends, struct iw_event *iwe, char *extra);
 
 static inline char *
 iwe_stream_add_point_check(struct iw_request_info *info, char *stream,
@@ -579,25 +544,8 @@ iwe_stream_add_point_check(struct iw_request_info *info, char *stream,
 * Be careful, this one is tricky to use properly:
  * At the first run, you need to have (value = event + IW_EV_LCP_LEN).
  */
-static inline char *
-iwe_stream_add_value(struct iw_request_info *info, char *event, char *value,
-                    char *ends, struct iw_event *iwe, int event_len)
-{
-       int lcp_len = iwe_stream_lcp_len(info);
-
-       /* Don't duplicate LCP */
-       event_len -= IW_EV_LCP_LEN;
-
-       /* Check if it's possible */
-       if(likely((value + event_len) < ends)) {
-               /* Add new value */
-               memcpy(value, &iwe->u, event_len);
-               value += event_len;
-               /* Patch LCP */
-               iwe->len = value - event;
-               memcpy(event, (char *) iwe, lcp_len);
-       }
-       return value;
-}
+char *iwe_stream_add_value(struct iw_request_info *info, char *event,
+                          char *value, char *ends, struct iw_event *iwe,
+                          int event_len);
 
 #endif /* _IW_HANDLER_H */
index 0388b9c5f5e2c7257cc0eb19be469974becab5ed..ebfe237aad7e0fb4c9d165a0090708134768c239 100644 (file)
@@ -33,7 +33,7 @@ struct lwtunnel_state {
 };
 
 struct lwtunnel_encap_ops {
-       int (*build_state)(struct net_device *dev, struct nlattr *encap,
+       int (*build_state)(struct nlattr *encap,
                           unsigned int family, const void *cfg,
                           struct lwtunnel_state **ts);
        void (*destroy_state)(struct lwtunnel_state *lws);
@@ -109,7 +109,7 @@ int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
                           unsigned int num);
 int lwtunnel_valid_encap_type(u16 encap_type);
 int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len);
-int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+int lwtunnel_build_state(u16 encap_type,
                         struct nlattr *encap,
                         unsigned int family, const void *cfg,
                         struct lwtunnel_state **lws);
@@ -184,7 +184,7 @@ static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
        return 0;
 }
 
-static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+static inline int lwtunnel_build_state(u16 encap_type,
                                       struct nlattr *encap,
                                       unsigned int family, const void *cfg,
                                       struct lwtunnel_state **lws)
index 5345d358a510ce63d5c13fd48cfd7f9284405768..b9a08cd1d97d8486294197ccd72d3d4538f5e7ca 100644 (file)
@@ -147,7 +147,6 @@ enum ieee80211_ac_numbers {
        IEEE80211_AC_BE         = 2,
        IEEE80211_AC_BK         = 3,
 };
-#define IEEE80211_NUM_ACS      4
 
 /**
  * struct ieee80211_tx_queue_params - transmit queue configuration
@@ -1018,7 +1017,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_DECRYPTED: This frame was decrypted in hardware.
  * @RX_FLAG_MMIC_STRIPPED: the Michael MIC is stripped off this frame,
  *     verification has been done by the hardware.
- * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame.
+ * @RX_FLAG_IV_STRIPPED: The IV and ICV are stripped from this frame.
  *     If this flag is set, the stack cannot do any replay detection
  *     hence the driver or hardware will have to do that.
  * @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this
@@ -1089,6 +1088,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_ALLOW_SAME_PN: Allow the same PN as same packet before.
  *     This is used for AMSDU subframes which can have the same PN as
  *     the first subframe.
+ * @RX_FLAG_ICV_STRIPPED: The ICV is stripped from this frame. CRC checking must
+ *     be done in the hardware.
  */
 enum mac80211_rx_flags {
        RX_FLAG_MMIC_ERROR              = BIT(0),
@@ -1124,6 +1125,7 @@ enum mac80211_rx_flags {
        RX_FLAG_RADIOTAP_VENDOR_DATA    = BIT(31),
        RX_FLAG_MIC_STRIPPED            = BIT_ULL(32),
        RX_FLAG_ALLOW_SAME_PN           = BIT_ULL(33),
+       RX_FLAG_ICV_STRIPPED            = BIT_ULL(34),
 };
 
 #define RX_FLAG_STBC_SHIFT             26
@@ -1766,15 +1768,6 @@ struct ieee80211_sta_rates {
  * @max_amsdu_subframes: indicates the maximal number of MSDUs in a single
  *     A-MSDU. Taken from the Extended Capabilities element. 0 means
  *     unlimited.
- * @max_amsdu_len: indicates the maximal length of an A-MSDU in bytes. This
- *     field is always valid for packets with a VHT preamble. For packets
- *     with a HT preamble, additional limits apply:
- *             + If the skb is transmitted as part of a BA agreement, the
- *               A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
- *             + If the skb is not part of a BA aggreement, the A-MSDU maximal
- *               size is min(max_amsdu_len, 7935) bytes.
- *     Both additional HT limits must be enforced by the low level driver.
- *     This is defined by the spec (IEEE 802.11-2012 section 8.3.2.2 NOTE 2).
  * @support_p2p_ps: indicates whether the STA supports P2P PS mechanism or not.
  * @max_rc_amsdu_len: Maximum A-MSDU size in bytes recommended by rate control.
  * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
@@ -1797,6 +1790,22 @@ struct ieee80211_sta {
        bool tdls_initiator;
        bool mfp;
        u8 max_amsdu_subframes;
+
+       /**
+        * @max_amsdu_len:
+        * indicates the maximal length of an A-MSDU in bytes.
+        * This field is always valid for packets with a VHT preamble.
+        * For packets with a HT preamble, additional limits apply:
+        *
+        * * If the skb is transmitted as part of a BA agreement, the
+        *   A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
+        * * If the skb is not part of a BA agreement, the A-MSDU maximal
+        *   size is min(max_amsdu_len, 7935) bytes.
+        *
+        * Both additional HT limits must be enforced by the low level
+        * driver. This is defined by the spec (IEEE 802.11-2012 section
+        * 8.3.2.2 NOTE 2).
+        */
        u16 max_amsdu_len;
        bool support_p2p_ps;
        u16 max_rc_amsdu_len;
@@ -3201,26 +3210,6 @@ enum ieee80211_reconfig_type {
  *     Returns non-zero if this device sent the last beacon.
  *     The callback can sleep.
  *
- * @ampdu_action: Perform a certain A-MPDU action
- *     The RA/TID combination determines the destination and TID we want
- *     the ampdu action to be performed for. The action is defined through
- *     ieee80211_ampdu_mlme_action.
- *     When the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL the driver
- *     may neither send aggregates containing more subframes than @buf_size
- *     nor send aggregates in a way that lost frames would exceed the
- *     buffer size. If just limiting the aggregate size, this would be
- *     possible with a buf_size of 8:
- *      - TX: 1.....7
- *      - RX:  2....7 (lost frame #1)
- *      - TX:        8..1...
- *     which is invalid since #1 was now re-transmitted well past the
- *     buffer size of 8. Correct ways to retransmit #1 would be:
- *      - TX:       1 or 18 or 81
- *     Even "189" would be wrong since 1 could be lost again.
- *
- *     Returns a negative error code on failure.
- *     The callback can sleep.
- *
  * @get_survey: Return per-channel survey information
  *
  * @rfkill_poll: Poll rfkill hardware state. If you need this, you also
@@ -3573,6 +3562,35 @@ struct ieee80211_ops {
                           s64 offset);
        void (*reset_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
        int (*tx_last_beacon)(struct ieee80211_hw *hw);
+
+       /**
+        * @ampdu_action:
+        * Perform a certain A-MPDU action.
+        * The RA/TID combination determines the destination and TID we want
+        * the ampdu action to be performed for. The action is defined through
+        * ieee80211_ampdu_mlme_action.
+        * When the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL the driver
+        * may neither send aggregates containing more subframes than @buf_size
+        * nor send aggregates in a way that lost frames would exceed the
+        * buffer size. If just limiting the aggregate size, this would be
+        * possible with a buf_size of 8:
+        *
+        * - ``TX: 1.....7``
+        * - ``RX:  2....7`` (lost frame #1)
+        * - ``TX:        8..1...``
+        *
+        * which is invalid since #1 was now re-transmitted well past the
+        * buffer size of 8. Correct ways to retransmit #1 would be:
+        *
+        * - ``TX:        1   or``
+        * - ``TX:        18  or``
+        * - ``TX:        81``
+        *
+        * Even ``189`` would be wrong since 1 could be lost again.
+        *
+        * Returns a negative error code on failure.
+        * The callback can sleep.
+        */
        int (*ampdu_action)(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
                            struct ieee80211_ampdu_params *params);
@@ -5260,6 +5278,7 @@ void ieee80211_resume_disconnect(struct ieee80211_vif *vif);
  *
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
  * @rssi_event: the RSSI trigger event type
+ * @rssi_level: new RSSI level value or 0 if not available
  * @gfp: context flags
  *
  * When the %IEEE80211_VIF_SUPPORTS_CQM_RSSI is set, and a connection quality
@@ -5268,6 +5287,7 @@ void ieee80211_resume_disconnect(struct ieee80211_vif *vif);
  */
 void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
                               enum nl80211_cqm_rssi_threshold_event rssi_event,
+                              s32 rssi_level,
                               gfp_t gfp);
 
 /**
index d562a2fe48604b278b08834af5c60f14c7405711..8a0214654b6b10bc480d7e6dc8195555ca58dc9a 100644 (file)
@@ -391,6 +391,23 @@ static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, cons
        return n;
 }
 
+static inline void __ipv6_confirm_neigh(struct net_device *dev,
+                                       const void *pkey)
+{
+       struct neighbour *n;
+
+       rcu_read_lock_bh();
+       n = __ipv6_neigh_lookup_noref(dev, pkey);
+       if (n) {
+               unsigned long now = jiffies;
+
+               /* avoid dirtying neighbour */
+               if (n->confirmed != now)
+                       n->confirmed = now;
+       }
+       rcu_read_unlock_bh();
+}
+
 int ndisc_init(void);
 int ndisc_late_init(void);
 
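The "avoid dirtying neighbour" test in __ipv6_confirm_neigh() above is a
deliberate pattern: on a hot path, compare before you store, so a cache
line shared by many CPUs is only written when the value actually changes.
A tiny standalone illustration of the same check-before-write idea (the
jiffies-like counter here is a stand-in):

#include <stdio.h>

static unsigned long now = 1000;	/* stand-in for jiffies */

struct neigh {
	unsigned long confirmed;	/* lives on a hot, shared cache line */
	unsigned long writes;		/* count actual stores for the demo */
};

static void confirm(struct neigh *n)
{
	/* only dirty the cache line when the timestamp really moved */
	if (n->confirmed != now) {
		n->confirmed = now;
		n->writes++;
	}
}

int main(void)
{
	struct neigh n = { 0, 0 };

	confirm(&n);	/* first packet this jiffy: one store */
	confirm(&n);	/* same jiffy again: no store, line stays clean */
	printf("stores issued: %lu\n", n.writes);
	return 0;
}
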
index 8b683841e5743f011a0d362af4ea9c26d0e75c77..5ebf6949116097f60e668b0c2c4c48dd1639e5e8 100644 (file)
@@ -468,6 +468,16 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
        return dev_queue_xmit(skb);
 }
 
+static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
+{
+       const struct hh_cache *hh = &n->hh;
+
+       if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
+               return neigh_hh_output(hh, skb);
+       else
+               return n->output(n, skb);
+}
+
 static inline struct neighbour *
 __neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
 {
index 919e4e8af3272b3d66580e4c60c79bdc39b47616..6ff32815641b21ba0d3c20214806c4f16aa217ca 100644 (file)
@@ -14,6 +14,7 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
 
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
 #ifdef CONFIG_NF_CT_PROTO_DCCP
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;
index eaea968f86570db7010011b1af4a3608f2dee8f9..c59b82456f89cd421fde702f13c7cfe59f73266a 100644 (file)
@@ -5,6 +5,7 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
 
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
 #ifdef CONFIG_NF_CT_PROTO_DCCP
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
index 5916aa9ab3f0070411f6c3eb2842fadcdb605510..f540f9ad2af4f673a204875864ce73f423204a74 100644 (file)
@@ -34,6 +34,7 @@ union nf_conntrack_proto {
        struct ip_ct_sctp sctp;
        struct ip_ct_tcp tcp;
        struct nf_ct_gre gre;
+       unsigned int tmpl_padto;
 };
 
 union nf_conntrack_expect_proto {
@@ -75,7 +76,7 @@ struct nf_conn {
        /* Usage count in here is 1 for hash table, 1 per skb,
         * plus 1 for any connection(s) we are `master' for
         *
-        * Hint, SKB address this struct and refcnt via skb->nfct and
+        * Hint, SKB address this struct and refcnt via skb->_nfct and
         * helpers nf_conntrack_get() and nf_conntrack_put().
         * Helper nf_ct_put() equals nf_conntrack_put() by dec refcnt,
         * beware nf_ct_get() is different and doesn't inc refcnt.
@@ -162,12 +163,16 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
 int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
                             const struct nf_conn *ignored_conntrack);
 
+#define NFCT_INFOMASK  7UL
+#define NFCT_PTRMASK   ~(NFCT_INFOMASK)
+
 /* Return conntrack_info and tuple hash for given skb. */
 static inline struct nf_conn *
 nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
 {
-       *ctinfo = skb->nfctinfo;
-       return (struct nf_conn *)skb->nfct;
+       *ctinfo = skb->_nfct & NFCT_INFOMASK;
+
+       return (struct nf_conn *)(skb->_nfct & NFCT_PTRMASK);
 }
 
 /* decrement reference count on a conntrack */
@@ -341,6 +346,12 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
                                 gfp_t flags);
 void nf_ct_tmpl_free(struct nf_conn *tmpl);
 
+static inline void
+nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
+{
+       skb->_nfct = (unsigned long)ct | info;
+}
+
 #define NF_CT_STAT_INC(net, count)       __this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v))
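
The new nf_ct_get()/nf_ct_set() pair above folds the old skb->nfct pointer
and skb->nfctinfo value into a single tagged word, skb->_nfct: struct
nf_conn objects are at least 8-byte aligned, so the low three bits of the
address are always zero and can carry the ctinfo (NFCT_INFOMASK = 7UL).
A self-contained sketch of that pointer-tagging trick, with the alignment
assumption asserted explicitly:

#include <stdint.h>
#include <stdalign.h>
#include <assert.h>
#include <stdio.h>

#define INFOMASK 7UL

struct conn {
	alignas(8) int id;	/* force the 8-byte alignment we rely on */
};

static uintptr_t set_tagged(struct conn *c, unsigned int info)
{
	assert(((uintptr_t)c & INFOMASK) == 0 && info <= INFOMASK);
	return (uintptr_t)c | info;
}

static struct conn *get_tagged(uintptr_t word, unsigned int *info)
{
	*info = word & INFOMASK;
	return (struct conn *)(word & ~INFOMASK);
}

int main(void)
{
	struct conn c = { .id = 1 };
	unsigned int info;
	uintptr_t word = set_tagged(&c, 2);	/* 2 == IP_CT_NEW */
	struct conn *p = get_tagged(word, &info);

	printf("id %d, info %u\n", p->id, info);
	return 0;
}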
index 62e17d1319ff7423dbcf176815f04cecfcdf3371..84ec7ca5f195db411b7e07c6dce641a286b5ea0a 100644 (file)
@@ -62,7 +62,7 @@ int __nf_conntrack_confirm(struct sk_buff *skb);
 /* Confirm a connection: returns NF_DROP if packet must be dropped. */
 static inline int nf_conntrack_confirm(struct sk_buff *skb)
 {
-       struct nf_conn *ct = (struct nf_conn *)skb->nfct;
+       struct nf_conn *ct = (struct nf_conn *)skb_nfct(skb);
        int ret = NF_ACCEPT;
 
        if (ct && !nf_ct_is_untracked(ct)) {
index e7b836590f0b7a24e13659063b7aa87ad133885e..85e993e278d5e1e7a886e772dd69f5031214410d 100644 (file)
@@ -55,7 +55,7 @@ struct nf_conntrack_l4proto {
        void (*destroy)(struct nf_conn *ct);
 
        int (*error)(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
-                    unsigned int dataoff, enum ip_conntrack_info *ctinfo,
+                    unsigned int dataoff,
                     u_int8_t pf, unsigned int hooknum);
 
        /* Print out the per-protocol part of the tuple. Return like seq_* */
index 450f87f95415824586a1dae0bbfdcef467e6dc7a..42e0696f38d80f0b8b03bc18cf7413aa1893a6de 100644 (file)
@@ -51,6 +51,9 @@ struct nf_logger {
        struct module           *me;
 };
 
+/* sysctl_nf_log_all_netns - allow LOG target in all network namespaces */
+extern int sysctl_nf_log_all_netns;
+
 /* Function to register/unregister log function. */
 int nf_log_register(u_int8_t pf, struct nf_logger *logger);
 void nf_log_unregister(struct nf_logger *logger);
index 7dfdb517f0be826018cf649bb2e410e44d8f9c36..ac84686aaafb0b3fadd1ed1883943cecc5f19ee5 100644 (file)
@@ -203,6 +203,7 @@ struct nft_set_elem {
 struct nft_set;
 struct nft_set_iter {
        u8              genmask;
+       bool            flush;
        unsigned int    count;
        unsigned int    skip;
        int             err;
@@ -243,11 +244,13 @@ enum nft_set_class {
  *                               characteristics
  *
  *     @size: required memory
- *     @class: lookup performance class
+ *     @lookup: lookup performance class
+ *     @space: memory class
  */
 struct nft_set_estimate {
        unsigned int            size;
-       enum nft_set_class      class;
+       enum nft_set_class      lookup;
+       enum nft_set_class      space;
 };
 
 struct nft_set_ext;
@@ -260,7 +263,7 @@ struct nft_expr;
  *     @insert: insert new element into set
  *     @activate: activate new element in the next generation
  *     @deactivate: lookup for element and deactivate it in the next generation
- *     @deactivate_one: deactivate element in the next generation
+ *     @flush: deactivate element in the next generation
  *     @remove: remove element from set
 *     @walk: iterate over all set elements
  *     @privsize: function to return size of set private data
@@ -295,10 +298,11 @@ struct nft_set_ops {
        void *                          (*deactivate)(const struct net *net,
                                                      const struct nft_set *set,
                                                      const struct nft_set_elem *elem);
-       bool                            (*deactivate_one)(const struct net *net,
-                                                         const struct nft_set *set,
-                                                         void *priv);
-       void                            (*remove)(const struct nft_set *set,
+       bool                            (*flush)(const struct net *net,
+                                                const struct nft_set *set,
+                                                void *priv);
+       void                            (*remove)(const struct net *net,
+                                                 const struct nft_set *set,
                                                  const struct nft_set_elem *elem);
        void                            (*walk)(const struct nft_ctx *ctx,
                                                struct nft_set *set,
@@ -1198,10 +1202,13 @@ struct nft_trans {
 
 struct nft_trans_rule {
        struct nft_rule                 *rule;
+       u32                             rule_id;
 };
 
 #define nft_trans_rule(trans)  \
        (((struct nft_trans_rule *)trans->data)->rule)
+#define nft_trans_rule_id(trans)       \
+       (((struct nft_trans_rule *)trans->data)->rule_id)
 
 struct nft_trans_set {
        struct nft_set                  *set;
index d3938f11ae52ee234ea0b4c4e3f7b2e37615601d..b239fcd33d8091268fd793fd45cc755264fa592f 100644 (file)
@@ -229,6 +229,7 @@ struct nl_info {
        struct nlmsghdr         *nlh;
        struct net              *nl_net;
        u32                     portid;
+       bool                    skip_notify;
 };
 
 int netlink_rcv_skb(struct sk_buff *skb,
index cf799fc3fdec488bf1c352cf228e719a392b4801..17724c62de97c272ccba7960a7187b7f4325b56b 100644 (file)
@@ -69,19 +69,6 @@ struct nf_sctp_net {
 };
 #endif
 
-#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-enum udplite_conntrack {
-       UDPLITE_CT_UNREPLIED,
-       UDPLITE_CT_REPLIED,
-       UDPLITE_CT_MAX
-};
-
-struct nf_udplite_net {
-       struct nf_proto_net pn;
-       unsigned int timeouts[UDPLITE_CT_MAX];
-};
-#endif
-
 struct nf_ip_net {
        struct nf_generic_net   generic;
        struct nf_tcp_net       tcp;
@@ -94,9 +81,6 @@ struct nf_ip_net {
 #ifdef CONFIG_NF_CT_PROTO_SCTP
        struct nf_sctp_net      sctp;
 #endif
-#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-       struct nf_udplite_net   udplite;
-#endif
 };
 
 struct ct_pcpu {
index 0378e88f6fd3d6076f0393d67343cdb92643dbb4..622d2da27135586d164c228b81e71afb922d5d8c 100644 (file)
@@ -27,6 +27,16 @@ struct ping_group_range {
        kgid_t          range[2];
 };
 
+struct inet_hashinfo;
+
+struct inet_timewait_death_row {
+       atomic_t                tw_count;
+
+       struct inet_hashinfo    *hashinfo ____cacheline_aligned_in_smp;
+       int                     sysctl_tw_recycle;
+       int                     sysctl_max_tw_buckets;
+};
+
 struct netns_ipv4 {
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *forw_hdr;
@@ -111,6 +121,12 @@ struct netns_ipv4 {
        int sysctl_tcp_fin_timeout;
        unsigned int sysctl_tcp_notsent_lowat;
        int sysctl_tcp_tw_reuse;
+       struct inet_timewait_death_row tcp_death_row;
+       int sysctl_max_syn_backlog;
+
+#ifdef CONFIG_NET_L3_MASTER_DEV
+       int sysctl_udp_l3mdev_accept;
+#endif
 
        int sysctl_igmp_max_memberships;
        int sysctl_igmp_max_msf;
@@ -123,6 +139,7 @@ struct netns_ipv4 {
 
 #ifdef CONFIG_SYSCTL
        unsigned long *sysctl_local_reserved_ports;
+       int sysctl_ip_prot_sock;
 #endif
 
 #ifdef CONFIG_IP_MROUTE
index c501d67172b125d55108ca407f4c50a158da0e14..b7871d0183541c63d53ac9274aa93cf8982bd1fb 100644 (file)
@@ -118,6 +118,9 @@ struct netns_sctp {
        /* Flag to indicate if PR-SCTP is enabled. */
        int prsctp_enable;
 
+       /* Flag to indicate if stream RE-CONFIG is enabled. */
+       int reconf_enable;
+
        /* Flag to indicate if SCTP-AUTH is enabled */
        int auth_enable;
 
index f0a051480c6c27e1a360a9c041acd582114c4aae..269fd78bb0ae0a88b2affa858a75bd646322a9fc 100644 (file)
@@ -17,6 +17,14 @@ struct tcf_walker {
 int register_tcf_proto_ops(struct tcf_proto_ops *ops);
 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
 
+#ifdef CONFIG_NET_CLS
+void tcf_destroy_chain(struct tcf_proto __rcu **fl);
+#else
+static inline void tcf_destroy_chain(struct tcf_proto __rcu **fl)
+{
+}
+#endif
+
 static inline unsigned long
 __cls_set_class(unsigned long *clp, unsigned long cl)
 {
@@ -473,6 +481,11 @@ static inline bool tc_flags_valid(u32 flags)
        return true;
 }
 
+static inline bool tc_in_hw(u32 flags)
+{
+       return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
+}
+
 enum tc_fl_command {
        TC_CLSFLOWER_REPLACE,
        TC_CLSFLOWER_DESTROY,
@@ -481,6 +494,7 @@ enum tc_fl_command {
 
 struct tc_cls_flower_offload {
        enum tc_fl_command command;
+       u32 prio;
        unsigned long cookie;
        struct flow_dissector *dissector;
        struct fl_flow_key *mask;
@@ -515,4 +529,12 @@ struct tc_cls_bpf_offload {
        u32 gen_flags;
 };
 
+
+/* This structure holds the cookie that is passed from user space
+ * to the kernel for actions and classifiers
+ */
+struct tc_cookie {
+       u8  *data;
+       u32 len;
+};
 #endif
diff --git a/include/net/psample.h b/include/net/psample.h
new file mode 100644 (file)
index 0000000..8888b0e
--- /dev/null
@@ -0,0 +1,36 @@
+#ifndef __NET_PSAMPLE_H
+#define __NET_PSAMPLE_H
+
+#include <uapi/linux/psample.h>
+#include <linux/module.h>
+#include <linux/list.h>
+
+struct psample_group {
+       struct list_head list;
+       struct net *net;
+       u32 group_num;
+       u32 refcount;
+       u32 seq;
+};
+
+struct psample_group *psample_group_get(struct net *net, u32 group_num);
+void psample_group_put(struct psample_group *group);
+
+#if IS_ENABLED(CONFIG_PSAMPLE)
+
+void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
+                          u32 trunc_size, int in_ifindex, int out_ifindex,
+                          u32 sample_rate);
+
+#else
+
+static inline void psample_sample_packet(struct psample_group *group,
+                                        struct sk_buff *skb, u32 trunc_size,
+                                        int in_ifindex, int out_ifindex,
+                                        u32 sample_rate)
+{
+}
+
+#endif
+
+#endif /* __NET_PSAMPLE_H */
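
Both new headers in this merge (ife.h and psample.h) use the same
compile-out pattern: under IS_ENABLED(CONFIG_...) the real prototypes are
declared, otherwise static inline no-op stubs keep every caller compiling
without #ifdef clutter at the call sites. A minimal standalone sketch of
the pattern, with CONFIG_DEMO standing in for a real kernel config symbol:

#include <stdio.h>

/* #define CONFIG_DEMO 1 */	/* toggle to "build in" the feature */

#ifdef CONFIG_DEMO
static void demo_sample(int value)
{
	printf("sampled %d\n", value);
}
#else
static inline void demo_sample(int value)
{
	(void)value;	/* feature compiled out: no-op stub */
}
#endif

int main(void)
{
	demo_sample(42);	/* call site is identical either way */
	return 0;
}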
index 6ebe13eb1c4cbcd84c4b3345b051b3320d7591e6..a12a5d25b27e650166cdd1bd8513317874b714da 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * NET         Generic infrastructure for Network protocols.
  *
- *             Definitions for request_sock 
+ *             Definitions for request_sock
  *
  * Authors:    Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  *
@@ -123,8 +123,6 @@ static inline void reqsk_put(struct request_sock *req)
                reqsk_free(req);
 }
 
-extern int sysctl_max_syn_backlog;
-
 /*
  * For a TCP Fast Open listener -
  *     lock - protects the access to all the reqsk, which is co-owned by
index 4113916cc1bb055bf16ba5f53819f2bdeeca2ef0..106de5f7bf0675e29114cde65b7ea8a4e557c1ce 100644 (file)
@@ -139,6 +139,10 @@ struct rtnl_af_ops {
                                                    const struct nlattr *attr);
        int                     (*set_link_af)(struct net_device *dev,
                                               const struct nlattr *attr);
+
+       int                     (*fill_stats_af)(struct sk_buff *skb,
+                                                const struct net_device *dev);
+       size_t                  (*get_stats_af_size)(const struct net_device *dev);
 };
 
 void __rtnl_af_unregister(struct rtnl_af_ops *ops);
index 498f81b229a4565e140f1a054ce931e80361e8b2..aeec4086afb2446dadb1fb8c54ad54a909634380 100644 (file)
@@ -405,19 +405,35 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops, u32 parentid);
 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab);
-bool tcf_destroy(struct tcf_proto *tp, bool force);
-void tcf_destroy_chain(struct tcf_proto __rcu **fl);
 int skb_do_redirect(struct sk_buff *);
 
+static inline void skb_reset_tc(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+       skb->tc_redirected = 0;
+#endif
+}
+
 static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_CLS_ACT
-       return G_TC_AT(skb->tc_verd) & AT_INGRESS;
+       return skb->tc_at_ingress;
 #else
        return false;
 #endif
 }
 
+static inline bool skb_skip_tc_classify(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+       if (skb->tc_skip_classify) {
+               skb->tc_skip_classify = 0;
+               return true;
+       }
+#endif
+       return false;
+}
+
 /* Reset all TX qdiscs greater then index of a device.  */
 static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
 {
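
The skip-classify bit is consume-on-read: the first classification pass that sees it clears it and bypasses the classifiers once, which is how devices such as IFB avoid re-classifying a packet they injected. A minimal sketch of an entry point honouring it (illustrative only):

/* Illustrative ingress entry point; the flag is cleared on first use. */
static int example_classify_entry(struct sk_buff *skb)
{
	if (skb_skip_tc_classify(skb))
		return 0;	/* injected by IFB etc.: bypass once */
	/* ... run tc classifiers here ... */
	return 0;
}
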
index 5b847e49f7e9bf31743a711b38f69fbfb746296a..3567c971cf3bbb379a65ff26925e7c3ff3352d65 100644 (file)
@@ -90,6 +90,7 @@ typedef enum {
        SCTP_EVENT_TIMEOUT_T4_RTO,
        SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD,
        SCTP_EVENT_TIMEOUT_HEARTBEAT,
+       SCTP_EVENT_TIMEOUT_RECONF,
        SCTP_EVENT_TIMEOUT_SACK,
        SCTP_EVENT_TIMEOUT_AUTOCLOSE,
 } sctp_event_timeout_t;
@@ -113,9 +114,10 @@ typedef enum {
        SCTP_PRIMITIVE_SEND,
        SCTP_PRIMITIVE_REQUESTHEARTBEAT,
        SCTP_PRIMITIVE_ASCONF,
+       SCTP_PRIMITIVE_RECONF,
 } sctp_event_primitive_t;
 
-#define SCTP_EVENT_PRIMITIVE_MAX       SCTP_PRIMITIVE_ASCONF
+#define SCTP_EVENT_PRIMITIVE_MAX       SCTP_PRIMITIVE_RECONF
 #define SCTP_NUM_PRIMITIVE_TYPES       (SCTP_EVENT_PRIMITIVE_MAX + 1)
 
 /* We define here a utility type for manipulating subtypes.
index d8833a86cd7e48192f4c3b5d984ac76aee3b7516..6dfc5536a3e65796eb95a6ecd33ce5ab62e65e03 100644 (file)
@@ -141,6 +141,8 @@ int sctp_primitive_ABORT(struct net *, struct sctp_association *, void *arg);
 int sctp_primitive_SEND(struct net *, struct sctp_association *, void *arg);
 int sctp_primitive_REQUESTHEARTBEAT(struct net *, struct sctp_association *, void *arg);
 int sctp_primitive_ASCONF(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_RECONF(struct net *net, struct sctp_association *asoc,
+                         void *arg);
 
 /*
  * sctp/input.c
@@ -191,6 +193,15 @@ void sctp_remaddr_proc_exit(struct net *net);
  */
 int sctp_offload_init(void);
 
+/*
+ * sctp/stream.c
+ */
+int sctp_send_reset_streams(struct sctp_association *asoc,
+                           struct sctp_reset_streams *params);
+int sctp_send_reset_assoc(struct sctp_association *asoc);
+int sctp_send_add_streams(struct sctp_association *asoc,
+                         struct sctp_add_streams *params);
+
 /*
  * Module global variables
  */
@@ -283,7 +294,6 @@ extern atomic_t sctp_dbg_objcnt_chunk;
 extern atomic_t sctp_dbg_objcnt_bind_addr;
 extern atomic_t sctp_dbg_objcnt_bind_bucket;
 extern atomic_t sctp_dbg_objcnt_addr;
-extern atomic_t sctp_dbg_objcnt_ssnmap;
 extern atomic_t sctp_dbg_objcnt_datamsg;
 extern atomic_t sctp_dbg_objcnt_keys;
 
@@ -586,10 +596,8 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
  */
 static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
 {
-       if (t->dst && !dst_check(t->dst, t->dst_cookie)) {
-               dst_release(t->dst);
-               t->dst = NULL;
-       }
+       if (t->dst && !dst_check(t->dst, t->dst_cookie))
+               sctp_transport_dst_release(t);
 
        return t->dst;
 }
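
Callers revalidate the cached route this way before each transmit; a stale dst is now dropped through the single sctp_transport_dst_release() helper rather than open-coded release. A thin illustrative caller, assuming a real user would perform a fresh route lookup on failure:

/* Illustrative caller; a real one would re-route when this fails. */
static bool example_transport_route_ok(struct sctp_transport *t)
{
	return sctp_transport_dst_check(t) != NULL;
}
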
index ca6c971dd74aede829d4512ddf71006520c78f47..3675fde3a26e3ac17aabbb320181ad4d7d6f2c0d 100644 (file)
@@ -157,6 +157,7 @@ sctp_state_fn_t sctp_sf_error_shutdown;
 sctp_state_fn_t sctp_sf_ignore_primitive;
 sctp_state_fn_t sctp_sf_do_prm_requestheartbeat;
 sctp_state_fn_t sctp_sf_do_prm_asconf;
+sctp_state_fn_t sctp_sf_do_prm_reconf;
 
 /* Prototypes for other event state functions.  */
 sctp_state_fn_t sctp_sf_do_no_pending_tsn;
@@ -167,6 +168,7 @@ sctp_state_fn_t sctp_sf_cookie_wait_icmp_abort;
 
 /* Prototypes for timeout event state functions.  */
 sctp_state_fn_t sctp_sf_do_6_3_3_rtx;
+sctp_state_fn_t sctp_sf_send_reconf;
 sctp_state_fn_t sctp_sf_do_6_2_sack;
 sctp_state_fn_t sctp_sf_autoclose_timer_expire;
 
@@ -259,7 +261,15 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
                                    __u32 new_cum_tsn, size_t nstreams,
                                    struct sctp_fwdtsn_skip *skiplist);
 struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc);
-
+struct sctp_chunk *sctp_make_strreset_req(
+                               const struct sctp_association *asoc,
+                               __u16 stream_num, __u16 *stream_list,
+                               bool out, bool in);
+struct sctp_chunk *sctp_make_strreset_tsnreq(
+                               const struct sctp_association *asoc);
+struct sctp_chunk *sctp_make_strreset_addstrm(
+                               const struct sctp_association *asoc,
+                               __u16 out, __u16 in);
 void sctp_chunk_assign_tsn(struct sctp_chunk *);
 void sctp_chunk_assign_ssn(struct sctp_chunk *);
 
@@ -275,6 +285,7 @@ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
 /* 2nd level prototypes */
 void sctp_generate_t3_rtx_event(unsigned long peer);
 void sctp_generate_heartbeat_event(unsigned long peer);
+void sctp_generate_reconf_event(unsigned long peer);
 void sctp_generate_proto_unreach_event(unsigned long peer);
 
 void sctp_ootb_pkt_free(struct sctp_packet *);
index 92daabdc007d94a544baa10d278d229e42e40eb3..387c802bf248fe60adac88fc47e021a7d38188e6 100644 (file)
@@ -82,7 +82,6 @@ struct sctp_outq;
 struct sctp_bind_addr;
 struct sctp_ulpq;
 struct sctp_ep_common;
-struct sctp_ssnmap;
 struct crypto_shash;
 
 
@@ -375,56 +374,24 @@ typedef struct sctp_sender_hb_info {
        union sctp_addr daddr;
        unsigned long sent_at;
        __u64 hb_nonce;
-} __packed sctp_sender_hb_info_t;
+} sctp_sender_hb_info_t;
 
-/*
- *  RFC 2960 1.3.2 Sequenced Delivery within Streams
- *
- *  The term "stream" is used in SCTP to refer to a sequence of user
- *  messages that are to be delivered to the upper-layer protocol in
- *  order with respect to other messages within the same stream.  This is
- *  in contrast to its usage in TCP, where it refers to a sequence of
- *  bytes (in this document a byte is assumed to be eight bits).
- *  ...
- *
- *  This is the structure we use to track both our outbound and inbound
- *  SSN, or Stream Sequence Numbers.
- */
-
-struct sctp_stream {
-       __u16 *ssn;
-       unsigned int len;
-};
-
-struct sctp_ssnmap {
-       struct sctp_stream in;
-       struct sctp_stream out;
-};
-
-struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
-                                   gfp_t gfp);
-void sctp_ssnmap_free(struct sctp_ssnmap *map);
-void sctp_ssnmap_clear(struct sctp_ssnmap *map);
+struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp);
+void sctp_stream_free(struct sctp_stream *stream);
+void sctp_stream_clear(struct sctp_stream *stream);
 
 /* What is the current SSN number for this stream? */
-static inline __u16 sctp_ssn_peek(struct sctp_stream *stream, __u16 id)
-{
-       return stream->ssn[id];
-}
+#define sctp_ssn_peek(stream, type, sid) \
+       ((stream)->type[sid].ssn)
 
 /* Return the next SSN number for this stream. */
-static inline __u16 sctp_ssn_next(struct sctp_stream *stream, __u16 id)
-{
-       return stream->ssn[id]++;
-}
+#define sctp_ssn_next(stream, type, sid) \
+       ((stream)->type[sid].ssn++)
 
 /* Skip over this ssn and all below. */
-static inline void sctp_ssn_skip(struct sctp_stream *stream, __u16 id, 
-                                __u16 ssn)
-{
-       stream->ssn[id] = ssn+1;
-}
-              
+#define sctp_ssn_skip(stream, type, sid, ssn) \
+       ((stream)->type[sid].ssn = ssn + 1)
+
 /*
  * Pointers to address related SCTP functions.
  * (i.e. things that depend on the address family.)
@@ -722,10 +689,9 @@ struct sctp_packet {
            ipfragok:1;         /* So let ip fragment this packet */
 };
 
-struct sctp_packet *sctp_packet_init(struct sctp_packet *,
-                                    struct sctp_transport *,
-                                    __u16 sport, __u16 dport);
-struct sctp_packet *sctp_packet_config(struct sctp_packet *, __u32 vtag, int);
+void sctp_packet_init(struct sctp_packet *, struct sctp_transport *,
+                     __u16 sport, __u16 dport);
+void sctp_packet_config(struct sctp_packet *, __u32 vtag, int);
 sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *,
                                       struct sctp_chunk *, int, gfp_t);
 sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *,
@@ -838,6 +804,8 @@ struct sctp_transport {
 
        __u32 burst_limited;    /* Holds old cwnd when max.burst is applied */
 
+       __u32 dst_pending_confirm;      /* need to confirm neighbour */
+
        /* Destination */
        struct dst_entry *dst;
        /* Source address. */
@@ -911,6 +879,9 @@ struct sctp_transport {
        /* Timer to handle ICMP proto unreachable events */
        struct timer_list proto_unreach_timer;
 
+       /* Timer to handle reconf chunk rtx */
+       struct timer_list reconf_timer;
+
        /* Since we're using per-destination retransmission timers
         * (see above), we're also using per-destination "transmitted"
         * queues.  This probably ought to be a private struct
@@ -969,6 +940,7 @@ void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
 void sctp_transport_free(struct sctp_transport *);
 void sctp_transport_reset_t3_rtx(struct sctp_transport *);
 void sctp_transport_reset_hb_timer(struct sctp_transport *);
+void sctp_transport_reset_reconf_timer(struct sctp_transport *transport);
 int sctp_transport_hold(struct sctp_transport *);
 void sctp_transport_put(struct sctp_transport *);
 void sctp_transport_update_rto(struct sctp_transport *, __u32);
@@ -980,6 +952,8 @@ unsigned long sctp_transport_timeout(struct sctp_transport *);
 void sctp_transport_reset(struct sctp_transport *);
 void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32);
 void sctp_transport_immediate_rtx(struct sctp_transport *);
+void sctp_transport_dst_release(struct sctp_transport *t);
+void sctp_transport_dst_confirm(struct sctp_transport *t);
 
 
 /* This is the structure we use to queue packets as they come into
@@ -1285,7 +1259,10 @@ struct sctp_endpoint {
        struct list_head endpoint_shared_keys;
        __u16 active_key_id;
        __u8  auth_enable:1,
-             prsctp_enable:1;
+             prsctp_enable:1,
+             reconf_enable:1;
+
+       __u8  strreset_enable;
 };
 
 /* Recover the outer endpoint structure. */
@@ -1332,6 +1309,25 @@ struct sctp_inithdr_host {
        __u32 initial_tsn;
 };
 
+struct sctp_stream_out {
+       __u16   ssn;
+       __u8    state;
+};
+
+struct sctp_stream_in {
+       __u16   ssn;
+};
+
+struct sctp_stream {
+       struct sctp_stream_out *out;
+       struct sctp_stream_in *in;
+       __u16 outcnt;
+       __u16 incnt;
+};
+
+#define SCTP_STREAM_CLOSED             0x00
+#define SCTP_STREAM_OPEN               0x01
+
 /* SCTP_GET_ASSOC_STATS counters */
 struct sctp_priv_assoc_stats {
        /* Maximum observed rto in the association during subsequent
@@ -1519,6 +1515,7 @@ struct sctp_association {
                        hostname_address:1, /* Peer understands DNS addresses? */
                        asconf_capable:1,   /* Does peer support ADDIP? */
                        prsctp_capable:1,   /* Can peer do PR-SCTP? */
+                       reconf_capable:1,   /* Can peer do RE-CONFIG? */
                        auth_capable:1;     /* Is peer doing SCTP-AUTH? */
 
                /* sack_needed : This flag indicates if the next received
@@ -1747,8 +1744,8 @@ struct sctp_association {
        /* Default receive parameters */
        __u32 default_rcv_context;
 
-       /* This tracks outbound ssn for a given stream.  */
-       struct sctp_ssnmap *ssnmap;
+       /* Stream arrays */
+       struct sctp_stream *stream;
 
        /* All outbound chunks go through this structure.  */
        struct sctp_outq outqueue;
@@ -1878,7 +1875,16 @@ struct sctp_association {
 
        __u8 need_ecne:1,       /* Need to send an ECNE Chunk? */
             temp:1,            /* Is it a temporary association? */
-            prsctp_enable:1;
+            prsctp_enable:1,
+            reconf_enable:1;
+
+       __u8 strreset_enable;
+       __u8 strreset_outstanding; /* in-flight request param count */
+
+       __u32 strreset_outseq; /* Update after receiving response */
+       __u32 strreset_inseq; /* Update after receiving request */
+
+       struct sctp_chunk *strreset_chunk; /* save request chunk */
 
        struct sctp_priv_assoc_stats stats;
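
With the ssnmap gone, per-stream SSNs live directly in the out/in arrays of struct sctp_stream, and the sctp_ssn_* accessors earlier in this file take the array name as a bare token. A small usage sketch (real code compares SSNs modulo 2^16; this is illustrative):

/* 'out'/'in' below are tokens selecting the array, not variables. */
static void example_ssn_usage(struct sctp_stream *stream, __u16 sid,
			      __u16 ssn)
{
	__u16 cur = sctp_ssn_peek(stream, in, sid);	/* read only */

	if (ssn >= cur)			/* real code handles wraparound */
		sctp_ssn_skip(stream, in, sid, ssn);	/* fast-forward */

	sctp_ssn_next(stream, out, sid);	/* read, then increment */
}
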
 
diff --git a/include/net/smc.h b/include/net/smc.h
new file mode 100644 (file)
index 0000000..12d2635
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ *  Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ *  Definitions for the SMC module (socket related)
+ *
+ *  Copyright IBM Corp. 2016
+ *
+ *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+#ifndef _SMC_H
+#define _SMC_H
+
+struct smc_hashinfo {
+       rwlock_t lock;
+       struct hlist_head ht;
+};
+
+int smc_hash_sk(struct sock *sk);
+void smc_unhash_sk(struct sock *sk);
+#endif /* _SMC_H */
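
A guess at how smc_hash_sk() plausibly uses the new table — take the writer lock and link the sock — purely for illustration, not the actual net/smc implementation:

/* Guess at the implementation; not the actual net/smc code. */
int example_smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	sk_add_node(sk, &h->ht);	/* takes a reference on sk */
	write_unlock_bh(&h->lock);
	return 0;
}
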
index c4f5e6fca17cf4e0029080410202cb66ce0fad37..9ccefa5c5487863e69831c3bf45e4f0edf78eb65 100644 (file)
@@ -70,6 +70,7 @@
 #include <net/checksum.h>
 #include <net/tcp_states.h>
 #include <linux/net_tstamp.h>
+#include <net/smc.h>
 
 /*
  * This structure really needs to be cleaned up.
@@ -239,6 +240,7 @@ struct sock_common {
   *    @sk_wq: sock wait queue and async head
   *    @sk_rx_dst: receive input route used by early demux
   *    @sk_dst_cache: destination cache
+  *    @sk_dst_pending_confirm: need to confirm neighbour
   *    @sk_policy: flow policy
   *    @sk_receive_queue: incoming packets
   *    @sk_wmem_alloc: transmit queue bytes committed
@@ -392,6 +394,8 @@ struct sock {
        struct sk_buff_head     sk_write_queue;
        __s32                   sk_peek_off;
        int                     sk_write_pending;
+       __u32                   sk_dst_pending_confirm;
+       /* Note: 32bit hole on 64bit arches */
        long                    sk_sndtimeo;
        struct timer_list       sk_timer;
        __u32                   sk_priority;
@@ -543,8 +547,7 @@ static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
 
 static inline struct sock *sk_next(const struct sock *sk)
 {
-       return sk->sk_node.next ?
-               hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
+       return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
 }
 
 static inline struct sock *sk_nulls_next(const struct sock *sk)
@@ -986,6 +989,7 @@ struct request_sock_ops;
 struct timewait_sock_ops;
 struct inet_hashinfo;
 struct raw_hashinfo;
+struct smc_hashinfo;
 struct module;
 
 /*
@@ -1024,6 +1028,7 @@ struct proto {
        int                     (*getsockopt)(struct sock *sk, int level,
                                        int optname, char __user *optval,
                                        int __user *option);
+       void                    (*keepalive)(struct sock *sk, int valbool);
 #ifdef CONFIG_COMPAT
        int                     (*compat_setsockopt)(struct sock *sk,
                                        int level,
@@ -1093,6 +1098,7 @@ struct proto {
                struct inet_hashinfo    *hashinfo;
                struct udp_table        *udp_table;
                struct raw_hashinfo     *raw_hash;
+               struct smc_hashinfo     *smc_hash;
        } h;
 
        struct module           *owner;
@@ -1531,7 +1537,7 @@ void sock_efree(struct sk_buff *skb);
 #ifdef CONFIG_INET
 void sock_edemux(struct sk_buff *skb);
 #else
-#define sock_edemux(skb) sock_efree(skb)
+#define sock_edemux sock_efree
 #endif
 
 int sock_setsockopt(struct socket *sock, int level, int op,
@@ -1761,6 +1767,7 @@ static inline void dst_negative_advice(struct sock *sk)
                if (ndst != dst) {
                        rcu_assign_pointer(sk->sk_dst_cache, ndst);
                        sk_tx_queue_clear(sk);
+                       sk->sk_dst_pending_confirm = 0;
                }
        }
 }
@@ -1771,6 +1778,7 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
        struct dst_entry *old_dst;
 
        sk_tx_queue_clear(sk);
+       sk->sk_dst_pending_confirm = 0;
        /*
         * This can be called while sk is owned by the caller only,
         * with no state that can be checked in a rcu_dereference_check() cond
@@ -1786,6 +1794,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst)
        struct dst_entry *old_dst;
 
        sk_tx_queue_clear(sk);
+       sk->sk_dst_pending_confirm = 0;
        old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
        dst_release(old_dst);
 }
@@ -1806,6 +1815,26 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
 
 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
 
+static inline void sk_dst_confirm(struct sock *sk)
+{
+       if (!sk->sk_dst_pending_confirm)
+               sk->sk_dst_pending_confirm = 1;
+}
+
+static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
+{
+       if (skb_get_dst_pending_confirm(skb)) {
+               struct sock *sk = skb->sk;
+               unsigned long now = jiffies;
+
+               /* avoid dirtying neighbour */
+               if (n->confirmed != now)
+                       n->confirmed = now;
+               if (sk && sk->sk_dst_pending_confirm)
+                       sk->sk_dst_pending_confirm = 0;
+       }
+}
+
 bool sk_mc_loop(struct sock *sk);
 
 static inline bool sk_can_gso(const struct sock *sk)
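
The split above is deliberately cheap: a protocol calls sk_dst_confirm() when it sees forward progress, setting only a socket-private flag, and the neighbour entry is dirtied later on the output path via sock_confirm_neigh(), at most once per jiffy. A sketch of the protocol side (illustrative):

/* Protocol side: mark progress cheaply; the output path does the rest. */
static void example_on_forward_progress(struct sock *sk)
{
	sk_dst_confirm(sk);	/* sets sk_dst_pending_confirm only */
}
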
index eba80c4fc56fddc907c1df5a063a3e143e1ef13c..929d6af321cde71a509577cb14747ecd5b77ca8c 100644 (file)
@@ -46,8 +46,10 @@ enum switchdev_attr_id {
        SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
        SWITCHDEV_ATTR_ID_PORT_STP_STATE,
        SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
+       SWITCHDEV_ATTR_ID_PORT_MROUTER,
        SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
        SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
+       SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
 };
 
 struct switchdev_attr {
@@ -60,8 +62,10 @@ struct switchdev_attr {
                struct netdev_phys_item_id ppid;        /* PORT_PARENT_ID */
                u8 stp_state;                           /* PORT_STP_STATE */
                unsigned long brport_flags;             /* PORT_BRIDGE_FLAGS */
+               bool mrouter;                           /* PORT_MROUTER */
                clock_t ageing_time;                    /* BRIDGE_AGEING_TIME */
                bool vlan_filtering;                    /* BRIDGE_VLAN_FILTERING */
+               bool mc_disabled;                       /* MC_DISABLED */
        } u;
 };
 
index 9fd2bea0a6e0632c287a8386844dccff6b7c4c9f..30ba459ddd34c7e8664abeeafcf3926d39efc17d 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/rtnetlink.h>
 #include <linux/module.h>
 
-#define IFE_METAHDRLEN 2
 struct tcf_ife_info {
        struct tc_action common;
        u8 eth_dst[ETH_ALEN];
@@ -45,8 +44,6 @@ struct tcf_meta_ops {
 
 int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi);
 int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi);
-int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
-                       const void *dval);
 int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
 int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
 int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
index 29e38d6823df88adb05275c728c052009000c4ae..dfbd6ee0bc7cd196c052e700da43deddb8d1dfef 100644 (file)
@@ -3,11 +3,17 @@
 
 #include <net/act_api.h>
 
+struct tcf_pedit_key_ex {
+       enum pedit_header_type htype;
+       enum pedit_cmd cmd;
+};
+
 struct tcf_pedit {
        struct tc_action        common;
        unsigned char           tcfp_nkeys;
        unsigned char           tcfp_flags;
        struct tc_pedit_key     *tcfp_keys;
+       struct tcf_pedit_key_ex *tcfp_keys_ex;
 };
 #define to_pedit(a) ((struct tcf_pedit *)a)
 
diff --git a/include/net/tc_act/tc_sample.h b/include/net/tc_act/tc_sample.h
new file mode 100644 (file)
index 0000000..89e9305
--- /dev/null
@@ -0,0 +1,50 @@
+#ifndef __NET_TC_SAMPLE_H
+#define __NET_TC_SAMPLE_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_sample.h>
+#include <net/psample.h>
+
+struct tcf_sample {
+       struct tc_action common;
+       u32 rate;
+       bool truncate;
+       u32 trunc_size;
+       struct psample_group __rcu *psample_group;
+       u32 psample_group_num;
+       struct list_head tcfm_list;
+       struct rcu_head rcu;
+};
+#define to_sample(a) ((struct tcf_sample *)a)
+
+static inline bool is_tcf_sample(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+       return a->ops && a->ops->type == TCA_ACT_SAMPLE;
+#else
+       return false;
+#endif
+}
+
+static inline __u32 tcf_sample_rate(const struct tc_action *a)
+{
+       return to_sample(a)->rate;
+}
+
+static inline bool tcf_sample_truncate(const struct tc_action *a)
+{
+       return to_sample(a)->truncate;
+}
+
+static inline int tcf_sample_trunc_size(const struct tc_action *a)
+{
+       return to_sample(a)->trunc_size;
+}
+
+static inline struct psample_group *
+tcf_sample_psample_group(const struct tc_action *a)
+{
+       return rcu_dereference(to_sample(a)->psample_group);
+}
+
+#endif /* __NET_TC_SAMPLE_H */
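
A sketch of how a driver parsing offloaded actions might recognise act_sample and extract its parameters (the hook name is invented; note that tcf_sample_psample_group() must be called under rcu_read_lock()):

/* Invented driver hook: recognise act_sample among offloaded actions. */
static int example_parse_sample_action(const struct tc_action *a)
{
	u32 rate, trunc = 0;

	if (!is_tcf_sample(a))
		return -EOPNOTSUPP;

	rate = tcf_sample_rate(a);
	if (tcf_sample_truncate(a))
		trunc = tcf_sample_trunc_size(a);

	/* tcf_sample_psample_group() would need rcu_read_lock() here */
	/* ... program the hardware sampler with rate/trunc ... */
	return rate ? 0 : -EINVAL;
}
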
index 6061963cca98ed84ef09f788ee0ebdc3867a07c8..6ec4ea652f3f55e53675dbe09f29599af179c41a 100644 (file)
@@ -143,6 +143,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
                                                         * for local resources.
                                                         */
+#define TCP_REO_TIMEOUT_MIN    (2000) /* Min RACK reordering timeout in usec */
 
 #define TCP_KEEPALIVE_TIME     (120*60*HZ)     /* two hours */
 #define TCP_KEEPALIVE_PROBES   9               /* Max of 9 keepalive probes    */
@@ -231,7 +232,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
  */
 #define        TFO_SERVER_WO_SOCKOPT1  0x400
 
-extern struct inet_timewait_death_row tcp_death_row;
 
 /* sysctl variables for tcp */
 extern int sysctl_tcp_timestamps;
@@ -262,6 +262,9 @@ extern int sysctl_tcp_slow_start_after_idle;
 extern int sysctl_tcp_thin_linear_timeouts;
 extern int sysctl_tcp_thin_dupack;
 extern int sysctl_tcp_early_retrans;
+extern int sysctl_tcp_recovery;
+#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
+
 extern int sysctl_tcp_limit_output_bytes;
 extern int sysctl_tcp_challenge_ack_limit;
 extern int sysctl_tcp_min_tso_segs;
@@ -398,6 +401,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 int tcp_child_process(struct sock *parent, struct sock *child,
                      struct sk_buff *skb);
 void tcp_enter_loss(struct sock *sk);
+void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
 void tcp_clear_retrans(struct tcp_sock *tp);
 void tcp_update_metrics(struct sock *sk);
 void tcp_init_metrics(struct sock *sk);
@@ -542,6 +546,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
 void tcp_retransmit_timer(struct sock *sk);
 void tcp_xmit_retransmit_queue(struct sock *);
 void tcp_simple_retransmit(struct sock *);
+void tcp_enter_recovery(struct sock *sk, bool ece_ack);
 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
 int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
 
@@ -560,7 +565,6 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb,
                             const struct sk_buff *next_skb);
 
 /* tcp_input.c */
-void tcp_resume_early_retransmit(struct sock *sk);
 void tcp_rearm_rto(struct sock *sk);
 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
 void tcp_reset(struct sock *sk);
@@ -1032,23 +1036,6 @@ static inline void tcp_enable_fack(struct tcp_sock *tp)
        tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
 }
 
-/* TCP early-retransmit (ER) is similar to but more conservative than
- * the thin-dupack feature.  Enable ER only if thin-dupack is disabled.
- */
-static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
-{
-       struct net *net = sock_net((struct sock *)tp);
-
-       tp->do_early_retrans = sysctl_tcp_early_retrans &&
-               sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
-               net->ipv4.sysctl_tcp_reordering == 3;
-}
-
-static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
-{
-       tp->do_early_retrans = 0;
-}
-
 static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
 {
        return tp->sacked_out + tp->lost_out;
@@ -1506,6 +1493,9 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                              struct tcp_fastopen_cookie *foc,
                              struct dst_entry *dst);
 void tcp_fastopen_init_key_once(bool publish);
+bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
+                            struct tcp_fastopen_cookie *cookie);
+bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
 #define TCP_FASTOPEN_KEY_LENGTH 16
 
 /* Fastopen key context */
@@ -1857,17 +1847,11 @@ void tcp_v4_init(void);
 void tcp_init(void);
 
 /* tcp_recovery.c */
-
-/* Flags to enable various loss recovery features. See below */
-extern int sysctl_tcp_recovery;
-
-/* Use TCP RACK to detect (some) tail and retransmit losses */
-#define TCP_RACK_LOST_RETRANS  0x1
-
-extern int tcp_rack_mark_lost(struct sock *sk);
-
-extern void tcp_rack_advance(struct tcp_sock *tp,
-                            const struct skb_mstamp *xmit_time, u8 sacked);
+extern void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now);
+extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
+                            const struct skb_mstamp *xmit_time,
+                            const struct skb_mstamp *ack_time);
+extern void tcp_rack_reo_timeout(struct sock *sk);
 
 /*
  * Save and compile IPv4 options, return a pointer to it
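
sysctl_tcp_recovery, moved here from tcp_recovery.c, is a plain bit mask; a trivial sketch of the gate, mirroring how the RACK code presumably tests it:

/* Sketch of the gate on RACK loss detection. */
static inline bool example_rack_enabled(void)
{
	return sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION;
}
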
index 1661791e8ca19bbc8f59e5c19263486b24f24b45..c9d8b8e848e05c2e7228f287f88ccdb57b2e10c2 100644 (file)
@@ -204,7 +204,6 @@ static inline void udp_lib_close(struct sock *sk, long timeout)
 }
 
 int udp_lib_get_port(struct sock *sk, unsigned short snum,
-                    int (*)(const struct sock *, const struct sock *, bool),
                     unsigned int hash2_nulladdr);
 
 u32 udp_flow_hashrnd(void);
index 31947b9c21d60e36ae91b08ca96360dc6a283e1e..14d82bf16692a6d0ed66721b1548ba152dbc18de 100644 (file)
@@ -213,6 +213,8 @@ struct xfrm_state {
        /* Last used time */
        unsigned long           lastused;
 
+       struct page_frag xfrag;
+
        /* Reference to data common to all the instances of this
         * transformer. */
        const struct xfrm_type  *type;
@@ -278,9 +280,7 @@ struct net_device;
 struct xfrm_type;
 struct xfrm_dst;
 struct xfrm_policy_afinfo {
-       unsigned short          family;
        struct dst_ops          *dst_ops;
-       void                    (*garbage_collect)(struct net *net);
        struct dst_entry        *(*dst_lookup)(struct net *net,
                                               int tos, int oif,
                                               const xfrm_address_t *saddr,
@@ -301,8 +301,8 @@ struct xfrm_policy_afinfo {
        struct dst_entry        *(*blackhole_route)(struct net *net, struct dst_entry *orig);
 };
 
-int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
-int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
+int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family);
+void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
 void km_policy_notify(struct xfrm_policy *xp, int dir,
                      const struct km_event *c);
 void km_state_notify(struct xfrm_state *x, const struct km_event *c);
@@ -343,17 +343,16 @@ struct xfrm_state_afinfo {
 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
-void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
+struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
 
 struct xfrm_input_afinfo {
        unsigned int            family;
-       struct module           *owner;
        int                     (*callback)(struct sk_buff *skb, u8 protocol,
                                            int err);
 };
 
-int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo);
-int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo);
+int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
+int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
 
 void xfrm_state_delete_tunnel(struct xfrm_state *x);
 
@@ -499,6 +498,7 @@ struct xfrm_tmpl {
 };
 
 #define XFRM_MAX_DEPTH         6
+#define XFRM_MAX_OFFLOAD_DEPTH 1
 
 struct xfrm_policy_walk_entry {
        struct list_head        all;
@@ -682,6 +682,7 @@ struct xfrm_spi_skb_cb {
 
        unsigned int daddroff;
        unsigned int family;
+       __be32 seq;
 };
 
 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
@@ -974,10 +975,41 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
 
 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
 
+struct xfrm_offload {
+       /* Output sequence number for replay protection on offloading. */
+       struct {
+               __u32 low;
+               __u32 hi;
+       } seq;
+
+       __u32                   flags;
+#define        SA_DELETE_REQ           1
+#define        CRYPTO_DONE             2
+#define        CRYPTO_NEXT_DONE        4
+#define        CRYPTO_FALLBACK         8
+#define        XFRM_GSO_SEGMENT        16
+#define        XFRM_GRO                32
+
+       __u32                   status;
+#define CRYPTO_SUCCESS                         1
+#define CRYPTO_GENERIC_ERROR                   2
+#define CRYPTO_TRANSPORT_AH_AUTH_FAILED                4
+#define CRYPTO_TRANSPORT_ESP_AUTH_FAILED       8
+#define CRYPTO_TUNNEL_AH_AUTH_FAILED           16
+#define CRYPTO_TUNNEL_ESP_AUTH_FAILED          32
+#define CRYPTO_INVALID_PACKET_SYNTAX           64
+#define CRYPTO_INVALID_PROTOCOL                        128
+
+       __u8                    proto;
+};
+
 struct sec_path {
        atomic_t                refcnt;
        int                     len;
+       int                     olen;
+
        struct xfrm_state       *xvec[XFRM_MAX_DEPTH];
+       struct xfrm_offload     ovec[XFRM_MAX_OFFLOAD_DEPTH];
 };
 
 static inline int secpath_exists(struct sk_buff *skb)
@@ -1007,6 +1039,7 @@ secpath_put(struct sec_path *sp)
 }
 
 struct sec_path *secpath_dup(struct sec_path *src);
+int secpath_set(struct sk_buff *skb);
 
 static inline void
 secpath_reset(struct sk_buff *skb)
@@ -1168,6 +1201,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
 }
 
 void xfrm_garbage_collect(struct net *net);
+void xfrm_garbage_collect_deferred(struct net *net);
 
 #else
 
@@ -1519,6 +1553,7 @@ int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
                    int encap_type);
 int xfrm4_transport_finish(struct sk_buff *skb, int async);
 int xfrm4_rcv(struct sk_buff *skb);
+int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
 
 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
 {
@@ -1774,6 +1809,15 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
 {
        return skb->sp->xvec[skb->sp->len - 1];
 }
+static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
+{
+       struct sec_path *sp = skb->sp;
+
+       if (!sp || !sp->olen || sp->len != sp->olen)
+               return NULL;
+
+       return &sp->ovec[sp->olen - 1];
+}
 #endif
 
 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
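
On receive, the new accessor pairs each offloaded SA in xvec with its ovec slot, returning NULL unless every sec_path entry carries offload state. A sketch of a caller asking whether hardware already finished the crypto (illustrative):

/* Illustrative: did hardware already finish crypto for this skb? */
static bool example_rx_crypto_done(struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	return xo && (xo->flags & CRYPTO_DONE) &&
	       (xo->status & CRYPTO_SUCCESS);
}
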
index 1beab5532035dc2126405384d44457f183de2a90..4b34c51f859e89406802d9b5b8c3644c81861629 100644 (file)
@@ -160,8 +160,7 @@ static inline int rdma_addr_gid_offset(struct rdma_dev_addr *dev_addr)
 
 static inline u16 rdma_vlan_dev_vlan_id(const struct net_device *dev)
 {
-       return dev->priv_flags & IFF_802_1Q_VLAN ?
-               vlan_dev_vlan_id(dev) : 0xffff;
+       return is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 0xffff;
 }
 
 static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid)
@@ -326,8 +325,7 @@ static inline u16 rdma_get_vlan_id(union ib_gid *dgid)
 
 static inline struct net_device *rdma_vlan_dev_real_dev(const struct net_device *dev)
 {
-       return dev->priv_flags & IFF_802_1Q_VLAN ?
-               vlan_dev_real_dev(dev) : NULL;
+       return is_vlan_dev(dev) ? vlan_dev_real_dev(dev) : NULL;
 }
 
 #endif /* IB_ADDR_H */
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
new file mode 100644 (file)
index 0000000..8b95c16
--- /dev/null
@@ -0,0 +1,184 @@
+/* AFS tracepoints
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM afs
+
+#if !defined(_TRACE_AFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_AFS_H
+
+#include <linux/tracepoint.h>
+
+/*
+ * Define enums for tracing information.
+ */
+#ifndef __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+enum afs_call_trace {
+       afs_call_trace_alloc,
+       afs_call_trace_free,
+       afs_call_trace_put,
+       afs_call_trace_wake,
+       afs_call_trace_work,
+};
+
+#endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */
+
+/*
+ * Declare tracing information enums and their string mappings for display.
+ */
+#define afs_call_traces \
+       EM(afs_call_trace_alloc,                "ALLOC") \
+       EM(afs_call_trace_free,                 "FREE ") \
+       EM(afs_call_trace_put,                  "PUT  ") \
+       EM(afs_call_trace_wake,                 "WAKE ") \
+       E_(afs_call_trace_work,                 "WORK ")
+
+/*
+ * Export enum symbols via userspace.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+afs_call_traces;
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#undef EM
+#undef E_
+#define EM(a, b)       { a, b },
+#define E_(a, b)       { a, b }
+
+TRACE_EVENT(afs_recv_data,
+           TP_PROTO(struct afs_call *call, unsigned count, unsigned offset,
+                    bool want_more, int ret),
+
+           TP_ARGS(call, count, offset, want_more, ret),
+
+           TP_STRUCT__entry(
+                   __field(struct rxrpc_call *,        rxcall          )
+                   __field(struct afs_call *,          call            )
+                   __field(enum afs_call_state,        state           )
+                   __field(unsigned int,               count           )
+                   __field(unsigned int,               offset          )
+                   __field(unsigned short,             unmarshall      )
+                   __field(bool,                       want_more       )
+                   __field(int,                        ret             )
+                            ),
+
+           TP_fast_assign(
+                   __entry->rxcall     = call->rxcall;
+                   __entry->call       = call;
+                   __entry->state      = call->state;
+                   __entry->unmarshall = call->unmarshall;
+                   __entry->count      = count;
+                   __entry->offset     = offset;
+                   __entry->want_more  = want_more;
+                   __entry->ret        = ret;
+                          ),
+
+           TP_printk("c=%p ac=%p s=%u u=%u %u/%u wm=%u ret=%d",
+                     __entry->rxcall,
+                     __entry->call,
+                     __entry->state, __entry->unmarshall,
+                     __entry->offset, __entry->count,
+                     __entry->want_more, __entry->ret)
+           );
+
+TRACE_EVENT(afs_notify_call,
+           TP_PROTO(struct rxrpc_call *rxcall, struct afs_call *call),
+
+           TP_ARGS(rxcall, call),
+
+           TP_STRUCT__entry(
+                   __field(struct rxrpc_call *,        rxcall          )
+                   __field(struct afs_call *,          call            )
+                   __field(enum afs_call_state,        state           )
+                   __field(unsigned short,             unmarshall      )
+                            ),
+
+           TP_fast_assign(
+                   __entry->rxcall     = rxcall;
+                   __entry->call       = call;
+                   __entry->state      = call->state;
+                   __entry->unmarshall = call->unmarshall;
+                          ),
+
+           TP_printk("c=%p ac=%p s=%u u=%u",
+                     __entry->rxcall,
+                     __entry->call,
+                     __entry->state, __entry->unmarshall)
+           );
+
+TRACE_EVENT(afs_cb_call,
+           TP_PROTO(struct afs_call *call),
+
+           TP_ARGS(call),
+
+           TP_STRUCT__entry(
+                   __field(struct rxrpc_call *,        rxcall          )
+                   __field(struct afs_call *,          call            )
+                   __field(const char *,               name            )
+                   __field(u32,                        op              )
+                            ),
+
+           TP_fast_assign(
+                   __entry->rxcall     = call->rxcall;
+                   __entry->call       = call;
+                   __entry->name       = call->type->name;
+                   __entry->op         = call->operation_ID;
+                          ),
+
+           TP_printk("c=%p ac=%p %s o=%u",
+                     __entry->rxcall,
+                     __entry->call,
+                     __entry->name,
+                     __entry->op)
+           );
+
+TRACE_EVENT(afs_call,
+           TP_PROTO(struct afs_call *call, enum afs_call_trace op,
+                    int usage, int outstanding, const void *where),
+
+           TP_ARGS(call, op, usage, outstanding, where),
+
+           TP_STRUCT__entry(
+                   __field(struct afs_call *,          call            )
+                   __field(int,                        op              )
+                   __field(int,                        usage           )
+                   __field(int,                        outstanding     )
+                   __field(const void *,               where           )
+                            ),
+
+           TP_fast_assign(
+                   __entry->call = call;
+                   __entry->op = op;
+                   __entry->usage = usage;
+                   __entry->outstanding = outstanding;
+                   __entry->where = where;
+                          ),
+
+           TP_printk("c=%p %s u=%d o=%d sp=%pSR",
+                     __entry->call,
+                     __print_symbolic(__entry->op, afs_call_traces),
+                     __entry->usage,
+                     __entry->outstanding,
+                     __entry->where)
+           );
+
+#endif /* _TRACE_AFS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
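
The EM()/E_() idiom used above expands one list of (value, string) pairs twice: first into TRACE_DEFINE_ENUM() registrations for the tracing core, then into the table consumed by __print_symbolic(). A condensed, self-contained illustration with invented names:

enum { example_alloc, example_free };	/* invented trace values */

#define example_traces \
	EM(example_alloc,	"ALLOC") \
	E_(example_free,	"FREE ")

/* Pass 1: register the values with the tracing core. */
#undef EM
#undef E_
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
example_traces;

/* Pass 2: rebuild as a { value, string } table. */
#undef EM
#undef E_
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
/* usable as: __print_symbolic(val, example_traces) */
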
diff --git a/include/trace/events/bpf.h b/include/trace/events/bpf.h
new file mode 100644 (file)
index 0000000..c3a53fd
--- /dev/null
@@ -0,0 +1,347 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bpf
+
+#if !defined(_TRACE_BPF_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BPF_H
+
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/fs.h>
+#include <linux/tracepoint.h>
+
+#define __PROG_TYPE_MAP(FN)    \
+       FN(SOCKET_FILTER)       \
+       FN(KPROBE)              \
+       FN(SCHED_CLS)           \
+       FN(SCHED_ACT)           \
+       FN(TRACEPOINT)          \
+       FN(XDP)                 \
+       FN(PERF_EVENT)          \
+       FN(CGROUP_SKB)          \
+       FN(CGROUP_SOCK)         \
+       FN(LWT_IN)              \
+       FN(LWT_OUT)             \
+       FN(LWT_XMIT)
+
+#define __MAP_TYPE_MAP(FN)     \
+       FN(HASH)                \
+       FN(ARRAY)               \
+       FN(PROG_ARRAY)          \
+       FN(PERF_EVENT_ARRAY)    \
+       FN(PERCPU_HASH)         \
+       FN(PERCPU_ARRAY)        \
+       FN(STACK_TRACE)         \
+       FN(CGROUP_ARRAY)        \
+       FN(LRU_HASH)            \
+       FN(LRU_PERCPU_HASH)     \
+       FN(LPM_TRIE)
+
+#define __PROG_TYPE_TP_FN(x)   \
+       TRACE_DEFINE_ENUM(BPF_PROG_TYPE_##x);
+#define __PROG_TYPE_SYM_FN(x)  \
+       { BPF_PROG_TYPE_##x, #x },
+#define __PROG_TYPE_SYM_TAB    \
+       __PROG_TYPE_MAP(__PROG_TYPE_SYM_FN) { -1, 0 }
+__PROG_TYPE_MAP(__PROG_TYPE_TP_FN)
+
+#define __MAP_TYPE_TP_FN(x)    \
+       TRACE_DEFINE_ENUM(BPF_MAP_TYPE_##x);
+#define __MAP_TYPE_SYM_FN(x)   \
+       { BPF_MAP_TYPE_##x, #x },
+#define __MAP_TYPE_SYM_TAB     \
+       __MAP_TYPE_MAP(__MAP_TYPE_SYM_FN) { -1, 0 }
+__MAP_TYPE_MAP(__MAP_TYPE_TP_FN)
+
+DECLARE_EVENT_CLASS(bpf_prog_event,
+
+       TP_PROTO(const struct bpf_prog *prg),
+
+       TP_ARGS(prg),
+
+       TP_STRUCT__entry(
+               __array(u8, prog_tag, 8)
+               __field(u32, type)
+       ),
+
+       TP_fast_assign(
+               BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
+               memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
+               __entry->type = prg->type;
+       ),
+
+       TP_printk("prog=%s type=%s",
+                 __print_hex_str(__entry->prog_tag, 8),
+                 __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB))
+);
+
+DEFINE_EVENT(bpf_prog_event, bpf_prog_get_type,
+
+       TP_PROTO(const struct bpf_prog *prg),
+
+       TP_ARGS(prg)
+);
+
+DEFINE_EVENT(bpf_prog_event, bpf_prog_put_rcu,
+
+       TP_PROTO(const struct bpf_prog *prg),
+
+       TP_ARGS(prg)
+);
+
+TRACE_EVENT(bpf_prog_load,
+
+       TP_PROTO(const struct bpf_prog *prg, int ufd),
+
+       TP_ARGS(prg, ufd),
+
+       TP_STRUCT__entry(
+               __array(u8, prog_tag, 8)
+               __field(u32, type)
+               __field(int, ufd)
+       ),
+
+       TP_fast_assign(
+               BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
+               memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
+               __entry->type = prg->type;
+               __entry->ufd  = ufd;
+       ),
+
+       TP_printk("prog=%s type=%s ufd=%d",
+                 __print_hex_str(__entry->prog_tag, 8),
+                 __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB),
+                 __entry->ufd)
+);
+
+TRACE_EVENT(bpf_map_create,
+
+       TP_PROTO(const struct bpf_map *map, int ufd),
+
+       TP_ARGS(map, ufd),
+
+       TP_STRUCT__entry(
+               __field(u32, type)
+               __field(u32, size_key)
+               __field(u32, size_value)
+               __field(u32, max_entries)
+               __field(u32, flags)
+               __field(int, ufd)
+       ),
+
+       TP_fast_assign(
+               __entry->type        = map->map_type;
+               __entry->size_key    = map->key_size;
+               __entry->size_value  = map->value_size;
+               __entry->max_entries = map->max_entries;
+               __entry->flags       = map->map_flags;
+               __entry->ufd         = ufd;
+       ),
+
+       TP_printk("map type=%s ufd=%d key=%u val=%u max=%u flags=%x",
+                 __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+                 __entry->ufd, __entry->size_key, __entry->size_value,
+                 __entry->max_entries, __entry->flags)
+);
+
+DECLARE_EVENT_CLASS(bpf_obj_prog,
+
+       TP_PROTO(const struct bpf_prog *prg, int ufd,
+                const struct filename *pname),
+
+       TP_ARGS(prg, ufd, pname),
+
+       TP_STRUCT__entry(
+               __array(u8, prog_tag, 8)
+               __field(int, ufd)
+               __string(path, pname->name)
+       ),
+
+       TP_fast_assign(
+               BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
+               memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
+               __assign_str(path, pname->name);
+               __entry->ufd = ufd;
+       ),
+
+       TP_printk("prog=%s path=%s ufd=%d",
+                 __print_hex_str(__entry->prog_tag, 8),
+                 __get_str(path), __entry->ufd)
+);
+
+DEFINE_EVENT(bpf_obj_prog, bpf_obj_pin_prog,
+
+       TP_PROTO(const struct bpf_prog *prg, int ufd,
+                const struct filename *pname),
+
+       TP_ARGS(prg, ufd, pname)
+);
+
+DEFINE_EVENT(bpf_obj_prog, bpf_obj_get_prog,
+
+       TP_PROTO(const struct bpf_prog *prg, int ufd,
+                const struct filename *pname),
+
+       TP_ARGS(prg, ufd, pname)
+);
+
+DECLARE_EVENT_CLASS(bpf_obj_map,
+
+       TP_PROTO(const struct bpf_map *map, int ufd,
+                const struct filename *pname),
+
+       TP_ARGS(map, ufd, pname),
+
+       TP_STRUCT__entry(
+               __field(u32, type)
+               __field(int, ufd)
+               __string(path, pname->name)
+       ),
+
+       TP_fast_assign(
+               __assign_str(path, pname->name);
+               __entry->type = map->map_type;
+               __entry->ufd  = ufd;
+       ),
+
+       TP_printk("map type=%s ufd=%d path=%s",
+                 __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+                 __entry->ufd, __get_str(path))
+);
+
+DEFINE_EVENT(bpf_obj_map, bpf_obj_pin_map,
+
+       TP_PROTO(const struct bpf_map *map, int ufd,
+                const struct filename *pname),
+
+       TP_ARGS(map, ufd, pname)
+);
+
+DEFINE_EVENT(bpf_obj_map, bpf_obj_get_map,
+
+       TP_PROTO(const struct bpf_map *map, int ufd,
+                const struct filename *pname),
+
+       TP_ARGS(map, ufd, pname)
+);
+
+DECLARE_EVENT_CLASS(bpf_map_keyval,
+
+       TP_PROTO(const struct bpf_map *map, int ufd,
+                const void *key, const void *val),
+
+       TP_ARGS(map, ufd, key, val),
+
+       TP_STRUCT__entry(
+               __field(u32, type)
+               __field(u32, key_len)
+               __dynamic_array(u8, key, map->key_size)
+               __field(bool, key_trunc)
+               __field(u32, val_len)
+               __dynamic_array(u8, val, map->value_size)
+               __field(bool, val_trunc)
+               __field(int, ufd)
+       ),
+
+       TP_fast_assign(
+               memcpy(__get_dynamic_array(key), key, map->key_size);
+               memcpy(__get_dynamic_array(val), val, map->value_size);
+               __entry->type      = map->map_type;
+               __entry->key_len   = min(map->key_size, 16U);
+               __entry->key_trunc = map->key_size != __entry->key_len;
+               __entry->val_len   = min(map->value_size, 16U);
+               __entry->val_trunc = map->value_size != __entry->val_len;
+               __entry->ufd       = ufd;
+       ),
+
+       TP_printk("map type=%s ufd=%d key=[%s%s] val=[%s%s]",
+                 __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+                 __entry->ufd,
+                 __print_hex(__get_dynamic_array(key), __entry->key_len),
+                 __entry->key_trunc ? " ..." : "",
+                 __print_hex(__get_dynamic_array(val), __entry->val_len),
+                 __entry->val_trunc ? " ..." : "")
+);
+
+DEFINE_EVENT(bpf_map_keyval, bpf_map_lookup_elem,
+
+       TP_PROTO(const struct bpf_map *map, int ufd,
+                const void *key, const void *val),
+
+       TP_ARGS(map, ufd, key, val)
+);
+
+DEFINE_EVENT(bpf_map_keyval, bpf_map_update_elem,
+
+       TP_PROTO(const struct bpf_map *map, int ufd,
+                const void *key, const void *val),
+
+       TP_ARGS(map, ufd, key, val)
+);
+
+TRACE_EVENT(bpf_map_delete_elem,
+
+       TP_PROTO(const struct bpf_map *map, int ufd,
+                const void *key),
+
+       TP_ARGS(map, ufd, key),
+
+       TP_STRUCT__entry(
+               __field(u32, type)
+               __field(u32, key_len)
+               __dynamic_array(u8, key, map->key_size)
+               __field(bool, key_trunc)
+               __field(int, ufd)
+       ),
+
+       TP_fast_assign(
+               memcpy(__get_dynamic_array(key), key, map->key_size);
+               __entry->type      = map->map_type;
+               __entry->key_len   = min(map->key_size, 16U);
+               __entry->key_trunc = map->key_size != __entry->key_len;
+               __entry->ufd       = ufd;
+       ),
+
+       TP_printk("map type=%s ufd=%d key=[%s%s]",
+                 __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+                 __entry->ufd,
+                 __print_hex(__get_dynamic_array(key), __entry->key_len),
+                 __entry->key_trunc ? " ..." : "")
+);
+
+TRACE_EVENT(bpf_map_next_key,
+
+       TP_PROTO(const struct bpf_map *map, int ufd,
+                const void *key, const void *key_next),
+
+       TP_ARGS(map, ufd, key, key_next),
+
+       TP_STRUCT__entry(
+               __field(u32, type)
+               __field(u32, key_len)
+               __dynamic_array(u8, key, map->key_size)
+               __dynamic_array(u8, nxt, map->key_size)
+               __field(bool, key_trunc)
+               __field(int, ufd)
+       ),
+
+       TP_fast_assign(
+               memcpy(__get_dynamic_array(key), key, map->key_size);
+               memcpy(__get_dynamic_array(nxt), key_next, map->key_size);
+               __entry->type      = map->map_type;
+               __entry->key_len   = min(map->key_size, 16U);
+               __entry->key_trunc = map->key_size != __entry->key_len;
+               __entry->ufd       = ufd;
+       ),
+
+       TP_printk("map type=%s ufd=%d key=[%s%s] next=[%s%s]",
+                 __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+                 __entry->ufd,
+                 __print_hex(__get_dynamic_array(key), __entry->key_len),
+                 __entry->key_trunc ? " ..." : "",
+                 __print_hex(__get_dynamic_array(nxt), __entry->key_len),
+                 __entry->key_trunc ? " ..." : "")
+);
+
+#endif /* _TRACE_BPF_H */
+
+#include <trace/define_trace.h>
index 0383e5e9a0f30323b7586f94cf0c6c3dc030d6d8..593f586545eba9477006405d288a731f67e4372e 100644 (file)
 
 #include <linux/tracepoint.h>
 
+/*
+ * Define enums for tracing information.
+ *
+ * These should all be kept sorted, making it easier to match the string
+ * mapping tables further on.
+ */
+#ifndef __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+enum rxrpc_skb_trace {
+       rxrpc_skb_rx_cleaned,
+       rxrpc_skb_rx_freed,
+       rxrpc_skb_rx_got,
+       rxrpc_skb_rx_lost,
+       rxrpc_skb_rx_purged,
+       rxrpc_skb_rx_received,
+       rxrpc_skb_rx_rotated,
+       rxrpc_skb_rx_seen,
+       rxrpc_skb_tx_cleaned,
+       rxrpc_skb_tx_freed,
+       rxrpc_skb_tx_got,
+       rxrpc_skb_tx_new,
+       rxrpc_skb_tx_rotated,
+       rxrpc_skb_tx_seen,
+};
+
+enum rxrpc_conn_trace {
+       rxrpc_conn_got,
+       rxrpc_conn_new_client,
+       rxrpc_conn_new_service,
+       rxrpc_conn_put_client,
+       rxrpc_conn_put_service,
+       rxrpc_conn_queued,
+       rxrpc_conn_seen,
+};
+
+enum rxrpc_client_trace {
+       rxrpc_client_activate_chans,
+       rxrpc_client_alloc,
+       rxrpc_client_chan_activate,
+       rxrpc_client_chan_disconnect,
+       rxrpc_client_chan_pass,
+       rxrpc_client_chan_unstarted,
+       rxrpc_client_cleanup,
+       rxrpc_client_count,
+       rxrpc_client_discard,
+       rxrpc_client_duplicate,
+       rxrpc_client_exposed,
+       rxrpc_client_replace,
+       rxrpc_client_to_active,
+       rxrpc_client_to_culled,
+       rxrpc_client_to_idle,
+       rxrpc_client_to_inactive,
+       rxrpc_client_to_upgrade,
+       rxrpc_client_to_waiting,
+       rxrpc_client_uncount,
+};
+
+enum rxrpc_call_trace {
+       rxrpc_call_connected,
+       rxrpc_call_error,
+       rxrpc_call_got,
+       rxrpc_call_got_kernel,
+       rxrpc_call_got_userid,
+       rxrpc_call_new_client,
+       rxrpc_call_new_service,
+       rxrpc_call_put,
+       rxrpc_call_put_kernel,
+       rxrpc_call_put_noqueue,
+       rxrpc_call_put_userid,
+       rxrpc_call_queued,
+       rxrpc_call_queued_ref,
+       rxrpc_call_release,
+       rxrpc_call_seen,
+};
+
+enum rxrpc_transmit_trace {
+       rxrpc_transmit_await_reply,
+       rxrpc_transmit_end,
+       rxrpc_transmit_queue,
+       rxrpc_transmit_queue_last,
+       rxrpc_transmit_rotate,
+       rxrpc_transmit_rotate_last,
+       rxrpc_transmit_wait,
+};
+
+enum rxrpc_receive_trace {
+       rxrpc_receive_end,
+       rxrpc_receive_front,
+       rxrpc_receive_incoming,
+       rxrpc_receive_queue,
+       rxrpc_receive_queue_last,
+       rxrpc_receive_rotate,
+};
+
+enum rxrpc_recvmsg_trace {
+       rxrpc_recvmsg_cont,
+       rxrpc_recvmsg_data_return,
+       rxrpc_recvmsg_dequeue,
+       rxrpc_recvmsg_enter,
+       rxrpc_recvmsg_full,
+       rxrpc_recvmsg_hole,
+       rxrpc_recvmsg_next,
+       rxrpc_recvmsg_return,
+       rxrpc_recvmsg_terminal,
+       rxrpc_recvmsg_to_be_accepted,
+       rxrpc_recvmsg_wait,
+};
+
+enum rxrpc_rtt_tx_trace {
+       rxrpc_rtt_tx_data,
+       rxrpc_rtt_tx_ping,
+};
+
+enum rxrpc_rtt_rx_trace {
+       rxrpc_rtt_rx_ping_response,
+       rxrpc_rtt_rx_requested_ack,
+};
+
+enum rxrpc_timer_trace {
+       rxrpc_timer_begin,
+       rxrpc_timer_expired,
+       rxrpc_timer_init_for_reply,
+       rxrpc_timer_init_for_send_reply,
+       rxrpc_timer_set_for_ack,
+       rxrpc_timer_set_for_ping,
+       rxrpc_timer_set_for_resend,
+       rxrpc_timer_set_for_send,
+};
+
+enum rxrpc_propose_ack_trace {
+       rxrpc_propose_ack_client_tx_end,
+       rxrpc_propose_ack_input_data,
+       rxrpc_propose_ack_ping_for_lost_ack,
+       rxrpc_propose_ack_ping_for_lost_reply,
+       rxrpc_propose_ack_ping_for_params,
+       rxrpc_propose_ack_processing_op,
+       rxrpc_propose_ack_respond_to_ack,
+       rxrpc_propose_ack_respond_to_ping,
+       rxrpc_propose_ack_retry_tx,
+       rxrpc_propose_ack_rotate_rx,
+       rxrpc_propose_ack_terminal_ack,
+};
+
+enum rxrpc_propose_ack_outcome {
+       rxrpc_propose_ack_subsume,
+       rxrpc_propose_ack_update,
+       rxrpc_propose_ack_use,
+};
+
+enum rxrpc_congest_change {
+       rxrpc_cong_begin_retransmission,
+       rxrpc_cong_cleared_nacks,
+       rxrpc_cong_new_low_nack,
+       rxrpc_cong_no_change,
+       rxrpc_cong_progress,
+       rxrpc_cong_retransmit_again,
+       rxrpc_cong_rtt_window_end,
+       rxrpc_cong_saw_nack,
+};
+
+#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
+
+/*
+ * Declare tracing information enums and their string mappings for display.
+ */
+#define rxrpc_skb_traces \
+       EM(rxrpc_skb_rx_cleaned,                "Rx CLN") \
+       EM(rxrpc_skb_rx_freed,                  "Rx FRE") \
+       EM(rxrpc_skb_rx_got,                    "Rx GOT") \
+       EM(rxrpc_skb_rx_lost,                   "Rx *L*") \
+       EM(rxrpc_skb_rx_purged,                 "Rx PUR") \
+       EM(rxrpc_skb_rx_received,               "Rx RCV") \
+       EM(rxrpc_skb_rx_rotated,                "Rx ROT") \
+       EM(rxrpc_skb_rx_seen,                   "Rx SEE") \
+       EM(rxrpc_skb_tx_cleaned,                "Tx CLN") \
+       EM(rxrpc_skb_tx_freed,                  "Tx FRE") \
+       EM(rxrpc_skb_tx_got,                    "Tx GOT") \
+       EM(rxrpc_skb_tx_new,                    "Tx NEW") \
+       EM(rxrpc_skb_tx_rotated,                "Tx ROT") \
+       E_(rxrpc_skb_tx_seen,                   "Tx SEE")
+
+#define rxrpc_conn_traces \
+       EM(rxrpc_conn_got,                      "GOT") \
+       EM(rxrpc_conn_new_client,               "NWc") \
+       EM(rxrpc_conn_new_service,              "NWs") \
+       EM(rxrpc_conn_put_client,               "PTc") \
+       EM(rxrpc_conn_put_service,              "PTs") \
+       EM(rxrpc_conn_queued,                   "QUE") \
+       E_(rxrpc_conn_seen,                     "SEE")
+
+#define rxrpc_client_traces \
+       EM(rxrpc_client_activate_chans,         "Activa") \
+       EM(rxrpc_client_alloc,                  "Alloc ") \
+       EM(rxrpc_client_chan_activate,          "ChActv") \
+       EM(rxrpc_client_chan_disconnect,        "ChDisc") \
+       EM(rxrpc_client_chan_pass,              "ChPass") \
+       EM(rxrpc_client_chan_unstarted,         "ChUnst") \
+       EM(rxrpc_client_cleanup,                "Clean ") \
+       EM(rxrpc_client_count,                  "Count ") \
+       EM(rxrpc_client_discard,                "Discar") \
+       EM(rxrpc_client_duplicate,              "Duplic") \
+       EM(rxrpc_client_exposed,                "Expose") \
+       EM(rxrpc_client_replace,                "Replac") \
+       EM(rxrpc_client_to_active,              "->Actv") \
+       EM(rxrpc_client_to_culled,              "->Cull") \
+       EM(rxrpc_client_to_idle,                "->Idle") \
+       EM(rxrpc_client_to_inactive,            "->Inac") \
+       EM(rxrpc_client_to_upgrade,             "->Upgd") \
+       EM(rxrpc_client_to_waiting,             "->Wait") \
+       E_(rxrpc_client_uncount,                "Uncoun")
+
+#define rxrpc_conn_cache_states \
+       EM(RXRPC_CONN_CLIENT_INACTIVE,          "Inac") \
+       EM(RXRPC_CONN_CLIENT_WAITING,           "Wait") \
+       EM(RXRPC_CONN_CLIENT_ACTIVE,            "Actv") \
+       EM(RXRPC_CONN_CLIENT_CULLED,            "Cull") \
+       E_(RXRPC_CONN_CLIENT_IDLE,              "Idle")
+
+#define rxrpc_call_traces \
+       EM(rxrpc_call_connected,                "CON") \
+       EM(rxrpc_call_error,                    "*E*") \
+       EM(rxrpc_call_got,                      "GOT") \
+       EM(rxrpc_call_got_kernel,               "Gke") \
+       EM(rxrpc_call_got_userid,               "Gus") \
+       EM(rxrpc_call_new_client,               "NWc") \
+       EM(rxrpc_call_new_service,              "NWs") \
+       EM(rxrpc_call_put,                      "PUT") \
+       EM(rxrpc_call_put_kernel,               "Pke") \
+       EM(rxrpc_call_put_noqueue,              "PNQ") \
+       EM(rxrpc_call_put_userid,               "Pus") \
+       EM(rxrpc_call_queued,                   "QUE") \
+       EM(rxrpc_call_queued_ref,               "QUR") \
+       EM(rxrpc_call_release,                  "RLS") \
+       E_(rxrpc_call_seen,                     "SEE")
+
+#define rxrpc_transmit_traces \
+       EM(rxrpc_transmit_await_reply,          "AWR") \
+       EM(rxrpc_transmit_end,                  "END") \
+       EM(rxrpc_transmit_queue,                "QUE") \
+       EM(rxrpc_transmit_queue_last,           "QLS") \
+       EM(rxrpc_transmit_rotate,               "ROT") \
+       EM(rxrpc_transmit_rotate_last,          "RLS") \
+       E_(rxrpc_transmit_wait,                 "WAI")
+
+#define rxrpc_receive_traces \
+       EM(rxrpc_receive_end,                   "END") \
+       EM(rxrpc_receive_front,                 "FRN") \
+       EM(rxrpc_receive_incoming,              "INC") \
+       EM(rxrpc_receive_queue,                 "QUE") \
+       EM(rxrpc_receive_queue_last,            "QLS") \
+       E_(rxrpc_receive_rotate,                "ROT")
+
+#define rxrpc_recvmsg_traces \
+       EM(rxrpc_recvmsg_cont,                  "CONT") \
+       EM(rxrpc_recvmsg_data_return,           "DATA") \
+       EM(rxrpc_recvmsg_dequeue,               "DEQU") \
+       EM(rxrpc_recvmsg_enter,                 "ENTR") \
+       EM(rxrpc_recvmsg_full,                  "FULL") \
+       EM(rxrpc_recvmsg_hole,                  "HOLE") \
+       EM(rxrpc_recvmsg_next,                  "NEXT") \
+       EM(rxrpc_recvmsg_return,                "RETN") \
+       EM(rxrpc_recvmsg_terminal,              "TERM") \
+       EM(rxrpc_recvmsg_to_be_accepted,        "TBAC") \
+       E_(rxrpc_recvmsg_wait,                  "WAIT")
+
+#define rxrpc_rtt_tx_traces \
+       EM(rxrpc_rtt_tx_data,                   "DATA") \
+       E_(rxrpc_rtt_tx_ping,                   "PING")
+
+#define rxrpc_rtt_rx_traces \
+       EM(rxrpc_rtt_rx_ping_response,          "PONG") \
+       E_(rxrpc_rtt_rx_requested_ack,          "RACK")
+
+#define rxrpc_timer_traces \
+       EM(rxrpc_timer_begin,                   "Begin ") \
+       EM(rxrpc_timer_expired,                 "*EXPR*") \
+       EM(rxrpc_timer_init_for_reply,          "IniRpl") \
+       EM(rxrpc_timer_init_for_send_reply,     "SndRpl") \
+       EM(rxrpc_timer_set_for_ack,             "SetAck") \
+       EM(rxrpc_timer_set_for_ping,            "SetPng") \
+       EM(rxrpc_timer_set_for_resend,          "SetRTx") \
+       E_(rxrpc_timer_set_for_send,            "SetTx ")
+
+#define rxrpc_propose_ack_traces \
+       EM(rxrpc_propose_ack_client_tx_end,     "ClTxEnd") \
+       EM(rxrpc_propose_ack_input_data,        "DataIn ") \
+       EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
+       EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
+       EM(rxrpc_propose_ack_ping_for_params,   "Params ") \
+       EM(rxrpc_propose_ack_processing_op,     "ProcOp ") \
+       EM(rxrpc_propose_ack_respond_to_ack,    "Rsp2Ack") \
+       EM(rxrpc_propose_ack_respond_to_ping,   "Rsp2Png") \
+       EM(rxrpc_propose_ack_retry_tx,          "RetryTx") \
+       EM(rxrpc_propose_ack_rotate_rx,         "RxAck  ") \
+       E_(rxrpc_propose_ack_terminal_ack,      "ClTerm ")
+
+#define rxrpc_propose_ack_outcomes \
+       EM(rxrpc_propose_ack_subsume,           " Subsume") \
+       EM(rxrpc_propose_ack_update,            " Update") \
+       E_(rxrpc_propose_ack_use,               "")
+
+#define rxrpc_congest_modes \
+       EM(RXRPC_CALL_CONGEST_AVOIDANCE,        "CongAvoid") \
+       EM(RXRPC_CALL_FAST_RETRANSMIT,          "FastReTx ") \
+       EM(RXRPC_CALL_PACKET_LOSS,              "PktLoss  ") \
+       E_(RXRPC_CALL_SLOW_START,               "SlowStart")
+
+#define rxrpc_congest_changes \
+       EM(rxrpc_cong_begin_retransmission,     " Retrans") \
+       EM(rxrpc_cong_cleared_nacks,            " Cleared") \
+       EM(rxrpc_cong_new_low_nack,             " NewLowN") \
+       EM(rxrpc_cong_no_change,                "") \
+       EM(rxrpc_cong_progress,                 " Progres") \
+       EM(rxrpc_cong_retransmit_again,         " ReTxAgn") \
+       EM(rxrpc_cong_rtt_window_end,           " RttWinE") \
+       E_(rxrpc_cong_saw_nack,                 " SawNack")
+
+#define rxrpc_pkts \
+       EM(0,                                   "?00") \
+       EM(RXRPC_PACKET_TYPE_DATA,              "DATA") \
+       EM(RXRPC_PACKET_TYPE_ACK,               "ACK") \
+       EM(RXRPC_PACKET_TYPE_BUSY,              "BUSY") \
+       EM(RXRPC_PACKET_TYPE_ABORT,             "ABORT") \
+       EM(RXRPC_PACKET_TYPE_ACKALL,            "ACKALL") \
+       EM(RXRPC_PACKET_TYPE_CHALLENGE,         "CHALL") \
+       EM(RXRPC_PACKET_TYPE_RESPONSE,          "RESP") \
+       EM(RXRPC_PACKET_TYPE_DEBUG,             "DEBUG") \
+       EM(9,                                   "?09") \
+       EM(10,                                  "?10") \
+       EM(11,                                  "?11") \
+       EM(12,                                  "?12") \
+       EM(RXRPC_PACKET_TYPE_VERSION,           "VERSION") \
+       EM(14,                                  "?14") \
+       E_(15,                                  "?15")
+
+#define rxrpc_ack_names \
+       EM(0,                                   "-0-") \
+       EM(RXRPC_ACK_REQUESTED,                 "REQ") \
+       EM(RXRPC_ACK_DUPLICATE,                 "DUP") \
+       EM(RXRPC_ACK_OUT_OF_SEQUENCE,           "OOS") \
+       EM(RXRPC_ACK_EXCEEDS_WINDOW,            "WIN") \
+       EM(RXRPC_ACK_NOSPACE,                   "MEM") \
+       EM(RXRPC_ACK_PING,                      "PNG") \
+       EM(RXRPC_ACK_PING_RESPONSE,             "PNR") \
+       EM(RXRPC_ACK_DELAY,                     "DLY") \
+       EM(RXRPC_ACK_IDLE,                      "IDL") \
+       E_(RXRPC_ACK__INVALID,                  "-?-")
+
+/*
+ * Export enum symbols to userspace via TRACE_DEFINE_ENUM().
+ */
+#undef EM
+#undef E_
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+rxrpc_skb_traces;
+rxrpc_conn_traces;
+rxrpc_client_traces;
+rxrpc_call_traces;
+rxrpc_transmit_traces;
+rxrpc_receive_traces;
+rxrpc_recvmsg_traces;
+rxrpc_rtt_tx_traces;
+rxrpc_rtt_rx_traces;
+rxrpc_timer_traces;
+rxrpc_propose_ack_traces;
+rxrpc_propose_ack_outcomes;
+rxrpc_congest_changes;
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#undef EM
+#undef E_
+#define EM(a, b)       { a, b },
+#define E_(a, b)       { a, b }
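The EM()/E_() pair above implements a common two-pass X-macro idiom in trace headers: each table is expanded once to register its enum values with the tracing core and once to build the { value, string } table consumed by __print_symbolic(). A minimal sketch with a hypothetical two-entry table (the names below are illustrative only, not part of this header):

    enum { my_trace_first, my_trace_last };

    #define my_traces \
            EM(my_trace_first,      "FST") \
            E_(my_trace_last,       "LST")

    /* Pass 1: register the values with the tracing core. */
    #define EM(a, b) TRACE_DEFINE_ENUM(a);
    #define E_(a, b) TRACE_DEFINE_ENUM(a);
    my_traces;

    /* Pass 2: build the { value, string } table for __print_symbolic(). */
    #undef EM
    #undef E_
    #define EM(a, b) { a, b },
    #define E_(a, b) { a, b }
    /* __print_symbolic(op, my_traces) now maps op to "FST" or "LST". */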
+
 TRACE_EVENT(rxrpc_conn,
            TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
                     int usage, const void *where),
@@ -38,7 +418,7 @@ TRACE_EVENT(rxrpc_conn,
 
            TP_printk("C=%p %s u=%d sp=%pSR",
                      __entry->conn,
-                     rxrpc_conn_traces[__entry->op],
+                     __print_symbolic(__entry->op, rxrpc_conn_traces),
                      __entry->usage,
                      __entry->where)
            );
@@ -70,8 +450,8 @@ TRACE_EVENT(rxrpc_client,
            TP_printk("C=%p h=%2d %s %s i=%08x u=%d",
                      __entry->conn,
                      __entry->channel,
-                     rxrpc_client_traces[__entry->op],
-                     rxrpc_conn_cache_states[__entry->cs],
+                     __print_symbolic(__entry->op, rxrpc_client_traces),
+                     __print_symbolic(__entry->cs, rxrpc_conn_cache_states),
                      __entry->cid,
                      __entry->usage)
            );
@@ -100,7 +480,7 @@ TRACE_EVENT(rxrpc_call,
 
            TP_printk("c=%p %s u=%d sp=%pSR a=%p",
                      __entry->call,
-                     rxrpc_call_traces[__entry->op],
+                     __print_symbolic(__entry->op, rxrpc_call_traces),
                      __entry->usage,
                      __entry->where,
                      __entry->aux)
@@ -130,7 +510,7 @@ TRACE_EVENT(rxrpc_skb,
 
            TP_printk("s=%p %s u=%d m=%d p=%pSR",
                      __entry->skb,
-                     rxrpc_skb_traces[__entry->op],
+                     __print_symbolic(__entry->op, rxrpc_skb_traces),
                      __entry->usage,
                      __entry->mod_count,
                      __entry->where)
@@ -154,7 +534,8 @@ TRACE_EVENT(rxrpc_rx_packet,
                      __entry->hdr.callNumber, __entry->hdr.serviceId,
                      __entry->hdr.serial, __entry->hdr.seq,
                      __entry->hdr.type, __entry->hdr.flags,
-                     __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK")
+                     __entry->hdr.type <= 15 ?
+                     __print_symbolic(__entry->hdr.type, rxrpc_pkts) : "?UNK")
            );
 
 TRACE_EVENT(rxrpc_rx_done,
@@ -214,6 +595,7 @@ TRACE_EVENT(rxrpc_transmit,
                    __field(enum rxrpc_transmit_trace,  why             )
                    __field(rxrpc_seq_t,                tx_hard_ack     )
                    __field(rxrpc_seq_t,                tx_top          )
+                   __field(int,                        tx_winsize      )
                             ),
 
            TP_fast_assign(
@@ -221,38 +603,81 @@ TRACE_EVENT(rxrpc_transmit,
                    __entry->why = why;
                    __entry->tx_hard_ack = call->tx_hard_ack;
                    __entry->tx_top = call->tx_top;
+                   __entry->tx_winsize = call->tx_winsize;
                           ),
 
-           TP_printk("c=%p %s f=%08x n=%u",
+           TP_printk("c=%p %s f=%08x n=%u/%u",
                      __entry->call,
-                     rxrpc_transmit_traces[__entry->why],
+                     __print_symbolic(__entry->why, rxrpc_transmit_traces),
                      __entry->tx_hard_ack + 1,
-                     __entry->tx_top - __entry->tx_hard_ack)
+                     __entry->tx_top - __entry->tx_hard_ack,
+                     __entry->tx_winsize)
+           );
+
+TRACE_EVENT(rxrpc_rx_data,
+           TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+                    rxrpc_serial_t serial, u8 flags, u8 anno),
+
+           TP_ARGS(call, seq, serial, flags, anno),
+
+           TP_STRUCT__entry(
+                   __field(struct rxrpc_call *,        call            )
+                   __field(rxrpc_seq_t,                seq             )
+                   __field(rxrpc_serial_t,             serial          )
+                   __field(u8,                         flags           )
+                   __field(u8,                         anno            )
+                            ),
+
+           TP_fast_assign(
+                   __entry->call = call;
+                   __entry->seq = seq;
+                   __entry->serial = serial;
+                   __entry->flags = flags;
+                   __entry->anno = anno;
+                          ),
+
+           TP_printk("c=%p DATA %08x q=%08x fl=%02x a=%02x",
+                     __entry->call,
+                     __entry->serial,
+                     __entry->seq,
+                     __entry->flags,
+                     __entry->anno)
            );
 
 TRACE_EVENT(rxrpc_rx_ack,
-           TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t first, u8 reason, u8 n_acks),
+           TP_PROTO(struct rxrpc_call *call,
+                    rxrpc_serial_t serial, rxrpc_serial_t ack_serial,
+                    rxrpc_seq_t first, rxrpc_seq_t prev, u8 reason, u8 n_acks),
 
-           TP_ARGS(call, first, reason, n_acks),
+           TP_ARGS(call, serial, ack_serial, first, prev, reason, n_acks),
 
            TP_STRUCT__entry(
                    __field(struct rxrpc_call *,        call            )
+                   __field(rxrpc_serial_t,             serial          )
+                   __field(rxrpc_serial_t,             ack_serial      )
                    __field(rxrpc_seq_t,                first           )
+                   __field(rxrpc_seq_t,                prev            )
                    __field(u8,                         reason          )
                    __field(u8,                         n_acks          )
                             ),
 
            TP_fast_assign(
                    __entry->call = call;
+                   __entry->serial = serial;
+                   __entry->ack_serial = ack_serial;
                    __entry->first = first;
+                   __entry->prev = prev;
                    __entry->reason = reason;
                    __entry->n_acks = n_acks;
                           ),
 
-           TP_printk("c=%p %s f=%08x n=%u",
+           TP_printk("c=%p %08x %s r=%08x f=%08x p=%08x n=%u",
                      __entry->call,
-                     rxrpc_ack_names[__entry->reason],
+                     __entry->serial,
+                     __print_symbolic(__entry->reason, rxrpc_ack_names),
+                     __entry->ack_serial,
                      __entry->first,
+                     __entry->prev,
                      __entry->n_acks)
            );
 
@@ -317,7 +742,7 @@ TRACE_EVENT(rxrpc_tx_ack,
            TP_printk(" c=%p ACK  %08x %s f=%08x r=%08x n=%u",
                      __entry->call,
                      __entry->serial,
-                     rxrpc_ack_names[__entry->reason],
+                     __print_symbolic(__entry->reason, rxrpc_ack_names),
                      __entry->ack_first,
                      __entry->ack_serial,
                      __entry->n_acks)
@@ -349,7 +774,7 @@ TRACE_EVENT(rxrpc_receive,
 
            TP_printk("c=%p %s r=%08x q=%08x w=%08x-%08x",
                      __entry->call,
-                     rxrpc_receive_traces[__entry->why],
+                     __print_symbolic(__entry->why, rxrpc_receive_traces),
                      __entry->serial,
                      __entry->seq,
                      __entry->hard_ack,
@@ -383,7 +808,7 @@ TRACE_EVENT(rxrpc_recvmsg,
 
            TP_printk("c=%p %s q=%08x o=%u l=%u ret=%d",
                      __entry->call,
-                     rxrpc_recvmsg_traces[__entry->why],
+                     __print_symbolic(__entry->why, rxrpc_recvmsg_traces),
                      __entry->seq,
                      __entry->offset,
                      __entry->len,
@@ -410,7 +835,7 @@ TRACE_EVENT(rxrpc_rtt_tx,
 
            TP_printk("c=%p %s sr=%08x",
                      __entry->call,
-                     rxrpc_rtt_tx_traces[__entry->why],
+                     __print_symbolic(__entry->why, rxrpc_rtt_tx_traces),
                      __entry->send_serial)
            );
 
@@ -443,7 +868,7 @@ TRACE_EVENT(rxrpc_rtt_rx,
 
            TP_printk("c=%p %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld",
                      __entry->call,
-                     rxrpc_rtt_rx_traces[__entry->why],
+                     __print_symbolic(__entry->why, rxrpc_rtt_rx_traces),
                      __entry->send_serial,
                      __entry->resp_serial,
                      __entry->rtt,
@@ -481,7 +906,7 @@ TRACE_EVENT(rxrpc_timer,
 
            TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
                      __entry->call,
-                     rxrpc_timer_traces[__entry->why],
+                     __print_symbolic(__entry->why, rxrpc_timer_traces),
                      ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
                      ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
                      ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
@@ -506,7 +931,8 @@ TRACE_EVENT(rxrpc_rx_lose,
                      __entry->hdr.callNumber, __entry->hdr.serviceId,
                      __entry->hdr.serial, __entry->hdr.seq,
                      __entry->hdr.type, __entry->hdr.flags,
-                     __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK")
+                     __entry->hdr.type <= 15 ?
+                     __print_symbolic(__entry->hdr.type, rxrpc_pkts) : "?UNK")
            );
 
 TRACE_EVENT(rxrpc_propose_ack,
@@ -539,12 +965,12 @@ TRACE_EVENT(rxrpc_propose_ack,
 
            TP_printk("c=%p %s %s r=%08x i=%u b=%u%s",
                      __entry->call,
-                     rxrpc_propose_ack_traces[__entry->why],
-                     rxrpc_ack_names[__entry->ack_reason],
+                     __print_symbolic(__entry->why, rxrpc_propose_ack_traces),
+                     __print_symbolic(__entry->ack_reason, rxrpc_ack_names),
                      __entry->serial,
                      __entry->immediate,
                      __entry->background,
-                     rxrpc_propose_ack_outcomes[__entry->outcome])
+                     __print_symbolic(__entry->outcome, rxrpc_propose_ack_outcomes))
            );
 
 TRACE_EVENT(rxrpc_retransmit,
@@ -603,9 +1029,9 @@ TRACE_EVENT(rxrpc_congest,
            TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
                      __entry->call,
                      __entry->ack_serial,
-                     rxrpc_ack_names[__entry->sum.ack_reason],
+                     __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
                      __entry->hard_ack,
-                     rxrpc_congest_modes[__entry->sum.mode],
+                     __print_symbolic(__entry->sum.mode, rxrpc_congest_modes),
                      __entry->sum.cwnd,
                      __entry->sum.ssthresh,
                      __entry->sum.nr_acks, __entry->sum.nr_nacks,
@@ -615,10 +1041,50 @@ TRACE_EVENT(rxrpc_congest,
                      __entry->sum.cumulative_acks,
                      __entry->sum.dup_acks,
                      __entry->lowest_nak, __entry->sum.new_low_nack ? "!" : "",
-                     rxrpc_congest_changes[__entry->change],
+                     __print_symbolic(__entry->change, rxrpc_congest_changes),
                      __entry->sum.retrans_timeo ? " rTxTo" : "")
            );
 
+TRACE_EVENT(rxrpc_disconnect_call,
+           TP_PROTO(struct rxrpc_call *call),
+
+           TP_ARGS(call),
+
+           TP_STRUCT__entry(
+                   __field(struct rxrpc_call *,        call            )
+                   __field(u32,                        abort_code      )
+                            ),
+
+           TP_fast_assign(
+                   __entry->call = call;
+                   __entry->abort_code = call->abort_code;
+                          ),
+
+           TP_printk("c=%p ab=%08x",
+                     __entry->call,
+                     __entry->abort_code)
+           );
+
+TRACE_EVENT(rxrpc_improper_term,
+           TP_PROTO(struct rxrpc_call *call),
+
+           TP_ARGS(call),
+
+           TP_STRUCT__entry(
+                   __field(struct rxrpc_call *,        call            )
+                   __field(u32,                        abort_code      )
+                            ),
+
+           TP_fast_assign(
+                   __entry->call = call;
+                   __entry->abort_code = call->abort_code;
+                          ),
+
+           TP_printk("c=%p ab=%08x",
+                     __entry->call,
+                     __entry->abort_code)
+           );
+
 #endif /* _TRACE_RXRPC_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
new file mode 100644 (file)
index 0000000..1b61357
--- /dev/null
@@ -0,0 +1,53 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xdp
+
+#if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_XDP_H
+
+#include <linux/netdevice.h>
+#include <linux/filter.h>
+#include <linux/tracepoint.h>
+
+#define __XDP_ACT_MAP(FN)      \
+       FN(ABORTED)             \
+       FN(DROP)                \
+       FN(PASS)                \
+       FN(TX)
+
+#define __XDP_ACT_TP_FN(x)     \
+       TRACE_DEFINE_ENUM(XDP_##x);
+#define __XDP_ACT_SYM_FN(x)    \
+       { XDP_##x, #x },
+#define __XDP_ACT_SYM_TAB      \
+       __XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 }
+__XDP_ACT_MAP(__XDP_ACT_TP_FN)
+
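The FN-map above is the same X-macro idiom; expanded by the preprocessor, __XDP_ACT_SYM_TAB yields exactly:

    /*
     *   { XDP_ABORTED, "ABORTED" }, { XDP_DROP, "DROP" },
     *   { XDP_PASS, "PASS" }, { XDP_TX, "TX" }, { -1, 0 }
     *
     * i.e. a __print_symbolic() table terminated by the { -1, 0 }
     * sentinel, while __XDP_ACT_MAP(__XDP_ACT_TP_FN) emits one
     * TRACE_DEFINE_ENUM() per XDP action so the numeric values
     * resolve in userspace tools.
     */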
+TRACE_EVENT(xdp_exception,
+
+       TP_PROTO(const struct net_device *dev,
+                const struct bpf_prog *xdp, u32 act),
+
+       TP_ARGS(dev, xdp, act),
+
+       TP_STRUCT__entry(
+               __string(name, dev->name)
+               __array(u8, prog_tag, 8)
+               __field(u32, act)
+       ),
+
+       TP_fast_assign(
+               BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag));
+               memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag));
+               __assign_str(name, dev->name);
+               __entry->act = act;
+       ),
+
+       TP_printk("prog=%s device=%s action=%s",
+                 __print_hex_str(__entry->prog_tag, 8),
+                 __get_str(name),
+                 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB))
+);
+
+#endif /* _TRACE_XDP_H */
+
+#include <trace/define_trace.h>
index 467e12f780d863956b1b1b650c406d9d4cb5c0c2..5c06f4af8323032f78ba447f5f66cd31bcb28282 100644 (file)
@@ -297,7 +297,12 @@ TRACE_MAKE_SYSTEM_STR();
 #endif
 
 #undef __print_hex
-#define __print_hex(buf, buf_len) trace_print_hex_seq(p, buf, buf_len)
+#define __print_hex(buf, buf_len)                                      \
+       trace_print_hex_seq(p, buf, buf_len, false)
+
+#undef __print_hex_str
+#define __print_hex_str(buf, buf_len)                                  \
+       trace_print_hex_seq(p, buf, buf_len, true)
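A brief illustration of the intended difference, assuming the final boolean selects concatenated output (false keeps the old spaced format):

    /* For a 3-byte buffer { 0xde, 0xad, 0xbe }:
     *   __print_hex(buf, 3)      -> "de ad be"   (spaced, as before)
     *   __print_hex_str(buf, 3)  -> "deadbe"     (contiguous, string-like,
     *                                             as used for the 8-byte BPF
     *                                             prog tag above)
     */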
 
 #undef __print_array
 #define __print_array(array, count, el_size)                           \
@@ -711,6 +716,7 @@ static inline void ftrace_test_probe_##call(void)                   \
 #undef __print_flags
 #undef __print_symbolic
 #undef __print_hex
+#undef __print_hex_str
 #undef __get_dynamic_array
 #undef __get_dynamic_array_len
 #undef __get_str
index f330ba4547cfd4c59da3d8f309e4ab76d8bd8b40..a2e90722a4c4ff9693e9f7b5aeb1131676fb2688 100644 (file)
@@ -64,6 +64,7 @@ header-y += auto_fs.h
 header-y += auxvec.h
 header-y += ax25.h
 header-y += b1lli.h
+header-y += batman_adv.h
 header-y += baycom.h
 header-y += bcm933xx_hcs.h
 header-y += bfs_fs.h
@@ -194,6 +195,7 @@ header-y += if_tun.h
 header-y += if_tunnel.h
 header-y += if_vlan.h
 header-y += if_x25.h
+header-y += ife.h
 header-y += igmp.h
 header-y += ila.h
 header-y += in6.h
@@ -305,6 +307,7 @@ header-y += netrom.h
 header-y += net_namespace.h
 header-y += net_tstamp.h
 header-y += nfc.h
+header-y += psample.h
 header-y += nfs2.h
 header-y += nfs3.h
 header-y += nfs4.h
@@ -379,6 +382,10 @@ header-y += sctp.h
 header-y += sdla.h
 header-y += seccomp.h
 header-y += securebits.h
+header-y += seg6_genl.h
+header-y += seg6.h
+header-y += seg6_hmac.h
+header-y += seg6_iptunnel.h
 header-y += selinux_netlink.h
 header-y += sem.h
 header-y += serial_core.h
index 734fe83ab6457d79943705f1707f128c6ed355c3..a83ddb7b63dbf3205ecba7b12fa4f66187efa65a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2016-2017  B.A.T.M.A.N. contributors:
  *
  * Matthias Schiffer
  *
index d2b0ac799d03c925a6eec2b49bdb14525331687c..0539a0ceef38155835552360667070552ebce641 100644 (file)
@@ -63,6 +63,12 @@ struct bpf_insn {
        __s32   imm;            /* signed immediate constant */
 };
 
+/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
+struct bpf_lpm_trie_key {
+       __u32   prefixlen;      /* up to 32 for AF_INET, 128 for AF_INET6 */
+       __u8    data[0];        /* Arbitrary size */
+};
+
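A sketch of how userspace might build a key for this map type. The key layout mirrors struct bpf_lpm_trie_key above; map_fd, a libbpf-style bpf_map_lookup_elem() wrapper, and the lookup_route() helper itself are assumptions for illustration:

    #include <arpa/inet.h>
    #include <linux/types.h>

    struct lpm_key_v4 {
            __u32 prefixlen;        /* bpf_lpm_trie_key.prefixlen, <= 32 */
            __u8  data[4];          /* bpf_lpm_trie_key.data, network order */
    };

    int lookup_route(int map_fd, const char *addr, __u64 *value)
    {
            struct lpm_key_v4 key = { .prefixlen = 32 };

            /* Exact-address lookup; the trie matches the longest prefix. */
            inet_pton(AF_INET, addr, key.data);
            return bpf_map_lookup_elem(map_fd, &key, value);
    }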
 /* BPF syscall commands, see bpf(2) man-page for details. */
 enum bpf_cmd {
        BPF_MAP_CREATE,
@@ -89,6 +95,7 @@ enum bpf_map_type {
        BPF_MAP_TYPE_CGROUP_ARRAY,
        BPF_MAP_TYPE_LRU_HASH,
        BPF_MAP_TYPE_LRU_PERCPU_HASH,
+       BPF_MAP_TYPE_LPM_TRIE,
 };
 
 enum bpf_prog_type {
@@ -437,6 +444,18 @@ union bpf_attr {
  *     @xdp_md: pointer to xdp_md
 *     @delta: A positive/negative integer to be added to xdp_md.data
  *     Return: 0 on success or negative on error
+ *
+ * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
+ *     Copy a NUL-terminated string from an unsafe address. If the string
+ *     length is smaller than size, the target is not padded with further NUL
+ *     bytes. If the string length is larger than size, just size-1
+ *     bytes are copied and the last byte is set to NUL.
+ *     @dst: destination address
+ *     @size: maximum number of bytes to copy, including the trailing NUL
+ *     @unsafe_ptr: unsafe address
+ *     Return:
+ *       > 0 length of the string including the trailing NUL on success
+ *       < 0 error
  */
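A sketch of a tracing program using the new helper. SEC(), PT_REGS_PARM2() and bpf_printk() are libbpf conventions assumed here for illustration, and do_sys_open is just a sample attach point, not part of this patch:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("kprobe/do_sys_open")
    int trace_open(struct pt_regs *ctx)
    {
            char fname[64];
            long n;

            /* Copies at most sizeof(fname)-1 bytes, then NUL-terminates. */
            n = bpf_probe_read_str(fname, sizeof(fname),
                                   (const void *)PT_REGS_PARM2(ctx));
            if (n > 0)
                    bpf_printk("open: %s (len %ld)\n", fname, n);
            return 0;
    }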
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -483,7 +502,8 @@ union bpf_attr {
        FN(set_hash_invalid),           \
        FN(get_numa_node_id),           \
        FN(skb_change_head),            \
-       FN(xdp_adjust_head),
+       FN(xdp_adjust_head),            \
+       FN(probe_read_str),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
@@ -509,6 +529,7 @@ enum bpf_func_id {
 /* BPF_FUNC_l4_csum_replace flags. */
 #define BPF_F_PSEUDO_HDR               (1ULL << 4)
 #define BPF_F_MARK_MANGLED_0           (1ULL << 5)
+#define BPF_F_MARK_ENFORCE             (1ULL << 6)
 
 /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
 #define BPF_F_INGRESS                  (1ULL << 0)
index 94ffe0c83ce72cf5e396a615fe039bec6fe468d4..fdf75f74fdaf16424076e33ac4a188525b4b3a58 100644 (file)
@@ -127,9 +127,16 @@ enum {
        IFLA_CAN_BERR_COUNTER,
        IFLA_CAN_DATA_BITTIMING,
        IFLA_CAN_DATA_BITTIMING_CONST,
+       IFLA_CAN_TERMINATION,
+       IFLA_CAN_TERMINATION_CONST,
+       IFLA_CAN_BITRATE_CONST,
+       IFLA_CAN_DATA_BITRATE_CONST,
        __IFLA_CAN_MAX
 };
 
 #define IFLA_CAN_MAX   (__IFLA_CAN_MAX - 1)
 
+/* u16 termination range: 1..65535 Ohms */
+#define CAN_TERMINATION_DISABLED 0
+
 #endif /* !_UAPI_CAN_NETLINK_H */
index 9014c33d4e77bf409be507f6fb8da59edc14b2f8..0f1f3a12e23c30e511cdb332059e30ee8d3d5efb 100644 (file)
@@ -57,8 +57,14 @@ enum devlink_command {
        DEVLINK_CMD_SB_OCC_SNAPSHOT,
        DEVLINK_CMD_SB_OCC_MAX_CLEAR,
 
-       DEVLINK_CMD_ESWITCH_MODE_GET,
-       DEVLINK_CMD_ESWITCH_MODE_SET,
+       DEVLINK_CMD_ESWITCH_GET,
+#define DEVLINK_CMD_ESWITCH_MODE_GET /* obsolete, never use this! */ \
+       DEVLINK_CMD_ESWITCH_GET
+
+       DEVLINK_CMD_ESWITCH_SET,
+#define DEVLINK_CMD_ESWITCH_MODE_SET /* obsolete, never use this! */ \
+       DEVLINK_CMD_ESWITCH_SET
+
        /* add new commands above here */
 
        __DEVLINK_CMD_MAX,
index ab92bca6d448bb8040f366df430b7db6950aae5c..a9e6244ce43803339584f3f6e43757005363e621 100644 (file)
@@ -118,6 +118,7 @@ enum {
        IFLA_BRIDGE_FLAGS,
        IFLA_BRIDGE_MODE,
        IFLA_BRIDGE_VLAN_INFO,
+       IFLA_BRIDGE_VLAN_TUNNEL_INFO,
        __IFLA_BRIDGE_MAX,
 };
 #define IFLA_BRIDGE_MAX (__IFLA_BRIDGE_MAX - 1)
@@ -134,6 +135,16 @@ struct bridge_vlan_info {
        __u16 vid;
 };
 
+enum {
+       IFLA_BRIDGE_VLAN_TUNNEL_UNSPEC,
+       IFLA_BRIDGE_VLAN_TUNNEL_ID,
+       IFLA_BRIDGE_VLAN_TUNNEL_VID,
+       IFLA_BRIDGE_VLAN_TUNNEL_FLAGS,
+       __IFLA_BRIDGE_VLAN_TUNNEL_MAX,
+};
+
+#define IFLA_BRIDGE_VLAN_TUNNEL_MAX (__IFLA_BRIDGE_VLAN_TUNNEL_MAX - 1)
+
 struct bridge_vlan_xstats {
        __u64 rx_bytes;
        __u64 rx_packets;
index 6b13e591abc9e646ce11b15d7fe5ee8209a509c1..320fc1e747ee9623db56fbaf26b2a514b5d5a3d1 100644 (file)
@@ -321,6 +321,8 @@ enum {
        IFLA_BRPORT_MULTICAST_ROUTER,
        IFLA_BRPORT_PAD,
        IFLA_BRPORT_MCAST_FLOOD,
+       IFLA_BRPORT_MCAST_TO_UCAST,
+       IFLA_BRPORT_VLAN_TUNNEL,
        __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -847,6 +849,7 @@ enum {
        IFLA_STATS_LINK_XSTATS,
        IFLA_STATS_LINK_XSTATS_SLAVE,
        IFLA_STATS_LINK_OFFLOAD_XSTATS,
+       IFLA_STATS_AF_SPEC,
        __IFLA_STATS_MAX,
 };
 
diff --git a/include/uapi/linux/ife.h b/include/uapi/linux/ife.h
new file mode 100644 (file)
index 0000000..2954da3
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef __UAPI_IFE_H
+#define __UAPI_IFE_H
+
+#define IFE_METAHDRLEN 2
+
+enum {
+       IFE_META_SKBMARK = 1,
+       IFE_META_HASHID,
+       IFE_META_PRIO,
+       IFE_META_QMAP,
+       IFE_META_TCINDEX,
+       __IFE_META_MAX
+};
+
+/* Can be overridden at runtime by module option */
+#define IFE_META_MAX (__IFE_META_MAX - 1)
+
+#endif
index ccbb32aa67044f949080551c9f4049189f944fa9..a97f9a7568cfe54ca3bfea53957cf2e80f8a1a24 100644 (file)
@@ -53,7 +53,7 @@ struct igmpv3_grec {
 struct igmpv3_report {
        __u8 type;
        __u8 resv1;
-       __be16 csum;
+       __sum16 csum;
        __be16 resv2;
        __be16 ngrec;
        struct igmpv3_grec grec[0];
@@ -62,7 +62,7 @@ struct igmpv3_report {
 struct igmpv3_query {
        __u8 type;
        __u8 code;
-       __be16 csum;
+       __sum16 csum;
        __be32 group;
 #if defined(__LITTLE_ENDIAN_BITFIELD)
        __u8 qrv:3,
index eaf65dc82e227c9142942d09fe90ddacf00c7d9e..8ef9e75e004ebbf951cb50b4b4b17f2c5b907dd4 100644 (file)
@@ -182,6 +182,7 @@ enum {
        DEVCONF_SEG6_ENABLED,
        DEVCONF_SEG6_REQUIRE_HMAC,
        DEVCONF_ENHANCED_DAD,
+       DEVCONF_ADDR_GEN_MODE,
        DEVCONF_MAX
 };
 
index 24a6cb1aec86f2a73ffcb717779249970bbb31dc..77a19dfe3990d5deede42363d9511d110032f019 100644 (file)
@@ -43,4 +43,34 @@ struct mpls_label {
 
 #define MPLS_LABEL_FIRST_UNRESERVED    16 /* RFC3032 */
 
+/* These are embedded into IFLA_STATS_AF_SPEC:
+ * [IFLA_STATS_AF_SPEC]
+ * -> [AF_MPLS]
+ *    -> [MPLS_STATS_xxx]
+ *
+ * Attributes:
+ * [MPLS_STATS_LINK] = {
+ *     struct mpls_link_stats
+ * }
+ */
+enum {
+       MPLS_STATS_UNSPEC, /* also used as 64-bit pad attribute */
+       MPLS_STATS_LINK,
+       __MPLS_STATS_MAX,
+};
+
+#define MPLS_STATS_MAX (__MPLS_STATS_MAX - 1)
+
+struct mpls_link_stats {
+       __u64   rx_packets;             /* total packets received       */
+       __u64   tx_packets;             /* total packets transmitted    */
+       __u64   rx_bytes;               /* total bytes received         */
+       __u64   tx_bytes;               /* total bytes transmitted      */
+       __u64   rx_errors;              /* bad packets received         */
+       __u64   tx_errors;              /* packet transmit problems     */
+       __u64   rx_dropped;             /* packet dropped on receive    */
+       __u64   tx_dropped;             /* packet dropped on transmit   */
+       __u64   rx_noroute;             /* no route for packet dest     */
+};
+
 #endif /* _UAPI_MPLS_H */
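A sketch of how a stats dumper might pull struct mpls_link_stats out of the nesting documented above, assuming libmnl and surrounding dump/callback wiring that has already located the AF_MPLS nest:

    #include <libmnl/libmnl.h>
    #include <linux/mpls.h>
    #include <string.h>

    static void read_mpls_stats(const struct nlattr *af_mpls_nest)
    {
            struct mpls_link_stats stats;
            const struct nlattr *attr;

            mnl_attr_for_each_nested(attr, af_mpls_nest) {
                    if (mnl_attr_get_type(attr) == MPLS_STATS_LINK &&
                        mnl_attr_get_payload_len(attr) >= sizeof(stats)) {
                            memcpy(&stats, mnl_attr_get_payload(attr),
                                   sizeof(stats));
                            /* stats.rx_packets, stats.rx_noroute, ... */
                    }
            }
    }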
index bd99a8d80f36e527094f05ceabb55c21ea004ae7..f3d16dbe09d64424d2d92c581f30771f6add6e0b 100644 (file)
@@ -26,6 +26,7 @@ enum {
        NDA_IFINDEX,
        NDA_MASTER,
        NDA_LINK_NETNSID,
+       NDA_SRC_VNI,
        __NDA_MAX
 };
 
index e3f27e09eb2be460ebfea6a3b8f2d1ffb38d3010..05215d30fe5c9853b7871e799ccdce4878a04ef1 100644 (file)
@@ -207,6 +207,7 @@ enum nft_chain_attributes {
  * @NFTA_RULE_COMPAT: compatibility specifications of the rule (NLA_NESTED: nft_rule_compat_attributes)
  * @NFTA_RULE_POSITION: numeric handle of the previous rule (NLA_U64)
  * @NFTA_RULE_USERDATA: user data (NLA_BINARY, NFT_USERDATA_MAXLEN)
+ * @NFTA_RULE_ID: uniquely identifies a rule in a transaction (NLA_U32)
  */
 enum nft_rule_attributes {
        NFTA_RULE_UNSPEC,
@@ -218,6 +219,7 @@ enum nft_rule_attributes {
        NFTA_RULE_POSITION,
        NFTA_RULE_USERDATA,
        NFTA_RULE_PAD,
+       NFTA_RULE_ID,
        __NFTA_RULE_MAX
 };
 #define NFTA_RULE_MAX          (__NFTA_RULE_MAX - 1)
@@ -704,13 +706,32 @@ enum nft_payload_attributes {
 };
 #define NFTA_PAYLOAD_MAX       (__NFTA_PAYLOAD_MAX - 1)
 
+enum nft_exthdr_flags {
+       NFT_EXTHDR_F_PRESENT = (1 << 0),
+};
+
+/**
+ * enum nft_exthdr_op - nf_tables match options
+ *
+ * @NFT_EXTHDR_OP_IPV6: match against ipv6 extension headers
+ * @NFT_EXTHDR_OP_TCPOPT: match against tcp options
+ */
+enum nft_exthdr_op {
+       NFT_EXTHDR_OP_IPV6,
+       NFT_EXTHDR_OP_TCPOPT,
+       __NFT_EXTHDR_OP_MAX
+};
+#define NFT_EXTHDR_OP_MAX      (__NFT_EXTHDR_OP_MAX - 1)
+
 /**
- * enum nft_exthdr_attributes - nf_tables IPv6 extension header expression netlink attributes
+ * enum nft_exthdr_attributes - nf_tables extension header expression netlink attributes
  *
  * @NFTA_EXTHDR_DREG: destination register (NLA_U32: nft_registers)
  * @NFTA_EXTHDR_TYPE: extension header type (NLA_U8)
  * @NFTA_EXTHDR_OFFSET: extension header offset (NLA_U32)
  * @NFTA_EXTHDR_LEN: extension header length (NLA_U32)
+ * @NFTA_EXTHDR_FLAGS: extension header flags (NLA_U32)
+ * @NFTA_EXTHDR_OP: option match type (NLA_U8)
  */
 enum nft_exthdr_attributes {
        NFTA_EXTHDR_UNSPEC,
@@ -718,6 +739,8 @@ enum nft_exthdr_attributes {
        NFTA_EXTHDR_TYPE,
        NFTA_EXTHDR_OFFSET,
        NFTA_EXTHDR_LEN,
+       NFTA_EXTHDR_FLAGS,
+       NFTA_EXTHDR_OP,
        __NFTA_EXTHDR_MAX
 };
 #define NFTA_EXTHDR_MAX                (__NFTA_EXTHDR_MAX - 1)
@@ -860,6 +883,11 @@ enum nft_rt_attributes {
  * @NFT_CT_PROTOCOL: conntrack layer 4 protocol
  * @NFT_CT_PROTO_SRC: conntrack layer 4 protocol source
  * @NFT_CT_PROTO_DST: conntrack layer 4 protocol destination
+ * @NFT_CT_LABELS: conntrack labels
+ * @NFT_CT_PKTS: conntrack packets
+ * @NFT_CT_BYTES: conntrack bytes
+ * @NFT_CT_AVGPKT: conntrack average bytes per packet
+ * @NFT_CT_ZONE: conntrack zone
  */
 enum nft_ct_keys {
        NFT_CT_STATE,
@@ -878,6 +906,8 @@ enum nft_ct_keys {
        NFT_CT_LABELS,
        NFT_CT_PKTS,
        NFT_CT_BYTES,
+       NFT_CT_AVGPKT,
+       NFT_CT_ZONE,
 };
 
 /**
index 4bb8cb7730e7d26272f3be04ae1392c42b9d6970..a09906a30d771699f21c636655e634884110efcf 100644 (file)
@@ -65,4 +65,16 @@ struct nfgenmsg {
 #define NFNL_MSG_BATCH_BEGIN           NLMSG_MIN_TYPE
 #define NFNL_MSG_BATCH_END             NLMSG_MIN_TYPE+1
 
+/**
+ * enum nfnl_batch_attributes - nfnetlink batch netlink attributes
+ *
+ * @NFNL_BATCH_GENID: generation ID for this changeset (NLA_U32)
+ */
+enum nfnl_batch_attributes {
+       NFNL_BATCH_UNSPEC,
+       NFNL_BATCH_GENID,
+       __NFNL_BATCH_MAX
+};
+#define NFNL_BATCH_MAX                 (__NFNL_BATCH_MAX - 1)
+
 #endif /* _UAPI_NFNETLINK_H */
index 0dba4e4ed2be21af9f02df5bd844cbb087a51706..f3946a27bd07d5164fac6964785c86044f031790 100644 (file)
@@ -27,6 +27,7 @@
 #define NETLINK_ECRYPTFS       19
 #define NETLINK_RDMA           20
 #define NETLINK_CRYPTO         21      /* Crypto layer */
+#define NETLINK_SMC            22      /* SMC monitoring */
 
 #define NETLINK_INET_DIAG      NETLINK_SOCK_DIAG
 
index bea982af9cfb80a3081f50af90145a3b9758185c..5ed257c4cd4eaad8fcbd8dbde4ca88f385629271 100644 (file)
@@ -10,7 +10,7 @@
  * Copyright 2008, 2009 Luis R. Rodriguez <lrodriguez@atheros.com>
  * Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
  * Copyright 2008 Colin McCabe <colin@cozybit.com>
- * Copyright 2015      Intel Deutschland GmbH
+ * Copyright 2015-2017 Intel Deutschland GmbH
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
  *     cfg80211_scan_done().
  *
  * @NL80211_CMD_START_NAN: Start NAN operation, identified by its
- *     %NL80211_ATTR_WDEV interface. This interface must have been previously
- *     created with %NL80211_CMD_NEW_INTERFACE. After it has been started, the
- *     NAN interface will create or join a cluster. This command must have a
- *     valid %NL80211_ATTR_NAN_MASTER_PREF attribute and optional
- *     %NL80211_ATTR_NAN_DUAL attributes.
- *     After this command NAN functions can be added.
+ *     %NL80211_ATTR_WDEV interface. This interface must have been
+ *     previously created with %NL80211_CMD_NEW_INTERFACE. After it
+ *     has been started, the NAN interface will create or join a
+ *     cluster. This command must have a valid
+ *     %NL80211_ATTR_NAN_MASTER_PREF attribute and an optional
+ *     %NL80211_ATTR_BANDS attribute.  If %NL80211_ATTR_BANDS is
+ *     omitted or set to 0, it means don't-care and the device will
+ *     decide what to use.  After this command NAN functions can be
+ *     added.
  * @NL80211_CMD_STOP_NAN: Stop the NAN operation, identified by
  *     its %NL80211_ATTR_WDEV interface.
  * @NL80211_CMD_ADD_NAN_FUNCTION: Add a NAN function. The function is defined
  *     This command is also used as a notification sent when a NAN function is
  *     terminated. This will contain a %NL80211_ATTR_NAN_FUNC_INST_ID
  *     and %NL80211_ATTR_COOKIE attributes.
- * @NL80211_CMD_CHANGE_NAN_CONFIG: Change current NAN configuration. NAN
- *     must be operational (%NL80211_CMD_START_NAN was executed).
- *     It must contain at least one of the following attributes:
- *     %NL80211_ATTR_NAN_MASTER_PREF, %NL80211_ATTR_NAN_DUAL.
+ * @NL80211_CMD_CHANGE_NAN_CONFIG: Change current NAN
+ *     configuration. NAN must be operational (%NL80211_CMD_START_NAN
+ *     was executed).  It must contain at least one of the following
+ *     attributes: %NL80211_ATTR_NAN_MASTER_PREF,
+ *     %NL80211_ATTR_BANDS.  If %NL80211_ATTR_BANDS is omitted, the
+ *     current configuration is not changed.  If it is present but
+ *     set to zero, the configuration is changed to don't-care
+ *     (i.e. the device can decide what to do).
  * @NL80211_CMD_NAN_FUNC_MATCH: Notification sent when a match is reported.
  *     This will contain a %NL80211_ATTR_NAN_MATCH nested attribute and
  *     %NL80211_ATTR_COOKIE.
@@ -1822,6 +1829,8 @@ enum nl80211_commands {
  *     and remove functions. NAN notifications will be sent in unicast to that
  *     socket. Without this attribute, any socket can add functions and the
  *     notifications will be sent to the %NL80211_MCGRP_NAN multicast group.
+ *     If set during %NL80211_CMD_ASSOCIATE or %NL80211_CMD_CONNECT the
+ *     station will deauthenticate when the socket is closed.
  *
  * @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
  *     the TDLS link initiator.
@@ -1961,10 +1970,13 @@ enum nl80211_commands {
  *     %NL80211_CMD_CHANGE_NAN_CONFIG. Its type is u8 and it can't be 0.
  *     Also, values 1 and 255 are reserved for certification purposes and
  *     should not be used during a normal device operation.
- * @NL80211_ATTR_NAN_DUAL: NAN dual band operation config (see
- *     &enum nl80211_nan_dual_band_conf). This attribute is used with
- *     %NL80211_CMD_START_NAN and optionally with
- *     %NL80211_CMD_CHANGE_NAN_CONFIG.
+ * @NL80211_ATTR_BANDS: operating bands configuration.  This is a u32
+ *     bitmask of BIT(NL80211_BAND_*) as described in &enum
+ *     nl80211_band.  For instance, for NL80211_BAND_2GHZ, bit 0
+ *     would be set.  This attribute is used with
+ *     %NL80211_CMD_START_NAN and %NL80211_CMD_CHANGE_NAN_CONFIG, and
+ *     it is optional.  If no bands are set, it means don't-care and
+ *     the device will decide what to use.
  * @NL80211_ATTR_NAN_FUNC: a function that can be added to NAN. See
  *     &enum nl80211_nan_func_attributes for description of this nested
  *     attribute.
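For instance, filling %NL80211_ATTR_BANDS to restrict NAN to the 2.4 and 5 GHz bands might look like the following (a hedged sketch using the libnl nla_put_u32() helper; msg is an assumed, already-allocated struct nl_msg):

    /* A value of 0 would mean don't-care, per the description above. */
    __u32 bands = (1U << NL80211_BAND_2GHZ) | (1U << NL80211_BAND_5GHZ);
    nla_put_u32(msg, NL80211_ATTR_BANDS, bands);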
@@ -1982,6 +1994,24 @@ enum nl80211_commands {
  * @NL80211_ATTR_BSSID: The BSSID of the AP. Note that %NL80211_ATTR_MAC is also
  *     used in various commands/events for specifying the BSSID.
  *
+ * @NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI: Relative RSSI threshold by which
+ *     other BSSs have to be better or slightly worse than the current
+ *     connected BSS so that they get reported to user space.
+ *     This will give an opportunity to userspace to consider connecting to
+ *     other matching BSSs which have better or slightly worse RSSI than
+ *     the current connected BSS by using an offloaded operation to avoid
+ *     unnecessary wakeups.
+ *
+ * @NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST: When present, the RSSI level for BSSs in
+ *     the specified band is to be adjusted before doing
+ *     %NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI based comparison to figure out
+ *     better BSSs. The attribute value is a packed structure
+ *     value as specified by &struct nl80211_bss_select_rssi_adjust.
+ *
+ * @NL80211_ATTR_TIMEOUT_REASON: The reason for which an operation timed out.
+ *     u32 attribute with an &enum nl80211_timeout_reason value. This is used,
+ *     e.g., with %NL80211_CMD_CONNECT event.
+ *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2377,7 +2407,7 @@ enum nl80211_attrs {
        NL80211_ATTR_MESH_PEER_AID,
 
        NL80211_ATTR_NAN_MASTER_PREF,
-       NL80211_ATTR_NAN_DUAL,
+       NL80211_ATTR_BANDS,
        NL80211_ATTR_NAN_FUNC,
        NL80211_ATTR_NAN_MATCH,
 
@@ -2388,6 +2418,11 @@ enum nl80211_attrs {
 
        NL80211_ATTR_BSSID,
 
+       NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI,
+       NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST,
+
+       NL80211_ATTR_TIMEOUT_REASON,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
@@ -3080,6 +3115,13 @@ enum nl80211_reg_rule_attr {
  *     how this API was implemented in the past. Also, due to the same problem,
  *     the only way to create a matchset with only an RSSI filter (with this
  *     attribute) is if there's only a single matchset with the RSSI attribute.
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI: Flag indicating whether
+ *     %NL80211_SCHED_SCAN_MATCH_ATTR_RSSI is to be used as absolute RSSI or
+ *     relative to the current BSS's RSSI.
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST: When present, the RSSI level for
+ *     BSSs in the specified band is to be adjusted before doing
+ *     RSSI-based BSS selection. The attribute value is a packed structure
+ *     value as specified by &struct nl80211_bss_select_rssi_adjust.
  * @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
  *     attribute number currently defined
  * @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -3089,6 +3131,8 @@ enum nl80211_sched_scan_match_attr {
 
        NL80211_SCHED_SCAN_MATCH_ATTR_SSID,
        NL80211_SCHED_SCAN_MATCH_ATTR_RSSI,
+       NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI,
+       NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST,
 
        /* keep last */
        __NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST,
@@ -3918,6 +3962,8 @@ enum nl80211_ps_state {
  *     %NL80211_CMD_NOTIFY_CQM. Set to 0 to turn off TX error reporting.
  * @NL80211_ATTR_CQM_BEACON_LOSS_EVENT: flag attribute that's set in a beacon
  *     loss event
+ * @NL80211_ATTR_CQM_RSSI_LEVEL: the RSSI value in dBm that triggered the
+ *     RSSI threshold event.
  * @__NL80211_ATTR_CQM_AFTER_LAST: internal
  * @NL80211_ATTR_CQM_MAX: highest key attribute
  */
@@ -3931,6 +3977,7 @@ enum nl80211_attr_cqm {
        NL80211_ATTR_CQM_TXE_PKTS,
        NL80211_ATTR_CQM_TXE_INTVL,
        NL80211_ATTR_CQM_BEACON_LOSS_EVENT,
+       NL80211_ATTR_CQM_RSSI_LEVEL,
 
        /* keep last */
        __NL80211_ATTR_CQM_AFTER_LAST,
@@ -4699,6 +4746,13 @@ enum nl80211_feature_flags {
  *     configuration (AP/mesh) with VHT rates.
  * @NL80211_EXT_FEATURE_FILS_STA: This driver supports Fast Initial Link Setup
  *     with user space SME (NL80211_CMD_AUTHENTICATE) in station mode.
+ * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA: This driver supports randomized TA
+ *     in @NL80211_CMD_FRAME while not associated.
+ * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED: This driver supports
+ *     randomized TA in @NL80211_CMD_FRAME while associated.
+ * @NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI: The driver supports sched_scan
+ *     for reporting BSSs with better RSSI than the current connected BSS
+ *     (%NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI).
  *
  * @NUM_NL80211_EXT_FEATURES: number of extended features.
  * @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4714,6 +4768,9 @@ enum nl80211_ext_feature_index {
        NL80211_EXT_FEATURE_BEACON_RATE_HT,
        NL80211_EXT_FEATURE_BEACON_RATE_VHT,
        NL80211_EXT_FEATURE_FILS_STA,
+       NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA,
+       NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED,
+       NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI,
 
        /* add new features before the definition below */
        NUM_NL80211_EXT_FEATURES,
@@ -4752,6 +4809,21 @@ enum nl80211_connect_failed_reason {
        NL80211_CONN_FAIL_BLOCKED_CLIENT,
 };
 
+/**
+ * enum nl80211_timeout_reason - timeout reasons
+ *
+ * @NL80211_TIMEOUT_UNSPECIFIED: Timeout reason unspecified.
+ * @NL80211_TIMEOUT_SCAN: Scan (AP discovery) timed out.
+ * @NL80211_TIMEOUT_AUTH: Authentication timed out.
+ * @NL80211_TIMEOUT_ASSOC: Association timed out.
+ */
+enum nl80211_timeout_reason {
+       NL80211_TIMEOUT_UNSPECIFIED,
+       NL80211_TIMEOUT_SCAN,
+       NL80211_TIMEOUT_AUTH,
+       NL80211_TIMEOUT_ASSOC,
+};
+
 /**
  * enum nl80211_scan_flags -  scan request control flags
  *
@@ -4966,8 +5038,9 @@ enum nl80211_sched_scan_plan {
 /**
  * struct nl80211_bss_select_rssi_adjust - RSSI adjustment parameters.
  *
- * @band: band of BSS that must match for RSSI value adjustment.
- * @delta: value used to adjust the RSSI value of matching BSS.
+ * @band: band of BSS that must match for RSSI value adjustment. The value
+ *     of this field is according to &enum nl80211_band.
+ * @delta: value used to adjust the RSSI value of matching BSS in dB.
  */
 struct nl80211_bss_select_rssi_adjust {
        __u8 band;
@@ -5007,21 +5080,6 @@ enum nl80211_bss_select_attr {
        NL80211_BSS_SELECT_ATTR_MAX = __NL80211_BSS_SELECT_ATTR_AFTER_LAST - 1
 };
 
-/**
- * enum nl80211_nan_dual_band_conf - NAN dual band configuration
- *
- * Defines the NAN dual band mode of operation
- *
- * @NL80211_NAN_BAND_DEFAULT: device default mode
- * @NL80211_NAN_BAND_2GHZ: 2.4GHz mode
- * @NL80211_NAN_BAND_5GHZ: 5GHz mode
-  */
-enum nl80211_nan_dual_band_conf {
-       NL80211_NAN_BAND_DEFAULT        = 1 << 0,
-       NL80211_NAN_BAND_2GHZ           = 1 << 1,
-       NL80211_NAN_BAND_5GHZ           = 1 << 2,
-};
-
 /**
  * enum nl80211_nan_function_type - NAN function type
  *
index 375d812fea36f31dc6fa353be96bc30ba87ce20b..7f41f7d0000f9f0ee36c274d88ad0d330fa8f5d6 100644 (file)
@@ -1,6 +1,6 @@
 
 /*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2017 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -331,6 +331,8 @@ enum ovs_key_attr {
        OVS_KEY_ATTR_CT_ZONE,   /* u16 connection tracking zone. */
        OVS_KEY_ATTR_CT_MARK,   /* u32 connection tracking mark */
        OVS_KEY_ATTR_CT_LABELS, /* 16-octet connection tracking label */
+       OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,   /* struct ovs_key_ct_tuple_ipv4 */
+       OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,   /* struct ovs_key_ct_tuple_ipv6 */
 
 #ifdef __KERNEL__
        OVS_KEY_ATTR_TUNNEL_INFO,  /* struct ip_tunnel_info */
@@ -446,9 +448,13 @@ struct ovs_key_nd {
        __u8    nd_tll[ETH_ALEN];
 };
 
-#define OVS_CT_LABELS_LEN      16
+#define OVS_CT_LABELS_LEN_32   4
+#define OVS_CT_LABELS_LEN      (OVS_CT_LABELS_LEN_32 * sizeof(__u32))
 struct ovs_key_ct_labels {
-       __u8    ct_labels[OVS_CT_LABELS_LEN];
+       union {
+               __u8    ct_labels[OVS_CT_LABELS_LEN];
+               __u32   ct_labels_32[OVS_CT_LABELS_LEN_32];
+       };
 };
 
 /* OVS_KEY_ATTR_CT_STATE flags */
@@ -468,6 +474,22 @@ struct ovs_key_ct_labels {
 
 #define OVS_CS_F_NAT_MASK (OVS_CS_F_SRC_NAT | OVS_CS_F_DST_NAT)
 
+struct ovs_key_ct_tuple_ipv4 {
+       __be32 ipv4_src;
+       __be32 ipv4_dst;
+       __be16 src_port;
+       __be16 dst_port;
+       __u8   ipv4_proto;
+};
+
+struct ovs_key_ct_tuple_ipv6 {
+       __be32 ipv6_src[4];
+       __be32 ipv6_dst[4];
+       __be16 src_port;
+       __be16 dst_port;
+       __u8   ipv6_proto;
+};
+
 /**
  * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
  * @OVS_FLOW_ATTR_KEY: Nested %OVS_KEY_ATTR_* attributes specifying the flow
@@ -652,6 +674,10 @@ struct ovs_action_hash {
  * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
  * @OVS_CT_ATTR_NAT: Nested OVS_NAT_ATTR_* for performing L3 network address
  * translation (NAT) on the packet.
+ * @OVS_CT_ATTR_FORCE_COMMIT: Like %OVS_CT_ATTR_COMMIT, but instead of doing
+ * nothing if the connection is already committed, it checks that the current
+ * packet is in the conntrack entry's original direction.  If the
+ * directionality does not match, it deletes the existing conntrack entry and
+ * commits a new one.
  */
 enum ovs_ct_attr {
        OVS_CT_ATTR_UNSPEC,
@@ -662,6 +688,7 @@ enum ovs_ct_attr {
        OVS_CT_ATTR_HELPER,     /* netlink helper to assist detection of
                                   related connections. */
        OVS_CT_ATTR_NAT,        /* Nested OVS_NAT_ATTR_* */
+       OVS_CT_ATTR_FORCE_COMMIT,  /* No argument */
        __OVS_CT_ATTR_MAX
 };
 
index a4dcd88ec2718621c2cf8e801566e5264c75c071..7a69f2a4ca0c06a68487ff382c6b84f8acab323b 100644 (file)
@@ -4,60 +4,7 @@
 #include <linux/types.h>
 #include <linux/pkt_sched.h>
 
-#ifdef __KERNEL__
-/* I think i could have done better macros ; for now this is stolen from
- * some arch/mips code - jhs
-*/
-#define _TC_MAKE32(x) ((x))
-
-#define _TC_MAKEMASK1(n) (_TC_MAKE32(1) << _TC_MAKE32(n))
-#define _TC_MAKEMASK(v,n) (_TC_MAKE32((_TC_MAKE32(1)<<(v))-1) << _TC_MAKE32(n))
-#define _TC_MAKEVALUE(v,n) (_TC_MAKE32(v) << _TC_MAKE32(n))
-#define _TC_GETVALUE(v,n,m) ((_TC_MAKE32(v) & _TC_MAKE32(m)) >> _TC_MAKE32(n))
-
-/* verdict bit breakdown 
- *
-bit 0: when set -> this packet has been munged already
-
-bit 1: when set -> It is ok to munge this packet
-
-bit 2,3,4,5: Reclassify counter - sort of reverse TTL - if exceeded
-assume loop
-
-bit 6,7: Where this packet was last seen 
-0: Above the transmit example at the socket level
-1: on the Ingress
-2: on the Egress
-
-bit 8: when set --> Request not to classify on ingress. 
-
-bits 9,10,11: redirect counter -  redirect TTL. Loop avoidance
-
- *
- * */
-
-#define S_TC_FROM          _TC_MAKE32(6)
-#define M_TC_FROM          _TC_MAKEMASK(2,S_TC_FROM)
-#define G_TC_FROM(x)       _TC_GETVALUE(x,S_TC_FROM,M_TC_FROM)
-#define V_TC_FROM(x)       _TC_MAKEVALUE(x,S_TC_FROM)
-#define SET_TC_FROM(v,n)   ((V_TC_FROM(n)) | (v & ~M_TC_FROM))
-#define AT_STACK       0x0
-#define AT_INGRESS     0x1
-#define AT_EGRESS      0x2
-
-#define TC_NCLS          _TC_MAKEMASK1(8)
-#define SET_TC_NCLS(v)   ( TC_NCLS | (v & ~TC_NCLS))
-#define CLR_TC_NCLS(v)   ( v & ~TC_NCLS)
-
-#define S_TC_AT          _TC_MAKE32(12)
-#define M_TC_AT          _TC_MAKEMASK(2,S_TC_AT)
-#define G_TC_AT(x)       _TC_GETVALUE(x,S_TC_AT,M_TC_AT)
-#define V_TC_AT(x)       _TC_MAKEVALUE(x,S_TC_AT)
-#define SET_TC_AT(v,n)   ((V_TC_AT(n)) | (v & ~M_TC_AT))
-
-#define MAX_REC_LOOP 4
-#define MAX_RED_LOOP 4
-#endif
+#define TC_COOKIE_MAX_SIZE 16
 
 /* Action attributes */
 enum {
@@ -67,6 +14,7 @@ enum {
        TCA_ACT_INDEX,
        TCA_ACT_STATS,
        TCA_ACT_PAD,
+       TCA_ACT_COOKIE,
        __TCA_ACT_MAX
 };
 
@@ -155,8 +103,10 @@ enum {
 #define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1)
 
 /* tca flags definitions */
-#define TCA_CLS_FLAGS_SKIP_HW  (1 << 0)
-#define TCA_CLS_FLAGS_SKIP_SW  (1 << 1)
+#define TCA_CLS_FLAGS_SKIP_HW  (1 << 0) /* don't offload filter to HW */
+#define TCA_CLS_FLAGS_SKIP_SW  (1 << 1) /* don't use filter in SW */
+#define TCA_CLS_FLAGS_IN_HW    (1 << 2) /* filter is offloaded to HW */
+#define TCA_CLS_FLAGS_NOT_IN_HW (1 << 3) /* filter isn't offloaded to HW */
 
 /* U32 filters */
 
@@ -471,6 +421,17 @@ enum {
        TCA_FLOWER_KEY_ICMPV6_TYPE,     /* u8 */
        TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,/* u8 */
 
+       TCA_FLOWER_KEY_ARP_SIP,         /* be32 */
+       TCA_FLOWER_KEY_ARP_SIP_MASK,    /* be32 */
+       TCA_FLOWER_KEY_ARP_TIP,         /* be32 */
+       TCA_FLOWER_KEY_ARP_TIP_MASK,    /* be32 */
+       TCA_FLOWER_KEY_ARP_OP,          /* u8 */
+       TCA_FLOWER_KEY_ARP_OP_MASK,     /* u8 */
+       TCA_FLOWER_KEY_ARP_SHA,         /* ETH_ALEN */
+       TCA_FLOWER_KEY_ARP_SHA_MASK,    /* ETH_ALEN */
+       TCA_FLOWER_KEY_ARP_THA,         /* ETH_ALEN */
+       TCA_FLOWER_KEY_ARP_THA_MASK,    /* ETH_ALEN */
+
        __TCA_FLOWER_MAX,
 };
 
diff --git a/include/uapi/linux/psample.h b/include/uapi/linux/psample.h
new file mode 100644 (file)
index 0000000..ed48996
--- /dev/null
@@ -0,0 +1,35 @@
+#ifndef __UAPI_PSAMPLE_H
+#define __UAPI_PSAMPLE_H
+
+enum {
+       /* sampled packet metadata */
+       PSAMPLE_ATTR_IIFINDEX,
+       PSAMPLE_ATTR_OIFINDEX,
+       PSAMPLE_ATTR_ORIGSIZE,
+       PSAMPLE_ATTR_SAMPLE_GROUP,
+       PSAMPLE_ATTR_GROUP_SEQ,
+       PSAMPLE_ATTR_SAMPLE_RATE,
+       PSAMPLE_ATTR_DATA,
+
+       /* commands attributes */
+       PSAMPLE_ATTR_GROUP_REFCOUNT,
+
+       __PSAMPLE_ATTR_MAX
+};
+
+enum psample_command {
+       PSAMPLE_CMD_SAMPLE,
+       PSAMPLE_CMD_GET_GROUP,
+       PSAMPLE_CMD_NEW_GROUP,
+       PSAMPLE_CMD_DEL_GROUP,
+};
+
+/* Can be overridden at runtime by module option */
+#define PSAMPLE_ATTR_MAX (__PSAMPLE_ATTR_MAX - 1)
+
+#define PSAMPLE_NL_MCGRP_CONFIG_NAME "config"
+#define PSAMPLE_NL_MCGRP_SAMPLE_NAME "packets"
+#define PSAMPLE_GENL_NAME "psample"
+#define PSAMPLE_GENL_VERSION 1
+
+#endif
index 0f9265cb2a96999f92d415a5cca12af237451433..3833113ab2c049080bf24008c2c7b50bc393de81 100644 (file)
 #define RDS_GET_MR_FOR_DEST            7
 #define SO_RDS_TRANSPORT               8
 
+/* Socket option to tap receive path latency
+ *     SO_RDS: SO_RDS_MSG_RXPATH_LATENCY
+ *     Format: struct rds_rx_trace_so
+ */
+#define SO_RDS_MSG_RXPATH_LATENCY      10
+
 /* supported values for SO_RDS_TRANSPORT */
 #define        RDS_TRANS_IB    0
 #define        RDS_TRANS_IWARP 1
  *     the same as for the GET_MR setsockopt.
  * RDS_CMSG_RDMA_STATUS (recvmsg)
  *     Returns the status of a completed RDMA operation.
+ * RDS_CMSG_RXPATH_LATENCY(recvmsg)
+ *     Returns RDS message latencies in various stages of the receive
+ *     path in ns. It is set per socket using the SO_RDS_MSG_RXPATH_LATENCY
+ *     socket option. Legitimate points are defined in
+ *     enum rds_message_rxpath_latency. More points can be added in
+ *     the future. CMSG format is struct rds_cmsg_rx_trace.
  */
 #define RDS_CMSG_RDMA_ARGS             1
 #define RDS_CMSG_RDMA_DEST             2
 #define RDS_CMSG_ATOMIC_CSWP           7
 #define RDS_CMSG_MASKED_ATOMIC_FADD    8
 #define RDS_CMSG_MASKED_ATOMIC_CSWP    9
+#define RDS_CMSG_RXPATH_LATENCY                11
 
 #define RDS_INFO_FIRST                 10000
 #define RDS_INFO_COUNTERS              10000
@@ -171,6 +185,25 @@ struct rds_info_rdma_connection {
        uint32_t        rdma_mr_size;
 };
 
+/* RDS message Receive Path Latency points */
+enum rds_message_rxpath_latency {
+       RDS_MSG_RX_HDR_TO_DGRAM_START = 0,
+       RDS_MSG_RX_DGRAM_REASSEMBLE,
+       RDS_MSG_RX_DGRAM_DELIVERED,
+       RDS_MSG_RX_DGRAM_TRACE_MAX
+};
+
+struct rds_rx_trace_so {
+       u8 rx_traces;
+       u8 rx_trace_pos[RDS_MSG_RX_DGRAM_TRACE_MAX];
+};
+
+struct rds_cmsg_rx_trace {
+       u8 rx_traces;
+       u8 rx_trace_pos[RDS_MSG_RX_DGRAM_TRACE_MAX];
+       u64 rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
+};
+
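
A hedged sketch of the intended flow: arm a trace point with the socket option,
then read the per-message latencies out of the recvmsg() ancillary data. Note
that the structs above use kernel-style u8/u64 type names rather than
__u8/__u64, so a userspace build may need the typedefs shown, and SOL_RDS is
assumed to be 276 as in the kernel's socket.h:

#include <string.h>
#include <sys/socket.h>
#include <linux/types.h>

typedef __u8 u8;        /* the structs above use kernel type names */
typedef __u64 u64;
#include <linux/rds.h>

#ifndef SOL_RDS
#define SOL_RDS 276     /* assumed; matches the kernel's socket.h */
#endif

static void rds_trace_setup(int fd)
{
        struct rds_rx_trace_so trace;

        memset(&trace, 0, sizeof(trace));
        trace.rx_traces = 1;    /* arm a single trace point */
        trace.rx_trace_pos[0] = RDS_MSG_RX_DGRAM_DELIVERED;
        setsockopt(fd, SOL_RDS, SO_RDS_MSG_RXPATH_LATENCY,
                   &trace, sizeof(trace));
}

/* msg was filled in by recvmsg() with ancillary space supplied */
static void rds_trace_read(struct msghdr *msg)
{
        struct cmsghdr *c;

        for (c = CMSG_FIRSTHDR(msg); c; c = CMSG_NXTHDR(msg, c)) {
                if (c->cmsg_level != SOL_RDS ||
                    c->cmsg_type != RDS_CMSG_RXPATH_LATENCY)
                        continue;
                /* CMSG_DATA(c) points at a struct rds_cmsg_rx_trace;
                 * rx_trace[i] is the latency in ns of the stage named
                 * by rx_trace_pos[i]. */
        }
}
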
 /*
  * Congestion monitoring.
  * Congestion control in RDS happens at the host connection
index e14377f2ec273a183ae3b2e3da887ef2f893f7d1..8c93ad1ef9abee1b539459d237036b18dab536e2 100644 (file)
@@ -350,6 +350,7 @@ struct rtnexthop {
 #define RTNH_F_ONLINK          4       /* Gateway is forced on link    */
 #define RTNH_F_OFFLOAD         8       /* offloaded route */
 #define RTNH_F_LINKDOWN                16      /* carrier-down on nexthop */
+#define RTNH_F_UNRESOLVED      32      /* The entry is unresolved (ipmr) */
 
 #define RTNH_COMPARE_MASK      (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)
 
index a406adcc0793e0f3a09706cb693867b49d0ef914..a91a9cccbae69024dcce98266e4084f3dee86b8a 100644 (file)
@@ -115,6 +115,10 @@ typedef __s32 sctp_assoc_t;
 #define SCTP_PR_SUPPORTED      113
 #define SCTP_DEFAULT_PRINFO    114
 #define SCTP_PR_ASSOC_STATUS   115
+#define SCTP_ENABLE_STREAM_RESET       118
+#define SCTP_RESET_STREAMS     119
+#define SCTP_RESET_ASSOC       120
+#define SCTP_ADD_STREAMS       121
 
 /* PR-SCTP policies */
 #define SCTP_PR_SCTP_NONE      0x0000
@@ -138,6 +142,15 @@ typedef __s32 sctp_assoc_t;
 #define SCTP_PR_RTX_ENABLED(x) (SCTP_PR_POLICY(x) == SCTP_PR_SCTP_RTX)
 #define SCTP_PR_PRIO_ENABLED(x)        (SCTP_PR_POLICY(x) == SCTP_PR_SCTP_PRIO)
 
+/* For enable stream reset */
+#define SCTP_ENABLE_RESET_STREAM_REQ   0x01
+#define SCTP_ENABLE_RESET_ASSOC_REQ    0x02
+#define SCTP_ENABLE_CHANGE_ASSOC_REQ   0x04
+#define SCTP_ENABLE_STRRESET_MASK      0x07
+
+#define SCTP_STREAM_RESET_INCOMING     0x01
+#define SCTP_STREAM_RESET_OUTGOING     0x02
+
 /* These are bit fields for msghdr->msg_flags.  See section 5.1.  */
 /* On user space Linux, these live in <bits/socket.h> as an enum.  */
 enum sctp_msg_flags {
@@ -1008,4 +1021,17 @@ struct sctp_info {
        __u32   __reserved3;
 };
 
+struct sctp_reset_streams {
+       sctp_assoc_t srs_assoc_id;
+       uint16_t srs_flags;
+       uint16_t srs_number_streams;    /* 0 == ALL */
+       uint16_t srs_stream_list[];     /* list if srs_number_streams is not 0 */
+};
+
+struct sctp_add_streams {
+       sctp_assoc_t sas_assoc_id;
+       uint16_t sas_instrms;
+       uint16_t sas_outstrms;
+};
+
 #endif /* _UAPI_SCTP_H */
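
A hedged sketch of driving the new options: the enable knob is assumed to take
a struct sctp_assoc_value, as other per-association SCTP toggles do, and
SCTP_RESET_STREAMS takes the variable-length struct sctp_reset_streams defined
above. Error handling is omitted:

#include <stdint.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <netinet/in.h>         /* IPPROTO_SCTP */
#include <linux/sctp.h>         /* new defines and structs from this hunk */

/* Enable reset requests, then ask for two outgoing streams to be reset. */
static void sctp_reset_two_streams(int fd, sctp_assoc_t assoc)
{
        struct sctp_assoc_value av = {
                .assoc_id = assoc,
                .assoc_value = SCTP_ENABLE_RESET_STREAM_REQ,
        };
        struct sctp_reset_streams *srs;
        size_t len = sizeof(*srs) + 2 * sizeof(uint16_t);

        /* argument type is an assumption, mirroring other SCTP toggles */
        setsockopt(fd, IPPROTO_SCTP, SCTP_ENABLE_STREAM_RESET,
                   &av, sizeof(av));

        srs = calloc(1, len);
        srs->srs_assoc_id = assoc;
        srs->srs_flags = SCTP_STREAM_RESET_OUTGOING;
        srs->srs_number_streams = 2;    /* 0 would mean "all streams" */
        srs->srs_stream_list[0] = 0;
        srs->srs_stream_list[1] = 1;
        setsockopt(fd, IPPROTO_SCTP, SCTP_RESET_STREAMS, srs, len);
        free(srs);
}
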
index 052799e4d751c805de01bc8ed47c3d0a1ecde936..61df8d392f41f4c197419b2260481d1a41897318 100644 (file)
@@ -14,6 +14,8 @@
 #ifndef _UAPI_LINUX_SEG6_H
 #define _UAPI_LINUX_SEG6_H
 
+#include <linux/types.h>
+
 /*
  * SRH
  */
index b652dfd51bc5f9db3f8c624f275c1c03edc56019..e691c753fc3f379645f8ba82e74fcc6d34552881 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _UAPI_LINUX_SEG6_HMAC_H
 #define _UAPI_LINUX_SEG6_HMAC_H
 
+#include <linux/types.h>
 #include <linux/seg6.h>
 
 #define SEG6_HMAC_SECRET_LEN   64
index 0f7dbd280a9c6492835862a812333e270ed273db..7a7183d4062af9788fd7358278d74d83aef70b18 100644 (file)
@@ -33,6 +33,8 @@ enum {
        SEG6_IPTUN_MODE_ENCAP,
 };
 
+#ifdef __KERNEL__
+
 static inline size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo)
 {
        int encap = (tuninfo->mode == SEG6_IPTUN_MODE_ENCAP);
@@ -42,3 +44,5 @@ static inline size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo)
 }
 
 #endif
+
+#endif
diff --git a/include/uapi/linux/smc.h b/include/uapi/linux/smc.h
new file mode 100644 (file)
index 0000000..ab1dea8
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ *  Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ *  Definitions for generic netlink based configuration of an SMC-R PNET table
+ *
+ *  Copyright IBM Corp. 2016
+ *
+ *  Author(s):  Thomas Richter <tmricht@linux.vnet.ibm.com>
+ */
+
+#ifndef _UAPI_LINUX_SMC_H_
+#define _UAPI_LINUX_SMC_H_
+
+/* Netlink SMC_PNETID attributes */
+enum {
+       SMC_PNETID_UNSPEC,
+       SMC_PNETID_NAME,
+       SMC_PNETID_ETHNAME,
+       SMC_PNETID_IBNAME,
+       SMC_PNETID_IBPORT,
+       __SMC_PNETID_MAX,
+       SMC_PNETID_MAX = __SMC_PNETID_MAX - 1
+};
+
+enum {                         /* SMC PNET Table commands */
+       SMC_PNETID_GET = 1,
+       SMC_PNETID_ADD,
+       SMC_PNETID_DEL,
+       SMC_PNETID_FLUSH
+};
+
+#define SMCR_GENL_FAMILY_NAME          "SMC_PNETID"
+#define SMCR_GENL_FAMILY_VERSION       1
+
+#endif /* _UAPI_LINUX_SMC_H */
diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h
new file mode 100644 (file)
index 0000000..0063919
--- /dev/null
@@ -0,0 +1,85 @@
+#ifndef _UAPI_SMC_DIAG_H_
+#define _UAPI_SMC_DIAG_H_
+
+#include <linux/types.h>
+#include <linux/inet_diag.h>
+#include <rdma/ib_verbs.h>
+
+/* Request structure */
+struct smc_diag_req {
+       __u8    diag_family;
+       __u8    pad[2];
+       __u8    diag_ext;               /* Query extended information */
+       struct inet_diag_sockid id;
+};
+
+/* Base info structure. It contains socket identity (addrs/ports/cookie) based
+ * on the internal clcsock, and more SMC-related socket data
+ */
+struct smc_diag_msg {
+       __u8    diag_family;
+       __u8    diag_state;
+       __u8    diag_fallback;
+       __u8    diag_shutdown;
+       struct inet_diag_sockid id;
+
+       __u32   diag_uid;
+       __u64   diag_inode;
+};
+
+/* Extensions */
+
+enum {
+       SMC_DIAG_NONE,
+       SMC_DIAG_CONNINFO,
+       SMC_DIAG_LGRINFO,
+       SMC_DIAG_SHUTDOWN,
+       __SMC_DIAG_MAX,
+};
+
+#define SMC_DIAG_MAX (__SMC_DIAG_MAX - 1)
+
+/* SMC_DIAG_CONNINFO */
+
+struct smc_diag_cursor {
+       __u16   reserved;
+       __u16   wrap;
+       __u32   count;
+};
+
+struct smc_diag_conninfo {
+       __u32                   token;          /* unique connection id */
+       __u32                   sndbuf_size;    /* size of send buffer */
+       __u32                   rmbe_size;      /* size of RMB element */
+       __u32                   peer_rmbe_size; /* size of peer RMB element */
+       /* local RMB element cursors */
+       struct smc_diag_cursor  rx_prod;        /* received producer cursor */
+       struct smc_diag_cursor  rx_cons;        /* received consumer cursor */
+       /* peer RMB element cursors */
+       struct smc_diag_cursor  tx_prod;        /* sent producer cursor */
+       struct smc_diag_cursor  tx_cons;        /* sent consumer cursor */
+       __u8                    rx_prod_flags;  /* received producer flags */
+       __u8                    rx_conn_state_flags; /* recvd connection flags*/
+       __u8                    tx_prod_flags;  /* sent producer flags */
+       __u8                    tx_conn_state_flags; /* sent connection flags*/
+       /* send buffer cursors */
+       struct smc_diag_cursor  tx_prep;        /* prepared to be sent cursor */
+       struct smc_diag_cursor  tx_sent;        /* sent cursor */
+       struct smc_diag_cursor  tx_fin;         /* confirmed sent cursor */
+};
+
+/* SMC_DIAG_LGRINFO */
+
+struct smc_diag_linkinfo {
+       __u8 link_id;                   /* link identifier */
+       __u8 ibname[IB_DEVICE_NAME_MAX]; /* name of the RDMA device */
+       __u8 ibport;                    /* RDMA device port number */
+       __u8 gid[40];                   /* local GID */
+       __u8 peer_gid[40];              /* peer GID */
+};
+
+struct smc_diag_lgrinfo {
+       struct smc_diag_linkinfo        lnk[1];
+       __u8                            role;
+};
+#endif /* _UAPI_SMC_DIAG_H_ */
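
The diag module answers SOCK_DIAG_BY_FAMILY dump requests for SMC sockets. A
hedged request sketch follows; because linux/smc_diag.h pulls in the kernel-only
rdma/ib_verbs.h (for IB_DEVICE_NAME_MAX), the request layout is restated
locally, and the AF_SMC value is an assumption:

#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>

#ifndef AF_SMC
#define AF_SMC 43                       /* assumed value for this kernel */
#endif

/* Mirrors struct smc_diag_req above; the real header drags in a
 * kernel-only RDMA include, so the layout is restated here. */
struct smc_diag_req_local {
        __u8 diag_family;
        __u8 pad[2];
        __u8 diag_ext;
        struct inet_diag_sockid id;
};

static int smc_diag_dump(void)
{
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
        struct {
                struct nlmsghdr nlh;
                struct smc_diag_req_local req;
        } msg;

        memset(&msg, 0, sizeof(msg));
        msg.nlh.nlmsg_len = sizeof(msg);
        msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
        msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
        msg.req.diag_family = AF_SMC;

        /* each reply carries a struct smc_diag_msg */
        return send(fd, &msg, sizeof(msg), 0);
}
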
index e7a31f8306903f53bc5881ae4c271f85cad2e361..3b2bed7ca9a4d92c5671e614f2bc598668805f75 100644 (file)
@@ -240,6 +240,7 @@ enum
        LINUX_MIB_SACKMERGED,
        LINUX_MIB_SACKSHIFTFALLBACK,
        LINUX_MIB_TCPBACKLOGDROP,
+       LINUX_MIB_PFMEMALLOCDROP,
        LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */
        LINUX_MIB_TCPDEFERACCEPTDROP,
        LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */
index e3db7403296fdf8a7f72ec44523acea9ba8b0b9b..ba62ddf0e58ab3dba056ae17b5b5379e98b7627e 100644 (file)
@@ -4,6 +4,7 @@ header-y += tc_defact.h
 header-y += tc_gact.h
 header-y += tc_ipt.h
 header-y += tc_mirred.h
+header-y += tc_sample.h
 header-y += tc_nat.h
 header-y += tc_pedit.h
 header-y += tc_skbedit.h
index 8ac8041ab5f134b74391f5470cef16f3f435ed1c..a11bb355dbfb2ba9b65a2e2b6c2eac9dc3b0e6c0 100644 (file)
@@ -21,7 +21,8 @@ enum {
        TCA_CSUM_UPDATE_FLAG_IGMP    = 4,
        TCA_CSUM_UPDATE_FLAG_TCP     = 8,
        TCA_CSUM_UPDATE_FLAG_UDP     = 16,
-       TCA_CSUM_UPDATE_FLAG_UDPLITE = 32
+       TCA_CSUM_UPDATE_FLAG_UDPLITE = 32,
+       TCA_CSUM_UPDATE_FLAG_SCTP    = 64,
 };
 
 struct tc_csum {
index cd18360eca249be09c6a649efcb53a147ed76fd2..7c2817866c97e2f72f3f72315e42a34443eff022 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <linux/pkt_cls.h>
+#include <linux/ife.h>
 
 #define TCA_ACT_IFE 25
 /* Flag bits for now just encoding/decoding; mutually exclusive */
@@ -28,13 +29,4 @@ enum {
 };
 #define TCA_IFE_MAX (__TCA_IFE_MAX - 1)
 
-#define IFE_META_SKBMARK 1
-#define IFE_META_HASHID 2
-#define        IFE_META_PRIO 3
-#define        IFE_META_QMAP 4
-#define        IFE_META_TCINDEX 5
-/*Can be overridden at runtime by module option*/
-#define        __IFE_META_MAX 6
-#define IFE_META_MAX (__IFE_META_MAX - 1)
-
 #endif
index 6389959a5157cf1f43338a3742093f66b14b564e..143d2b31a316624fbec24d40b7fb04da2b7eede5 100644 (file)
@@ -11,10 +11,41 @@ enum {
        TCA_PEDIT_TM,
        TCA_PEDIT_PARMS,
        TCA_PEDIT_PAD,
+       TCA_PEDIT_PARMS_EX,
+       TCA_PEDIT_KEYS_EX,
+       TCA_PEDIT_KEY_EX,
        __TCA_PEDIT_MAX
 };
 #define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1)
 
+enum {
+       TCA_PEDIT_KEY_EX_HTYPE = 1,
+       TCA_PEDIT_KEY_EX_CMD = 2,
+       __TCA_PEDIT_KEY_EX_MAX
+};
+#define TCA_PEDIT_KEY_EX_MAX (__TCA_PEDIT_KEY_EX_MAX - 1)
+
+ /* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is a special case for legacy users. It
+  * means no specific header type - offset is relative to the network layer
+  */
+enum pedit_header_type {
+       TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK = 0,
+       TCA_PEDIT_KEY_EX_HDR_TYPE_ETH = 1,
+       TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 = 2,
+       TCA_PEDIT_KEY_EX_HDR_TYPE_IP6 = 3,
+       TCA_PEDIT_KEY_EX_HDR_TYPE_TCP = 4,
+       TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5,
+       __PEDIT_HDR_TYPE_MAX,
+};
+#define TCA_PEDIT_HDR_TYPE_MAX (__PEDIT_HDR_TYPE_MAX - 1)
+
+enum pedit_cmd {
+       TCA_PEDIT_KEY_EX_CMD_SET = 0,
+       TCA_PEDIT_KEY_EX_CMD_ADD = 1,
+       __PEDIT_CMD_MAX,
+};
+#define TCA_PEDIT_CMD_MAX (__PEDIT_CMD_MAX - 1)
+
 struct tc_pedit_key {
        __u32           mask;  /* AND */
        __u32           val;   /*XOR */
diff --git a/include/uapi/linux/tc_act/tc_sample.h b/include/uapi/linux/tc_act/tc_sample.h
new file mode 100644 (file)
index 0000000..edc9058
--- /dev/null
@@ -0,0 +1,26 @@
+#ifndef __LINUX_TC_SAMPLE_H
+#define __LINUX_TC_SAMPLE_H
+
+#include <linux/types.h>
+#include <linux/pkt_cls.h>
+#include <linux/if_ether.h>
+
+#define TCA_ACT_SAMPLE 26
+
+struct tc_sample {
+       tc_gen;
+};
+
+enum {
+       TCA_SAMPLE_UNSPEC,
+       TCA_SAMPLE_TM,
+       TCA_SAMPLE_PARMS,
+       TCA_SAMPLE_RATE,
+       TCA_SAMPLE_TRUNC_SIZE,
+       TCA_SAMPLE_PSAMPLE_GROUP,
+       TCA_SAMPLE_PAD,
+       __TCA_SAMPLE_MAX
+};
+#define TCA_SAMPLE_MAX (__TCA_SAMPLE_MAX - 1)
+
+#endif
index c53de2691cecec43f4dbde55dd69da3c07da719f..38a2b07afdff255914d29c7906321c19338786c0 100644 (file)
@@ -116,6 +116,7 @@ enum {
 #define TCP_SAVE_SYN           27      /* Record SYN headers for new connections */
 #define TCP_SAVED_SYN          28      /* Get SYN headers recorded for connection */
 #define TCP_REPAIR_WINDOW      29      /* Get/set window parameters */
+#define TCP_FASTOPEN_CONNECT   30      /* Attempt FastOpen with connect */
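
TCP_FASTOPEN_CONNECT moves client-side Fast Open into the socket itself: after
setting the option, connect() completes without emitting a SYN, and the first
write sends the SYN, carrying the data in it when a cookie for the destination
is already cached. A minimal sketch:

#include <stddef.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_FASTOPEN_CONNECT
#define TCP_FASTOPEN_CONNECT 30         /* from the hunk above */
#endif

static int tfo_connect_send(const struct sockaddr_in *dst,
                            const void *buf, size_t len)
{
        int one = 1;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
        connect(fd, (const struct sockaddr *)dst, sizeof(*dst));
        /* the SYN goes out here; data rides in it if a cookie is cached */
        return send(fd, buf, len, 0);
}
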
 
 struct tcp_repair_opt {
        __u32   opt_code;
@@ -226,6 +227,8 @@ enum {
        TCP_NLA_BUSY,           /* Time (usec) busy sending data */
        TCP_NLA_RWND_LIMITED,   /* Time (usec) limited by receive window */
        TCP_NLA_SNDBUF_LIMITED, /* Time (usec) limited by send buffer */
+       TCP_NLA_DATA_SEGS_OUT,  /* Data pkts sent including retransmission */
+       TCP_NLA_TOTAL_RETRANS,  /* Data pkts retransmitted */
 };
 
 /* for TCP_MD5SIG socket option */
index bf049e8fe31bf5036eed3e47df90118a240a762a..5351b08c897a22568e586ccbf3da3b28b47aeb8f 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * include/uapi/linux/tipc.h: Header for TIPC socket interface
  *
- * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2003-2006, 2015-2016 Ericsson AB
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -220,7 +220,7 @@ struct sockaddr_tipc {
 #define TIPC_DESTNAME  3       /* destination name */
 
 /*
- * TIPC-specific socket option values
+ * TIPC-specific socket option names
  */
 
 #define TIPC_IMPORTANCE                127     /* Default: TIPC_LOW_IMPORTANCE */
@@ -229,6 +229,8 @@ struct sockaddr_tipc {
 #define TIPC_CONN_TIMEOUT      130     /* Default: 8000 (ms)  */
 #define TIPC_NODE_RECVQ_DEPTH  131     /* Default: none (read only) */
 #define TIPC_SOCK_RECVQ_DEPTH  132     /* Default: none (read only) */
+#define TIPC_MCAST_BROADCAST    133     /* Default: TIPC selects. No arg */
+#define TIPC_MCAST_REPLICAST    134     /* Default: TIPC selects. No arg */
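
Both options force a multicast transmission method instead of letting TIPC
choose per destination set. Since neither takes an argument, a zero-length
setsockopt is the assumed calling convention; the SOL_TIPC value is an
assumption matching the kernel's socket.h:

#include <sys/socket.h>
#include <linux/tipc.h>

#ifndef SOL_TIPC
#define SOL_TIPC 271            /* assumed; matches the kernel's socket.h */
#endif

/* No-argument option: a zero-length setsockopt is assumed to apply it. */
static int tipc_force_replicast(int fd)
{
        return setsockopt(fd, SOL_TIPC, TIPC_MCAST_REPLICAST, NULL, 0);
}
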
 
 /*
  * Maximum sizes of TIPC bearer-related names (including terminating NULL)
index 3ed3e46c1b1f7ff4688a52edefbba051fba600bb..4f0ab3a548ad429963d803c802041b15a1332121 100644 (file)
@@ -10,4 +10,6 @@ struct sockaddr_un {
        char sun_path[UNIX_PATH_MAX];   /* pathname */
 };
 
+#define SIOCUNIXFILE (SIOCPROTOPRIVATE + 0) /* open a socket file with O_PATH */
+
 #endif /* _LINUX_UN_H */
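
A hedged sketch of the new ioctl: issued on an AF_UNIX socket bound to a
filesystem path, it is expected to return a new O_PATH descriptor for the
socket file, which can then be used with fstatat()-style calls without racing
against a rename of the path:

#include <sys/ioctl.h>
#include <linux/sockios.h>      /* SIOCPROTOPRIVATE */
#include <linux/un.h>

/* Expected to return a new O_PATH fd referring to the socket file. */
static int unix_sock_path_fd(int sockfd)
{
        return ioctl(sockfd, SIOCUNIXFILE);
}
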
index fae6cdaeb56d6c6a9e12f787041e0aff406bf78d..da7cd62bace746879e154829b5ecad5f6cd87c00 100644 (file)
@@ -61,19 +61,24 @@ enum {
  */
 
 struct mlx5_ib_alloc_ucontext_req {
-       __u32   total_num_uuars;
-       __u32   num_low_latency_uuars;
+       __u32   total_num_bfregs;
+       __u32   num_low_latency_bfregs;
+};
+
+enum mlx5_lib_caps {
+       MLX5_LIB_CAP_4K_UAR     = (u64)1 << 0,
 };
 
 struct mlx5_ib_alloc_ucontext_req_v2 {
-       __u32   total_num_uuars;
-       __u32   num_low_latency_uuars;
+       __u32   total_num_bfregs;
+       __u32   num_low_latency_bfregs;
        __u32   flags;
        __u32   comp_mask;
        __u8    max_cqe_version;
        __u8    reserved0;
        __u16   reserved1;
        __u32   reserved2;
+       __u64   lib_caps;
 };
 
 enum mlx5_ib_alloc_ucontext_resp_mask {
@@ -85,10 +90,21 @@ enum mlx5_user_cmds_supp_uhw {
        MLX5_USER_CMDS_SUPP_UHW_CREATE_AH    = 1 << 1,
 };
 
+/* The eth_min_inline response value is offset by one from the
+ * FW-returned value so that user-space can distinguish older kernels,
+ * which leave this field zeroed (MLX5_USER_INLINE_MODE_NA).
+ */
+enum mlx5_user_inline_mode {
+       MLX5_USER_INLINE_MODE_NA,
+       MLX5_USER_INLINE_MODE_NONE,
+       MLX5_USER_INLINE_MODE_L2,
+       MLX5_USER_INLINE_MODE_IP,
+       MLX5_USER_INLINE_MODE_TCP_UDP,
+};
+
 struct mlx5_ib_alloc_ucontext_resp {
        __u32   qp_tab_size;
        __u32   bf_reg_size;
-       __u32   tot_uuars;
+       __u32   tot_bfregs;
        __u32   cache_line_size;
        __u16   max_sq_desc_sz;
        __u16   max_rq_desc_sz;
@@ -101,8 +117,11 @@ struct mlx5_ib_alloc_ucontext_resp {
        __u32   response_length;
        __u8    cqe_version;
        __u8    cmds_supp_uhw;
-       __u16   reserved2;
+       __u8    eth_min_inline;
+       __u8    reserved2;
        __u64   hca_core_clock_offset;
+       __u32   log_uar_size;
+       __u32   num_uars_per_page;
 };
 
 struct mlx5_ib_alloc_pd_resp {
@@ -241,7 +260,7 @@ struct mlx5_ib_create_qp_rss {
 };
 
 struct mlx5_ib_create_qp_resp {
-       __u32   uuar_index;
+       __u32   bfreg_index;
 };
 
 struct mlx5_ib_alloc_mw {
index 1276474ac3cd9ddf2a87f37312edc661d2ec335c..e1ce4f4fd7fd47fda2c18776573c0f65479c6728 100644 (file)
@@ -1,7 +1,7 @@
 obj-y := core.o
 
 obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o
-obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o
+obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o
 ifeq ($(CONFIG_PERF_EVENTS),y)
 obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
 endif
index 3d55d95dcf49e600fe8f99c92eb57e7cde043208..6b6f41f0b21164a3cc2c26bb71be2b89098bbdbd 100644 (file)
@@ -269,7 +269,7 @@ static const struct bpf_map_ops array_ops = {
        .map_delete_elem = array_map_delete_elem,
 };
 
-static struct bpf_map_type_list array_type __read_mostly = {
+static struct bpf_map_type_list array_type __ro_after_init = {
        .ops = &array_ops,
        .type = BPF_MAP_TYPE_ARRAY,
 };
@@ -283,7 +283,7 @@ static const struct bpf_map_ops percpu_array_ops = {
        .map_delete_elem = array_map_delete_elem,
 };
 
-static struct bpf_map_type_list percpu_array_type __read_mostly = {
+static struct bpf_map_type_list percpu_array_type __ro_after_init = {
        .ops = &percpu_array_ops,
        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
 };
@@ -409,7 +409,7 @@ static const struct bpf_map_ops prog_array_ops = {
        .map_fd_put_ptr = prog_fd_array_put_ptr,
 };
 
-static struct bpf_map_type_list prog_array_type __read_mostly = {
+static struct bpf_map_type_list prog_array_type __ro_after_init = {
        .ops = &prog_array_ops,
        .type = BPF_MAP_TYPE_PROG_ARRAY,
 };
@@ -522,7 +522,7 @@ static const struct bpf_map_ops perf_event_array_ops = {
        .map_release = perf_event_fd_array_release,
 };
 
-static struct bpf_map_type_list perf_event_array_type __read_mostly = {
+static struct bpf_map_type_list perf_event_array_type __ro_after_init = {
        .ops = &perf_event_array_ops,
        .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 };
@@ -564,7 +564,7 @@ static const struct bpf_map_ops cgroup_array_ops = {
        .map_fd_put_ptr = cgroup_fd_array_put_ptr,
 };
 
-static struct bpf_map_type_list cgroup_array_type __read_mostly = {
+static struct bpf_map_type_list cgroup_array_type __ro_after_init = {
        .ops = &cgroup_array_ops,
        .type = BPF_MAP_TYPE_CGROUP_ARRAY,
 };
index 89b7ef41c86bef2892f9a30850ce62fa32eaa7e1..f62d1d56f41d0c5f3e3ef2c12b7dea3eb6578a10 100644 (file)
@@ -213,11 +213,10 @@ __bpf_lru_list_shrink_inactive(struct bpf_lru *lru,
                               enum bpf_lru_list_type tgt_free_type)
 {
        struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
-       struct bpf_lru_node *node, *tmp_node, *first_node;
+       struct bpf_lru_node *node, *tmp_node;
        unsigned int nshrinked = 0;
        unsigned int i = 0;
 
-       first_node = list_first_entry(inactive, struct bpf_lru_node, list);
        list_for_each_entry_safe_reverse(node, tmp_node, inactive, list) {
                if (bpf_lru_node_is_ref(node)) {
                        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
@@ -361,7 +360,8 @@ static void __local_list_add_pending(struct bpf_lru *lru,
        list_add(&node->list, local_pending_list(loc_l));
 }
 
-struct bpf_lru_node *__local_list_pop_free(struct bpf_lru_locallist *loc_l)
+static struct bpf_lru_node *
+__local_list_pop_free(struct bpf_lru_locallist *loc_l)
 {
        struct bpf_lru_node *node;
 
@@ -374,8 +374,8 @@ struct bpf_lru_node *__local_list_pop_free(struct bpf_lru_locallist *loc_l)
        return node;
 }
 
-struct bpf_lru_node *__local_list_pop_pending(struct bpf_lru *lru,
-                                             struct bpf_lru_locallist *loc_l)
+static struct bpf_lru_node *
+__local_list_pop_pending(struct bpf_lru *lru, struct bpf_lru_locallist *loc_l)
 {
        struct bpf_lru_node *node;
        bool force = false;
@@ -558,8 +558,9 @@ void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node)
                bpf_common_lru_push_free(lru, node);
 }
 
-void bpf_common_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
-                            u32 elem_size, u32 nr_elems)
+static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
+                                   u32 node_offset, u32 elem_size,
+                                   u32 nr_elems)
 {
        struct bpf_lru_list *l = &lru->common_lru.lru_list;
        u32 i;
@@ -575,8 +576,9 @@ void bpf_common_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
        }
 }
 
-void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
-                            u32 elem_size, u32 nr_elems)
+static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf,
+                                   u32 node_offset, u32 elem_size,
+                                   u32 nr_elems)
 {
        u32 i, pcpu_entries;
        int cpu;
index 503d4211988afe1d3eddd6d39aba520dc4c245ef..f45827e205d3f491a818a024dca5122c68924697 100644 (file)
@@ -28,6 +28,9 @@
 #include <linux/moduleloader.h>
 #include <linux/bpf.h>
 #include <linux/frame.h>
+#include <linux/rbtree_latch.h>
+#include <linux/kallsyms.h>
+#include <linux/rcupdate.h>
 
 #include <asm/unaligned.h>
 
@@ -95,6 +98,8 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
        fp->aux = aux;
        fp->aux->prog = fp;
 
+       INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode);
+
        return fp;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_alloc);
@@ -290,6 +295,206 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 }
 
 #ifdef CONFIG_BPF_JIT
+static __always_inline void
+bpf_get_prog_addr_region(const struct bpf_prog *prog,
+                        unsigned long *symbol_start,
+                        unsigned long *symbol_end)
+{
+       const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog);
+       unsigned long addr = (unsigned long)hdr;
+
+       WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog));
+
+       *symbol_start = addr;
+       *symbol_end   = addr + hdr->pages * PAGE_SIZE;
+}
+
+static void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
+{
+       BUILD_BUG_ON(sizeof("bpf_prog_") +
+                    sizeof(prog->tag) * 2 + 1 > KSYM_NAME_LEN);
+
+       sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_");
+       sym  = bin2hex(sym, prog->tag, sizeof(prog->tag));
+       *sym = 0;
+}
+
+static __always_inline unsigned long
+bpf_get_prog_addr_start(struct latch_tree_node *n)
+{
+       unsigned long symbol_start, symbol_end;
+       const struct bpf_prog_aux *aux;
+
+       aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
+       bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
+
+       return symbol_start;
+}
+
+static __always_inline bool bpf_tree_less(struct latch_tree_node *a,
+                                         struct latch_tree_node *b)
+{
+       return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b);
+}
+
+static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n)
+{
+       unsigned long val = (unsigned long)key;
+       unsigned long symbol_start, symbol_end;
+       const struct bpf_prog_aux *aux;
+
+       aux = container_of(n, struct bpf_prog_aux, ksym_tnode);
+       bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
+
+       if (val < symbol_start)
+               return -1;
+       if (val >= symbol_end)
+               return  1;
+
+       return 0;
+}
+
+static const struct latch_tree_ops bpf_tree_ops = {
+       .less   = bpf_tree_less,
+       .comp   = bpf_tree_comp,
+};
+
+static DEFINE_SPINLOCK(bpf_lock);
+static LIST_HEAD(bpf_kallsyms);
+static struct latch_tree_root bpf_tree __cacheline_aligned;
+
+int bpf_jit_kallsyms __read_mostly;
+
+static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux)
+{
+       WARN_ON_ONCE(!list_empty(&aux->ksym_lnode));
+       list_add_tail_rcu(&aux->ksym_lnode, &bpf_kallsyms);
+       latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+}
+
+static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux)
+{
+       if (list_empty(&aux->ksym_lnode))
+               return;
+
+       latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops);
+       list_del_rcu(&aux->ksym_lnode);
+}
+
+static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp)
+{
+       return fp->jited && !bpf_prog_was_classic(fp);
+}
+
+static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
+{
+       return list_empty(&fp->aux->ksym_lnode) ||
+              fp->aux->ksym_lnode.prev == LIST_POISON2;
+}
+
+void bpf_prog_kallsyms_add(struct bpf_prog *fp)
+{
+       unsigned long flags;
+
+       if (!bpf_prog_kallsyms_candidate(fp) ||
+           !capable(CAP_SYS_ADMIN))
+               return;
+
+       spin_lock_irqsave(&bpf_lock, flags);
+       bpf_prog_ksym_node_add(fp->aux);
+       spin_unlock_irqrestore(&bpf_lock, flags);
+}
+
+void bpf_prog_kallsyms_del(struct bpf_prog *fp)
+{
+       unsigned long flags;
+
+       if (!bpf_prog_kallsyms_candidate(fp))
+               return;
+
+       spin_lock_irqsave(&bpf_lock, flags);
+       bpf_prog_ksym_node_del(fp->aux);
+       spin_unlock_irqrestore(&bpf_lock, flags);
+}
+
+static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr)
+{
+       struct latch_tree_node *n;
+
+       if (!bpf_jit_kallsyms_enabled())
+               return NULL;
+
+       n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops);
+       return n ?
+              container_of(n, struct bpf_prog_aux, ksym_tnode)->prog :
+              NULL;
+}
+
+const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
+                                unsigned long *off, char *sym)
+{
+       unsigned long symbol_start, symbol_end;
+       struct bpf_prog *prog;
+       char *ret = NULL;
+
+       rcu_read_lock();
+       prog = bpf_prog_kallsyms_find(addr);
+       if (prog) {
+               bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end);
+               bpf_get_prog_name(prog, sym);
+
+               ret = sym;
+               if (size)
+                       *size = symbol_end - symbol_start;
+               if (off)
+                       *off  = addr - symbol_start;
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
+bool is_bpf_text_address(unsigned long addr)
+{
+       bool ret;
+
+       rcu_read_lock();
+       ret = bpf_prog_kallsyms_find(addr) != NULL;
+       rcu_read_unlock();
+
+       return ret;
+}
+
+int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+                   char *sym)
+{
+       unsigned long symbol_start, symbol_end;
+       struct bpf_prog_aux *aux;
+       unsigned int it = 0;
+       int ret = -ERANGE;
+
+       if (!bpf_jit_kallsyms_enabled())
+               return ret;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) {
+               if (it++ != symnum)
+                       continue;
+
+               bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
+               bpf_get_prog_name(aux->prog, sym);
+
+               *value = symbol_start;
+               *type  = BPF_SYM_ELF_TYPE;
+
+               ret = 0;
+               break;
+       }
+       rcu_read_unlock();
+
+       return ret;
+}
+
 struct bpf_binary_header *
 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
                     unsigned int alignment,
@@ -326,6 +531,24 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr)
        module_memfree(hdr);
 }
 
+/* This symbol is only overridden by archs that have different
+ * requirements than the usual eBPF JITs, f.e. when they only
+ * implement cBPF JIT, do not set images read-only, etc.
+ */
+void __weak bpf_jit_free(struct bpf_prog *fp)
+{
+       if (fp->jited) {
+               struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
+
+               bpf_jit_binary_unlock_ro(hdr);
+               bpf_jit_binary_free(hdr);
+
+               WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp));
+       }
+
+       bpf_prog_unlock_free(fp);
+}
+
 int bpf_jit_harden __read_mostly;
 
 static int bpf_jit_blind_insn(const struct bpf_insn *from,
@@ -1154,12 +1377,22 @@ const struct bpf_func_proto bpf_tail_call_proto = {
        .arg3_type      = ARG_ANYTHING,
 };
 
-/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
+/* Stub for JITs that only support cBPF. eBPF programs are interpreted.
+ * It is encouraged to implement bpf_int_jit_compile() instead, so that
+ * eBPF and implicitly also cBPF can get JITed!
+ */
 struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
 {
        return prog;
 }
 
+/* Stub for JITs that support eBPF. All cBPF code gets transformed into
+ * eBPF by the kernel and is later compiled by bpf_int_jit_compile().
+ */
+void __weak bpf_jit_compile(struct bpf_prog *prog)
+{
+}
+
 bool __weak bpf_helper_changes_pkt_data(void *func)
 {
        return false;
@@ -1173,3 +1406,12 @@ int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
 {
        return -EFAULT;
 }
+
+/* All definitions of tracepoints related to BPF. */
+#define CREATE_TRACE_POINTS
+#include <linux/bpf_trace.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);
index a753bbe7df0a1747658ca75325e28880173613f8..3ea87fb19a9416771985d9236f148ffb927ad19b 100644 (file)
@@ -1023,7 +1023,7 @@ static const struct bpf_map_ops htab_ops = {
        .map_delete_elem = htab_map_delete_elem,
 };
 
-static struct bpf_map_type_list htab_type __read_mostly = {
+static struct bpf_map_type_list htab_type __ro_after_init = {
        .ops = &htab_ops,
        .type = BPF_MAP_TYPE_HASH,
 };
@@ -1037,7 +1037,7 @@ static const struct bpf_map_ops htab_lru_ops = {
        .map_delete_elem = htab_lru_map_delete_elem,
 };
 
-static struct bpf_map_type_list htab_lru_type __read_mostly = {
+static struct bpf_map_type_list htab_lru_type __ro_after_init = {
        .ops = &htab_lru_ops,
        .type = BPF_MAP_TYPE_LRU_HASH,
 };
@@ -1124,7 +1124,7 @@ static const struct bpf_map_ops htab_percpu_ops = {
        .map_delete_elem = htab_map_delete_elem,
 };
 
-static struct bpf_map_type_list htab_percpu_type __read_mostly = {
+static struct bpf_map_type_list htab_percpu_type __ro_after_init = {
        .ops = &htab_percpu_ops,
        .type = BPF_MAP_TYPE_PERCPU_HASH,
 };
@@ -1138,7 +1138,7 @@ static const struct bpf_map_ops htab_lru_percpu_ops = {
        .map_delete_elem = htab_lru_map_delete_elem,
 };
 
-static struct bpf_map_type_list htab_lru_percpu_type __read_mostly = {
+static struct bpf_map_type_list htab_lru_percpu_type __ro_after_init = {
        .ops = &htab_lru_percpu_ops,
        .type = BPF_MAP_TYPE_LRU_PERCPU_HASH,
 };
index 045cbe673356f90c365928475574bb108a057e0a..3d24e238221ecdf4208fa31fef63b645d1e0437a 100644 (file)
@@ -176,6 +176,6 @@ const struct bpf_func_proto bpf_get_current_comm_proto = {
        .func           = bpf_get_current_comm,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
-       .arg1_type      = ARG_PTR_TO_RAW_STACK,
-       .arg2_type      = ARG_CONST_STACK_SIZE,
+       .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
+       .arg2_type      = ARG_CONST_SIZE,
 };
index 0b030c9126d3a5f5452461f4df3a5349d80ad4a7..fddcae801724be149b73794ab42f9f177ecf4f56 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/parser.h>
 #include <linux/filter.h>
 #include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 
 enum bpf_type {
        BPF_TYPE_UNSPEC = 0,
@@ -281,6 +282,13 @@ int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
        ret = bpf_obj_do_pin(pname, raw, type);
        if (ret != 0)
                bpf_any_put(raw, type);
+       if ((trace_bpf_obj_pin_prog_enabled() ||
+            trace_bpf_obj_pin_map_enabled()) && !ret) {
+               if (type == BPF_TYPE_PROG)
+                       trace_bpf_obj_pin_prog(raw, ufd, pname);
+               if (type == BPF_TYPE_MAP)
+                       trace_bpf_obj_pin_map(raw, ufd, pname);
+       }
 out:
        putname(pname);
        return ret;
@@ -342,8 +350,15 @@ int bpf_obj_get_user(const char __user *pathname)
        else
                goto out;
 
-       if (ret < 0)
+       if (ret < 0) {
                bpf_any_put(raw, type);
+       } else if (trace_bpf_obj_get_prog_enabled() ||
+                  trace_bpf_obj_get_map_enabled()) {
+               if (type == BPF_TYPE_PROG)
+                       trace_bpf_obj_get_prog(raw, ret, pname);
+               if (type == BPF_TYPE_MAP)
+                       trace_bpf_obj_get_map(raw, ret, pname);
+       }
 out:
        putname(pname);
        return ret;
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
new file mode 100644 (file)
index 0000000..8bfe0af
--- /dev/null
@@ -0,0 +1,521 @@
+/*
+ * Longest prefix match list implementation
+ *
+ * Copyright (c) 2016,2017 Daniel Mack
+ * Copyright (c) 2016 David Herrmann
+ *
+ * This file is subject to the terms and conditions of version 2 of the GNU
+ * General Public License.  See the file COPYING in the main directory of the
+ * Linux distribution for more details.
+ */
+
+#include <linux/bpf.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <net/ipv6.h>
+
+/* Intermediate node */
+#define LPM_TREE_NODE_FLAG_IM BIT(0)
+
+struct lpm_trie_node;
+
+struct lpm_trie_node {
+       struct rcu_head rcu;
+       struct lpm_trie_node __rcu      *child[2];
+       u32                             prefixlen;
+       u32                             flags;
+       u8                              data[0];
+};
+
+struct lpm_trie {
+       struct bpf_map                  map;
+       struct lpm_trie_node __rcu      *root;
+       size_t                          n_entries;
+       size_t                          max_prefixlen;
+       size_t                          data_size;
+       raw_spinlock_t                  lock;
+};
+
+/* This trie implements a longest prefix match algorithm that can be used to
+ * match IP addresses to a stored set of ranges.
+ *
+ * Data stored in @data of struct bpf_lpm_trie_key and struct lpm_trie_node is
+ * interpreted as big endian, so data[0] stores the most significant byte.
+ *
+ * Match ranges are internally stored in instances of struct lpm_trie_node
+ * which each contain their prefix length as well as two pointers that may
+ * lead to more nodes containing more specific matches. Each node also stores
+ * a value that is defined by and returned to userspace via the update_elem
+ * and lookup functions.
+ *
+ * For instance, let's start with a trie that was created with a prefix length
+ * of 32, so it can be used for IPv4 addresses, and one single element that
+ * matches 192.168.0.0/16. The data array would hence contain
+ * [0xc0, 0xa8, 0x00, 0x00] in big-endian notation. This documentation will
+ * stick to IP-address notation for readability though.
+ *
+ * As the trie is empty initially, the new node (1) will be placed as the
+ * root node, denoted as (R) in the example below. As there are no other
+ * nodes, both
+ * child pointers are %NULL.
+ *
+ *              +----------------+
+ *              |       (1)  (R) |
+ *              | 192.168.0.0/16 |
+ *              |    value: 1    |
+ *              |   [0]    [1]   |
+ *              +----------------+
+ *
+ * Next, let's add a new node (2) matching 192.168.0.0/24. As there is already
+ * a node with the same data and a smaller prefix (ie, a less specific one),
+ * node (2) will become a child of (1). Its child index depends on the next bit
+ * that is outside of what (1) matches, and that bit is 0, so (2) will be
+ * child[0] of (1):
+ *
+ *              +----------------+
+ *              |       (1)  (R) |
+ *              | 192.168.0.0/16 |
+ *              |    value: 1    |
+ *              |   [0]    [1]   |
+ *              +----------------+
+ *                   |
+ *    +----------------+
+ *    |       (2)      |
+ *    | 192.168.0.0/24 |
+ *    |    value: 2    |
+ *    |   [0]    [1]   |
+ *    +----------------+
+ *
+ * The child[1] slot of (1) could be filled with another node which has bit #17
+ * (the next bit after the ones that (1) matches on) set to 1. For instance,
+ * 192.168.128.0/24:
+ *
+ *              +----------------+
+ *              |       (1)  (R) |
+ *              | 192.168.0.0/16 |
+ *              |    value: 1    |
+ *              |   [0]    [1]   |
+ *              +----------------+
+ *                   |      |
+ *    +----------------+  +------------------+
+ *    |       (2)      |  |        (3)       |
+ *    | 192.168.0.0/24 |  | 192.168.128.0/24 |
+ *    |    value: 2    |  |     value: 3     |
+ *    |   [0]    [1]   |  |    [0]    [1]    |
+ *    +----------------+  +------------------+
+ *
+ * Let's add another node (4) to the game for 192.168.1.0/24. In order to place
+ * it, node (1) is looked at first, and because of the semantics laid out
+ * above (bit #17 of (4) is 0), it would normally be attached to (1) as
+ * child[0].
+ * However, that slot is already allocated, so a new node is needed in between.
+ * That node does not have a value attached to it and it will never be
+ * returned to users as a result of a lookup. It is only there to differentiate
+ * the traversal further. It will get a prefix as wide as necessary to
+ * distinguish its two children:
+ *
+ *                      +----------------+
+ *                      |       (1)  (R) |
+ *                      | 192.168.0.0/16 |
+ *                      |    value: 1    |
+ *                      |   [0]    [1]   |
+ *                      +----------------+
+ *                           |      |
+ *            +----------------+  +------------------+
+ *            |       (4)  (I) |  |        (3)       |
+ *            | 192.168.0.0/23 |  | 192.168.128.0/24 |
+ *            |    value: ---  |  |     value: 3     |
+ *            |   [0]    [1]   |  |    [0]    [1]    |
+ *            +----------------+  +------------------+
+ *                 |      |
+ *  +----------------+  +----------------+
+ *  |       (2)      |  |       (5)      |
+ *  | 192.168.0.0/24 |  | 192.168.1.0/24 |
+ *  |    value: 2    |  |     value: 5   |
+ *  |   [0]    [1]   |  |   [0]    [1]   |
+ *  +----------------+  +----------------+
+ *
+ * 192.168.1.1/32 would be a child of (5) etc.
+ *
+ * An intermediate node will be turned into a 'real' node on demand. In the
+ * example above, (4) would be re-used if 192.168.0.0/23 is added to the trie.
+ *
+ * A fully populated trie would have a height of 32 nodes, as the trie was
+ * created with a prefix length of 32.
+ *
+ * The lookup starts at the root node. If the current node matches and if there
+ * is a child that can be used to become more specific, the trie is traversed
+ * downwards. The last node in the traversal that is a non-intermediate one is
+ * returned.
+ */
+
+static inline int extract_bit(const u8 *data, size_t index)
+{
+       return !!(data[index / 8] & (1 << (7 - (index % 8))));
+}
+
+/**
+ * longest_prefix_match() - determine the longest prefix
+ * @trie:      The trie to get internal sizes from
+ * @node:      The node to operate on
+ * @key:       The key to compare to @node
+ *
+ * Determine the longest prefix of @node that matches the bits in @key.
+ */
+static size_t longest_prefix_match(const struct lpm_trie *trie,
+                                  const struct lpm_trie_node *node,
+                                  const struct bpf_lpm_trie_key *key)
+{
+       size_t prefixlen = 0;
+       size_t i;
+
+       for (i = 0; i < trie->data_size; i++) {
+               size_t b;
+
+               b = 8 - fls(node->data[i] ^ key->data[i]);
+               prefixlen += b;
+
+               if (prefixlen >= node->prefixlen || prefixlen >= key->prefixlen)
+                       return min(node->prefixlen, key->prefixlen);
+
+               if (b < 8)
+                       break;
+       }
+
+       return prefixlen;
+}
+
+/* Called from syscall or from eBPF program */
+static void *trie_lookup_elem(struct bpf_map *map, void *_key)
+{
+       struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+       struct lpm_trie_node *node, *found = NULL;
+       struct bpf_lpm_trie_key *key = _key;
+
+       /* Start walking the trie from the root node ... */
+
+       for (node = rcu_dereference(trie->root); node;) {
+               unsigned int next_bit;
+               size_t matchlen;
+
+               /* Determine the longest prefix of @node that matches @key.
+                * If it's the maximum possible prefix for this trie, we have
+                * an exact match and can return it directly.
+                */
+               matchlen = longest_prefix_match(trie, node, key);
+               if (matchlen == trie->max_prefixlen) {
+                       found = node;
+                       break;
+               }
+
+               /* If the number of bits that match is smaller than the prefix
+                * length of @node, bail out and return the node we have seen
+                * last in the traversal (ie, the parent).
+                */
+               if (matchlen < node->prefixlen)
+                       break;
+
+               /* Consider this node as return candidate unless it is an
+                * artificially added intermediate one.
+                */
+               if (!(node->flags & LPM_TREE_NODE_FLAG_IM))
+                       found = node;
+
+               /* If the node match is fully satisfied, let's see if we can
+                * become more specific. Determine the next bit in the key and
+                * traverse down.
+                */
+               next_bit = extract_bit(key->data, node->prefixlen);
+               node = rcu_dereference(node->child[next_bit]);
+       }
+
+       if (!found)
+               return NULL;
+
+       return found->data + trie->data_size;
+}
+
+static struct lpm_trie_node *lpm_trie_node_alloc(const struct lpm_trie *trie,
+                                                const void *value)
+{
+       struct lpm_trie_node *node;
+       size_t size = sizeof(struct lpm_trie_node) + trie->data_size;
+
+       if (value)
+               size += trie->map.value_size;
+
+       node = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN);
+       if (!node)
+               return NULL;
+
+       node->flags = 0;
+
+       if (value)
+               memcpy(node->data + trie->data_size, value,
+                      trie->map.value_size);
+
+       return node;
+}
+
+/* Called from syscall or from eBPF program */
+static int trie_update_elem(struct bpf_map *map,
+                           void *_key, void *value, u64 flags)
+{
+       struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+       struct lpm_trie_node *node, *im_node = NULL, *new_node = NULL;
+       struct lpm_trie_node __rcu **slot;
+       struct bpf_lpm_trie_key *key = _key;
+       unsigned long irq_flags;
+       unsigned int next_bit;
+       size_t matchlen = 0;
+       int ret = 0;
+
+       if (unlikely(flags > BPF_EXIST))
+               return -EINVAL;
+
+       if (key->prefixlen > trie->max_prefixlen)
+               return -EINVAL;
+
+       raw_spin_lock_irqsave(&trie->lock, irq_flags);
+
+       /* Allocate and fill a new node */
+
+       if (trie->n_entries == trie->map.max_entries) {
+               ret = -ENOSPC;
+               goto out;
+       }
+
+       new_node = lpm_trie_node_alloc(trie, value);
+       if (!new_node) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       trie->n_entries++;
+
+       new_node->prefixlen = key->prefixlen;
+       RCU_INIT_POINTER(new_node->child[0], NULL);
+       RCU_INIT_POINTER(new_node->child[1], NULL);
+       memcpy(new_node->data, key->data, trie->data_size);
+
+       /* Now find a slot to attach the new node. To do that, walk the tree
+        * from the root and match as many bits as possible for each node until
+        * we either find an empty slot or a slot that needs to be replaced by
+        * an intermediate node.
+        */
+       slot = &trie->root;
+
+       while ((node = rcu_dereference_protected(*slot,
+                                       lockdep_is_held(&trie->lock)))) {
+               matchlen = longest_prefix_match(trie, node, key);
+
+               if (node->prefixlen != matchlen ||
+                   node->prefixlen == key->prefixlen ||
+                   node->prefixlen == trie->max_prefixlen)
+                       break;
+
+               next_bit = extract_bit(key->data, node->prefixlen);
+               slot = &node->child[next_bit];
+       }
+
+       /* If the slot is empty (a free child pointer or an empty root),
+        * simply assign the @new_node to that slot and be done.
+        */
+       if (!node) {
+               rcu_assign_pointer(*slot, new_node);
+               goto out;
+       }
+
+       /* If the slot we picked already exists, replace it with @new_node
+        * which already has the correct data array set.
+        */
+       if (node->prefixlen == matchlen) {
+               new_node->child[0] = node->child[0];
+               new_node->child[1] = node->child[1];
+
+               if (!(node->flags & LPM_TREE_NODE_FLAG_IM))
+                       trie->n_entries--;
+
+               rcu_assign_pointer(*slot, new_node);
+               kfree_rcu(node, rcu);
+
+               goto out;
+       }
+
+       /* If the new node matches the prefix completely, it must be inserted
+        * as an ancestor. Simply insert it between @node and *@slot.
+        */
+       if (matchlen == key->prefixlen) {
+               next_bit = extract_bit(node->data, matchlen);
+               rcu_assign_pointer(new_node->child[next_bit], node);
+               rcu_assign_pointer(*slot, new_node);
+               goto out;
+       }
+
+       im_node = lpm_trie_node_alloc(trie, NULL);
+       if (!im_node) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       im_node->prefixlen = matchlen;
+       im_node->flags |= LPM_TREE_NODE_FLAG_IM;
+       memcpy(im_node->data, node->data, trie->data_size);
+
+       /* Now determine which child to install in which slot */
+       if (extract_bit(key->data, matchlen)) {
+               rcu_assign_pointer(im_node->child[0], node);
+               rcu_assign_pointer(im_node->child[1], new_node);
+       } else {
+               rcu_assign_pointer(im_node->child[0], new_node);
+               rcu_assign_pointer(im_node->child[1], node);
+       }
+
+       /* Finally, assign the intermediate node to the determined spot */
+       rcu_assign_pointer(*slot, im_node);
+
+out:
+       if (ret) {
+               if (new_node)
+                       trie->n_entries--;
+
+               kfree(new_node);
+               kfree(im_node);
+       }
+
+       raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
+
+       return ret;
+}
+
+static int trie_delete_elem(struct bpf_map *map, void *key)
+{
+       /* TODO */
+       return -ENOSYS;
+}
+
+#define LPM_DATA_SIZE_MAX      256
+#define LPM_DATA_SIZE_MIN      1
+
+#define LPM_VAL_SIZE_MAX       (KMALLOC_MAX_SIZE - LPM_DATA_SIZE_MAX - \
+                                sizeof(struct lpm_trie_node))
+#define LPM_VAL_SIZE_MIN       1
+
+#define LPM_KEY_SIZE(X)                (sizeof(struct bpf_lpm_trie_key) + (X))
+#define LPM_KEY_SIZE_MAX       LPM_KEY_SIZE(LPM_DATA_SIZE_MAX)
+#define LPM_KEY_SIZE_MIN       LPM_KEY_SIZE(LPM_DATA_SIZE_MIN)
+
+static struct bpf_map *trie_alloc(union bpf_attr *attr)
+{
+       struct lpm_trie *trie;
+       u64 cost = sizeof(*trie), cost_per_node;
+       int ret;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return ERR_PTR(-EPERM);
+
+       /* check sanity of attributes */
+       if (attr->max_entries == 0 ||
+           attr->map_flags != BPF_F_NO_PREALLOC ||
+           attr->key_size < LPM_KEY_SIZE_MIN ||
+           attr->key_size > LPM_KEY_SIZE_MAX ||
+           attr->value_size < LPM_VAL_SIZE_MIN ||
+           attr->value_size > LPM_VAL_SIZE_MAX)
+               return ERR_PTR(-EINVAL);
+
+       trie = kzalloc(sizeof(*trie), GFP_USER | __GFP_NOWARN);
+       if (!trie)
+               return ERR_PTR(-ENOMEM);
+
+       /* copy mandatory map attributes */
+       trie->map.map_type = attr->map_type;
+       trie->map.key_size = attr->key_size;
+       trie->map.value_size = attr->value_size;
+       trie->map.max_entries = attr->max_entries;
+       trie->data_size = attr->key_size -
+                         offsetof(struct bpf_lpm_trie_key, data);
+       trie->max_prefixlen = trie->data_size * 8;
+
+       cost_per_node = sizeof(struct lpm_trie_node) +
+                       attr->value_size + trie->data_size;
+       cost += (u64) attr->max_entries * cost_per_node;
+       if (cost >= U32_MAX - PAGE_SIZE) {
+               ret = -E2BIG;
+               goto out_err;
+       }
+
+       trie->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+       ret = bpf_map_precharge_memlock(trie->map.pages);
+       if (ret)
+               goto out_err;
+
+       raw_spin_lock_init(&trie->lock);
+
+       return &trie->map;
+out_err:
+       kfree(trie);
+       return ERR_PTR(ret);
+}
+
+static void trie_free(struct bpf_map *map)
+{
+       struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+       struct lpm_trie_node __rcu **slot;
+       struct lpm_trie_node *node;
+
+       raw_spin_lock(&trie->lock);
+
+       /* Always start at the root and walk down to a node that has no
+        * children. Then free that node, nullify its reference in the parent
+        * and start over.
+        */
+
+       for (;;) {
+               slot = &trie->root;
+
+               for (;;) {
+                       node = rcu_dereference_protected(*slot,
+                                       lockdep_is_held(&trie->lock));
+                       if (!node)
+                               goto unlock;
+
+                       if (rcu_access_pointer(node->child[0])) {
+                               slot = &node->child[0];
+                               continue;
+                       }
+
+                       if (rcu_access_pointer(node->child[1])) {
+                               slot = &node->child[1];
+                               continue;
+                       }
+
+                       kfree(node);
+                       RCU_INIT_POINTER(*slot, NULL);
+                       break;
+               }
+       }
+
+unlock:
+       raw_spin_unlock(&trie->lock);
+}
+
+static const struct bpf_map_ops trie_ops = {
+       .map_alloc = trie_alloc,
+       .map_free = trie_free,
+       .map_lookup_elem = trie_lookup_elem,
+       .map_update_elem = trie_update_elem,
+       .map_delete_elem = trie_delete_elem,
+};
+
+static struct bpf_map_type_list trie_type __ro_after_init = {
+       .ops = &trie_ops,
+       .type = BPF_MAP_TYPE_LPM_TRIE,
+};
+
+static int __init register_trie_map(void)
+{
+       bpf_register_map_type(&trie_type);
+       return 0;
+}
+late_initcall(register_trie_map);
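
A hedged userspace sketch tying back to the walkthrough above: create an LPM
trie map through the raw bpf(2) syscall (libbpf-style wrappers for the new map
type may not exist yet) and insert the 192.168.0.0/16 entry. BPF_F_NO_PREALLOC
is mandatory, matching the flags check in trie_alloc(); the fixed-size key
struct is a local stand-in for the flexible struct bpf_lpm_trie_key:

#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Fixed-size instance of struct bpf_lpm_trie_key for IPv4 keys. */
struct ipv4_lpm_key {
        __u32 prefixlen;
        __u8  data[4];          /* big endian, as documented above */
};

static int lpm_demo(void)
{
        struct ipv4_lpm_key key = { .prefixlen = 16 };
        __u64 value = 1;
        union bpf_attr attr;
        int map_fd, err;

        memset(&attr, 0, sizeof(attr));
        attr.map_type = BPF_MAP_TYPE_LPM_TRIE;
        attr.key_size = sizeof(key);            /* >= LPM_KEY_SIZE_MIN */
        attr.value_size = sizeof(value);
        attr.max_entries = 16;
        attr.map_flags = BPF_F_NO_PREALLOC;     /* required by trie_alloc() */
        map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
        if (map_fd < 0)
                return map_fd;

        inet_pton(AF_INET, "192.168.0.0", key.data);
        memset(&attr, 0, sizeof(attr));
        attr.map_fd = map_fd;
        attr.key = (__u64)(unsigned long)&key;
        attr.value = (__u64)(unsigned long)&value;
        err = syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
        return err ? err : map_fd;
}
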
index be8519148c255efb92704b5e3b0de102ac4c209c..22aa45cd0324e320b9cd8b0f89bf9befdaae74b6 100644 (file)
@@ -273,7 +273,7 @@ static const struct bpf_map_ops stack_map_ops = {
        .map_delete_elem = stack_map_delete_elem,
 };
 
-static struct bpf_map_type_list stack_map_type __read_mostly = {
+static struct bpf_map_type_list stack_map_type __ro_after_init = {
        .ops = &stack_map_ops,
        .type = BPF_MAP_TYPE_STACK_TRACE,
 };
index bbb016adbaeb61c5d46a525f3e7235d11535335e..461eb1e66a0fdf498557c2ff5a85354f730acc3e 100644 (file)
@@ -10,6 +10,7 @@
  * General Public License for more details.
  */
 #include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 #include <linux/syscalls.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -241,6 +242,7 @@ static int map_create(union bpf_attr *attr)
                /* failed to allocate fd */
                goto free_map;
 
+       trace_bpf_map_create(map, err);
        return err;
 
 free_map:
@@ -365,6 +367,7 @@ static int map_lookup_elem(union bpf_attr *attr)
        if (copy_to_user(uvalue, value, value_size) != 0)
                goto free_value;
 
+       trace_bpf_map_lookup_elem(map, ufd, key, value);
        err = 0;
 
 free_value:
@@ -447,6 +450,8 @@ static int map_update_elem(union bpf_attr *attr)
        __this_cpu_dec(bpf_prog_active);
        preempt_enable();
 
+       if (!err)
+               trace_bpf_map_update_elem(map, ufd, key, value);
 free_value:
        kfree(value);
 free_key:
@@ -492,6 +497,8 @@ static int map_delete_elem(union bpf_attr *attr)
        __this_cpu_dec(bpf_prog_active);
        preempt_enable();
 
+       if (!err)
+               trace_bpf_map_delete_elem(map, ufd, key);
 free_key:
        kfree(key);
 err_put:
@@ -544,6 +551,7 @@ static int map_get_next_key(union bpf_attr *attr)
        if (copy_to_user(unext_key, next_key, map->key_size) != 0)
                goto free_next_key;
 
+       trace_bpf_map_next_key(map, ufd, key, next_key);
        err = 0;
 
 free_next_key:
@@ -697,8 +705,11 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 
 void bpf_prog_put(struct bpf_prog *prog)
 {
-       if (atomic_dec_and_test(&prog->aux->refcnt))
+       if (atomic_dec_and_test(&prog->aux->refcnt)) {
+               trace_bpf_prog_put_rcu(prog);
+               bpf_prog_kallsyms_del(prog);
                call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
+       }
 }
 EXPORT_SYMBOL_GPL(bpf_prog_put);
 
@@ -807,7 +818,11 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
 
 struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
 {
-       return __bpf_prog_get(ufd, &type);
+       struct bpf_prog *prog = __bpf_prog_get(ufd, &type);
+
+       if (!IS_ERR(prog))
+               trace_bpf_prog_get_type(prog);
+       return prog;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_get_type);
 
@@ -889,6 +904,8 @@ static int bpf_prog_load(union bpf_attr *attr)
                /* failed to allocate fd */
                goto free_used_maps;
 
+       bpf_prog_kallsyms_add(prog);
+       trace_bpf_prog_load(prog, err);
        return err;
 
 free_used_maps:
index cdc43b899f281ebd01dd8cf8110e6fb966c2a1fa..d2bded2b250c876750b869d08273ceae0b181cbd 100644 (file)
@@ -481,6 +481,13 @@ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
        regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
 }
 
+static void mark_reg_unknown_value_and_range(struct bpf_reg_state *regs,
+                                            u32 regno)
+{
+       mark_reg_unknown_value(regs, regno);
+       reset_reg_range_values(regs, regno);
+}
+
 enum reg_arg_type {
        SRC_OP,         /* register is used as source operand */
        DST_OP,         /* register is used as destination operand */
@@ -532,6 +539,7 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
        switch (type) {
        case PTR_TO_MAP_VALUE:
        case PTR_TO_MAP_VALUE_OR_NULL:
+       case PTR_TO_MAP_VALUE_ADJ:
        case PTR_TO_STACK:
        case PTR_TO_CTX:
        case PTR_TO_PACKET:
@@ -616,7 +624,8 @@ static int check_stack_read(struct bpf_verifier_state *state, int off, int size,
                }
                if (value_regno >= 0)
                        /* have read misc data from the stack */
-                       mark_reg_unknown_value(state->regs, value_regno);
+                       mark_reg_unknown_value_and_range(state->regs,
+                                                        value_regno);
                return 0;
        }
 }
@@ -627,7 +636,7 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
 {
        struct bpf_map *map = env->cur_state.regs[regno].map_ptr;
 
-       if (off < 0 || off + size > map->value_size) {
+       if (off < 0 || size <= 0 || off + size > map->value_size) {
                verbose("invalid access to map value, value_size=%d off=%d size=%d\n",
                        map->value_size, off, size);
                return -EACCES;
@@ -635,6 +644,51 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
        return 0;
 }
 
+/* check read/write into an adjusted map element */
+static int check_map_access_adj(struct bpf_verifier_env *env, u32 regno,
+                               int off, int size)
+{
+       struct bpf_verifier_state *state = &env->cur_state;
+       struct bpf_reg_state *reg = &state->regs[regno];
+       int err;
+
+       /* We adjusted the register to this map value, so we
+        * need to change off and size to min_value and max_value
+        * respectively to make sure our theoretical access will be
+        * safe.
+        */
+       if (log_level)
+               print_verifier_state(state);
+       env->varlen_map_value_access = true;
+       /* The minimum value is only important with signed
+        * comparisons where we can't assume the floor of a
+        * value is 0.  If we are using signed variables for our
+        * indexes we need to make sure that whatever we use
+        * will have a set floor within our range.
+        */
+       if (reg->min_value < 0) {
+               verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
+                       regno);
+               return -EACCES;
+       }
+       err = check_map_access(env, regno, reg->min_value + off, size);
+       if (err) {
+               verbose("R%d min value is outside of the array range\n",
+                       regno);
+               return err;
+       }
+
+       /* If we haven't set a max value then we need to bail
+        * since we can't be sure we won't do bad things.
+        */
+       if (reg->max_value == BPF_REGISTER_MAX_RANGE) {
+               verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n",
+                       regno);
+               return -EACCES;
+       }
+       return check_map_access(env, regno, reg->max_value + off, size);
+}
+
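
An illustrative sketch of the bounded pattern this new helper is built to accept, in BPF-style restricted C (map, key, and index names are hypothetical; a value_size of at least 64 bytes is assumed):

        unsigned char *val = bpf_map_lookup_elem(&my_map, &key);

        if (!val)
                return 0;
        idx &= 0x3f;            /* min_value = 0, max_value = 63 */
        return val[idx];        /* both the min and max probes stay inside value_size */
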
 #define MAX_PACKET_OFF 0xffff
 
 static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
@@ -647,6 +701,7 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
                /* dst_input() and dst_output() can't write for now */
                if (t == BPF_WRITE)
                        return false;
+               /* fallthrough */
        case BPF_PROG_TYPE_SCHED_CLS:
        case BPF_PROG_TYPE_SCHED_ACT:
        case BPF_PROG_TYPE_XDP:
@@ -775,47 +830,13 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
                        return -EACCES;
                }
 
-               /* If we adjusted the register to this map value at all then we
-                * need to change off and size to min_value and max_value
-                * respectively to make sure our theoretical access will be
-                * safe.
-                */
-               if (reg->type == PTR_TO_MAP_VALUE_ADJ) {
-                       if (log_level)
-                               print_verifier_state(state);
-                       env->varlen_map_value_access = true;
-                       /* The minimum value is only important with signed
-                        * comparisons where we can't assume the floor of a
-                        * value is 0.  If we are using signed variables for our
-                        * index'es we need to make sure that whatever we use
-                        * will have a set floor within our range.
-                        */
-                       if (reg->min_value < 0) {
-                               verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
-                                       regno);
-                               return -EACCES;
-                       }
-                       err = check_map_access(env, regno, reg->min_value + off,
-                                              size);
-                       if (err) {
-                               verbose("R%d min value is outside of the array range\n",
-                                       regno);
-                               return err;
-                       }
-
-                       /* If we haven't set a max value then we need to bail
-                        * since we can't be sure we won't do bad things.
-                        */
-                       if (reg->max_value == BPF_REGISTER_MAX_RANGE) {
-                               verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n",
-                                       regno);
-                               return -EACCES;
-                       }
-                       off += reg->max_value;
-               }
-               err = check_map_access(env, regno, off, size);
+               if (reg->type == PTR_TO_MAP_VALUE_ADJ)
+                       err = check_map_access_adj(env, regno, off, size);
+               else
+                       err = check_map_access(env, regno, off, size);
                if (!err && t == BPF_READ && value_regno >= 0)
-                       mark_reg_unknown_value(state->regs, value_regno);
+                       mark_reg_unknown_value_and_range(state->regs,
+                                                        value_regno);
 
        } else if (reg->type == PTR_TO_CTX) {
                enum bpf_reg_type reg_type = UNKNOWN_VALUE;
@@ -827,7 +848,8 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
                }
                err = check_ctx_access(env, off, size, t, &reg_type);
                if (!err && t == BPF_READ && value_regno >= 0) {
-                       mark_reg_unknown_value(state->regs, value_regno);
+                       mark_reg_unknown_value_and_range(state->regs,
+                                                        value_regno);
                        /* note that reg.[id|off|range] == 0 */
                        state->regs[value_regno].type = reg_type;
                }
@@ -860,7 +882,8 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
                }
                err = check_packet_access(env, regno, off, size);
                if (!err && t == BPF_READ && value_regno >= 0)
-                       mark_reg_unknown_value(state->regs, value_regno);
+                       mark_reg_unknown_value_and_range(state->regs,
+                                                        value_regno);
        } else {
                verbose("R%d invalid mem access '%s'\n",
                        regno, reg_type_str[reg->type]);
@@ -958,6 +981,25 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
        return 0;
 }
 
+static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
+                                  int access_size, bool zero_size_allowed,
+                                  struct bpf_call_arg_meta *meta)
+{
+       struct bpf_reg_state *regs = env->cur_state.regs;
+
+       switch (regs[regno].type) {
+       case PTR_TO_PACKET:
+               return check_packet_access(env, regno, 0, access_size);
+       case PTR_TO_MAP_VALUE:
+               return check_map_access(env, regno, 0, access_size);
+       case PTR_TO_MAP_VALUE_ADJ:
+               return check_map_access_adj(env, regno, 0, access_size);
+       default: /* const_imm|ptr_to_stack or invalid ptr */
+               return check_stack_boundary(env, regno, access_size,
+                                           zero_size_allowed, meta);
+       }
+}
+
 static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
                          enum bpf_arg_type arg_type,
                          struct bpf_call_arg_meta *meta)
@@ -993,10 +1035,13 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
                expected_type = PTR_TO_STACK;
                if (type != PTR_TO_PACKET && type != expected_type)
                        goto err_type;
-       } else if (arg_type == ARG_CONST_STACK_SIZE ||
-                  arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) {
+       } else if (arg_type == ARG_CONST_SIZE ||
+                  arg_type == ARG_CONST_SIZE_OR_ZERO) {
                expected_type = CONST_IMM;
-               if (type != expected_type)
+               /* One exception. Allow UNKNOWN_VALUE registers when the
+                * boundaries are known and don't cause unsafe memory accesses.
+                */
+               if (type != UNKNOWN_VALUE && type != expected_type)
                        goto err_type;
        } else if (arg_type == ARG_CONST_MAP_PTR) {
                expected_type = CONST_PTR_TO_MAP;
@@ -1006,8 +1051,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
                expected_type = PTR_TO_CTX;
                if (type != expected_type)
                        goto err_type;
-       } else if (arg_type == ARG_PTR_TO_STACK ||
-                  arg_type == ARG_PTR_TO_RAW_STACK) {
+       } else if (arg_type == ARG_PTR_TO_MEM ||
+                  arg_type == ARG_PTR_TO_UNINIT_MEM) {
                expected_type = PTR_TO_STACK;
                /* One exception here. In case function allows for NULL to be
                 * passed in as argument, it's a CONST_IMM type. Final test
@@ -1015,9 +1060,10 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
                 */
                if (type == CONST_IMM && reg->imm == 0)
                        /* final test in check_stack_boundary() */;
-               else if (type != PTR_TO_PACKET && type != expected_type)
+               else if (type != PTR_TO_PACKET && type != PTR_TO_MAP_VALUE &&
+                        type != PTR_TO_MAP_VALUE_ADJ && type != expected_type)
                        goto err_type;
-               meta->raw_mode = arg_type == ARG_PTR_TO_RAW_STACK;
+               meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
        } else {
                verbose("unsupported arg_type %d\n", arg_type);
                return -EFAULT;
@@ -1063,9 +1109,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
                        err = check_stack_boundary(env, regno,
                                                   meta->map_ptr->value_size,
                                                   false, NULL);
-       } else if (arg_type == ARG_CONST_STACK_SIZE ||
-                  arg_type == ARG_CONST_STACK_SIZE_OR_ZERO) {
-               bool zero_size_allowed = (arg_type == ARG_CONST_STACK_SIZE_OR_ZERO);
+       } else if (arg_type == ARG_CONST_SIZE ||
+                  arg_type == ARG_CONST_SIZE_OR_ZERO) {
+               bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
 
                /* bpf_xxx(..., buf, len) call will access 'len' bytes
                 * from stack pointer 'buf'. Check it
@@ -1073,14 +1119,50 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
                 */
                if (regno == 0) {
                        /* kernel subsystem misconfigured verifier */
-                       verbose("ARG_CONST_STACK_SIZE cannot be first argument\n");
+                       verbose("ARG_CONST_SIZE cannot be first argument\n");
                        return -EACCES;
                }
-               if (regs[regno - 1].type == PTR_TO_PACKET)
-                       err = check_packet_access(env, regno - 1, 0, reg->imm);
-               else
-                       err = check_stack_boundary(env, regno - 1, reg->imm,
-                                                  zero_size_allowed, meta);
+
+               /* If the register is UNKNOWN_VALUE, the access check happens
+                * using its boundaries. Otherwise, just use its imm
+                */
+               if (type == UNKNOWN_VALUE) {
+                       /* For unprivileged variable accesses, disable raw
+                        * mode so that the program is required to
+                        * initialize all the memory that the helper could
+                        * just partially fill up.
+                        */
+                       meta = NULL;
+
+                       if (reg->min_value < 0) {
+                               verbose("R%d min value is negative, either use unsigned or 'var &= const'\n",
+                                       regno);
+                               return -EACCES;
+                       }
+
+                       if (reg->min_value == 0) {
+                               err = check_helper_mem_access(env, regno - 1, 0,
+                                                             zero_size_allowed,
+                                                             meta);
+                               if (err)
+                                       return err;
+                       }
+
+                       if (reg->max_value == BPF_REGISTER_MAX_RANGE) {
+                               verbose("R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n",
+                                       regno);
+                               return -EACCES;
+                       }
+                       err = check_helper_mem_access(env, regno - 1,
+                                                     reg->max_value,
+                                                     zero_size_allowed, meta);
+                       if (err)
+                               return err;
+               } else {
+                       /* register is CONST_IMM */
+                       err = check_helper_mem_access(env, regno - 1, reg->imm,
+                                                     zero_size_allowed, meta);
+               }
        }
 
        return err;
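
A sketch of what the UNKNOWN_VALUE path admits, again in BPF-style restricted C (the context field and unsafe_ptr are hypothetical). Because raw mode is disabled for variable sizes, the buffer must be initialized before the call, and the unsigned 'len > 0' test (BPF_JGT) raises min_value to 1, which the ARG_CONST_SIZE zero-size probe otherwise rejects for stack buffers:

        char buf[256] = {};     /* raw mode off: stack must be initialized */
        u32 len = ctx->len;     /* hypothetical load: len is UNKNOWN_VALUE */

        len &= 0xff;            /* max_value = 255 */
        if (len > 0)            /* min_value = 1 on this branch */
                bpf_probe_read(buf, len, unsafe_ptr);
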
@@ -1154,15 +1236,15 @@ static int check_raw_mode(const struct bpf_func_proto *fn)
 {
        int count = 0;
 
-       if (fn->arg1_type == ARG_PTR_TO_RAW_STACK)
+       if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM)
                count++;
-       if (fn->arg2_type == ARG_PTR_TO_RAW_STACK)
+       if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM)
                count++;
-       if (fn->arg3_type == ARG_PTR_TO_RAW_STACK)
+       if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM)
                count++;
-       if (fn->arg4_type == ARG_PTR_TO_RAW_STACK)
+       if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM)
                count++;
-       if (fn->arg5_type == ARG_PTR_TO_RAW_STACK)
+       if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM)
                count++;
 
        return count > 1 ? -EINVAL : 0;
@@ -1316,7 +1398,7 @@ static int check_packet_ptr_add(struct bpf_verifier_env *env,
                imm = insn->imm;
 
 add_imm:
-               if (imm <= 0) {
+               if (imm < 0) {
                        verbose("addition of negative constant to packet pointer is not allowed\n");
                        return -EACCES;
                }
@@ -1485,22 +1567,54 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
        struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
        struct bpf_reg_state *src_reg = &regs[insn->src_reg];
        u8 opcode = BPF_OP(insn->code);
+       u64 dst_imm = dst_reg->imm;
 
-       /* dst_reg->type == CONST_IMM here, simulate execution of 'add'/'or'
-        * insn. Don't care about overflow or negative values, just add them
+       /* dst_reg->type == CONST_IMM here. Simulate execution of insns
+        * containing ALU ops. Don't care about overflow or negative
+        * values, just add/sub/... them; registers are in u64.
         */
-       if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K)
-               dst_reg->imm += insn->imm;
-       else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
-                src_reg->type == CONST_IMM)
-               dst_reg->imm += src_reg->imm;
-       else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K)
-               dst_reg->imm |= insn->imm;
-       else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X &&
-                src_reg->type == CONST_IMM)
-               dst_reg->imm |= src_reg->imm;
-       else
+       if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K) {
+               dst_imm += insn->imm;
+       } else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
+                  src_reg->type == CONST_IMM) {
+               dst_imm += src_reg->imm;
+       } else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_K) {
+               dst_imm -= insn->imm;
+       } else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_X &&
+                  src_reg->type == CONST_IMM) {
+               dst_imm -= src_reg->imm;
+       } else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_K) {
+               dst_imm *= insn->imm;
+       } else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_X &&
+                  src_reg->type == CONST_IMM) {
+               dst_imm *= src_reg->imm;
+       } else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K) {
+               dst_imm |= insn->imm;
+       } else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X &&
+                  src_reg->type == CONST_IMM) {
+               dst_imm |= src_reg->imm;
+       } else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_K) {
+               dst_imm &= insn->imm;
+       } else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_X &&
+                  src_reg->type == CONST_IMM) {
+               dst_imm &= src_reg->imm;
+       } else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_K) {
+               dst_imm >>= insn->imm;
+       } else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_X &&
+                  src_reg->type == CONST_IMM) {
+               dst_imm >>= src_reg->imm;
+       } else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_K) {
+               dst_imm <<= insn->imm;
+       } else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_X &&
+                  src_reg->type == CONST_IMM) {
+               dst_imm <<= src_reg->imm;
+       } else {
                mark_reg_unknown_value(regs, insn->dst_reg);
+               goto out;
+       }
+
+       dst_reg->imm = dst_imm;
+out:
        return 0;
 }
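
For illustration, a constant-folding sequence that the extended simulation can now track end to end (macro notation from linux/filter.h; the register choice is arbitrary):

        BPF_MOV64_IMM(BPF_REG_1, 16),                   /* CONST_IMM, imm = 16 */
        BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),           /* imm = 64 */
        BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),           /* imm = 56 */
        BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x3f),        /* imm = 56 */
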
 
@@ -1894,6 +2008,7 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
        case BPF_JGT:
                /* Unsigned comparison, the minimum value is 0. */
                false_reg->min_value = 0;
+               /* fallthrough */
        case BPF_JSGT:
                /* If this is false then we know the maximum val is val,
                 * otherwise we know the min val is val+1.
@@ -1904,6 +2019,7 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
        case BPF_JGE:
                /* Unsigned comparison, the minimum value is 0. */
                false_reg->min_value = 0;
+               /* fallthrough */
        case BPF_JSGE:
                /* If this is false then we know the maximum value is val - 1,
                 * otherwise we know the minimum value is val.
@@ -1942,6 +2058,7 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
        case BPF_JGT:
                /* Unsigned comparison, the minimum value is 0. */
                true_reg->min_value = 0;
+               /* fallthrough */
        case BPF_JSGT:
                /*
                 * If this is false, then the val is <= the register, if it is
@@ -1953,6 +2070,7 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
        case BPF_JGE:
                /* Unsigned comparison, the minimum value is 0. */
                true_reg->min_value = 0;
+               /* fallthrough */
        case BPF_JSGE:
                /* If this is false then constant < register, if it is true then
                 * the register < constant.
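
A worked sketch of the bounds these now-annotated fallthroughs derive from an unsigned compare against a constant:

        if (r1 > 10) {          /* BPF_JGT, unsigned */
                /* true branch:  r1 min_value = 11, via the shared JSGT code */
        } else {
                /* false branch: r1 min_value = 0, max_value = 10 */
        }
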
@@ -2144,14 +2262,8 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
                return err;
 
        if (insn->src_reg == 0) {
-               /* generic move 64-bit immediate into a register,
-                * only analyzer needs to collect the ld_imm value.
-                */
                u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
 
-               if (!env->analyzer_ops)
-                       return 0;
-
                regs[insn->dst_reg].type = CONST_IMM;
                regs[insn->dst_reg].imm = imm;
                return 0;
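
Sketched in macro notation, the effect of dropping the analyzer check: a 64-bit immediate load is now tracked as a known constant for every program, where previously that only happened when env->analyzer_ops was set:

        BPF_LD_IMM64(BPF_REG_1, 0x123456789abcULL),
        /* verifier state afterwards: R1 type = CONST_IMM, imm = 0x123456789abc */
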
@@ -2729,7 +2841,6 @@ static int do_check(struct bpf_verifier_env *env)
                        if (err)
                                return err;
 
-                       reset_reg_range_values(regs, insn->dst_reg);
                        if (BPF_SIZE(insn->code) != BPF_W &&
                            BPF_SIZE(insn->code) != BPF_DW) {
                                insn_idx++;
@@ -3085,10 +3196,14 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
        insn = env->prog->insnsi + delta;
 
        for (i = 0; i < insn_cnt; i++, insn++) {
-               if (insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
+               if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
+                   insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
+                   insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
                    insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
                        type = BPF_READ;
-               else if (insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
+               else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
+                        insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
+                        insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
                         insn->code == (BPF_STX | BPF_MEM | BPF_DW))
                        type = BPF_WRITE;
                else
@@ -3097,8 +3212,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                if (env->insn_aux_data[i].ptr_type != PTR_TO_CTX)
                        continue;
 
-               cnt = ops->convert_ctx_access(type, insn->dst_reg, insn->src_reg,
-                                             insn->off, insn_buf, env->prog);
+               cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog);
                if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
                        verbose("bpf verifier is misconfigured\n");
                        return -EINVAL;
index e3beec4a2339ee1c013698beb4c9ca126d1e2121..bd82117ad424cb99f739be974523b2e68b0d3e2a 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/init.h>
+#include <linux/filter.h>
 
 #include <asm/sections.h>
 #include <linux/uaccess.h>
@@ -104,6 +105,8 @@ int __kernel_text_address(unsigned long addr)
                return 1;
        if (is_ftrace_trampoline(addr))
                return 1;
+       if (is_bpf_text_address(addr))
+               return 1;
        /*
         * There might be init symbols in saved stacktraces.
         * Give those symbols a chance to be printed in
@@ -123,7 +126,11 @@ int kernel_text_address(unsigned long addr)
                return 1;
        if (is_module_text_address(addr))
                return 1;
-       return is_ftrace_trampoline(addr);
+       if (is_ftrace_trampoline(addr))
+               return 1;
+       if (is_bpf_text_address(addr))
+               return 1;
+       return 0;
 }
 
 /*
index fafd1a3ef0da56c6887fb5edd631e8e4a925a0e4..6a3b249a2ae107b04a044f8b4760f3b55591cb43 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/mm.h>
 #include <linux/ctype.h>
 #include <linux/slab.h>
+#include <linux/filter.h>
 #include <linux/compiler.h>
 
 #include <asm/sections.h>
@@ -300,10 +301,11 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
                                unsigned long *offset)
 {
        char namebuf[KSYM_NAME_LEN];
+
        if (is_ksym_addr(addr))
                return !!get_symbol_pos(addr, symbolsize, offset);
-
-       return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf);
+       return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
+              !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
 }
 
 /*
@@ -318,6 +320,8 @@ const char *kallsyms_lookup(unsigned long addr,
                            unsigned long *offset,
                            char **modname, char *namebuf)
 {
+       const char *ret;
+
        namebuf[KSYM_NAME_LEN - 1] = 0;
        namebuf[0] = 0;
 
@@ -333,9 +337,13 @@ const char *kallsyms_lookup(unsigned long addr,
                return namebuf;
        }
 
-       /* See if it's in a module. */
-       return module_address_lookup(addr, symbolsize, offset, modname,
-                                    namebuf);
+       /* See if it's in a module or a BPF JITed image. */
+       ret = module_address_lookup(addr, symbolsize, offset,
+                                   modname, namebuf);
+       if (!ret)
+               ret = bpf_address_lookup(addr, symbolsize,
+                                        offset, modname, namebuf);
+       return ret;
 }
 
 int lookup_symbol_name(unsigned long addr, char *symname)
@@ -471,6 +479,7 @@ EXPORT_SYMBOL(__print_symbol);
 /* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
 struct kallsym_iter {
        loff_t pos;
+       loff_t pos_mod_end;
        unsigned long value;
        unsigned int nameoff; /* If iterating in core kernel symbols. */
        char type;
@@ -481,13 +490,27 @@ struct kallsym_iter {
 
 static int get_ksymbol_mod(struct kallsym_iter *iter)
 {
-       if (module_get_kallsym(iter->pos - kallsyms_num_syms, &iter->value,
-                               &iter->type, iter->name, iter->module_name,
-                               &iter->exported) < 0)
+       int ret = module_get_kallsym(iter->pos - kallsyms_num_syms,
+                                    &iter->value, &iter->type,
+                                    iter->name, iter->module_name,
+                                    &iter->exported);
+       if (ret < 0) {
+               iter->pos_mod_end = iter->pos;
                return 0;
+       }
+
        return 1;
 }
 
+static int get_ksymbol_bpf(struct kallsym_iter *iter)
+{
+       iter->module_name[0] = '\0';
+       iter->exported = 0;
+       return bpf_get_kallsym(iter->pos - iter->pos_mod_end,
+                              &iter->value, &iter->type,
+                              iter->name) < 0 ? 0 : 1;
+}
+
 /* Returns space to next name. */
 static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
 {
@@ -508,16 +531,30 @@ static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
        iter->name[0] = '\0';
        iter->nameoff = get_symbol_offset(new_pos);
        iter->pos = new_pos;
+       if (new_pos == 0)
+               iter->pos_mod_end = 0;
+}
+
+static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
+{
+       iter->pos = pos;
+
+       if (iter->pos_mod_end > 0 &&
+           iter->pos_mod_end < iter->pos)
+               return get_ksymbol_bpf(iter);
+
+       if (!get_ksymbol_mod(iter))
+               return get_ksymbol_bpf(iter);
+
+       return 1;
 }
 
 /* Returns false if pos at or past end of file. */
 static int update_iter(struct kallsym_iter *iter, loff_t pos)
 {
        /* Module symbols can be accessed randomly. */
-       if (pos >= kallsyms_num_syms) {
-               iter->pos = pos;
-               return get_ksymbol_mod(iter);
-       }
+       if (pos >= kallsyms_num_syms)
+               return update_iter_mod(iter, pos);
 
        /* If we're not on the desired position, reset to new position. */
        if (pos != iter->pos)
index fa77311dadb23b35d78cf9060daf1dc0cc72b855..cee9802cf3e00f0f5ef1625df14fa9d6892b3581 100644 (file)
@@ -76,8 +76,8 @@ static const struct bpf_func_proto bpf_probe_read_proto = {
        .func           = bpf_probe_read,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
-       .arg1_type      = ARG_PTR_TO_RAW_STACK,
-       .arg2_type      = ARG_CONST_STACK_SIZE,
+       .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
+       .arg2_type      = ARG_CONST_SIZE,
        .arg3_type      = ARG_ANYTHING,
 };
 
@@ -109,8 +109,8 @@ static const struct bpf_func_proto bpf_probe_write_user_proto = {
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_ANYTHING,
-       .arg2_type      = ARG_PTR_TO_STACK,
-       .arg3_type      = ARG_CONST_STACK_SIZE,
+       .arg2_type      = ARG_PTR_TO_MEM,
+       .arg3_type      = ARG_CONST_SIZE,
 };
 
 static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
@@ -213,8 +213,8 @@ static const struct bpf_func_proto bpf_trace_printk_proto = {
        .func           = bpf_trace_printk,
        .gpl_only       = true,
        .ret_type       = RET_INTEGER,
-       .arg1_type      = ARG_PTR_TO_STACK,
-       .arg2_type      = ARG_CONST_STACK_SIZE,
+       .arg1_type      = ARG_PTR_TO_MEM,
+       .arg2_type      = ARG_CONST_SIZE,
 };
 
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
@@ -329,8 +329,8 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
-       .arg4_type      = ARG_PTR_TO_STACK,
-       .arg5_type      = ARG_CONST_STACK_SIZE,
+       .arg4_type      = ARG_PTR_TO_MEM,
+       .arg5_type      = ARG_CONST_SIZE,
 };
 
 static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
@@ -395,6 +395,36 @@ static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
        .arg2_type      = ARG_ANYTHING,
 };
 
+BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
+          const void *, unsafe_ptr)
+{
+       int ret;
+
+       /*
+        * The strncpy_from_unsafe() call will likely not fill the entire
+        * buffer, but that's okay in this circumstance: we're probing
+        * arbitrary memory anyway, similar to bpf_probe_read(), and might
+        * as well probe the stack. Thus, memory is explicitly cleared
+        * only in the error case, so that improper users who ignore the
+        * return code altogether don't copy garbage; otherwise the length
+        * of the string is returned, which can be used for
+        * bpf_perf_event_output() et al.
+        */
+       ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
+       if (unlikely(ret < 0))
+               memset(dst, 0, size);
+
+       return ret;
+}
+
+static const struct bpf_func_proto bpf_probe_read_str_proto = {
+       .func           = bpf_probe_read_str,
+       .gpl_only       = true,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_UNINIT_MEM,
+       .arg2_type      = ARG_CONST_SIZE,
+       .arg3_type      = ARG_ANYTHING,
+};
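
A hedged usage sketch from a kprobe program (the event map and probed pointer are hypothetical). The full buffer is emitted with a constant size here; passing the variable length itself would need the bounds checks shown in the verifier hunks above:

        char str[64];
        int len = bpf_probe_read_str(str, sizeof(str), (void *)probed_ptr);

        if (len > 0)    /* len counts the trailing NUL */
                bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
                                      str, sizeof(str));
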
+
 static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
 {
        switch (func_id) {
@@ -432,6 +462,8 @@ static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
                return &bpf_current_task_under_cgroup_proto;
        case BPF_FUNC_get_prandom_u32:
                return &bpf_get_prandom_u32_proto;
+       case BPF_FUNC_probe_read_str:
+               return &bpf_probe_read_str_proto;
        default:
                return NULL;
        }
@@ -459,6 +491,13 @@ static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type
                return false;
        if (off % size != 0)
                return false;
+       /*
+        * Assertion for 32 bit to make sure the last 8-byte access
+        * (BPF_DW) to the last 4-byte member is disallowed.
+        */
+       if (off + size > sizeof(struct pt_regs))
+               return false;
+
        return true;
 }
 
@@ -467,7 +506,7 @@ static const struct bpf_verifier_ops kprobe_prog_ops = {
        .is_valid_access = kprobe_prog_is_valid_access,
 };
 
-static struct bpf_prog_type_list kprobe_tl = {
+static struct bpf_prog_type_list kprobe_tl __ro_after_init = {
        .ops    = &kprobe_prog_ops,
        .type   = BPF_PROG_TYPE_KPROBE,
 };
@@ -492,8 +531,8 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
-       .arg4_type      = ARG_PTR_TO_STACK,
-       .arg5_type      = ARG_CONST_STACK_SIZE,
+       .arg4_type      = ARG_PTR_TO_MEM,
+       .arg5_type      = ARG_CONST_SIZE,
 };
 
 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
@@ -540,6 +579,8 @@ static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type
                return false;
        if (off % size != 0)
                return false;
+
+       BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
        return true;
 }
 
@@ -548,7 +589,7 @@ static const struct bpf_verifier_ops tracepoint_prog_ops = {
        .is_valid_access = tp_prog_is_valid_access,
 };
 
-static struct bpf_prog_type_list tracepoint_tl = {
+static struct bpf_prog_type_list tracepoint_tl __ro_after_init = {
        .ops    = &tracepoint_prog_ops,
        .type   = BPF_PROG_TYPE_TRACEPOINT,
 };
@@ -572,28 +613,29 @@ static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type
        return true;
 }
 
-static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, int dst_reg,
-                                     int src_reg, int ctx_off,
+static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
+                                     const struct bpf_insn *si,
                                      struct bpf_insn *insn_buf,
                                      struct bpf_prog *prog)
 {
        struct bpf_insn *insn = insn_buf;
 
-       switch (ctx_off) {
+       switch (si->off) {
        case offsetof(struct bpf_perf_event_data, sample_period):
                BUILD_BUG_ON(FIELD_SIZEOF(struct perf_sample_data, period) != sizeof(u64));
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
-                                                      data), dst_reg, src_reg,
+                                                      data), si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_perf_event_data_kern, data));
-               *insn++ = BPF_LDX_MEM(BPF_DW, dst_reg, dst_reg,
+               *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
                                      offsetof(struct perf_sample_data, period));
                break;
        default:
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
-                                                      regs), dst_reg, src_reg,
+                                                      regs), si->dst_reg, si->src_reg,
                                      offsetof(struct bpf_perf_event_data_kern, regs));
-               *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), dst_reg, dst_reg, ctx_off);
+               *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
+                                     si->off);
                break;
        }
 
@@ -606,7 +648,7 @@ static const struct bpf_verifier_ops perf_event_prog_ops = {
        .convert_ctx_access     = pe_prog_convert_ctx_access,
 };
 
-static struct bpf_prog_type_list perf_event_tl = {
+static struct bpf_prog_type_list perf_event_tl __ro_after_init = {
        .ops    = &perf_event_prog_ops,
        .type   = BPF_PROG_TYPE_PERF_EVENT,
 };
index 5d33a7352919853e3d586f20a84f8d12b3d88bbb..aea6a1218c7db6bc7eb307a5ed4630d0b2542640 100644 (file)
@@ -162,15 +162,27 @@ trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
 }
 EXPORT_SYMBOL_GPL(trace_print_bitmask_seq);
 
+/**
+ * trace_print_hex_seq - print buffer as hex sequence
+ * @p: trace seq struct to write to
+ * @buf: The buffer to print
+ * @buf_len: Length of @buf in bytes
+ * @concatenate: Print @buf as a single hex string or with spacing
+ *
+ * Prints the passed buffer as a hex sequence: as one concatenated
+ * hex string if @concatenate is true, or with a space after each
+ * byte if @concatenate is false.
+ */
 const char *
-trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
+trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len,
+                   bool concatenate)
 {
        int i;
        const char *ret = trace_seq_buffer_ptr(p);
 
        for (i = 0; i < buf_len; i++)
-               trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);
-
+               trace_seq_printf(p, "%s%2.2x", concatenate || i == 0 ? "" : " ",
+                                buf[i]);
        trace_seq_putc(p, 0);
 
        return ret;
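
For a buffer holding { 0x2a, 0x1b, 0x00 }, the two modes render as:

        trace_print_hex_seq(p, buf, 3, false); /* "2a 1b 00" */
        trace_print_hex_seq(p, buf, 3, true);  /* "2a1b00" */
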
index 260a80e313b9022a711f77a8531feb5f864fa91c..5d644f180fe5b6218a2687abf5c2bf577b99fc67 100644 (file)
@@ -550,4 +550,7 @@ config STACKDEPOT
 config SBITMAP
        bool
 
+config PARMAN
+       tristate "parman"
+
 endmenu
index eb9e9a7870fa7bdb0f373f858037c1026880c85f..433a788500be93be3dde4d778b4f9b2bbd91d58e 100644 (file)
@@ -1819,13 +1819,23 @@ config TEST_HASH
        tristate "Perform selftest on hash functions"
        default n
        help
-         Enable this option to test the kernel's integer (<linux/hash,h>)
-         and string (<linux/stringhash.h>) hash functions on boot
-         (or module load).
+         Enable this option to test the kernel's integer (<linux/hash.h>),
+         string (<linux/stringhash.h>), and siphash (<linux/siphash.h>)
+         hash functions on boot (or module load).
 
          This is intended to help people writing architecture-specific
          optimized versions.  If unsure, say N.
 
+config TEST_PARMAN
+       tristate "Perform selftest on priority array manager"
+       default n
+       depends on PARMAN
+       help
+         Enable this option to test the priority array manager on boot
+         (or module load).
+
+         If unsure, say N.
+
 endmenu # runtime tests
 
 config PROVIDE_OHCI1394_DMA_INIT
index bc4073a8cd08da8377053c54a09e61f753a3fc7b..1c039a4f60e54febbd01e35147e0d9036a02c44b 100644 (file)
@@ -22,7 +22,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
         sha1.o chacha20.o md5.o irq_regs.o argv_split.o \
         flex_proportions.o ratelimit.o show_mem.o \
         is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-        earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o win_minmax.o
+        earlycpio.o seq_buf.o siphash.o \
+        nmi_backtrace.o nodemask.o win_minmax.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -44,7 +45,7 @@ obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_BPF) += test_bpf.o
 obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
-obj-$(CONFIG_TEST_HASH) += test_hash.o
+obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
 obj-$(CONFIG_TEST_KASAN) += test_kasan.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 obj-$(CONFIG_TEST_LKM) += test_module.o
@@ -55,6 +56,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
 obj-$(CONFIG_TEST_PRINTF) += test_printf.o
 obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
 obj-$(CONFIG_TEST_UUID) += test_uuid.o
+obj-$(CONFIG_TEST_PARMAN) += test_parman.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -229,3 +231,5 @@ obj-$(CONFIG_UBSAN) += ubsan.o
 UBSAN_SANITIZE_ubsan.o := n
 
 obj-$(CONFIG_SBITMAP) += sbitmap.o
+
+obj-$(CONFIG_PARMAN) += parman.o
diff --git a/lib/parman.c b/lib/parman.c
new file mode 100644 (file)
index 0000000..c6e42a8
--- /dev/null
@@ -0,0 +1,376 @@
+/*
+ * lib/parman.c - Manager for linear priority array areas
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/parman.h>
+
+struct parman_algo {
+       int (*item_add)(struct parman *parman, struct parman_prio *prio,
+                       struct parman_item *item);
+       void (*item_remove)(struct parman *parman, struct parman_prio *prio,
+                           struct parman_item *item);
+};
+
+struct parman {
+       const struct parman_ops *ops;
+       void *priv;
+       const struct parman_algo *algo;
+       unsigned long count;
+       unsigned long limit_count;
+       struct list_head prio_list;
+};
+
+static int parman_enlarge(struct parman *parman)
+{
+       unsigned long new_count = parman->limit_count +
+                                 parman->ops->resize_step;
+       int err;
+
+       err = parman->ops->resize(parman->priv, new_count);
+       if (err)
+               return err;
+       parman->limit_count = new_count;
+       return 0;
+}
+
+static int parman_shrink(struct parman *parman)
+{
+       unsigned long new_count = parman->limit_count -
+                                 parman->ops->resize_step;
+       int err;
+
+       if (new_count < parman->ops->base_count)
+               return 0;
+       err = parman->ops->resize(parman->priv, new_count);
+       if (err)
+               return err;
+       parman->limit_count = new_count;
+       return 0;
+}
+
+static bool parman_prio_used(struct parman_prio *prio)
+{
+       return !list_empty(&prio->item_list);
+}
+
+static struct parman_item *parman_prio_first_item(struct parman_prio *prio)
+{
+       return list_first_entry(&prio->item_list,
+                               typeof(struct parman_item), list);
+}
+
+static unsigned long parman_prio_first_index(struct parman_prio *prio)
+{
+       return parman_prio_first_item(prio)->index;
+}
+
+static struct parman_item *parman_prio_last_item(struct parman_prio *prio)
+{
+       return list_last_entry(&prio->item_list,
+                              typeof(struct parman_item), list);
+}
+
+static unsigned long parman_prio_last_index(struct parman_prio *prio)
+{
+       return parman_prio_last_item(prio)->index;
+}
+
+static unsigned long parman_lsort_new_index_find(struct parman *parman,
+                                                struct parman_prio *prio)
+{
+       list_for_each_entry_from_reverse(prio, &parman->prio_list, list) {
+               if (!parman_prio_used(prio))
+                       continue;
+               return parman_prio_last_index(prio) + 1;
+       }
+       return 0;
+}
+
+static void __parman_prio_move(struct parman *parman, struct parman_prio *prio,
+                              struct parman_item *item, unsigned long to_index,
+                              unsigned long count)
+{
+       parman->ops->move(parman->priv, item->index, to_index, count);
+}
+
+static void parman_prio_shift_down(struct parman *parman,
+                                  struct parman_prio *prio)
+{
+       struct parman_item *item;
+       unsigned long to_index;
+
+       if (!parman_prio_used(prio))
+               return;
+       item = parman_prio_first_item(prio);
+       to_index = parman_prio_last_index(prio) + 1;
+       __parman_prio_move(parman, prio, item, to_index, 1);
+       list_move_tail(&item->list, &prio->item_list);
+       item->index = to_index;
+}
+
+static void parman_prio_shift_up(struct parman *parman,
+                                struct parman_prio *prio)
+{
+       struct parman_item *item;
+       unsigned long to_index;
+
+       if (!parman_prio_used(prio))
+               return;
+       item = parman_prio_last_item(prio);
+       to_index = parman_prio_first_index(prio) - 1;
+       __parman_prio_move(parman, prio, item, to_index, 1);
+       list_move(&item->list, &prio->item_list);
+       item->index = to_index;
+}
+
+static void parman_prio_item_remove(struct parman *parman,
+                                   struct parman_prio *prio,
+                                   struct parman_item *item)
+{
+       struct parman_item *last_item;
+       unsigned long to_index;
+
+       last_item = parman_prio_last_item(prio);
+       if (last_item == item) {
+               list_del(&item->list);
+               return;
+       }
+       to_index = item->index;
+       __parman_prio_move(parman, prio, last_item, to_index, 1);
+       list_del(&last_item->list);
+       list_replace(&item->list, &last_item->list);
+       last_item->index = to_index;
+}
+
+static int parman_lsort_item_add(struct parman *parman,
+                                struct parman_prio *prio,
+                                struct parman_item *item)
+{
+       struct parman_prio *prio2;
+       unsigned long new_index;
+       int err;
+
+       if (parman->count + 1 > parman->limit_count) {
+               err = parman_enlarge(parman);
+               if (err)
+                       return err;
+       }
+
+       new_index = parman_lsort_new_index_find(parman, prio);
+       list_for_each_entry_reverse(prio2, &parman->prio_list, list) {
+               if (prio2 == prio)
+                       break;
+               parman_prio_shift_down(parman, prio2);
+       }
+       item->index = new_index;
+       list_add_tail(&item->list, &prio->item_list);
+       parman->count++;
+       return 0;
+}
+
+static void parman_lsort_item_remove(struct parman *parman,
+                                    struct parman_prio *prio,
+                                    struct parman_item *item)
+{
+       parman_prio_item_remove(parman, prio, item);
+       list_for_each_entry_continue(prio, &parman->prio_list, list)
+               parman_prio_shift_up(parman, prio);
+       parman->count--;
+       if (parman->limit_count - parman->count >= parman->ops->resize_step)
+               parman_shrink(parman);
+}
+
+static const struct parman_algo parman_lsort = {
+       .item_add       = parman_lsort_item_add,
+       .item_remove    = parman_lsort_item_remove,
+};
+
+static const struct parman_algo *parman_algos[] = {
+       &parman_lsort,
+};
+
+/**
+ * parman_create - creates a new parman instance
+ * @ops:       caller-specific callbacks
+ * @priv:      pointer to a private data passed to the ops
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Each parman instance manages an array area with chunks of entries
+ * with the same priority. Consider the following example:
+ *
+ * item 1 with prio 10
+ * item 2 with prio 10
+ * item 3 with prio 10
+ * item 4 with prio 20
+ * item 5 with prio 20
+ * item 6 with prio 30
+ * item 7 with prio 30
+ * item 8 with prio 30
+ *
+ * In this example, there are 3 priority chunks. The order of the priorities
+ * matters; however, the order of items within a single priority chunk does
+ * not matter. So the same array could be ordered as follows:
+ *
+ * item 2 with prio 10
+ * item 3 with prio 10
+ * item 1 with prio 10
+ * item 5 with prio 20
+ * item 4 with prio 20
+ * item 7 with prio 30
+ * item 8 with prio 30
+ * item 6 with prio 30
+ *
+ * The goal of parman is to maintain the priority ordering. The caller
+ * provides @ops with callbacks parman uses to move the items
+ * and resize the array area.
+ *
+ * Returns a pointer to the newly created parman instance on success,
+ * otherwise NULL.
+ */
+struct parman *parman_create(const struct parman_ops *ops, void *priv)
+{
+       struct parman *parman;
+
+       parman = kzalloc(sizeof(*parman), GFP_KERNEL);
+       if (!parman)
+               return NULL;
+       INIT_LIST_HEAD(&parman->prio_list);
+       parman->ops = ops;
+       parman->priv = priv;
+       parman->limit_count = ops->base_count;
+       parman->algo = parman_algos[ops->algo];
+       return parman;
+}
+EXPORT_SYMBOL(parman_create);
+
+/**
+ * parman_destroy - destroys existing parman instance
+ * @parman:    parman instance
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void parman_destroy(struct parman *parman)
+{
+       WARN_ON(!list_empty(&parman->prio_list));
+       kfree(parman);
+}
+EXPORT_SYMBOL(parman_destroy);
+
+/**
+ * parman_prio_init - initializes a parman priority chunk
+ * @parman:    parman instance
+ * @prio:      parman prio structure to be initialized
+ * @priority:  desired priority of the chunk
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Before the caller can add an item with a certain priority, a priority
+ * chunk has to be initialized for it using this function.
+ */
+void parman_prio_init(struct parman *parman, struct parman_prio *prio,
+                     unsigned long priority)
+{
+       struct parman_prio *prio2;
+       struct list_head *pos;
+
+       INIT_LIST_HEAD(&prio->item_list);
+       prio->priority = priority;
+
+       /* Position inside the list according to priority */
+       list_for_each(pos, &parman->prio_list) {
+               prio2 = list_entry(pos, typeof(*prio2), list);
+               if (prio2->priority > prio->priority)
+                       break;
+       }
+       list_add_tail(&prio->list, pos);
+}
+EXPORT_SYMBOL(parman_prio_init);
+
+/**
+ * parman_prio_fini - finalizes use of parman priority chunk
+ * @prio:      parman prio structure
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void parman_prio_fini(struct parman_prio *prio)
+{
+       WARN_ON(parman_prio_used(prio));
+       list_del(&prio->list);
+}
+EXPORT_SYMBOL(parman_prio_fini);
+
+/**
+ * parman_item_add - adds a parman item under defined priority
+ * @parman:    parman instance
+ * @prio:      parman prio instance to add the item to
+ * @item:      parman item instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Adds an item to the array managed by the parman instance under the
+ * specified priority.
+ *
+ * Returns 0 on success, or a negative number to indicate an error.
+ */
+int parman_item_add(struct parman *parman, struct parman_prio *prio,
+                   struct parman_item *item)
+{
+       return parman->algo->item_add(parman, prio, item);
+}
+EXPORT_SYMBOL(parman_item_add);
+
+/**
+ * parman_item_remove - deletes a parman item
+ * @parman:    parman instance
+ * @prio:      parman prio instance to delete the item from
+ * @item:      parman item instance
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void parman_item_remove(struct parman *parman, struct parman_prio *prio,
+                       struct parman_item *item)
+{
+       parman->algo->item_remove(parman, prio, item);
+}
+EXPORT_SYMBOL(parman_item_remove);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Priority-based array manager");
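
A minimal caller sketch under stated assumptions: the ops fields are exactly the ones this file dereferences (base_count, resize_step, resize, move, algo), PARMAN_ALGO_TYPE_LSORT is assumed to be the lsort selector from linux/parman.h, and the callback bodies are placeholders:

static int my_resize(void *priv, unsigned long new_count)
{
        /* reallocate the backing table to hold new_count entries */
        return 0;
}

static void my_move(void *priv, unsigned long from_index,
                    unsigned long to_index, unsigned long count)
{
        /* copy count entries from from_index to to_index */
}

static const struct parman_ops my_ops = {
        .base_count     = 16,
        .resize_step    = 16,
        .resize         = my_resize,
        .move           = my_move,
        .algo           = PARMAN_ALGO_TYPE_LSORT,       /* assumed enum name */
};

/* then: struct parman *p = parman_create(&my_ops, my_priv); */
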
index 32d0ad0583806c98df3380001a107dad053f0c66..172454e6b979e935ef0d184953b6c35b06c32592 100644 (file)
 #define HASH_MIN_SIZE          4U
 #define BUCKET_LOCKS_PER_CPU   32UL
 
+union nested_table {
+       union nested_table __rcu *table;
+       struct rhash_head __rcu *bucket;
+};
+
 static u32 head_hashfn(struct rhashtable *ht,
                       const struct bucket_table *tbl,
                       const struct rhash_head *he)
@@ -76,6 +81,9 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
        /* Never allocate more than 0.5 locks per bucket */
        size = min_t(unsigned int, size, tbl->size >> 1);
 
+       if (tbl->nest)
+               size = min(size, 1U << tbl->nest);
+
        if (sizeof(spinlock_t) != 0) {
                tbl->locks = NULL;
 #ifdef CONFIG_NUMA
@@ -99,8 +107,45 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
        return 0;
 }
 
+static void nested_table_free(union nested_table *ntbl, unsigned int size)
+{
+       const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+       const unsigned int len = 1 << shift;
+       unsigned int i;
+
+       ntbl = rcu_dereference_raw(ntbl->table);
+       if (!ntbl)
+               return;
+
+       if (size > len) {
+               size >>= shift;
+               for (i = 0; i < len; i++)
+                       nested_table_free(ntbl + i, size);
+       }
+
+       kfree(ntbl);
+}
+
+static void nested_bucket_table_free(const struct bucket_table *tbl)
+{
+       unsigned int size = tbl->size >> tbl->nest;
+       unsigned int len = 1 << tbl->nest;
+       union nested_table *ntbl;
+       unsigned int i;
+
+       ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+
+       for (i = 0; i < len; i++)
+               nested_table_free(ntbl + i, size);
+
+       kfree(ntbl);
+}
+
 static void bucket_table_free(const struct bucket_table *tbl)
 {
+       if (tbl->nest)
+               nested_bucket_table_free(tbl);
+
        if (tbl)
                kvfree(tbl->locks);
 
@@ -112,6 +157,59 @@ static void bucket_table_free_rcu(struct rcu_head *head)
        bucket_table_free(container_of(head, struct bucket_table, rcu));
 }
 
+static union nested_table *nested_table_alloc(struct rhashtable *ht,
+                                             union nested_table __rcu **prev,
+                                             unsigned int shifted,
+                                             unsigned int nhash)
+{
+       union nested_table *ntbl;
+       int i;
+
+       ntbl = rcu_dereference(*prev);
+       if (ntbl)
+               return ntbl;
+
+       ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+
+       if (ntbl && shifted) {
+               for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
+                       INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
+                                           (i << shifted) | nhash);
+       }
+
+       rcu_assign_pointer(*prev, ntbl);
+
+       return ntbl;
+}
+
+static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
+                                                     size_t nbuckets,
+                                                     gfp_t gfp)
+{
+       const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+       struct bucket_table *tbl;
+       size_t size;
+
+       if (nbuckets < (1 << (shift + 1)))
+               return NULL;
+
+       size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
+
+       tbl = kzalloc(size, gfp);
+       if (!tbl)
+               return NULL;
+
+       if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
+                               0, 0)) {
+               kfree(tbl);
+               return NULL;
+       }
+
+       tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
+
+       return tbl;
+}
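
A worked sizing example, assuming a 64-bit build with 4 KiB pages (shift = 12 - 3 = 9, i.e. 512 pointer slots per page):

        /* nbuckets = 1 << 20:
         *   tbl->nest = (ilog2(1 << 20) - 1) % 9 + 1 = (19 % 9) + 1 = 2
         * so the lone allocated bucket slot points at a 4-entry top level,
         * and each entry fans out through further 512-slot pages until
         * all 2^20 buckets are covered.
         */
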
+
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                                               size_t nbuckets,
                                               gfp_t gfp)
@@ -126,10 +224,17 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
        if (tbl == NULL && gfp == GFP_KERNEL)
                tbl = vzalloc(size);
+
+       size = nbuckets;
+
+       if (tbl == NULL && gfp != GFP_KERNEL) {
+               tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
+               nbuckets = 0;
+       }
        if (tbl == NULL)
                return NULL;
 
-       tbl->size = nbuckets;
+       tbl->size = size;
 
        if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
                bucket_table_free(tbl);
@@ -164,12 +269,17 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl = rhashtable_last_table(ht,
                rht_dereference_rcu(old_tbl->future_tbl, ht));
-       struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
-       int err = -ENOENT;
+       struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
+       int err = -EAGAIN;
        struct rhash_head *head, *next, *entry;
        spinlock_t *new_bucket_lock;
        unsigned int new_hash;
 
+       if (new_tbl->nest)
+               goto out;
+
+       err = -ENOENT;
+
        rht_for_each(entry, old_tbl, old_hash) {
                err = 0;
                next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
@@ -202,19 +312,26 @@ out:
        return err;
 }
 
-static void rhashtable_rehash_chain(struct rhashtable *ht,
+static int rhashtable_rehash_chain(struct rhashtable *ht,
                                    unsigned int old_hash)
 {
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        spinlock_t *old_bucket_lock;
+       int err;
 
        old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);
 
        spin_lock_bh(old_bucket_lock);
-       while (!rhashtable_rehash_one(ht, old_hash))
+       while (!(err = rhashtable_rehash_one(ht, old_hash)))
                ;
-       old_tbl->rehash++;
+
+       if (err == -ENOENT) {
+               old_tbl->rehash++;
+               err = 0;
+       }
        spin_unlock_bh(old_bucket_lock);
+
+       return err;
 }
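/* The rehash loop now distinguishes a drained chain (-ENOENT, converted
 * to success above once old_tbl->rehash is bumped) from real failures
 * such as the -EAGAIN that rhashtable_rehash_one() returns when the
 * target table is nested; those propagate out of
 * rhashtable_rehash_table() so the deferred worker can retry once a
 * flat table can be allocated with GFP_KERNEL.
 */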
 
 static int rhashtable_rehash_attach(struct rhashtable *ht,
@@ -246,13 +363,17 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
        struct bucket_table *new_tbl;
        struct rhashtable_walker *walker;
        unsigned int old_hash;
+       int err;
 
        new_tbl = rht_dereference(old_tbl->future_tbl, ht);
        if (!new_tbl)
                return 0;
 
-       for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
-               rhashtable_rehash_chain(ht, old_hash);
+       for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
+               err = rhashtable_rehash_chain(ht, old_hash);
+               if (err)
+                       return err;
+       }
 
        /* Publish the new table pointer. */
        rcu_assign_pointer(ht->tbl, new_tbl);
@@ -271,31 +392,16 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
        return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
 }
 
-/**
- * rhashtable_expand - Expand hash table while allowing concurrent lookups
- * @ht:                the hash table to expand
- *
- * A secondary bucket array is allocated and the hash entries are migrated.
- *
- * This function may only be called in a context where it is safe to call
- * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
- *
- * The caller must ensure that no concurrent resizing occurs by holding
- * ht->mutex.
- *
- * It is valid to have concurrent insertions and deletions protected by per
- * bucket locks or concurrent RCU protected lookups and traversals.
- */
-static int rhashtable_expand(struct rhashtable *ht)
+static int rhashtable_rehash_alloc(struct rhashtable *ht,
+                                  struct bucket_table *old_tbl,
+                                  unsigned int size)
 {
-       struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
+       struct bucket_table *new_tbl;
        int err;
 
        ASSERT_RHT_MUTEX(ht);
 
-       old_tbl = rhashtable_last_table(ht, old_tbl);
-
-       new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
+       new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
        if (new_tbl == NULL)
                return -ENOMEM;
 
@@ -324,12 +430,9 @@ static int rhashtable_expand(struct rhashtable *ht)
  */
 static int rhashtable_shrink(struct rhashtable *ht)
 {
-       struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
+       struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        unsigned int nelems = atomic_read(&ht->nelems);
        unsigned int size = 0;
-       int err;
-
-       ASSERT_RHT_MUTEX(ht);
 
        if (nelems)
                size = roundup_pow_of_two(nelems * 3 / 2);
@@ -342,15 +445,7 @@ static int rhashtable_shrink(struct rhashtable *ht)
        if (rht_dereference(old_tbl->future_tbl, ht))
                return -EEXIST;
 
-       new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
-       if (new_tbl == NULL)
-               return -ENOMEM;
-
-       err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
-       if (err)
-               bucket_table_free(new_tbl);
-
-       return err;
+       return rhashtable_rehash_alloc(ht, old_tbl, size);
 }
 
 static void rht_deferred_worker(struct work_struct *work)
@@ -366,11 +461,14 @@ static void rht_deferred_worker(struct work_struct *work)
        tbl = rhashtable_last_table(ht, tbl);
 
        if (rht_grow_above_75(ht, tbl))
-               rhashtable_expand(ht);
+               err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
        else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
-               rhashtable_shrink(ht);
+               err = rhashtable_shrink(ht);
+       else if (tbl->nest)
+               err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
 
-       err = rhashtable_rehash_table(ht);
+       if (!err)
+               err = rhashtable_rehash_table(ht);
 
        mutex_unlock(&ht->mutex);
 
@@ -439,8 +537,8 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
        int elasticity;
 
        elasticity = ht->elasticity;
-       pprev = &tbl->buckets[hash];
-       rht_for_each(head, tbl, hash) {
+       pprev = rht_bucket_var(tbl, hash);
+       rht_for_each_continue(head, *pprev, tbl, hash) {
                struct rhlist_head *list;
                struct rhlist_head *plist;
 
@@ -477,6 +575,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
                                                  struct rhash_head *obj,
                                                  void *data)
 {
+       struct rhash_head __rcu **pprev;
        struct bucket_table *new_tbl;
        struct rhash_head *head;
 
@@ -499,7 +598,11 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
        if (unlikely(rht_grow_above_100(ht, tbl)))
                return ERR_PTR(-EAGAIN);
 
-       head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
+       pprev = rht_bucket_insert(ht, tbl, hash);
+       if (!pprev)
+               return ERR_PTR(-ENOMEM);
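        /* Unlike a flat table, a nested bucket array may need one or
         * more intermediate pages allocated (with GFP_ATOMIC) before
         * the bucket pointer even exists, hence this new -ENOMEM path.
         */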
+
+       head = rht_dereference_bucket(*pprev, tbl, hash);
 
        RCU_INIT_POINTER(obj->next, head);
        if (ht->rhlist) {
@@ -509,7 +612,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
                RCU_INIT_POINTER(list->next, NULL);
        }
 
-       rcu_assign_pointer(tbl->buckets[hash], obj);
+       rcu_assign_pointer(*pprev, obj);
 
        atomic_inc(&ht->nelems);
        if (rht_grow_above_75(ht, tbl))
@@ -975,7 +1078,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                                 void (*free_fn)(void *ptr, void *arg),
                                 void *arg)
 {
-       const struct bucket_table *tbl;
+       struct bucket_table *tbl;
        unsigned int i;
 
        cancel_work_sync(&ht->run_work);
@@ -986,7 +1089,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                for (i = 0; i < tbl->size; i++) {
                        struct rhash_head *pos, *next;
 
-                       for (pos = rht_dereference(tbl->buckets[i], ht),
+                       for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
                             next = !rht_is_a_nulls(pos) ?
                                        rht_dereference(pos->next, ht) : NULL;
                             !rht_is_a_nulls(pos);
@@ -1007,3 +1110,70 @@ void rhashtable_destroy(struct rhashtable *ht)
        return rhashtable_free_and_destroy(ht, NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
+
+struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+                                           unsigned int hash)
+{
+       const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+       static struct rhash_head __rcu *rhnull =
+               (struct rhash_head __rcu *)NULLS_MARKER(0);
+       unsigned int index = hash & ((1 << tbl->nest) - 1);
+       unsigned int size = tbl->size >> tbl->nest;
+       unsigned int subhash = hash;
+       union nested_table *ntbl;
+
+       ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+       ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash);
+       subhash >>= tbl->nest;
+
+       while (ntbl && size > (1 << shift)) {
+               index = subhash & ((1 << shift) - 1);
+               ntbl = rht_dereference_bucket(ntbl[index].table, tbl, hash);
+               size >>= shift;
+               subhash >>= shift;
+       }
+
+       if (!ntbl)
+               return &rhnull;
+
+       return &ntbl[subhash].bucket;
+}
+EXPORT_SYMBOL_GPL(rht_bucket_nested);
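/* Worked example for the walk above, reusing tbl->size = 1 << 20 and
 * tbl->nest = 2 from the allocation example: the low 2 hash bits select
 * one of the 4 top-level entries, then successive 9-bit slices of the
 * remaining hash index a 512-entry page per level until the residual
 * size fits in one page.  A missing intermediate page returns the
 * static rhnull marker, so readers simply see an empty bucket.
 */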
+
+struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+                                                  struct bucket_table *tbl,
+                                                  unsigned int hash)
+{
+       const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
+       unsigned int index = hash & ((1 << tbl->nest) - 1);
+       unsigned int size = tbl->size >> tbl->nest;
+       union nested_table *ntbl;
+       unsigned int shifted;
+       unsigned int nhash;
+
+       ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
+       hash >>= tbl->nest;
+       nhash = index;
+       shifted = tbl->nest;
+       ntbl = nested_table_alloc(ht, &ntbl[index].table,
+                                 size <= (1 << shift) ? shifted : 0, nhash);
+
+       while (ntbl && size > (1 << shift)) {
+               index = hash & ((1 << shift) - 1);
+               size >>= shift;
+               hash >>= shift;
+               nhash |= index << shifted;
+               shifted += shift;
+               ntbl = nested_table_alloc(ht, &ntbl[index].table,
+                                         size <= (1 << shift) ? shifted : 0,
+                                         nhash);
+       }
+
+       if (!ntbl)
+               return NULL;
+
+       return &ntbl[hash].bucket;
+}
+EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
diff --git a/lib/siphash.c b/lib/siphash.c
new file mode 100644 (file)
index 0000000..3ae58b4
--- /dev/null
@@ -0,0 +1,551 @@
+/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#include <linux/siphash.h>
+#include <asm/unaligned.h>
+
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+#include <linux/dcache.h>
+#include <asm/word-at-a-time.h>
+#endif
+
+#define SIPROUND \
+       do { \
+       v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
+       v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
+       v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
+       v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
+       } while (0)
+
+#define PREAMBLE(len) \
+       u64 v0 = 0x736f6d6570736575ULL; \
+       u64 v1 = 0x646f72616e646f6dULL; \
+       u64 v2 = 0x6c7967656e657261ULL; \
+       u64 v3 = 0x7465646279746573ULL; \
+       u64 b = ((u64)(len)) << 56; \
+       v3 ^= key->key[1]; \
+       v2 ^= key->key[0]; \
+       v1 ^= key->key[1]; \
+       v0 ^= key->key[0];
+
+#define POSTAMBLE \
+       v3 ^= b; \
+       SIPROUND; \
+       SIPROUND; \
+       v0 ^= b; \
+       v2 ^= 0xff; \
+       SIPROUND; \
+       SIPROUND; \
+       SIPROUND; \
+       SIPROUND; \
+       return (v0 ^ v1) ^ (v2 ^ v3);
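/* The v0..v3 constants above are the standard SipHash initialisation
 * (the ASCII of "somepseudorandomlygeneratedbytes"), and the round
 * counts give SipHash-2-4: two SIPROUNDs per 8-byte message word in the
 * compression loops below, plus four finalisation SIPROUNDs after
 * xoring 0xff into v2.
 */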
+
+u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
+{
+       const u8 *end = data + len - (len % sizeof(u64));
+       const u8 left = len & (sizeof(u64) - 1);
+       u64 m;
+       PREAMBLE(len)
+       for (; data != end; data += sizeof(u64)) {
+               m = le64_to_cpup(data);
+               v3 ^= m;
+               SIPROUND;
+               SIPROUND;
+               v0 ^= m;
+       }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+       if (left)
+               b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+                                                 bytemask_from_count(left)));
+#else
+       switch (left) {
+       case 7: b |= ((u64)end[6]) << 48;
+       case 6: b |= ((u64)end[5]) << 40;
+       case 5: b |= ((u64)end[4]) << 32;
+       case 4: b |= le32_to_cpup(data); break;
+       case 3: b |= ((u64)end[2]) << 16;
+       case 2: b |= le16_to_cpup(data); break;
+       case 1: b |= end[0];
+       }
+#endif
+       POSTAMBLE
+}
+EXPORT_SYMBOL(__siphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
+{
+       const u8 *end = data + len - (len % sizeof(u64));
+       const u8 left = len & (sizeof(u64) - 1);
+       u64 m;
+       PREAMBLE(len)
+       for (; data != end; data += sizeof(u64)) {
+               m = get_unaligned_le64(data);
+               v3 ^= m;
+               SIPROUND;
+               SIPROUND;
+               v0 ^= m;
+       }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+       if (left)
+               b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+                                                 bytemask_from_count(left)));
+#else
+       switch (left) {
+       case 7: b |= ((u64)end[6]) << 48;
+       case 6: b |= ((u64)end[5]) << 40;
+       case 5: b |= ((u64)end[4]) << 32;
+       case 4: b |= get_unaligned_le32(end); break;
+       case 3: b |= ((u64)end[2]) << 16;
+       case 2: b |= get_unaligned_le16(end); break;
+       case 1: b |= end[0];
+       }
+#endif
+       POSTAMBLE
+}
+EXPORT_SYMBOL(__siphash_unaligned);
+#endif
+
+/**
+ * siphash_1u64 - compute 64-bit siphash PRF value of a u64
+ * @first: first u64
+ * @key: the siphash key
+ */
+u64 siphash_1u64(const u64 first, const siphash_key_t *key)
+{
+       PREAMBLE(8)
+       v3 ^= first;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= first;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_1u64);
+
+/**
+ * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64
+ * @first: first u64
+ * @second: second u64
+ * @key: the siphash key
+ */
+u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key)
+{
+       PREAMBLE(16)
+       v3 ^= first;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= second;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_2u64);
+
+/**
+ * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64
+ * @first: first u64
+ * @second: second u64
+ * @third: third u64
+ * @key: the siphash key
+ */
+u64 siphash_3u64(const u64 first, const u64 second, const u64 third,
+                const siphash_key_t *key)
+{
+       PREAMBLE(24)
+       v3 ^= first;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= second;
+       v3 ^= third;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= third;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_3u64);
+
+/**
+ * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64
+ * @first: first u64
+ * @second: second u64
+ * @third: third u64
+ * @forth: fourth u64
+ * @key: the siphash key
+ */
+u64 siphash_4u64(const u64 first, const u64 second, const u64 third,
+                const u64 forth, const siphash_key_t *key)
+{
+       PREAMBLE(32)
+       v3 ^= first;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= second;
+       v3 ^= third;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= third;
+       v3 ^= forth;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= forth;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_4u64);
+
+u64 siphash_1u32(const u32 first, const siphash_key_t *key)
+{
+       PREAMBLE(4)
+       b |= first;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_1u32);
+
+u64 siphash_3u32(const u32 first, const u32 second, const u32 third,
+                const siphash_key_t *key)
+{
+       u64 combined = (u64)second << 32 | first;
+       PREAMBLE(12)
+       v3 ^= combined;
+       SIPROUND;
+       SIPROUND;
+       v0 ^= combined;
+       b |= third;
+       POSTAMBLE
+}
+EXPORT_SYMBOL(siphash_3u32);
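A hedged usage sketch, not part of this commit: callers are expected to seed a siphash_key_t once and then hash small fixed-width tuples with the helpers above, avoiding the byte-stream loop of siphash() when the length is known at compile time. The names example_key, example_key_init and example_tuple_hash below are illustrative only.

#include <linux/cache.h>
#include <linux/random.h>
#include <linux/siphash.h>

static siphash_key_t example_key __read_mostly;

static void example_key_init(void)
{
        get_random_bytes(&example_key, sizeof(example_key));
}

/* Hash an illustrative IPv4 4-tuple; the port pair is packed into the
 * third u32 so the whole tuple fits siphash_3u32().
 */
static u64 example_tuple_hash(__be32 saddr, __be32 daddr,
                              __be16 sport, __be16 dport)
{
        return siphash_3u32((__force u32)saddr, (__force u32)daddr,
                            ((u32)(__force u16)sport << 16) |
                            (__force u16)dport,
                            &example_key);
}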
+
+#if BITS_PER_LONG == 64
+/* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for
+ * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3.
+ */
+
+#define HSIPROUND SIPROUND
+#define HPREAMBLE(len) PREAMBLE(len)
+#define HPOSTAMBLE \
+       v3 ^= b; \
+       HSIPROUND; \
+       v0 ^= b; \
+       v2 ^= 0xff; \
+       HSIPROUND; \
+       HSIPROUND; \
+       HSIPROUND; \
+       return (v0 ^ v1) ^ (v2 ^ v3);
+
+u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
+{
+       const u8 *end = data + len - (len % sizeof(u64));
+       const u8 left = len & (sizeof(u64) - 1);
+       u64 m;
+       HPREAMBLE(len)
+       for (; data != end; data += sizeof(u64)) {
+               m = le64_to_cpup(data);
+               v3 ^= m;
+               HSIPROUND;
+               v0 ^= m;
+       }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+       if (left)
+               b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+                                                 bytemask_from_count(left)));
+#else
+       switch (left) {
+       case 7: b |= ((u64)end[6]) << 48;
+       case 6: b |= ((u64)end[5]) << 40;
+       case 5: b |= ((u64)end[4]) << 32;
+       case 4: b |= le32_to_cpup(data); break;
+       case 3: b |= ((u64)end[2]) << 16;
+       case 2: b |= le16_to_cpup(data); break;
+       case 1: b |= end[0];
+       }
+#endif
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+                        const hsiphash_key_t *key)
+{
+       const u8 *end = data + len - (len % sizeof(u64));
+       const u8 left = len & (sizeof(u64) - 1);
+       u64 m;
+       HPREAMBLE(len)
+       for (; data != end; data += sizeof(u64)) {
+               m = get_unaligned_le64(data);
+               v3 ^= m;
+               HSIPROUND;
+               v0 ^= m;
+       }
+#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
+       if (left)
+               b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
+                                                 bytemask_from_count(left)));
+#else
+       switch (left) {
+       case 7: b |= ((u64)end[6]) << 48;
+       case 6: b |= ((u64)end[5]) << 40;
+       case 5: b |= ((u64)end[4]) << 32;
+       case 4: b |= get_unaligned_le32(end); break;
+       case 3: b |= ((u64)end[2]) << 16;
+       case 2: b |= get_unaligned_le16(end); break;
+       case 1: b |= end[0];
+       }
+#endif
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_unaligned);
+#endif
+
+/**
+ * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
+ * @first: first u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
+{
+       HPREAMBLE(4)
+       b |= first;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_1u32);
+
+/**
+ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
+ * @first: first u32
+ * @second: second u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
+{
+       u64 combined = (u64)second << 32 | first;
+       HPREAMBLE(8)
+       v3 ^= combined;
+       HSIPROUND;
+       v0 ^= combined;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_2u32);
+
+/**
+ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
+                 const hsiphash_key_t *key)
+{
+       u64 combined = (u64)second << 32 | first;
+       HPREAMBLE(12)
+       v3 ^= combined;
+       HSIPROUND;
+       v0 ^= combined;
+       b |= third;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_3u32);
+
+/**
+ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @forth: fourth u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+                 const u32 forth, const hsiphash_key_t *key)
+{
+       u64 combined = (u64)second << 32 | first;
+       HPREAMBLE(16)
+       v3 ^= combined;
+       HSIPROUND;
+       v0 ^= combined;
+       combined = (u64)forth << 32 | third;
+       v3 ^= combined;
+       HSIPROUND;
+       v0 ^= combined;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_4u32);
+#else
+#define HSIPROUND \
+       do { \
+       v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
+       v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
+       v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
+       v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
+       } while (0)
+
+#define HPREAMBLE(len) \
+       u32 v0 = 0; \
+       u32 v1 = 0; \
+       u32 v2 = 0x6c796765U; \
+       u32 v3 = 0x74656462U; \
+       u32 b = ((u32)(len)) << 24; \
+       v3 ^= key->key[1]; \
+       v2 ^= key->key[0]; \
+       v1 ^= key->key[1]; \
+       v0 ^= key->key[0];
+
+#define HPOSTAMBLE \
+       v3 ^= b; \
+       HSIPROUND; \
+       v0 ^= b; \
+       v2 ^= 0xff; \
+       HSIPROUND; \
+       HSIPROUND; \
+       HSIPROUND; \
+       return v1 ^ v3;
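/* This branch is true HalfSipHash-1-3: 32-bit lanes with the reduced
 * initial constants, one HSIPROUND per 4-byte message word, three
 * finalisation rounds, and the narrower v1 ^ v3 output that the
 * reference HalfSipHash design specifies.
 */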
+
+u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
+{
+       const u8 *end = data + len - (len % sizeof(u32));
+       const u8 left = len & (sizeof(u32) - 1);
+       u32 m;
+       HPREAMBLE(len)
+       for (; data != end; data += sizeof(u32)) {
+               m = le32_to_cpup(data);
+               v3 ^= m;
+               HSIPROUND;
+               v0 ^= m;
+       }
+       switch (left) {
+       case 3: b |= ((u32)end[2]) << 16;
+       case 2: b |= le16_to_cpup(data); break;
+       case 1: b |= end[0];
+       }
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_aligned);
+
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+u32 __hsiphash_unaligned(const void *data, size_t len,
+                        const hsiphash_key_t *key)
+{
+       const u8 *end = data + len - (len % sizeof(u32));
+       const u8 left = len & (sizeof(u32) - 1);
+       u32 m;
+       HPREAMBLE(len)
+       for (; data != end; data += sizeof(u32)) {
+               m = get_unaligned_le32(data);
+               v3 ^= m;
+               HSIPROUND;
+               v0 ^= m;
+       }
+       switch (left) {
+       case 3: b |= ((u32)end[2]) << 16;
+       case 2: b |= get_unaligned_le16(end); break;
+       case 1: b |= end[0];
+       }
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(__hsiphash_unaligned);
+#endif
+
+/**
+ * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
+ * @first: first u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
+{
+       HPREAMBLE(4)
+       v3 ^= first;
+       HSIPROUND;
+       v0 ^= first;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_1u32);
+
+/**
+ * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
+ * @first: first u32
+ * @second: second u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
+{
+       HPREAMBLE(8)
+       v3 ^= first;
+       HSIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       HSIPROUND;
+       v0 ^= second;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_2u32);
+
+/**
+ * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
+                 const hsiphash_key_t *key)
+{
+       HPREAMBLE(12)
+       v3 ^= first;
+       HSIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       HSIPROUND;
+       v0 ^= second;
+       v3 ^= third;
+       HSIPROUND;
+       v0 ^= third;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_3u32);
+
+/**
+ * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
+ * @first: first u32
+ * @second: second u32
+ * @third: third u32
+ * @forth: fourth u32
+ * @key: the hsiphash key
+ */
+u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
+                 const u32 forth, const hsiphash_key_t *key)
+{
+       HPREAMBLE(16)
+       v3 ^= first;
+       HSIPROUND;
+       v0 ^= first;
+       v3 ^= second;
+       HSIPROUND;
+       v0 ^= second;
+       v3 ^= third;
+       HSIPROUND;
+       v0 ^= third;
+       v3 ^= forth;
+       HSIPROUND;
+       v0 ^= forth;
+       HPOSTAMBLE
+}
+EXPORT_SYMBOL(hsiphash_4u32);
+#endif
diff --git a/lib/test_parman.c b/lib/test_parman.c
new file mode 100644 (file)
index 0000000..fe9f3a7
--- /dev/null
@@ -0,0 +1,395 @@
+/*
+ * lib/test_parman.c - Test module for parman
+ * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/random.h>
+#include <linux/parman.h>
+
+#define TEST_PARMAN_PRIO_SHIFT 7 /* defines number of prios for testing */
+#define TEST_PARMAN_PRIO_COUNT BIT(TEST_PARMAN_PRIO_SHIFT)
+#define TEST_PARMAN_PRIO_MASK (TEST_PARMAN_PRIO_COUNT - 1)
+
+#define TEST_PARMAN_ITEM_SHIFT 13 /* defines a total number
+                                  * of items for testing
+                                  */
+#define TEST_PARMAN_ITEM_COUNT BIT(TEST_PARMAN_ITEM_SHIFT)
+#define TEST_PARMAN_ITEM_MASK (TEST_PARMAN_ITEM_COUNT - 1)
+
+#define TEST_PARMAN_BASE_SHIFT 8
+#define TEST_PARMAN_BASE_COUNT BIT(TEST_PARMAN_BASE_SHIFT)
+#define TEST_PARMAN_RESIZE_STEP_SHIFT 7
+#define TEST_PARMAN_RESIZE_STEP_COUNT BIT(TEST_PARMAN_RESIZE_STEP_SHIFT)
+
+#define TEST_PARMAN_BULK_MAX_SHIFT (2 + TEST_PARMAN_RESIZE_STEP_SHIFT)
+#define TEST_PARMAN_BULK_MAX_COUNT BIT(TEST_PARMAN_BULK_MAX_SHIFT)
+#define TEST_PARMAN_BULK_MAX_MASK (TEST_PARMAN_BULK_MAX_COUNT - 1)
+
+#define TEST_PARMAN_RUN_BUDGET (TEST_PARMAN_ITEM_COUNT * 256)
+
+struct test_parman_prio {
+       struct parman_prio parman_prio;
+       unsigned long priority;
+};
+
+struct test_parman_item {
+       struct parman_item parman_item;
+       struct test_parman_prio *prio;
+       bool used;
+};
+
+struct test_parman {
+       struct parman *parman;
+       struct test_parman_item **prio_array;
+       unsigned long prio_array_limit;
+       struct test_parman_prio prios[TEST_PARMAN_PRIO_COUNT];
+       struct test_parman_item items[TEST_PARMAN_ITEM_COUNT];
+       struct rnd_state rnd;
+       unsigned long run_budget;
+       unsigned long bulk_budget;
+       bool bulk_noop;
+       unsigned int used_items;
+};
+
+#define ITEM_PTRS_SIZE(count) (sizeof(struct test_parman_item *) * (count))
+
+static int test_parman_resize(void *priv, unsigned long new_count)
+{
+       struct test_parman *test_parman = priv;
+       struct test_parman_item **prio_array;
+       unsigned long old_count;
+
+       prio_array = krealloc(test_parman->prio_array,
+                             ITEM_PTRS_SIZE(new_count), GFP_KERNEL);
+       if (new_count == 0)
+               return 0;
+       if (!prio_array)
+               return -ENOMEM;
+       old_count = test_parman->prio_array_limit;
+       if (new_count > old_count)
+               memset(&prio_array[old_count], 0,
+                      ITEM_PTRS_SIZE(new_count - old_count));
+       test_parman->prio_array = prio_array;
+       test_parman->prio_array_limit = new_count;
+       return 0;
+}
+
+static void test_parman_move(void *priv, unsigned long from_index,
+                            unsigned long to_index, unsigned long count)
+{
+       struct test_parman *test_parman = priv;
+       struct test_parman_item **prio_array = test_parman->prio_array;
+
+       memmove(&prio_array[to_index], &prio_array[from_index],
+               ITEM_PTRS_SIZE(count));
+       memset(&prio_array[from_index], 0, ITEM_PTRS_SIZE(count));
+}
+
+static const struct parman_ops test_parman_lsort_ops = {
+       .base_count     = TEST_PARMAN_BASE_COUNT,
+       .resize_step    = TEST_PARMAN_RESIZE_STEP_COUNT,
+       .resize         = test_parman_resize,
+       .move           = test_parman_move,
+       .algo           = PARMAN_ALGO_TYPE_LSORT,
+};
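/* With the constants above, the lsort-managed array starts at
 * TEST_PARMAN_BASE_COUNT (256) entries and grows or shrinks in
 * TEST_PARMAN_RESIZE_STEP_COUNT (128) steps through test_parman_resize();
 * PARMAN_ALGO_TYPE_LSORT keeps the occupied entries sorted by priority
 * with no gaps, which is exactly what test_parman_check_array() verifies
 * below.
 */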
+
+static void test_parman_rnd_init(struct test_parman *test_parman)
+{
+       prandom_seed_state(&test_parman->rnd, 3141592653589793238ULL);
+}
+
+static u32 test_parman_rnd_get(struct test_parman *test_parman)
+{
+       return prandom_u32_state(&test_parman->rnd);
+}
+
+static unsigned long test_parman_priority_gen(struct test_parman *test_parman)
+{
+       unsigned long priority;
+       int i;
+
+again:
+       priority = test_parman_rnd_get(test_parman);
+       if (priority == 0)
+               goto again;
+
+       for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) {
+               struct test_parman_prio *prio = &test_parman->prios[i];
+
+               if (prio->priority == 0)
+                       break;
+               if (prio->priority == priority)
+                       goto again;
+       }
+       return priority;
+}
+
+static void test_parman_prios_init(struct test_parman *test_parman)
+{
+       int i;
+
+       for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) {
+               struct test_parman_prio *prio = &test_parman->prios[i];
+
+               /* Assign a random unique priority to each prio structure */
+               prio->priority = test_parman_priority_gen(test_parman);
+               parman_prio_init(test_parman->parman, &prio->parman_prio,
+                                prio->priority);
+       }
+}
+
+static void test_parman_prios_fini(struct test_parman *test_parman)
+{
+       int i;
+
+       for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) {
+               struct test_parman_prio *prio = &test_parman->prios[i];
+
+               parman_prio_fini(&prio->parman_prio);
+       }
+}
+
+static void test_parman_items_init(struct test_parman *test_parman)
+{
+       int i;
+
+       for (i = 0; i < TEST_PARMAN_ITEM_COUNT; i++) {
+               struct test_parman_item *item = &test_parman->items[i];
+               unsigned int prio_index = test_parman_rnd_get(test_parman) &
+                                         TEST_PARMAN_PRIO_MASK;
+
+               /* Assign a random prio to each item structure */
+               item->prio = &test_parman->prios[prio_index];
+       }
+}
+
+static void test_parman_items_fini(struct test_parman *test_parman)
+{
+       int i;
+
+       for (i = 0; i < TEST_PARMAN_ITEM_COUNT; i++) {
+               struct test_parman_item *item = &test_parman->items[i];
+
+               if (!item->used)
+                       continue;
+               parman_item_remove(test_parman->parman,
+                                  &item->prio->parman_prio,
+                                  &item->parman_item);
+       }
+}
+
+static struct test_parman *test_parman_create(const struct parman_ops *ops)
+{
+       struct test_parman *test_parman;
+       int err;
+
+       test_parman = kzalloc(sizeof(*test_parman), GFP_KERNEL);
+       if (!test_parman)
+               return ERR_PTR(-ENOMEM);
+       err = test_parman_resize(test_parman, TEST_PARMAN_BASE_COUNT);
+       if (err)
+               goto err_resize;
+       test_parman->parman = parman_create(ops, test_parman);
+       if (!test_parman->parman) {
+               err = -ENOMEM;
+               goto err_parman_create;
+       }
+       test_parman_rnd_init(test_parman);
+       test_parman_prios_init(test_parman);
+       test_parman_items_init(test_parman);
+       test_parman->run_budget = TEST_PARMAN_RUN_BUDGET;
+       return test_parman;
+
+err_parman_create:
+       test_parman_resize(test_parman, 0);
+err_resize:
+       kfree(test_parman);
+       return ERR_PTR(err);
+}
+
+static void test_parman_destroy(struct test_parman *test_parman)
+{
+       test_parman_items_fini(test_parman);
+       test_parman_prios_fini(test_parman);
+       parman_destroy(test_parman->parman);
+       test_parman_resize(test_parman, 0);
+       kfree(test_parman);
+}
+
+static bool test_parman_run_check_budgets(struct test_parman *test_parman)
+{
+       if (test_parman->run_budget-- == 0)
+               return false;
+       if (test_parman->bulk_budget-- != 0)
+               return true;
+
+       test_parman->bulk_budget = test_parman_rnd_get(test_parman) &
+                                  TEST_PARMAN_BULK_MAX_MASK;
+       test_parman->bulk_noop = test_parman_rnd_get(test_parman) & 1;
+       return true;
+}
+
+static int test_parman_run(struct test_parman *test_parman)
+{
+       unsigned int i = test_parman_rnd_get(test_parman);
+       int err;
+
+       while (test_parman_run_check_budgets(test_parman)) {
+               unsigned int item_index = i++ & TEST_PARMAN_ITEM_MASK;
+               struct test_parman_item *item = &test_parman->items[item_index];
+
+               if (test_parman->bulk_noop)
+                       continue;
+
+               if (!item->used) {
+                       err = parman_item_add(test_parman->parman,
+                                             &item->prio->parman_prio,
+                                             &item->parman_item);
+                       if (err)
+                               return err;
+                       test_parman->prio_array[item->parman_item.index] = item;
+                       test_parman->used_items++;
+               } else {
+                       test_parman->prio_array[item->parman_item.index] = NULL;
+                       parman_item_remove(test_parman->parman,
+                                          &item->prio->parman_prio,
+                                          &item->parman_item);
+                       test_parman->used_items--;
+               }
+               item->used = !item->used;
+       }
+       return 0;
+}
+
+static int test_parman_check_array(struct test_parman *test_parman,
+                                  bool gaps_allowed)
+{
+       unsigned int last_unused_items = 0;
+       unsigned long last_priority = 0;
+       unsigned int used_items = 0;
+       int i;
+
+       if (test_parman->prio_array_limit < TEST_PARMAN_BASE_COUNT) {
+               pr_err("Array limit is lower than the base count (%lu < %lu)\n",
+                      test_parman->prio_array_limit, TEST_PARMAN_BASE_COUNT);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < test_parman->prio_array_limit; i++) {
+               struct test_parman_item *item = test_parman->prio_array[i];
+
+               if (!item) {
+                       last_unused_items++;
+                       continue;
+               }
+               if (last_unused_items && !gaps_allowed) {
+                       pr_err("Gap found in array even though gaps are forbidden\n");
+                       return -EINVAL;
+               }
+
+               last_unused_items = 0;
+               used_items++;
+
+               if (item->prio->priority < last_priority) {
+                       pr_err("Item belongs under a higher priority than the last one (current: %lu, previous: %lu)\n",
+                              item->prio->priority, last_priority);
+                       return -EINVAL;
+               }
+               last_priority = item->prio->priority;
+
+               if (item->parman_item.index != i) {
+                       pr_err("Item has a different index than where it actually is (%lu != %d)\n",
+                              item->parman_item.index, i);
+                       return -EINVAL;
+               }
+       }
+
+       if (used_items != test_parman->used_items) {
+               pr_err("Number of used items in array does not match (%u != %u)\n",
+                      used_items, test_parman->used_items);
+               return -EINVAL;
+       }
+
+       if (last_unused_items >= TEST_PARMAN_RESIZE_STEP_COUNT) {
+               pr_err("Number of unused items at the end of the array is bigger than the resize step (%u >= %lu)\n",
+                      last_unused_items, TEST_PARMAN_RESIZE_STEP_COUNT);
+               return -EINVAL;
+       }
+
+       pr_info("Priority array check successful\n");
+
+       return 0;
+}
+
+static int test_parman_lsort(void)
+{
+       struct test_parman *test_parman;
+       int err;
+
+       test_parman = test_parman_create(&test_parman_lsort_ops);
+       if (IS_ERR(test_parman))
+               return PTR_ERR(test_parman);
+
+       err = test_parman_run(test_parman);
+       if (err)
+               goto out;
+
+       err = test_parman_check_array(test_parman, false);
+       if (err)
+               goto out;
+out:
+       test_parman_destroy(test_parman);
+       return err;
+}
+
+static int __init test_parman_init(void)
+{
+       return test_parman_lsort();
+}
+
+static void __exit test_parman_exit(void)
+{
+}
+
+module_init(test_parman_init);
+module_exit(test_parman_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Test module for parman");
diff --git a/lib/test_siphash.c b/lib/test_siphash.c
new file mode 100644 (file)
index 0000000..a6d854d
--- /dev/null
@@ -0,0 +1,223 @@
+/* Test cases for siphash.c
+ *
+ * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * This file is provided under a dual BSD/GPLv2 license.
+ *
+ * SipHash: a fast short-input PRF
+ * https://131002.net/siphash/
+ *
+ * This implementation is specifically for SipHash2-4 for a secure PRF
+ * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
+ * hashtables.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/siphash.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+
+/* Test vectors taken from reference source available at:
+ *     https://github.com/veorq/SipHash
+ */
+
+static const siphash_key_t test_key_siphash =
+       {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
+
+static const u64 test_vectors_siphash[64] = {
+       0x726fdb47dd0e0e31ULL, 0x74f839c593dc67fdULL, 0x0d6c8009d9a94f5aULL,
+       0x85676696d7fb7e2dULL, 0xcf2794e0277187b7ULL, 0x18765564cd99a68dULL,
+       0xcbc9466e58fee3ceULL, 0xab0200f58b01d137ULL, 0x93f5f5799a932462ULL,
+       0x9e0082df0ba9e4b0ULL, 0x7a5dbbc594ddb9f3ULL, 0xf4b32f46226bada7ULL,
+       0x751e8fbc860ee5fbULL, 0x14ea5627c0843d90ULL, 0xf723ca908e7af2eeULL,
+       0xa129ca6149be45e5ULL, 0x3f2acc7f57c29bdbULL, 0x699ae9f52cbe4794ULL,
+       0x4bc1b3f0968dd39cULL, 0xbb6dc91da77961bdULL, 0xbed65cf21aa2ee98ULL,
+       0xd0f2cbb02e3b67c7ULL, 0x93536795e3a33e88ULL, 0xa80c038ccd5ccec8ULL,
+       0xb8ad50c6f649af94ULL, 0xbce192de8a85b8eaULL, 0x17d835b85bbb15f3ULL,
+       0x2f2e6163076bcfadULL, 0xde4daaaca71dc9a5ULL, 0xa6a2506687956571ULL,
+       0xad87a3535c49ef28ULL, 0x32d892fad841c342ULL, 0x7127512f72f27cceULL,
+       0xa7f32346f95978e3ULL, 0x12e0b01abb051238ULL, 0x15e034d40fa197aeULL,
+       0x314dffbe0815a3b4ULL, 0x027990f029623981ULL, 0xcadcd4e59ef40c4dULL,
+       0x9abfd8766a33735cULL, 0x0e3ea96b5304a7d0ULL, 0xad0c42d6fc585992ULL,
+       0x187306c89bc215a9ULL, 0xd4a60abcf3792b95ULL, 0xf935451de4f21df2ULL,
+       0xa9538f0419755787ULL, 0xdb9acddff56ca510ULL, 0xd06c98cd5c0975ebULL,
+       0xe612a3cb9ecba951ULL, 0xc766e62cfcadaf96ULL, 0xee64435a9752fe72ULL,
+       0xa192d576b245165aULL, 0x0a8787bf8ecb74b2ULL, 0x81b3e73d20b49b6fULL,
+       0x7fa8220ba3b2eceaULL, 0x245731c13ca42499ULL, 0xb78dbfaf3a8d83bdULL,
+       0xea1ad565322a1a0bULL, 0x60e61c23a3795013ULL, 0x6606d7e446282b93ULL,
+       0x6ca4ecb15c5f91e1ULL, 0x9f626da15c9625f3ULL, 0xe51b38608ef25f57ULL,
+       0x958a324ceb064572ULL
+};
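/* Vector i is the SipHash-2-4 output for the message 0x00, 0x01, ...,
 * i - 1 under the 0x00..0x0f key above, matching the reference
 * implementation's published vectors; index 8, for instance, covers the
 * 8-byte message that siphash_1u64(0x0706050403020100ULL, ...) is
 * checked against in the self-test below.
 */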
+
+#if BITS_PER_LONG == 64
+static const hsiphash_key_t test_key_hsiphash =
+       {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
+
+static const u32 test_vectors_hsiphash[64] = {
+       0x050fc4dcU, 0x7d57ca93U, 0x4dc7d44dU,
+       0xe7ddf7fbU, 0x88d38328U, 0x49533b67U,
+       0xc59f22a7U, 0x9bb11140U, 0x8d299a8eU,
+       0x6c063de4U, 0x92ff097fU, 0xf94dc352U,
+       0x57b4d9a2U, 0x1229ffa7U, 0xc0f95d34U,
+       0x2a519956U, 0x7d908b66U, 0x63dbd80cU,
+       0xb473e63eU, 0x8d297d1cU, 0xa6cce040U,
+       0x2b45f844U, 0xa320872eU, 0xdae6c123U,
+       0x67349c8cU, 0x705b0979U, 0xca9913a5U,
+       0x4ade3b35U, 0xef6cd00dU, 0x4ab1e1f4U,
+       0x43c5e663U, 0x8c21d1bcU, 0x16a7b60dU,
+       0x7a8ff9bfU, 0x1f2a753eU, 0xbf186b91U,
+       0xada26206U, 0xa3c33057U, 0xae3a36a1U,
+       0x7b108392U, 0x99e41531U, 0x3f1ad944U,
+       0xc8138825U, 0xc28949a6U, 0xfaf8876bU,
+       0x9f042196U, 0x68b1d623U, 0x8b5114fdU,
+       0xdf074c46U, 0x12cc86b3U, 0x0a52098fU,
+       0x9d292f9aU, 0xa2f41f12U, 0x43a71ed0U,
+       0x73f0bce6U, 0x70a7e980U, 0x243c6d75U,
+       0xfdb71513U, 0xa67d8a08U, 0xb7e8f148U,
+       0xf7a644eeU, 0x0f1837f2U, 0x4b6694e0U,
+       0xb7bbb3a8U
+};
+#else
+static const hsiphash_key_t test_key_hsiphash =
+       {{ 0x03020100U, 0x07060504U }};
+
+static const u32 test_vectors_hsiphash[64] = {
+       0x5814c896U, 0xe7e864caU, 0xbc4b0e30U,
+       0x01539939U, 0x7e059ea6U, 0x88e3d89bU,
+       0xa0080b65U, 0x9d38d9d6U, 0x577999b1U,
+       0xc839caedU, 0xe4fa32cfU, 0x959246eeU,
+       0x6b28096cU, 0x66dd9cd6U, 0x16658a7cU,
+       0xd0257b04U, 0x8b31d501U, 0x2b1cd04bU,
+       0x06712339U, 0x522aca67U, 0x911bb605U,
+       0x90a65f0eU, 0xf826ef7bU, 0x62512debU,
+       0x57150ad7U, 0x5d473507U, 0x1ec47442U,
+       0xab64afd3U, 0x0a4100d0U, 0x6d2ce652U,
+       0x2331b6a3U, 0x08d8791aU, 0xbc6dda8dU,
+       0xe0f6c934U, 0xb0652033U, 0x9b9851ccU,
+       0x7c46fb7fU, 0x732ba8cbU, 0xf142997aU,
+       0xfcc9aa1bU, 0x05327eb2U, 0xe110131cU,
+       0xf9e5e7c0U, 0xa7d708a6U, 0x11795ab1U,
+       0x65671619U, 0x9f5fff91U, 0xd89c5267U,
+       0x007783ebU, 0x95766243U, 0xab639262U,
+       0x9c7e1390U, 0xc368dda6U, 0x38ddc455U,
+       0xfa13d379U, 0x979ea4e8U, 0x53ecd77eU,
+       0x2ee80657U, 0x33dbb66aU, 0xae3f0577U,
+       0x88b4c4ccU, 0x3e7f480bU, 0x74c1ebf8U,
+       0x87178304U
+};
+#endif
+
+static int __init siphash_test_init(void)
+{
+       u8 in[64] __aligned(SIPHASH_ALIGNMENT);
+       u8 in_unaligned[65] __aligned(SIPHASH_ALIGNMENT);
+       u8 i;
+       int ret = 0;
+
+       for (i = 0; i < 64; ++i) {
+               in[i] = i;
+               in_unaligned[i + 1] = i;
+               if (siphash(in, i, &test_key_siphash) !=
+                                               test_vectors_siphash[i]) {
+                       pr_info("siphash self-test aligned %u: FAIL\n", i + 1);
+                       ret = -EINVAL;
+               }
+               if (siphash(in_unaligned + 1, i, &test_key_siphash) !=
+                                               test_vectors_siphash[i]) {
+                       pr_info("siphash self-test unaligned %u: FAIL\n", i + 1);
+                       ret = -EINVAL;
+               }
+               if (hsiphash(in, i, &test_key_hsiphash) !=
+                                               test_vectors_hsiphash[i]) {
+                       pr_info("hsiphash self-test aligned %u: FAIL\n", i + 1);
+                       ret = -EINVAL;
+               }
+               if (hsiphash(in_unaligned + 1, i, &test_key_hsiphash) !=
+                                               test_vectors_hsiphash[i]) {
+                       pr_info("hsiphash self-test unaligned %u: FAIL\n", i + 1);
+                       ret = -EINVAL;
+               }
+       }
+       if (siphash_1u64(0x0706050403020100ULL, &test_key_siphash) !=
+                                               test_vectors_siphash[8]) {
+               pr_info("siphash self-test 1u64: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (siphash_2u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+                        &test_key_siphash) != test_vectors_siphash[16]) {
+               pr_info("siphash self-test 2u64: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (siphash_3u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+                        0x1716151413121110ULL, &test_key_siphash) !=
+                                               test_vectors_siphash[24]) {
+               pr_info("siphash self-test 3u64: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (siphash_4u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+                        0x1716151413121110ULL, 0x1f1e1d1c1b1a1918ULL,
+                        &test_key_siphash) != test_vectors_siphash[32]) {
+               pr_info("siphash self-test 4u64: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (siphash_1u32(0x03020100U, &test_key_siphash) !=
+                                               test_vectors_siphash[4]) {
+               pr_info("siphash self-test 1u32: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash) !=
+                                               test_vectors_siphash[8]) {
+               pr_info("siphash self-test 2u32: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (siphash_3u32(0x03020100U, 0x07060504U,
+                        0x0b0a0908U, &test_key_siphash) !=
+                                               test_vectors_siphash[12]) {
+               pr_info("siphash self-test 3u32: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (siphash_4u32(0x03020100U, 0x07060504U,
+                        0x0b0a0908U, 0x0f0e0d0cU, &test_key_siphash) !=
+                                               test_vectors_siphash[16]) {
+               pr_info("siphash self-test 4u32: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (hsiphash_1u32(0x03020100U, &test_key_hsiphash) !=
+                                               test_vectors_hsiphash[4]) {
+               pr_info("hsiphash self-test 1u32: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (hsiphash_2u32(0x03020100U, 0x07060504U, &test_key_hsiphash) !=
+                                               test_vectors_hsiphash[8]) {
+               pr_info("hsiphash self-test 2u32: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (hsiphash_3u32(0x03020100U, 0x07060504U,
+                         0x0b0a0908U, &test_key_hsiphash) !=
+                                               test_vectors_hsiphash[12]) {
+               pr_info("hsiphash self-test 3u32: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (hsiphash_4u32(0x03020100U, 0x07060504U,
+                         0x0b0a0908U, 0x0f0e0d0cU, &test_key_hsiphash) !=
+                                               test_vectors_hsiphash[16]) {
+               pr_info("hsiphash self-test 4u32: FAIL\n");
+               ret = -EINVAL;
+       }
+       if (!ret)
+               pr_info("self-tests: pass\n");
+       return ret;
+}
+
+static void __exit siphash_test_exit(void)
+{
+}
+
+module_init(siphash_test_init);
+module_exit(siphash_test_exit);
+
+MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
+MODULE_LICENSE("Dual BSD/GPL");
index 7008d53e455c5254db5879bb044b0a4b6d706775..4fa2fdda174d0139e669bd282ab4362aea41f512 100644 (file)
@@ -27,8 +27,8 @@ static int lowpan_nhc_insert(struct lowpan_nhc *nhc)
 
        /* Figure out where to put new node */
        while (*new) {
-               struct lowpan_nhc *this = container_of(*new, struct lowpan_nhc,
-                                                      node);
+               struct lowpan_nhc *this = rb_entry(*new, struct lowpan_nhc,
+                                                  node);
                int result, len_dif, len;
 
                len_dif = nhc->idlen - this->idlen;
@@ -69,8 +69,8 @@ static struct lowpan_nhc *lowpan_nhc_by_nhcid(const struct sk_buff *skb)
        const u8 *nhcid_skb_ptr = skb->data;
 
        while (node) {
-               struct lowpan_nhc *nhc = container_of(node, struct lowpan_nhc,
-                                                     node);
+               struct lowpan_nhc *nhc = rb_entry(node, struct lowpan_nhc,
+                                                 node);
                u8 nhcid_skb_ptr_masked[LOWPAN_NHC_MAX_ID_LEN];
                int result, i;
 
index 10da6c588bf84f45cdd237ae5d56c9975be5a1b2..e97ab824e368cc16f9609acd70d5337866eb2936 100644 (file)
@@ -671,7 +671,8 @@ static int vlan_ethtool_get_ts_info(struct net_device *dev,
        return 0;
 }
 
-static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+static void vlan_dev_get_stats64(struct net_device *dev,
+                                struct rtnl_link_stats64 *stats)
 {
        struct vlan_pcpu_stats *p;
        u32 rx_errors = 0, tx_dropped = 0;
@@ -702,8 +703,6 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
        }
        stats->rx_errors  = rx_errors;
        stats->tx_dropped = tx_dropped;
-
-       return stats;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -792,8 +791,6 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_netpoll_cleanup    = vlan_dev_netpoll_cleanup,
 #endif
        .ndo_fix_features       = vlan_dev_fix_features,
-       .ndo_neigh_construct    = netdev_default_l2upper_neigh_construct,
-       .ndo_neigh_destroy      = netdev_default_l2upper_neigh_destroy,
        .ndo_fdb_add            = switchdev_port_fdb_add,
        .ndo_fdb_del            = switchdev_port_fdb_del,
        .ndo_fdb_dump           = switchdev_port_fdb_dump,
index a29bb4b41c50e3c55463eaee2a5dda1292b757f6..102f781a0131aff34a521250c1fc879f8db5ee58 100644 (file)
@@ -57,6 +57,7 @@ source "net/packet/Kconfig"
 source "net/unix/Kconfig"
 source "net/xfrm/Kconfig"
 source "net/iucv/Kconfig"
+source "net/smc/Kconfig"
 
 config INET
        bool "TCP/IP networking"
@@ -296,7 +297,8 @@ config BPF_JIT
 
          Note, admin should enable this feature changing:
          /proc/sys/net/core/bpf_jit_enable
-         /proc/sys/net/core/bpf_jit_harden (optional)
+         /proc/sys/net/core/bpf_jit_harden   (optional)
+         /proc/sys/net/core/bpf_jit_kallsyms (optional)
 
 config NET_FLOW_LIMIT
        bool
@@ -389,6 +391,8 @@ source "net/9p/Kconfig"
 source "net/caif/Kconfig"
 source "net/ceph/Kconfig"
 source "net/nfc/Kconfig"
+source "net/psample/Kconfig"
+source "net/ife/Kconfig"
 
 config LWTUNNEL
        bool "Network light weight tunnels"
@@ -410,6 +414,10 @@ config DST_CACHE
        bool
        default n
 
+config GRO_CELLS
+       bool
+       default n
+
 config NET_DEVLINK
        tristate "Network physical/parent device Netlink interface"
        help
index 4cafaa2b4667e049ab4a698e8242902adb6a65a0..9b681550e3a3ea3c6146ac67572b6c97a28c9d2c 100644 (file)
@@ -51,6 +51,7 @@ obj-$(CONFIG_MAC80211)                += mac80211/
 obj-$(CONFIG_TIPC)             += tipc/
 obj-$(CONFIG_NETLABEL)         += netlabel/
 obj-$(CONFIG_IUCV)             += iucv/
+obj-$(CONFIG_SMC)              += smc/
 obj-$(CONFIG_RFKILL)           += rfkill/
 obj-$(CONFIG_NET_9P)           += 9p/
 obj-$(CONFIG_CAIF)             += caif/
@@ -69,6 +70,8 @@ obj-$(CONFIG_DNS_RESOLVER)    += dns_resolver/
 obj-$(CONFIG_CEPH_LIB)         += ceph/
 obj-$(CONFIG_BATMAN_ADV)       += batman-adv/
 obj-$(CONFIG_NFC)              += nfc/
+obj-$(CONFIG_PSAMPLE)          += psample/
+obj-$(CONFIG_NET_IFE)          += ife/
 obj-$(CONFIG_OPENVSWITCH)      += openvswitch/
 obj-$(CONFIG_VSOCKETS) += vmw_vsock/
 obj-$(CONFIG_MPLS)             += mpls/
index f724d3c98a816d1fb437a1f371b3e04cff2341ed..915987bc6d294e5450a5439e6b854267763f3370 100644 (file)
@@ -1,5 +1,5 @@
 #
-# Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+# Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
 #
 # Marek Lindner, Simon Wunderlich
 #
index 623d04302aa2ae766bf3f608a284fb9a1b9104a7..44fd073b7546c47ceb25238c5686aad2967bc15f 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 3b5b69cdd12bc1505b63b48b542203fbfa2ab082..29f6312f9bf13ff0980b965f34465df5ae59265e 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Linus Lüssing
  *
index f00f666e2ccd4714bb7a5210c48e39edb40e0c17..7c3d994e90d87b868f2b1614cc5d26e2413e70ee 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index b9f3550faaf716b58defc53284618138ec59f33a..ae2ab526bdb1e95ef2ee5fc27fb59e38d3b462d2 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 2ac612d7bab4d0b4035c9e476dab17536349dca3..0acd081dd286996444d121b526f4530c4c1c0845 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing, Marek Lindner
  *
index 83b77639729e15e41cd48b625b8f4f67af71f0c0..dd7c4b647e6b4a1325674d820fb7ca6314f437e2 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Linus Lüssing
  *
index f2fb2f05b6bf280d2b5fae26ed10288f73345f16..b90c9903e2465bcfa9a287104dcd054b336c7398 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing, Marek Lindner
  *
index be17c0b1369e39efd5c0f0733e75f511445b7418..376ead280ab9e40e1283fbf4be5dbfe803a45015 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing, Marek Lindner
  *
index 38b9aab83fc0eaf63e3713d278482524253d5c1a..03a35c9f456d7aba00f768de483540d2cabb50e6 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
  *
index 4c4d45caa422ca9511f0b9d19435f507b6ce0e99..2068770b542dd58bb412ec6bc21b09f6fce3e554 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
  *
index 032271421a203162ba3b5c8766746c3feccbf877..2b070c7e31da1862038063512c81f9b55fa98ccd 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
index 0e6e9d09078cf176e7b426615666e5f51def5fbd..cc262c9d97e0ca606aa666493956e5feedd18bb1 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
index e7f690b571ea9be8ace25843d6e187a907486b99..ba8420d8a992db2c14936f568f761869afd921c0 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
@@ -449,7 +449,6 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
        batadv_inc_counter(bat_priv, BATADV_CNT_RX);
        batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
                           skb->len + ETH_HLEN);
-       soft_iface->last_rx = jiffies;
 
        netif_rx(skb);
 out:
index 1ae93e46fb98498c00082728ca91216d78e13298..e157986bd01cf989dc70c93bae7a4fdf17cf3c4f 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
@@ -20,6 +20,8 @@
 
 #include "main.h"
 
+#include <linux/compiler.h>
+#include <linux/stddef.h>
 #include <linux/types.h>
 
 struct net_device;
@@ -27,6 +29,22 @@ struct netlink_callback;
 struct seq_file;
 struct sk_buff;
 
+/**
+ * batadv_bla_is_loopdetect_mac - check if the mac address is from a loop detect
+ *  frame sent by bridge loop avoidance
+ * @mac: mac address to check
+ *
+ * Return: true if it looks like a loop detect frame
+ * (mac starts with BA:BE), false otherwise
+ */
+static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac)
+{
+       if (mac[0] == 0xba && mac[1] == 0xbe)
+               return true;
+
+       return false;
+}
+
 #ifdef CONFIG_BATMAN_ADV_BLA
 bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                   unsigned short vid, bool is_bcast);
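
The new helper above classifies frames purely by the first two octets of the source MAC. A minimal standalone sketch of the same check; the addresses and the wrapper below are made up for illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same test as batadv_bla_is_loopdetect_mac(): bridge loop
     * avoidance sends its loop-detect probes from a
     * BA:BE:xx:xx:xx:xx source address. */
    static bool is_loopdetect_mac(const uint8_t *mac)
    {
            return mac[0] == 0xba && mac[1] == 0xbe;
    }

    int main(void)
    {
            const uint8_t probe[6]   = { 0xba, 0xbe, 0x00, 0x11, 0x22, 0x33 };
            const uint8_t regular[6] = { 0x52, 0x54, 0x00, 0x11, 0x22, 0x33 };

            printf("%d %d\n", is_loopdetect_mac(probe),
                   is_loopdetect_mac(regular)); /* prints "1 0" */
            return 0;
    }
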
index 77925504379dac7d64777393ddae326b5d6d9505..e32ad47c6efdf17914aad1e89029020d15801150 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -19,7 +19,7 @@
 #include "main.h"
 
 #include <linux/debugfs.h>
-#include <linux/device.h>
+#include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/fs.h>
index e49121ee55f696547ddc9774ba6c425af2d49b57..9c5d4a65b98c35239709d9258bb889a61ffef8c2 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 49576c5a3fe306a42c28c3901d2b2c6cce7d0b8e..1bfd1dbc2feba7bf6c16004ea8ca85ed6066c929 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
  *
@@ -1050,7 +1050,6 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                                                   bat_priv->soft_iface);
                bat_priv->stats.rx_packets++;
                bat_priv->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
-               bat_priv->soft_iface->last_rx = jiffies;
 
                netif_rx(skb_new);
                batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
index 813ecea96cf9334700fa219a41fe1cf5a20f791d..ec364a3c1c6691dc4d5659d6edd2693c607e128a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2017  B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
  *
index 0854ebd8613e9bf9044b04099b11341325d6e194..ead18ca836de7ba134502f3ca2ea3201e6257f97 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
  *
index b95f619606af86b7e0b557bf67b911ceac5b0d54..1a2d6c3087455d630d574b6d7b749fadda0a136e 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
  *
index 52b8bd6ec43183519a63483950c2e886e47a6f9e..de9955d5224d258cdbd20c14a1758bf7b4af19f9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 859166d03561b37f749363ed5d09266a8a5f549a..3baa3d466e5e1089d1e0dcd9fb62de6c7cfe01c1 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 21184810d89f69e372673aff221a74382945491d..5db2e43e3775ef40fc3832984c93411c7f0dbb08 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 8a5e1ddf1175ccb3f8e20169d9bd584e0327df86..0a6a97d201f27513a99ed4b3261138a2c6a8f0e8 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 61a431a9772ba96418644b399c9e787cbfd0e743..e348f76ea8c15c8901294598c02617028063bfd6 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index d6309a42362918d4beb4d43d174e0c4e8773e75c..9f9890ff7a22f0ea188b9135a7f3c6fb1958d6b8 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index a0a0fdb8580513215a59f971dd199aa00dc10d0f..b5f7e13918acdfa35ba73d92a31d8292faf94669 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
index 557a7044cfbccecd3b2a5e66659a3ac151d819fa..0c905e91c5e285136f4959372c02317f5fc41040 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2017  B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
index b310f381ae028d26ed89dea88ba071b7433cfca4..6308c9f0fd96c149f8d825c6f736e2112204a76c 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index e44a7da51431bbae76e656f5d9055c2003896365..f3fec40aae862b89da46d802729759ba540cd0a5 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index c73c31769abaf9c802c408f1e7fae17c9db134b7..4ef4bde2cc2d387d0a2020389889f10399349e0a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 3284a7b0325dc631bc8de438a0f62ba8587c9f50..7a2b9f4da07830103a8f00e4c3b488b0367a9dc2 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index d46415edd3be98d9538def8d55674b5336eca6a1..5000c540614d0c0a866857e5245ff3f365d14223 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index a6cc8040a21dd24fb507683230fd66a9edb62458..57a8103dbce7f00fb58ff5ae252e12a0f6453aec 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -24,7 +24,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2016.5"
+#define BATADV_SOURCE_VERSION "2017.0"
 #endif
 
 /* B.A.T.M.A.N. parameters */
index 090a69fc342eac8a0b6bf89556d2b32523817d09..952ba81a565b611ee0a83fc0bbfb719b54187407 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2014-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
  *
index 2cddaf52a21d5e8ea927c96e51e5a811de6785c9..2a78cddab0e971a466797ddb7032661f2fa55430 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2014-2017  B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
  *
index 062738163bdce747b7f49c96d9180899bb15ea2f..ab13b4d58733829413c8b82f66e69733cbecc55b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2016-2017  B.A.T.M.A.N. contributors:
  *
  * Matthias Schiffer
  *
index 52eb16281aba7aae5d0c314f21375dabc9a6bf25..f1cd8c5da96605daff924f4aee00323f56a9e893 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2016-2017  B.A.T.M.A.N. contributors:
  *
  * Matthias Schiffer
  *
index ab5a3bf0765f36f2fe14ff4a91d43d905e08a1f3..e1f6fc72fe3e82cc3d55efdc1c08dfb60087ee23 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
  *
index d6d7fb4ec5d595ae996b983309e6ffc1246338c9..c66efb81d2f4154ed265e960632fa5c3f0e8aee8 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2017  B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
  *
index 8f3b2969cc4e3044e714086329166b9a3b7517a4..8e2a4b205257929e9b64f157e8570972cf1383f9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index ebc56183f3581835c899272425a212ff092033b6..d94220a6d21acf58d55fcf7a179151c67392b055 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 7a36bcfa0ba01993080b2ad1ddca94ca0d50adc9..8e8a5db197cb0f8205251fa8fdb1d48e25a9e872 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 6713bdf414cdacdaf36ecd6ac516f99e079fb51e..7fd740b6e36dfb0e11c67c283cec68a5cfd1b5f6 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -719,20 +719,19 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
 
        len = skb->len;
        res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
-       if (res == NET_XMIT_SUCCESS)
-               ret = NET_RX_SUCCESS;
-
-       /* skb was consumed */
-       skb = NULL;
 
        /* translate transmit result into receive result */
        if (res == NET_XMIT_SUCCESS) {
+               ret = NET_RX_SUCCESS;
                /* skb was transmitted and consumed */
                batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
                batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
                                   len + ETH_HLEN);
        }
 
+       /* skb was consumed */
+       skb = NULL;
+
 put_orig_node:
        batadv_orig_node_put(orig_node);
 free_skb:
index 05c3ff42e1816743989ad0c6ed4bc6eea943e448..5ede16c32f157369ba08fdd52357d3e09e123b83 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 49021b7124f37a4e95e043eb1b9d88855b60885c..1489ec27daff5548b072e88648f5cca192f74afa 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -77,6 +77,7 @@ int batadv_send_skb_packet(struct sk_buff *skb,
 {
        struct batadv_priv *bat_priv;
        struct ethhdr *ethhdr;
+       int ret;
 
        bat_priv = netdev_priv(hard_iface->soft_iface);
 
@@ -115,7 +116,8 @@ int batadv_send_skb_packet(struct sk_buff *skb,
         * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
         * (which is > 0). This will not be treated as an error.
         */
-       return dev_queue_xmit(skb);
+       ret = dev_queue_xmit(skb);
+       return net_xmit_eval(ret);
 send_skb_err:
        kfree_skb(skb);
        return NET_XMIT_DROP;
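
For reference, net_xmit_eval() simply folds the congestion-notification code into success, so callers of batadv_send_skb_packet() now see only 0 or a real drop. A standalone sketch of that mapping, with the constants copied from include/linux/netdevice.h:

    #include <stdio.h>

    #define NET_XMIT_SUCCESS 0x00
    #define NET_XMIT_DROP    0x01 /* skb dropped */
    #define NET_XMIT_CN      0x02 /* congestion notification */

    /* The mapping applied by net_xmit_eval() (include/net/sock.h):
     * congestion notification is not an error for the caller. */
    #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))

    int main(void)
    {
            printf("%d %d %d\n",
                   net_xmit_eval(NET_XMIT_SUCCESS), /* 0 */
                   net_xmit_eval(NET_XMIT_CN),      /* 0: treated as sent */
                   net_xmit_eval(NET_XMIT_DROP));   /* 1: a real failure */
            return 0;
    }
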
index a94e1e8639ca2da6ca85a3e7a5ea37af12815960..f21166d1032360a1febe3cebdfc9ee0e9958bb9c 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 7b3494ae6ad93fd0d32391e5c88f5d636f43acd5..5d099b2e6cfccb8a436d98a10a6d513d89e31dc1 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -258,7 +258,8 @@ static int batadv_interface_tx(struct sk_buff *skb,
        ethhdr = eth_hdr(skb);
 
        /* Register the client MAC in the transtable */
-       if (!is_multicast_ether_addr(ethhdr->h_source)) {
+       if (!is_multicast_ether_addr(ethhdr->h_source) &&
+           !batadv_bla_is_loopdetect_mac(ethhdr->h_source)) {
                client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source,
                                                   vid, skb->skb_iif,
                                                   skb->mark);
@@ -481,8 +482,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
        batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
                           skb->len + ETH_HLEN);
 
-       soft_iface->last_rx = jiffies;
-
        /* Let the bridge loop avoidance check the packet. If it will
         * not handle it, we can safely push it up.
         */
index ec303ddbf647828947f8c83c7edde06582735ad6..639c3abb214a4dc44053b2868e51484c9cd2c344 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 17c844196eb26c9faf9fd543b88cd86cc1c2c029..0ae8b30e4eaaf4e3513e9a552de2ae51791d2554 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index c76021b4e1980a75bb8daa366b95d53e653094f4..e487412e256bbe4a83def9d3e4f73e999dd62d6e 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 981e8c5b07e9398c68df711d1d7b54e6e9333ead..c94ebdecdc3d123f71c5af89783623e58d2f323d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2017 B.A.T.M.A.N. contributors:
  *
  * Edo Monticelli, Antonio Quartulli
  *
@@ -23,7 +23,7 @@
 #include <linux/byteorder/generic.h>
 #include <linux/cache.h>
 #include <linux/compiler.h>
-#include <linux/device.h>
+#include <linux/err.h>
 #include <linux/etherdevice.h>
 #include <linux/fs.h>
 #include <linux/if_ether.h>
index ba922c425e56a0ddd02f96dd30c5c53f2fcb86aa..a8ada5c123bd95455412b822b5ff78d7a538b9e8 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2017 B.A.T.M.A.N. contributors:
  *
  * Edo Monticelli, Antonio Quartulli
  *
index 30ecbfb40adfa6f9f1c777fc93e42df8c39e4581..6077a87d46f0f781ac72dcc3cc1d9f84c814ad19 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
  *
@@ -3714,7 +3714,6 @@ static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv, u16 flags,
 {
        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common_entry;
-       u16 changed_num = 0;
        struct hlist_head *head;
        u32 i;
 
@@ -3736,7 +3735,6 @@ static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv, u16 flags,
                                        continue;
                                tt_common_entry->flags &= ~flags;
                        }
-                       changed_num++;
 
                        if (!count)
                                continue;
index 783fdba84db252d4390ebf5df44b26e2f60db68e..411d586191da619669e5bd1a17b85b8b46eb4dfb 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
  *
index a783420356ae0cd4a6273b3b7a04781242e37a82..1d9e267caec92801354e6f7947151e1e3dabf40a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index e4369b547b43868d965bd8afeb081b95c5f9e907..4d01400ada30bd03abc583e5c22c717ddd355428 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index e913aee28c98bf77cdd7fe92496fa4b188ff9604..8f64a5c013454a6eb8f45b04c03389d69ef4e50a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016  B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017  B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 0aefc011b66851d391d7ae021da04af23f46d294..40b1ede527caedde81bbfea79d807a1af912e29e 100644 (file)
@@ -6,7 +6,8 @@ obj-$(CONFIG_BRIDGE) += bridge.o
 
 bridge-y       := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
                        br_ioctl.o br_stp.o br_stp_bpdu.o \
-                       br_stp_if.o br_stp_timer.o br_netlink.o
+                       br_stp_if.o br_stp_timer.o br_netlink.o \
+                       br_netlink_tunnel.o
 
 bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
 
@@ -18,7 +19,7 @@ obj-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
 
 bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
 
-bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o
+bridge-$(CONFIG_BRIDGE_VLAN_FILTERING) += br_vlan.o br_vlan_tunnel.o
 
 bridge-$(CONFIG_NET_SWITCHDEV) += br_switchdev.o
 
index ed3b3192fb00f575d9d52671ead28de03806002e..ea71513fca21a0aea0dd569b482717a1b1dbe673 100644 (file)
@@ -79,7 +79,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
                        br_multicast_flood(mdst, skb, false, true);
                else
                        br_flood(br, skb, BR_PKT_MULTICAST, false, true);
-       } else if ((dst = __br_fdb_get(br, dest, vid)) != NULL) {
+       } else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) {
                br_forward(dst->dst, skb, false, true);
        } else {
                br_flood(br, skb, BR_PKT_UNICAST, false, true);
@@ -153,8 +153,8 @@ static int br_dev_stop(struct net_device *dev)
        return 0;
 }
 
-static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
-                                               struct rtnl_link_stats64 *stats)
+static void br_get_stats64(struct net_device *dev,
+                          struct rtnl_link_stats64 *stats)
 {
        struct net_bridge *br = netdev_priv(dev);
        struct pcpu_sw_netstats tmp, sum = { 0 };
@@ -178,8 +178,6 @@ static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
        stats->tx_packets = sum.tx_packets;
        stats->rx_bytes   = sum.rx_bytes;
        stats->rx_packets = sum.rx_packets;
-
-       return stats;
 }
 
 static int br_change_mtu(struct net_device *dev, int new_mtu)
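
The ndo_get_stats64 conversion drops the now-pointless return value; the summation itself is elided from this hunk, but it folds per-CPU counters under u64_stats_sync. A hedged sketch of the approximate shape of that fold, not the file's exact body; fold_pcpu_stats and pstats are stand-in names:

    #include <linux/netdevice.h>
    #include <linux/u64_stats_sync.h>

    /* Approximate shape of the per-CPU fold behind br_get_stats64();
     * pstats stands in for the bridge's per-CPU counters. */
    static void fold_pcpu_stats(struct rtnl_link_stats64 *stats,
                                struct pcpu_sw_netstats __percpu *pstats)
    {
            int cpu;

            for_each_possible_cpu(cpu) {
                    const struct pcpu_sw_netstats *p = per_cpu_ptr(pstats, cpu);
                    struct pcpu_sw_netstats tmp;
                    unsigned int start;

                    do {
                            start = u64_stats_fetch_begin_irq(&p->syncp);
                            tmp.tx_bytes   = p->tx_bytes;
                            tmp.tx_packets = p->tx_packets;
                            tmp.rx_bytes   = p->rx_bytes;
                            tmp.rx_packets = p->rx_packets;
                    } while (u64_stats_fetch_retry_irq(&p->syncp, start));

                    stats->tx_bytes   += tmp.tx_bytes;
                    stats->tx_packets += tmp.tx_packets;
                    stats->rx_bytes   += tmp.rx_bytes;
                    stats->rx_packets += tmp.rx_packets;
            }
    }
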
@@ -349,8 +347,6 @@ static const struct net_device_ops br_netdev_ops = {
        .ndo_add_slave           = br_add_slave,
        .ndo_del_slave           = br_del_slave,
        .ndo_fix_features        = br_fix_features,
-       .ndo_neigh_construct     = netdev_default_l2upper_neigh_construct,
-       .ndo_neigh_destroy       = netdev_default_l2upper_neigh_destroy,
        .ndo_fdb_add             = br_fdb_add,
        .ndo_fdb_del             = br_fdb_delete,
        .ndo_fdb_dump            = br_fdb_dump,
@@ -415,4 +411,5 @@ void br_dev_setup(struct net_device *dev)
        br_netfilter_rtable_init(br);
        br_stp_timer_init(br);
        br_multicast_init(br);
+       INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup);
 }
index e4a4176171c91fa364e914e2aeb9d357209a1f93..4f598dc2d9168cd323a3027d77d601854aa35f04 100644 (file)
@@ -28,9 +28,6 @@
 #include "br_private.h"
 
 static struct kmem_cache *br_fdb_cache __read_mostly;
-static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
-                                            const unsigned char *addr,
-                                            __u16 vid);
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
                      const unsigned char *addr, u16 vid);
 static void fdb_notify(struct net_bridge *br,
@@ -68,7 +65,7 @@ static inline unsigned long hold_time(const struct net_bridge *br)
 static inline int has_expired(const struct net_bridge *br,
                                  const struct net_bridge_fdb_entry *fdb)
 {
-       return !fdb->is_static &&
+       return !fdb->is_static && !fdb->added_by_external_learn &&
                time_before_eq(fdb->updated + hold_time(br), jiffies);
 }
 
@@ -86,6 +83,47 @@ static void fdb_rcu_free(struct rcu_head *head)
        kmem_cache_free(br_fdb_cache, ent);
 }
 
+static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
+                                                const unsigned char *addr,
+                                                __u16 vid)
+{
+       struct net_bridge_fdb_entry *f;
+
+       WARN_ON_ONCE(!rcu_read_lock_held());
+
+       hlist_for_each_entry_rcu(f, head, hlist)
+               if (ether_addr_equal(f->addr.addr, addr) && f->vlan_id == vid)
+                       break;
+
+       return f;
+}
+
+/* requires bridge hash_lock */
+static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
+                                               const unsigned char *addr,
+                                               __u16 vid)
+{
+       struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
+       struct net_bridge_fdb_entry *fdb;
+
+       WARN_ON_ONCE(!br_hash_lock_held(br));
+
+       rcu_read_lock();
+       fdb = fdb_find_rcu(head, addr, vid);
+       rcu_read_unlock();
+
+       return fdb;
+}
+
+struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
+                                            const unsigned char *addr,
+                                            __u16 vid)
+{
+       struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
+
+       return fdb_find_rcu(head, addr, vid);
+}
+
 /* When a static FDB entry is added, the mac address from the entry is
  * added to the bridge private HW address list and all required ports
  * are then updated with the new information.
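
br_fdb_find_rcu() is the new lockless entry point and is only valid inside an RCU read-side critical section. A minimal caller-side sketch; the fdb_entry_exists helper below is hypothetical:

    #include <linux/rcupdate.h>
    #include "br_private.h"

    /* Hypothetical fast-path probe: the entry returned by
     * br_fdb_find_rcu() must not be dereferenced after
     * rcu_read_unlock(), so only a boolean escapes here. */
    static bool fdb_entry_exists(struct net_bridge *br,
                                 const unsigned char *addr, u16 vid)
    {
            bool found;

            rcu_read_lock();
            found = br_fdb_find_rcu(br, addr, vid) != NULL;
            rcu_read_unlock();

            return found;
    }
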
@@ -154,7 +192,7 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
        if (f->added_by_external_learn)
                fdb_del_external_learn(f);
 
-       hlist_del_rcu(&f->hlist);
+       hlist_del_init_rcu(&f->hlist);
        fdb_notify(br, f, RTM_DELNEIGH);
        call_rcu(&f->rcu, fdb_rcu_free);
 }
@@ -198,11 +236,10 @@ void br_fdb_find_delete_local(struct net_bridge *br,
                              const struct net_bridge_port *p,
                              const unsigned char *addr, u16 vid)
 {
-       struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
        struct net_bridge_fdb_entry *f;
 
        spin_lock_bh(&br->hash_lock);
-       f = fdb_find(head, addr, vid);
+       f = br_fdb_find(br, addr, vid);
        if (f && f->is_local && !f->added_by_user && f->dst == p)
                fdb_delete_local(br, p, f);
        spin_unlock_bh(&br->hash_lock);
@@ -266,7 +303,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
        spin_lock_bh(&br->hash_lock);
 
        /* If old entry was unassociated with any port, then delete it. */
-       f = __br_fdb_get(br, br->dev->dev_addr, 0);
+       f = br_fdb_find(br, br->dev->dev_addr, 0);
        if (f && f->is_local && !f->dst && !f->added_by_user)
                fdb_delete_local(br, NULL, f);
 
@@ -281,7 +318,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
        list_for_each_entry(v, &vg->vlan_list, vlist) {
                if (!br_vlan_should_use(v))
                        continue;
-               f = __br_fdb_get(br, br->dev->dev_addr, v->vid);
+               f = br_fdb_find(br, br->dev->dev_addr, v->vid);
                if (f && f->is_local && !f->dst && !f->added_by_user)
                        fdb_delete_local(br, NULL, f);
                fdb_insert(br, NULL, newaddr, v->vid);
@@ -290,34 +327,43 @@ out:
        spin_unlock_bh(&br->hash_lock);
 }
 
-void br_fdb_cleanup(unsigned long _data)
+void br_fdb_cleanup(struct work_struct *work)
 {
-       struct net_bridge *br = (struct net_bridge *)_data;
+       struct net_bridge *br = container_of(work, struct net_bridge,
+                                            gc_work.work);
        unsigned long delay = hold_time(br);
-       unsigned long next_timer = jiffies + br->ageing_time;
+       unsigned long work_delay = delay;
+       unsigned long now = jiffies;
        int i;
 
-       spin_lock(&br->hash_lock);
        for (i = 0; i < BR_HASH_SIZE; i++) {
                struct net_bridge_fdb_entry *f;
                struct hlist_node *n;
 
+               if (!br->hash[i].first)
+                       continue;
+
+               spin_lock_bh(&br->hash_lock);
                hlist_for_each_entry_safe(f, n, &br->hash[i], hlist) {
                        unsigned long this_timer;
+
                        if (f->is_static)
                                continue;
                        if (f->added_by_external_learn)
                                continue;
                        this_timer = f->updated + delay;
-                       if (time_before_eq(this_timer, jiffies))
+                       if (time_after(this_timer, now))
+                               work_delay = min(work_delay, this_timer - now);
+                       else
                                fdb_delete(br, f);
-                       else if (time_before(this_timer, next_timer))
-                               next_timer = this_timer;
                }
+               spin_unlock_bh(&br->hash_lock);
+               cond_resched();
        }
-       spin_unlock(&br->hash_lock);
 
-       mod_timer(&br->gc_timer, round_jiffies_up(next_timer));
+       /* Cleanup minimum 10 milliseconds apart */
+       work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
+       mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
 }
 
 /* Completely flush all dynamic entries in forwarding database.*/
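
The old gc_timer becomes a self-rearming delayed work item: scan, remember the soonest pending expiry, then re-queue with that delay, floored at 10 milliseconds. A sketch of the same pattern on a hypothetical object (struct gc_object and both functions are illustrative, not from the tree):

    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct gc_object {
            struct delayed_work gc_work;
    };

    static void gc_work_fn(struct work_struct *work)
    {
            struct gc_object *obj = container_of(work, struct gc_object,
                                                 gc_work.work);
            unsigned long work_delay = msecs_to_jiffies(1000); /* placeholder */

            /* ... scan entries, expire stale ones, and shrink work_delay
             * to the soonest remaining deadline, as br_fdb_cleanup() does ... */

            /* never re-arm more often than every 10 milliseconds */
            work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
            mod_delayed_work(system_long_wq, &obj->gc_work, work_delay);
    }

    static void gc_object_init(struct gc_object *obj)
    {
            INIT_DELAYED_WORK(&obj->gc_work, gc_work_fn);
            mod_delayed_work(system_long_wq, &obj->gc_work, 0);
    }
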
@@ -371,26 +417,6 @@ void br_fdb_delete_by_port(struct net_bridge *br,
        spin_unlock_bh(&br->hash_lock);
 }
 
-/* No locking or refcounting, assumes caller has rcu_read_lock */
-struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
-                                         const unsigned char *addr,
-                                         __u16 vid)
-{
-       struct net_bridge_fdb_entry *fdb;
-
-       hlist_for_each_entry_rcu(fdb,
-                               &br->hash[br_mac_hash(addr, vid)], hlist) {
-               if (ether_addr_equal(fdb->addr.addr, addr) &&
-                   fdb->vlan_id == vid) {
-                       if (unlikely(has_expired(br, fdb)))
-                               break;
-                       return fdb;
-               }
-       }
-
-       return NULL;
-}
-
 #if IS_ENABLED(CONFIG_ATM_LANE)
 /* Interface used by ATM LANE hook to test
  * if an addr is on some other bridge port */
@@ -405,7 +431,7 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
        if (!port)
                ret = 0;
        else {
-               fdb = __br_fdb_get(port->br, addr, 0);
+               fdb = br_fdb_find_rcu(port->br, addr, 0);
                ret = fdb && fdb->dst && fdb->dst->dev != dev &&
                        fdb->dst->state == BR_STATE_FORWARDING;
        }
@@ -467,34 +493,6 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
        return num;
 }
 
-static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
-                                            const unsigned char *addr,
-                                            __u16 vid)
-{
-       struct net_bridge_fdb_entry *fdb;
-
-       hlist_for_each_entry(fdb, head, hlist) {
-               if (ether_addr_equal(fdb->addr.addr, addr) &&
-                   fdb->vlan_id == vid)
-                       return fdb;
-       }
-       return NULL;
-}
-
-static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
-                                                const unsigned char *addr,
-                                                __u16 vid)
-{
-       struct net_bridge_fdb_entry *fdb;
-
-       hlist_for_each_entry_rcu(fdb, head, hlist) {
-               if (ether_addr_equal(fdb->addr.addr, addr) &&
-                   fdb->vlan_id == vid)
-                       return fdb;
-       }
-       return NULL;
-}
-
 static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
                                               struct net_bridge_port *source,
                                               const unsigned char *addr,
@@ -528,7 +526,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
        if (!is_valid_ether_addr(addr))
                return -EINVAL;
 
-       fdb = fdb_find(head, addr, vid);
+       fdb = br_fdb_find(br, addr, vid);
        if (fdb) {
                /* it is okay to have multiple ports with same
                 * address, just use the first one.
@@ -585,12 +583,15 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
                                br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
                                        source->dev->name, addr, vid);
                } else {
+                       unsigned long now = jiffies;
+
                        /* fastpath: update of existing entry */
                        if (unlikely(source != fdb->dst)) {
                                fdb->dst = source;
                                fdb_modified = true;
                        }
-                       fdb->updated = jiffies;
+                       if (now != fdb->updated)
+                               fdb->updated = now;
                        if (unlikely(added_by_user))
                                fdb->added_by_user = 1;
                        if (unlikely(fdb_modified))
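
The "if (now != fdb->updated)" guard is a cache-line optimization: fdb->updated sits on a line touched for every forwarded frame, so the hot path should only dirty it when the timestamp actually advances. The idiom in isolation, as a hedged sketch (touch_timestamp is a made-up name):

    #include <linux/jiffies.h>

    /* Store-avoidance idiom used above: on a per-packet path, skip
     * the write (and the resulting cache-line invalidation on other
     * CPUs) when the value would not change. */
    static inline void touch_timestamp(unsigned long *stamp)
    {
            unsigned long now = jiffies;

            if (*stamp != now)
                    *stamp = now;
    }
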
@@ -598,7 +599,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
                }
        } else {
                spin_lock(&br->hash_lock);
-               if (likely(!fdb_find(head, addr, vid))) {
+               if (likely(!fdb_find_rcu(head, addr, vid))) {
                        fdb = fdb_create(head, source, addr, vid, 0, 0);
                        if (fdb) {
                                if (unlikely(added_by_user))
@@ -782,7 +783,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
                return -EINVAL;
        }
 
-       fdb = fdb_find(head, addr, vid);
+       fdb = br_fdb_find(br, addr, vid);
        if (fdb == NULL) {
                if (!(flags & NLM_F_CREATE))
                        return -ENOENT;
@@ -929,55 +930,30 @@ out:
        return err;
 }
 
-static int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr,
-                             u16 vid)
-{
-       struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
-       struct net_bridge_fdb_entry *fdb;
-
-       fdb = fdb_find(head, addr, vid);
-       if (!fdb)
-               return -ENOENT;
-
-       fdb_delete(br, fdb);
-       return 0;
-}
-
-static int __br_fdb_delete_by_addr(struct net_bridge *br,
-                                  const unsigned char *addr, u16 vid)
-{
-       int err;
-
-       spin_lock_bh(&br->hash_lock);
-       err = fdb_delete_by_addr(br, addr, vid);
-       spin_unlock_bh(&br->hash_lock);
-
-       return err;
-}
-
-static int fdb_delete_by_addr_and_port(struct net_bridge_port *p,
+static int fdb_delete_by_addr_and_port(struct net_bridge *br,
+                                      const struct net_bridge_port *p,
                                       const u8 *addr, u16 vlan)
 {
-       struct net_bridge *br = p->br;
-       struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)];
        struct net_bridge_fdb_entry *fdb;
 
-       fdb = fdb_find(head, addr, vlan);
+       fdb = br_fdb_find(br, addr, vlan);
        if (!fdb || fdb->dst != p)
                return -ENOENT;
 
        fdb_delete(br, fdb);
+
        return 0;
 }
 
-static int __br_fdb_delete(struct net_bridge_port *p,
+static int __br_fdb_delete(struct net_bridge *br,
+                          const struct net_bridge_port *p,
                           const unsigned char *addr, u16 vid)
 {
        int err;
 
-       spin_lock_bh(&p->br->hash_lock);
-       err = fdb_delete_by_addr_and_port(p, addr, vid);
-       spin_unlock_bh(&p->br->hash_lock);
+       spin_lock_bh(&br->hash_lock);
+       err = fdb_delete_by_addr_and_port(br, p, addr, vid);
+       spin_unlock_bh(&br->hash_lock);
 
        return err;
 }
@@ -990,7 +966,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
        struct net_bridge_vlan_group *vg;
        struct net_bridge_port *p = NULL;
        struct net_bridge_vlan *v;
-       struct net_bridge *br = NULL;
+       struct net_bridge *br;
        int err;
 
        if (dev->priv_flags & IFF_EBRIDGE) {
@@ -1004,6 +980,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
                        return -EINVAL;
                }
                vg = nbp_vlan_group(p);
+               br = p->br;
        }
 
        if (vid) {
@@ -1013,30 +990,20 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
                        return -EINVAL;
                }
 
-               if (dev->priv_flags & IFF_EBRIDGE)
-                       err = __br_fdb_delete_by_addr(br, addr, vid);
-               else
-                       err = __br_fdb_delete(p, addr, vid);
+               err = __br_fdb_delete(br, p, addr, vid);
        } else {
                err = -ENOENT;
-               if (dev->priv_flags & IFF_EBRIDGE)
-                       err = __br_fdb_delete_by_addr(br, addr, 0);
-               else
-                       err &= __br_fdb_delete(p, addr, 0);
-
+               err &= __br_fdb_delete(br, p, addr, 0);
                if (!vg || !vg->num_vlans)
-                       goto out;
+                       return err;
 
                list_for_each_entry(v, &vg->vlan_list, vlist) {
                        if (!br_vlan_should_use(v))
                                continue;
-                       if (dev->priv_flags & IFF_EBRIDGE)
-                               err = __br_fdb_delete_by_addr(br, addr, v->vid);
-                       else
-                               err &= __br_fdb_delete(p, addr, v->vid);
+                       err &= __br_fdb_delete(br, p, addr, v->vid);
                }
        }
-out:
+
        return err;
 }
 
@@ -1107,7 +1074,7 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
        spin_lock_bh(&br->hash_lock);
 
        head = &br->hash[br_mac_hash(addr, vid)];
-       fdb = fdb_find(head, addr, vid);
+       fdb = br_fdb_find(br, addr, vid);
        if (!fdb) {
                fdb = fdb_create(head, p, addr, vid, 0, 0);
                if (!fdb) {
@@ -1135,15 +1102,13 @@ err_unlock:
 int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
                              const unsigned char *addr, u16 vid)
 {
-       struct hlist_head *head;
        struct net_bridge_fdb_entry *fdb;
        int err = 0;
 
        ASSERT_RTNL();
        spin_lock_bh(&br->hash_lock);
 
-       head = &br->hash[br_mac_hash(addr, vid)];
-       fdb = fdb_find(head, addr, vid);
+       fdb = br_fdb_find(br, addr, vid);
        if (fdb && fdb->added_by_external_learn)
                fdb_delete(br, fdb);
        else
index 7cb41aee4c82e63b4f278e64564c3ab7e8db3e10..6bfac29318f21e2d2a177fb6739d5e5a32b358b9 100644 (file)
@@ -80,7 +80,7 @@ static void __br_forward(const struct net_bridge_port *to,
        int br_hook;
 
        vg = nbp_vlan_group_rcu(to);
-       skb = br_handle_vlan(to->br, vg, skb);
+       skb = br_handle_vlan(to->br, to, vg, skb);
        if (!skb)
                return;
 
@@ -220,6 +220,31 @@ out:
 }
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
+                              const unsigned char *addr, bool local_orig)
+{
+       struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+       const unsigned char *src = eth_hdr(skb)->h_source;
+
+       if (!should_deliver(p, skb))
+               return;
+
+       /* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */
+       if (skb->dev == p->dev && ether_addr_equal(src, addr))
+               return;
+
+       skb = skb_copy(skb, GFP_ATOMIC);
+       if (!skb) {
+               dev->stats.tx_dropped++;
+               return;
+       }
+
+       if (!is_broadcast_ether_addr(addr))
+               memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);
+
+       __br_forward(p, skb, local_orig);
+}
+
 /* called with rcu_read_lock */
 void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
                        struct sk_buff *skb,
@@ -241,10 +266,20 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
                rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
                             NULL;
 
-               port = (unsigned long)lport > (unsigned long)rport ?
-                      lport : rport;
+               if ((unsigned long)lport > (unsigned long)rport) {
+                       port = lport;
+
+                       if (port->flags & BR_MULTICAST_TO_UNICAST) {
+                               maybe_deliver_addr(lport, skb, p->eth_addr,
+                                                  local_orig);
+                               goto delivered;
+                       }
+               } else {
+                       port = rport;
+               }
 
                prev = maybe_deliver(prev, port, skb, local_orig);
+delivered:
                if (IS_ERR(prev))
                        goto out;
                if (prev == port)
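
maybe_deliver_addr() implements multicast-to-unicast: the frame is duplicated with skb_copy(), a full copy rather than skb_clone(), since the header is about to be rewritten, and the Ethernet destination is retargeted at the subscriber. A sketch of just that rewrite step; copy_as_unicast is an illustrative name:

    #include <linux/etherdevice.h>
    #include <linux/skbuff.h>
    #include <linux/string.h>

    /* Sketch of the rewrite done by maybe_deliver_addr(): copy the
     * frame, then point its Ethernet destination at one subscriber's
     * unicast address. */
    static struct sk_buff *copy_as_unicast(struct sk_buff *skb,
                                           const unsigned char *addr)
    {
            struct sk_buff *copy = skb_copy(skb, GFP_ATOMIC);

            if (!copy)
                    return NULL;

            if (!is_broadcast_ether_addr(addr))
                    memcpy(eth_hdr(copy)->h_dest, addr, ETH_ALEN);

            return copy;
    }
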
index ed0dd334008439b2283b9247fbda86b5bf6b64c4..8ac1770aa222f21f89027d303a218c49be9dc650 100644 (file)
@@ -313,7 +313,7 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
 
        br_vlan_flush(br);
        br_multicast_dev_del(br);
-       del_timer_sync(&br->gc_timer);
+       cancel_delayed_work_sync(&br->gc_work);
 
        br_sysfs_delbr(br->dev);
        unregister_netdevice_queue(br->dev, head);
index 855b72fbe1da405ba37deca3f340a5b2e02f8dda..236f34244dbe1f2cd2bdfaf9d4eceb0765276882 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/export.h>
 #include <linux/rculist.h>
 #include "br_private.h"
+#include "br_private_tunnel.h"
 
 /* Hook for brouter */
 br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
@@ -57,7 +58,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
 
        indev = skb->dev;
        skb->dev = brdev;
-       skb = br_handle_vlan(br, vg, skb);
+       skb = br_handle_vlan(br, NULL, vg, skb);
        if (!skb)
                return NET_RX_DROP;
        /* update the multicast stats if the packet is IGMP/MLD */
@@ -113,7 +114,7 @@ static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br,
                        return;
                }
 
-               f = __br_fdb_get(br, n->ha, vid);
+               f = br_fdb_find_rcu(br, n->ha, vid);
                if (f && ((p->flags & BR_PROXYARP) ||
                          (f->dst && (f->dst->flags & BR_PROXYARP_WIFI)))) {
                        arp_send(ARPOP_REPLY, ETH_P_ARP, sip, skb->dev, tip,
@@ -188,16 +189,19 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
                }
                break;
        case BR_PKT_UNICAST:
-               dst = __br_fdb_get(br, dest, vid);
+               dst = br_fdb_find_rcu(br, dest, vid);
        default:
                break;
        }
 
        if (dst) {
+               unsigned long now = jiffies;
+
                if (dst->is_local)
                        return br_pass_frame_up(skb);
 
-               dst->used = jiffies;
+               if (now != dst->used)
+                       dst->used = now;
                br_forward(dst->dst, skb, local_rcv, false);
        } else {
                if (!mcast_hit)
@@ -261,6 +265,11 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
                return RX_HANDLER_CONSUMED;
 
        p = br_port_get_rcu(skb->dev);
+       if (p->flags & BR_VLAN_TUNNEL) {
+               if (br_handle_ingress_vlan_tunnel(skb, p,
+                                                 nbp_vlan_group_rcu(p)))
+                       goto drop;
+       }
 
        if (unlikely(is_link_local_ether_addr(dest))) {
                u16 fwd_mask = p->br->group_fwd_mask_required;
index da8157c57eb15d83471862260bbc364a76a537a0..7970f8540cbbc0036b9e18c77a4feb0ec3b34264 100644 (file)
@@ -149,7 +149,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
                b.hello_timer_value = br_timer_value(&br->hello_timer);
                b.tcn_timer_value = br_timer_value(&br->tcn_timer);
                b.topology_change_timer_value = br_timer_value(&br->topology_change_timer);
-               b.gc_timer_value = br_timer_value(&br->gc_timer);
+               b.gc_timer_value = br_timer_value(&br->gc_work.timer);
                rcu_read_unlock();
 
                if (copy_to_user((void __user *)args[1], &b, sizeof(b)))
index 7dbc80d01eb00ab69fc06a0d613167651a9b15f2..056e6ac49d8fc7727fce8fc8320b8db4ac5351d2 100644 (file)
@@ -531,7 +531,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
                        break;
        }
 
-       p = br_multicast_new_port_group(port, group, *pp, state);
+       p = br_multicast_new_port_group(port, group, *pp, state, NULL);
        if (unlikely(!p))
                return -ENOMEM;
        rcu_assign_pointer(*pp, p);
index b30e77e8427c54bffc6801e2f1139139807c439f..b760f2620abf320307a65c3f5baf86ff91221545 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/inetdevice.h>
 #include <linux/mroute.h>
 #include <net/ip.h>
+#include <net/switchdev.h>
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
 #include <net/mld.h>
@@ -43,12 +44,15 @@ static void br_multicast_add_router(struct net_bridge *br,
 static void br_ip4_multicast_leave_group(struct net_bridge *br,
                                         struct net_bridge_port *port,
                                         __be32 group,
-                                        __u16 vid);
+                                        __u16 vid,
+                                        const unsigned char *src);
+
+static void __del_port_router(struct net_bridge_port *p);
 #if IS_ENABLED(CONFIG_IPV6)
 static void br_ip6_multicast_leave_group(struct net_bridge *br,
                                         struct net_bridge_port *port,
                                         const struct in6_addr *group,
-                                        __u16 vid);
+                                        __u16 vid, const unsigned char *src);
 #endif
 unsigned int br_mdb_rehash_seq;
 
@@ -540,7 +544,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
                break;
        case 2:
                mld2q = (struct mld2_query *)icmp6_hdr(skb);
-               mld2q->mld2q_mrc = ntohs((u16)jiffies_to_msecs(interval));
+               mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
                mld2q->mld2q_type = ICMPV6_MGM_QUERY;
                mld2q->mld2q_code = 0;
                mld2q->mld2q_cksum = 0;
@@ -711,7 +715,8 @@ struct net_bridge_port_group *br_multicast_new_port_group(
                        struct net_bridge_port *port,
                        struct br_ip *group,
                        struct net_bridge_port_group __rcu *next,
-                       unsigned char flags)
+                       unsigned char flags,
+                       const unsigned char *src)
 {
        struct net_bridge_port_group *p;
 
@@ -726,12 +731,32 @@ struct net_bridge_port_group *br_multicast_new_port_group(
        hlist_add_head(&p->mglist, &port->mglist);
        setup_timer(&p->timer, br_multicast_port_group_expired,
                    (unsigned long)p);
+
+       if (src)
+               memcpy(p->eth_addr, src, ETH_ALEN);
+       else
+               memset(p->eth_addr, 0xff, ETH_ALEN);
+
        return p;
 }
 
+static bool br_port_group_equal(struct net_bridge_port_group *p,
+                               struct net_bridge_port *port,
+                               const unsigned char *src)
+{
+       if (p->port != port)
+               return false;
+
+       if (!(port->flags & BR_MULTICAST_TO_UNICAST))
+               return true;
+
+       return ether_addr_equal(src, p->eth_addr);
+}
+
 static int br_multicast_add_group(struct net_bridge *br,
                                  struct net_bridge_port *port,
-                                 struct br_ip *group)
+                                 struct br_ip *group,
+                                 const unsigned char *src)
 {
        struct net_bridge_port_group __rcu **pp;
        struct net_bridge_port_group *p;
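
br_port_group_equal() encodes the key change: without BR_MULTICAST_TO_UNICAST a port keeps one entry per group, with it the entries become per (group, source MAC). A standalone restatement of the rule; the structs and flag bit below are illustrative only:

    #include <stdbool.h>
    #include <string.h>

    #define ETH_ALEN 6
    #define MCAST_TO_UNICAST 0x1 /* illustrative stand-in for the flag */

    struct port  { unsigned int flags; };
    struct group { struct port *port; unsigned char eth_addr[ETH_ALEN]; };

    /* Same rule as br_port_group_equal(): the source MAC only
     * participates in the match when unicast delivery is enabled. */
    static bool group_matches(const struct group *g, const struct port *p,
                              const unsigned char *src)
    {
            if (g->port != p)
                    return false;
            if (!(p->flags & MCAST_TO_UNICAST))
                    return true;
            return memcmp(src, g->eth_addr, ETH_ALEN) == 0;
    }
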
@@ -758,13 +783,13 @@ static int br_multicast_add_group(struct net_bridge *br,
        for (pp = &mp->ports;
             (p = mlock_dereference(*pp, br)) != NULL;
             pp = &p->next) {
-               if (p->port == port)
+               if (br_port_group_equal(p, port, src))
                        goto found;
                if ((unsigned long)p->port < (unsigned long)port)
                        break;
        }
 
-       p = br_multicast_new_port_group(port, group, *pp, 0);
+       p = br_multicast_new_port_group(port, group, *pp, 0, src);
        if (unlikely(!p))
                goto err;
        rcu_assign_pointer(*pp, p);
@@ -783,7 +808,8 @@ err:
 static int br_ip4_multicast_add_group(struct net_bridge *br,
                                      struct net_bridge_port *port,
                                      __be32 group,
-                                     __u16 vid)
+                                     __u16 vid,
+                                     const unsigned char *src)
 {
        struct br_ip br_group;
 
@@ -794,14 +820,15 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
        br_group.proto = htons(ETH_P_IP);
        br_group.vid = vid;
 
-       return br_multicast_add_group(br, port, &br_group);
+       return br_multicast_add_group(br, port, &br_group, src);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_add_group(struct net_bridge *br,
                                      struct net_bridge_port *port,
                                      const struct in6_addr *group,
-                                     __u16 vid)
+                                     __u16 vid,
+                                     const unsigned char *src)
 {
        struct br_ip br_group;
 
@@ -812,7 +839,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
        br_group.proto = htons(ETH_P_IPV6);
        br_group.vid = vid;
 
-       return br_multicast_add_group(br, port, &br_group);
+       return br_multicast_add_group(br, port, &br_group, src);
 }
 #endif
 
@@ -824,16 +851,10 @@ static void br_multicast_router_expired(unsigned long data)
        spin_lock(&br->multicast_lock);
        if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
            port->multicast_router == MDB_RTR_TYPE_PERM ||
-           timer_pending(&port->multicast_router_timer) ||
-           hlist_unhashed(&port->rlist))
+           timer_pending(&port->multicast_router_timer))
                goto out;
 
-       hlist_del_init_rcu(&port->rlist);
-       br_rtr_notify(br->dev, port, RTM_DELMDB);
-       /* Don't allow timer refresh if the router expired */
-       if (port->multicast_router == MDB_RTR_TYPE_TEMP)
-               port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
-
+       __del_port_router(port);
 out:
        spin_unlock(&br->multicast_lock);
 }
@@ -982,6 +1003,18 @@ static void br_ip6_multicast_port_query_expired(unsigned long data)
 }
 #endif
 
+static void br_mc_disabled_update(struct net_device *dev, bool value)
+{
+       struct switchdev_attr attr = {
+               .orig_dev = dev,
+               .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
+               .flags = SWITCHDEV_F_DEFER,
+               .u.mc_disabled = value,
+       };
+
+       switchdev_port_attr_set(dev, &attr);
+}
+
 int br_multicast_add_port(struct net_bridge_port *port)
 {
        port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
@@ -994,6 +1027,8 @@ int br_multicast_add_port(struct net_bridge_port *port)
        setup_timer(&port->ip6_own_query.timer,
                    br_ip6_multicast_port_query_expired, (unsigned long)port);
 #endif
+       br_mc_disabled_update(port->dev, port->br->multicast_disabled);
+
        port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
        if (!port->mcast_stats)
                return -ENOMEM;
@@ -1061,13 +1096,8 @@ void br_multicast_disable_port(struct net_bridge_port *port)
                if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
                        br_multicast_del_pg(br, pg);
 
-       if (!hlist_unhashed(&port->rlist)) {
-               hlist_del_init_rcu(&port->rlist);
-               br_rtr_notify(br->dev, port, RTM_DELMDB);
-               /* Don't allow timer refresh if disabling */
-               if (port->multicast_router == MDB_RTR_TYPE_TEMP)
-                       port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
-       }
+       __del_port_router(port);
+
        del_timer(&port->multicast_router_timer);
        del_timer(&port->ip4_own_query.timer);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1081,6 +1111,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
                                         struct sk_buff *skb,
                                         u16 vid)
 {
+       const unsigned char *src;
        struct igmpv3_report *ih;
        struct igmpv3_grec *grec;
        int i;
@@ -1121,12 +1152,14 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
                        continue;
                }
 
+               src = eth_hdr(skb)->h_source;
                if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
                     type == IGMPV3_MODE_IS_INCLUDE) &&
                    ntohs(grec->grec_nsrcs) == 0) {
-                       br_ip4_multicast_leave_group(br, port, group, vid);
+                       br_ip4_multicast_leave_group(br, port, group, vid, src);
                } else {
-                       err = br_ip4_multicast_add_group(br, port, group, vid);
+                       err = br_ip4_multicast_add_group(br, port, group, vid,
+                                                        src);
                        if (err)
                                break;
                }
@@ -1141,6 +1174,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
                                        struct sk_buff *skb,
                                        u16 vid)
 {
+       const unsigned char *src;
        struct icmp6hdr *icmp6h;
        struct mld2_grec *grec;
        int i;
@@ -1188,14 +1222,16 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
                        continue;
                }
 
+               src = eth_hdr(skb)->h_source;
                if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
                     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
                    ntohs(*nsrcs) == 0) {
                        br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
-                                                    vid);
+                                                    vid, src);
                } else {
                        err = br_ip6_multicast_add_group(br, port,
-                                                        &grec->grec_mca, vid);
+                                                        &grec->grec_mca, vid,
+                                                        src);
                        if (err)
                                break;
                }
@@ -1281,6 +1317,19 @@ br_multicast_update_query_timer(struct net_bridge *br,
        mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
 }
 
+static void br_port_mc_router_state_change(struct net_bridge_port *p,
+                                          bool is_mc_router)
+{
+       struct switchdev_attr attr = {
+               .orig_dev = p->dev,
+               .id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
+               .flags = SWITCHDEV_F_DEFER,
+               .u.mrouter = is_mc_router,
+       };
+
+       switchdev_port_attr_set(p->dev, &attr);
+}
+
 /*
  * Add port to router_list
  *  list is maintained ordered by pointer value
@@ -1306,6 +1355,7 @@ static void br_multicast_add_router(struct net_bridge *br,
        else
                hlist_add_head_rcu(&port->rlist, &br->router_list);
        br_rtr_notify(br->dev, port, RTM_NEWMDB);
+       br_port_mc_router_state_change(port, true);
 }
 
 static void br_multicast_mark_router(struct net_bridge *br,
@@ -1511,7 +1561,8 @@ br_multicast_leave_group(struct net_bridge *br,
                         struct net_bridge_port *port,
                         struct br_ip *group,
                         struct bridge_mcast_other_query *other_query,
-                        struct bridge_mcast_own_query *own_query)
+                        struct bridge_mcast_own_query *own_query,
+                        const unsigned char *src)
 {
        struct net_bridge_mdb_htable *mdb;
        struct net_bridge_mdb_entry *mp;
@@ -1535,7 +1586,7 @@ br_multicast_leave_group(struct net_bridge *br,
                for (pp = &mp->ports;
                     (p = mlock_dereference(*pp, br)) != NULL;
                     pp = &p->next) {
-                       if (p->port != port)
+                       if (!br_port_group_equal(p, port, src))
                                continue;
 
                        rcu_assign_pointer(*pp, p->next);
@@ -1566,7 +1617,7 @@ br_multicast_leave_group(struct net_bridge *br,
                for (p = mlock_dereference(mp->ports, br);
                     p != NULL;
                     p = mlock_dereference(p->next, br)) {
-                       if (p->port != port)
+                       if (!br_port_group_equal(p, port, src))
                                continue;
 
                        if (!hlist_unhashed(&p->mglist) &&
@@ -1617,7 +1668,8 @@ out:
 static void br_ip4_multicast_leave_group(struct net_bridge *br,
                                         struct net_bridge_port *port,
                                         __be32 group,
-                                        __u16 vid)
+                                        __u16 vid,
+                                        const unsigned char *src)
 {
        struct br_ip br_group;
        struct bridge_mcast_own_query *own_query;
@@ -1632,14 +1684,15 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
        br_group.vid = vid;
 
        br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
-                                own_query);
+                                own_query, src);
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
 static void br_ip6_multicast_leave_group(struct net_bridge *br,
                                         struct net_bridge_port *port,
                                         const struct in6_addr *group,
-                                        __u16 vid)
+                                        __u16 vid,
+                                        const unsigned char *src)
 {
        struct br_ip br_group;
        struct bridge_mcast_own_query *own_query;
@@ -1654,7 +1707,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
        br_group.vid = vid;
 
        br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
-                                own_query);
+                                own_query, src);
 }
 #endif
 
@@ -1712,6 +1765,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
                                 u16 vid)
 {
        struct sk_buff *skb_trimmed = NULL;
+       const unsigned char *src;
        struct igmphdr *ih;
        int err;
 
@@ -1731,13 +1785,14 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
        }
 
        ih = igmp_hdr(skb);
+       src = eth_hdr(skb)->h_source;
        BR_INPUT_SKB_CB(skb)->igmp = ih->type;
 
        switch (ih->type) {
        case IGMP_HOST_MEMBERSHIP_REPORT:
        case IGMPV2_HOST_MEMBERSHIP_REPORT:
                BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
-               err = br_ip4_multicast_add_group(br, port, ih->group, vid);
+               err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
                break;
        case IGMPV3_HOST_MEMBERSHIP_REPORT:
                err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
@@ -1746,7 +1801,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
                err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
                break;
        case IGMP_HOST_LEAVE_MESSAGE:
-               br_ip4_multicast_leave_group(br, port, ih->group, vid);
+               br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
                break;
        }
 
@@ -1766,6 +1821,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
                                 u16 vid)
 {
        struct sk_buff *skb_trimmed = NULL;
+       const unsigned char *src;
        struct mld_msg *mld;
        int err;
 
@@ -1785,8 +1841,10 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 
        switch (mld->mld_type) {
        case ICMPV6_MGM_REPORT:
+               src = eth_hdr(skb)->h_source;
                BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
-               err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
+               err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
+                                                src);
                break;
        case ICMPV6_MLD2_REPORT:
                err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
@@ -1795,7 +1853,8 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
                err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
                break;
        case ICMPV6_MGM_REDUCTION:
-               br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
+               src = eth_hdr(skb)->h_source;
+               br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
                break;
        }
 
@@ -2004,6 +2063,11 @@ static void __del_port_router(struct net_bridge_port *p)
                return;
        hlist_del_init_rcu(&p->rlist);
        br_rtr_notify(p->br->dev, p, RTM_DELMDB);
+       br_port_mc_router_state_change(p, false);
+
+       /* don't allow timer refresh */
+       if (p->multicast_router == MDB_RTR_TYPE_TEMP)
+               p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
 }
 
 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
@@ -2081,6 +2145,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
        if (br->multicast_disabled == !val)
                goto unlock;
 
+       br_mc_disabled_update(br->dev, !val);
        br->multicast_disabled = !val;
        if (br->multicast_disabled)
                goto unlock;
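
The br_multicast.c hunks above thread the sender's MAC address (src, taken from eth_hdr(skb)->h_source) into the group add/leave paths, so a port-group entry can represent a single listening host rather than a whole port; that is what makes multicast-to-unicast delivery possible. br_port_group_equal() itself is defined elsewhere in this series, so the following is only a minimal userspace sketch of the comparison it is expected to perform, with stand-in types:

/* Illustrative, userspace-compilable sketch -- not part of the commit.
 * A port-group entry now matches on (port, source MAC) instead of the
 * port alone, giving one entry per listening host.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct pg_entry {                            /* stand-in for net_bridge_port_group */
	const void *port;                    /* stand-in for p->port               */
	unsigned char eth_addr[ETH_ALEN];    /* new: per-host source MAC           */
};

static bool pg_equal(const struct pg_entry *p, const void *port,
		     const unsigned char *src)
{
	return p->port == port && memcmp(p->eth_addr, src, ETH_ALEN) == 0;
}

int main(void)
{
	int port_a;                          /* dummy port identity */
	struct pg_entry e = { &port_a, { 0, 1, 2, 3, 4, 5 } };
	unsigned char same[ETH_ALEN]  = { 0, 1, 2, 3, 4, 5 };
	unsigned char other[ETH_ALEN] = { 0, 1, 2, 3, 4, 6 };

	printf("%d %d\n", pg_equal(&e, &port_a, same),   /* 1 */
	       pg_equal(&e, &port_a, other));            /* 0 */
	return 0;
}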
index 7109b389ea585ab4b3a52bac803ae1fd860c2f65..a8f6acd23e309dcf51e6825076d9b3ba00996a9c 100644 (file)
@@ -20,6 +20,7 @@
 
 #include "br_private.h"
 #include "br_private_stp.h"
+#include "br_private_tunnel.h"
 
 static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
                                u32 filter_mask)
@@ -95,9 +96,10 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
                                           u32 filter_mask)
 {
        struct net_bridge_vlan_group *vg = NULL;
-       struct net_bridge_port *p;
+       struct net_bridge_port *p = NULL;
        struct net_bridge *br;
        int num_vlan_infos;
+       size_t vinfo_sz = 0;
 
        rcu_read_lock();
        if (br_port_exists(dev)) {
@@ -110,8 +112,13 @@ static size_t br_get_link_af_size_filtered(const struct net_device *dev,
        num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
        rcu_read_unlock();
 
+       if (p && (p->flags & BR_VLAN_TUNNEL))
+               vinfo_sz += br_get_vlan_tunnel_info_size(vg);
+
        /* Each VLAN is returned in bridge_vlan_info along with flags */
-       return num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
+       vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));
+
+       return vinfo_sz;
 }
 
 static inline size_t br_port_info_size(void)
@@ -123,10 +130,12 @@ static inline size_t br_port_info_size(void)
                + nla_total_size(1)     /* IFLA_BRPORT_GUARD */
                + nla_total_size(1)     /* IFLA_BRPORT_PROTECT */
                + nla_total_size(1)     /* IFLA_BRPORT_FAST_LEAVE */
+               + nla_total_size(1)     /* IFLA_BRPORT_MCAST_TO_UCAST */
                + nla_total_size(1)     /* IFLA_BRPORT_LEARNING */
                + nla_total_size(1)     /* IFLA_BRPORT_UNICAST_FLOOD */
                + nla_total_size(1)     /* IFLA_BRPORT_PROXYARP */
                + nla_total_size(1)     /* IFLA_BRPORT_PROXYARP_WIFI */
+               + nla_total_size(1)     /* IFLA_BRPORT_VLAN_TUNNEL */
                + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_ROOT_ID */
                + nla_total_size(sizeof(struct ifla_bridge_id)) /* IFLA_BRPORT_BRIDGE_ID */
                + nla_total_size(sizeof(u16))   /* IFLA_BRPORT_DESIGNATED_PORT */
@@ -173,6 +182,8 @@ static int br_port_fill_attrs(struct sk_buff *skb,
                       !!(p->flags & BR_ROOT_BLOCK)) ||
            nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
                       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
+           nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
+                      !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
            nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
            nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
                       !!(p->flags & BR_FLOOD)) ||
@@ -191,7 +202,9 @@ static int br_port_fill_attrs(struct sk_buff *skb,
            nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
            nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
                       p->topology_change_ack) ||
-           nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending))
+           nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
+           nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
+                                                       BR_VLAN_TUNNEL)))
                return -EMSGSIZE;
 
        timerval = br_timer_value(&p->message_age_timer);
@@ -414,6 +427,9 @@ static int br_fill_ifinfo(struct sk_buff *skb,
                        err = br_fill_ifvlaninfo_compressed(skb, vg);
                else
                        err = br_fill_ifvlaninfo(skb, vg);
+
+               if (port && (port->flags & BR_VLAN_TUNNEL))
+                       err = br_fill_vlan_tunnel_info(skb, vg);
                rcu_read_unlock();
                if (err)
                        goto nla_put_failure;
@@ -514,60 +530,88 @@ static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
        return err;
 }
 
+static int br_process_vlan_info(struct net_bridge *br,
+                               struct net_bridge_port *p, int cmd,
+                               struct bridge_vlan_info *vinfo_curr,
+                               struct bridge_vlan_info **vinfo_last)
+{
+       if (!vinfo_curr->vid || vinfo_curr->vid >= VLAN_VID_MASK)
+               return -EINVAL;
+
+       if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
+               /* check if we are already processing a range */
+               if (*vinfo_last)
+                       return -EINVAL;
+               *vinfo_last = vinfo_curr;
+               /* don't allow range of pvids */
+               if ((*vinfo_last)->flags & BRIDGE_VLAN_INFO_PVID)
+                       return -EINVAL;
+               return 0;
+       }
+
+       if (*vinfo_last) {
+               struct bridge_vlan_info tmp_vinfo;
+               int v, err;
+
+               if (!(vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END))
+                       return -EINVAL;
+
+               if (vinfo_curr->vid <= (*vinfo_last)->vid)
+                       return -EINVAL;
+
+               memcpy(&tmp_vinfo, *vinfo_last,
+                      sizeof(struct bridge_vlan_info));
+               for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
+                       tmp_vinfo.vid = v;
+                       err = br_vlan_info(br, p, cmd, &tmp_vinfo);
+                       if (err)
+                               break;
+               }
+               *vinfo_last = NULL;
+
+               return 0;
+       }
+
+       return br_vlan_info(br, p, cmd, vinfo_curr);
+}
+
 static int br_afspec(struct net_bridge *br,
                     struct net_bridge_port *p,
                     struct nlattr *af_spec,
                     int cmd)
 {
-       struct bridge_vlan_info *vinfo_start = NULL;
-       struct bridge_vlan_info *vinfo = NULL;
+       struct bridge_vlan_info *vinfo_curr = NULL;
+       struct bridge_vlan_info *vinfo_last = NULL;
        struct nlattr *attr;
-       int err = 0;
-       int rem;
+       struct vtunnel_info tinfo_last = {};
+       struct vtunnel_info tinfo_curr = {};
+       int err = 0, rem;
 
        nla_for_each_nested(attr, af_spec, rem) {
-               if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
-                       continue;
-               if (nla_len(attr) != sizeof(struct bridge_vlan_info))
-                       return -EINVAL;
-               vinfo = nla_data(attr);
-               if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
-                       return -EINVAL;
-               if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
-                       if (vinfo_start)
-                               return -EINVAL;
-                       vinfo_start = vinfo;
-                       /* don't allow range of pvids */
-                       if (vinfo_start->flags & BRIDGE_VLAN_INFO_PVID)
-                               return -EINVAL;
-                       continue;
-               }
-
-               if (vinfo_start) {
-                       struct bridge_vlan_info tmp_vinfo;
-                       int v;
-
-                       if (!(vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END))
+               err = 0;
+               switch (nla_type(attr)) {
+               case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
+                       if (!(p->flags & BR_VLAN_TUNNEL))
                                return -EINVAL;
-
-                       if (vinfo->vid <= vinfo_start->vid)
+                       err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
+                       if (err)
+                               return err;
+                       err = br_process_vlan_tunnel_info(br, p, cmd,
+                                                         &tinfo_curr,
+                                                         &tinfo_last);
+                       if (err)
+                               return err;
+                       break;
+               case IFLA_BRIDGE_VLAN_INFO:
+                       if (nla_len(attr) != sizeof(struct bridge_vlan_info))
                                return -EINVAL;
-
-                       memcpy(&tmp_vinfo, vinfo_start,
-                              sizeof(struct bridge_vlan_info));
-
-                       for (v = vinfo_start->vid; v <= vinfo->vid; v++) {
-                               tmp_vinfo.vid = v;
-                               err = br_vlan_info(br, p, cmd, &tmp_vinfo);
-                               if (err)
-                                       break;
-                       }
-                       vinfo_start = NULL;
-               } else {
-                       err = br_vlan_info(br, p, cmd, vinfo);
-               }
-               if (err)
+                       vinfo_curr = nla_data(attr);
+                       err = br_process_vlan_info(br, p, cmd, vinfo_curr,
+                                                  &vinfo_last);
+                       if (err)
+                               return err;
                        break;
+               }
        }
 
        return err;
@@ -586,6 +630,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
        [IFLA_BRPORT_PROXYARP]  = { .type = NLA_U8 },
        [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
        [IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
+       [IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
 };
 
 /* Change the state of the port and notify spanning tree */
@@ -626,8 +671,9 @@ static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
 /* Process bridge protocol info on port */
 static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
 {
-       int err;
        unsigned long old_flags = p->flags;
+       bool br_vlan_tunnel_old = false;
+       int err;
 
        br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
        br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
@@ -636,9 +682,15 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
        br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
        br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
        br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
+       br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
        br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
        br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
 
+       br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
+       br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
+       if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
+               nbp_vlan_tunnel_info_flush(p);
+
        if (tb[IFLA_BRPORT_COST]) {
                err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
                if (err)
@@ -1195,7 +1247,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
        if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
                              IFLA_BR_PAD))
                return -EMSGSIZE;
-       clockval = br_timer_value(&br->gc_timer);
+       clockval = br_timer_value(&br->gc_work.timer);
        if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
                return -EMSGSIZE;
 
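
br_get_link_af_size_filtered() above now adds the per-vlan tunnel attributes to the message size estimate before the skb is built. All of this sizing rests on nla_total_size(); here is a standalone sketch of that arithmetic, with the uapi macro values inlined so it compiles outside the tree:

/* Standalone sketch of the netlink size arithmetic used above; the macro
 * values match include/uapi/linux/netlink.h but are inlined here so the
 * example builds on its own.
 */
#include <stdio.h>

#define NLA_ALIGNTO   4
#define NLA_ALIGN(n)  (((n) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN    NLA_ALIGN(4)          /* struct nlattr is 4 bytes */

static int nla_total_size(int payload)
{
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	/* one u8 flag attribute such as IFLA_BRPORT_VLAN_TUNNEL */
	printf("u8 attr : %d bytes\n", nla_total_size(1));   /* 8 */
	/* one struct bridge_vlan_info (u16 flags + u16 vid)   */
	printf("vlaninfo: %d bytes\n", nla_total_size(4));   /* 8 */
	return 0;
}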
diff --git a/net/bridge/br_netlink_tunnel.c b/net/bridge/br_netlink_tunnel.c
new file mode 100644 (file)
index 0000000..c913491
--- /dev/null
@@ -0,0 +1,294 @@
+/*
+ *     Bridge per vlan tunnel port dst_metadata netlink control interface
+ *
+ *     Authors:
+ *     Roopa Prabhu            <roopa@cumulusnetworks.com>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/etherdevice.h>
+#include <net/rtnetlink.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <uapi/linux/if_bridge.h>
+#include <net/dst_metadata.h>
+
+#include "br_private.h"
+#include "br_private_tunnel.h"
+
+static size_t __get_vlan_tinfo_size(void)
+{
+       return nla_total_size(0) + /* nest IFLA_BRIDGE_VLAN_TUNNEL_INFO */
+                 nla_total_size(sizeof(u32)) + /* IFLA_BRIDGE_VLAN_TUNNEL_ID */
+                 nla_total_size(sizeof(u16)) + /* IFLA_BRIDGE_VLAN_TUNNEL_VID */
+                 nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_VLAN_TUNNEL_FLAGS */
+}
+
+static bool vlan_tunid_inrange(struct net_bridge_vlan *v_curr,
+                              struct net_bridge_vlan *v_last)
+{
+       __be32 tunid_curr = tunnel_id_to_key32(v_curr->tinfo.tunnel_id);
+       __be32 tunid_last = tunnel_id_to_key32(v_last->tinfo.tunnel_id);
+
+       return (be32_to_cpu(tunid_curr) - be32_to_cpu(tunid_last)) == 1;
+}
+
+static int __get_num_vlan_tunnel_infos(struct net_bridge_vlan_group *vg)
+{
+       struct net_bridge_vlan *v, *vtbegin = NULL, *vtend = NULL;
+       int num_tinfos = 0;
+
+       /* Count the number of vlan tunnel infos needed */
+       list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
+               /* only a context, bridge vlan not activated */
+               if (!br_vlan_should_use(v) || !v->tinfo.tunnel_id)
+                       continue;
+
+               if (!vtbegin) {
+                       goto initvars;
+               } else if ((v->vid - vtend->vid) == 1 &&
+                          vlan_tunid_inrange(v, vtend)) {
+                       vtend = v;
+                       continue;
+               } else {
+                       if ((vtend->vid - vtbegin->vid) > 0)
+                               num_tinfos += 2;
+                       else
+                               num_tinfos += 1;
+               }
+initvars:
+               vtbegin = v;
+               vtend = v;
+       }
+
+       if (vtbegin && vtend) {
+               if ((vtend->vid - vtbegin->vid) > 0)
+                       num_tinfos += 2;
+               else
+                       num_tinfos += 1;
+       }
+
+       return num_tinfos;
+}
+
+int br_get_vlan_tunnel_info_size(struct net_bridge_vlan_group *vg)
+{
+       int num_tinfos;
+
+       if (!vg)
+               return 0;
+
+       rcu_read_lock();
+       num_tinfos = __get_num_vlan_tunnel_infos(vg);
+       rcu_read_unlock();
+
+       return num_tinfos * __get_vlan_tinfo_size();
+}
+
+static int br_fill_vlan_tinfo(struct sk_buff *skb, u16 vid,
+                             __be64 tunnel_id, u16 flags)
+{
+       __be32 tid = tunnel_id_to_key32(tunnel_id);
+       struct nlattr *tmap;
+
+       tmap = nla_nest_start(skb, IFLA_BRIDGE_VLAN_TUNNEL_INFO);
+       if (!tmap)
+               return -EMSGSIZE;
+       if (nla_put_u32(skb, IFLA_BRIDGE_VLAN_TUNNEL_ID,
+                       be32_to_cpu(tid)))
+               goto nla_put_failure;
+       if (nla_put_u16(skb, IFLA_BRIDGE_VLAN_TUNNEL_VID,
+                       vid))
+               goto nla_put_failure;
+       if (nla_put_u16(skb, IFLA_BRIDGE_VLAN_TUNNEL_FLAGS,
+                       flags))
+               goto nla_put_failure;
+       nla_nest_end(skb, tmap);
+
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, tmap);
+
+       return -EMSGSIZE;
+}
+
+static int br_fill_vlan_tinfo_range(struct sk_buff *skb,
+                                   struct net_bridge_vlan *vtbegin,
+                                   struct net_bridge_vlan *vtend)
+{
+       int err;
+
+       if (vtend && (vtend->vid - vtbegin->vid) > 0) {
+               /* add range to skb */
+               err = br_fill_vlan_tinfo(skb, vtbegin->vid,
+                                        vtbegin->tinfo.tunnel_id,
+                                        BRIDGE_VLAN_INFO_RANGE_BEGIN);
+               if (err)
+                       return err;
+
+               err = br_fill_vlan_tinfo(skb, vtend->vid,
+                                        vtend->tinfo.tunnel_id,
+                                        BRIDGE_VLAN_INFO_RANGE_END);
+               if (err)
+                       return err;
+       } else {
+               err = br_fill_vlan_tinfo(skb, vtbegin->vid,
+                                        vtbegin->tinfo.tunnel_id,
+                                        0);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+int br_fill_vlan_tunnel_info(struct sk_buff *skb,
+                            struct net_bridge_vlan_group *vg)
+{
+       struct net_bridge_vlan *vtbegin = NULL;
+       struct net_bridge_vlan *vtend = NULL;
+       struct net_bridge_vlan *v;
+       int err;
+
+       /* Fill vlan tunnel infos, compressing contiguous ranges */
+       list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
+               /* only a context, bridge vlan not activated */
+               if (!br_vlan_should_use(v))
+                       continue;
+
+               if (!v->tinfo.tunnel_dst)
+                       continue;
+
+               if (!vtbegin) {
+                       goto initvars;
+               } else if ((v->vid - vtend->vid) == 1 &&
+                           vlan_tunid_inrange(v, vtend)) {
+                       vtend = v;
+                       continue;
+               } else {
+                       err = br_fill_vlan_tinfo_range(skb, vtbegin, vtend);
+                       if (err)
+                               return err;
+               }
+initvars:
+               vtbegin = v;
+               vtend = v;
+       }
+
+       if (vtbegin) {
+               err = br_fill_vlan_tinfo_range(skb, vtbegin, vtend);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static const struct nla_policy vlan_tunnel_policy[IFLA_BRIDGE_VLAN_TUNNEL_MAX + 1] = {
+       [IFLA_BRIDGE_VLAN_TUNNEL_ID] = { .type = NLA_U32 },
+       [IFLA_BRIDGE_VLAN_TUNNEL_VID] = { .type = NLA_U16 },
+       [IFLA_BRIDGE_VLAN_TUNNEL_FLAGS] = { .type = NLA_U16 },
+};
+
+static int br_vlan_tunnel_info(struct net_bridge_port *p, int cmd,
+                              u16 vid, u32 tun_id)
+{
+       int err = 0;
+
+       if (!p)
+               return -EINVAL;
+
+       switch (cmd) {
+       case RTM_SETLINK:
+               err = nbp_vlan_tunnel_info_add(p, vid, tun_id);
+               break;
+       case RTM_DELLINK:
+               nbp_vlan_tunnel_info_delete(p, vid);
+               break;
+       }
+
+       return err;
+}
+
+int br_parse_vlan_tunnel_info(struct nlattr *attr,
+                             struct vtunnel_info *tinfo)
+{
+       struct nlattr *tb[IFLA_BRIDGE_VLAN_TUNNEL_MAX + 1];
+       u32 tun_id;
+       u16 vid, flags = 0;
+       int err;
+
+       memset(tinfo, 0, sizeof(*tinfo));
+
+       err = nla_parse_nested(tb, IFLA_BRIDGE_VLAN_TUNNEL_MAX,
+                              attr, vlan_tunnel_policy);
+       if (err < 0)
+               return err;
+
+       if (!tb[IFLA_BRIDGE_VLAN_TUNNEL_ID] ||
+           !tb[IFLA_BRIDGE_VLAN_TUNNEL_VID])
+               return -EINVAL;
+
+       tun_id = nla_get_u32(tb[IFLA_BRIDGE_VLAN_TUNNEL_ID]);
+       vid = nla_get_u16(tb[IFLA_BRIDGE_VLAN_TUNNEL_VID]);
+       if (vid >= VLAN_VID_MASK)
+               return -ERANGE;
+
+       if (tb[IFLA_BRIDGE_VLAN_TUNNEL_FLAGS])
+               flags = nla_get_u16(tb[IFLA_BRIDGE_VLAN_TUNNEL_FLAGS]);
+
+       tinfo->tunid = tun_id;
+       tinfo->vid = vid;
+       tinfo->flags = flags;
+
+       return 0;
+}
+
+int br_process_vlan_tunnel_info(struct net_bridge *br,
+                               struct net_bridge_port *p, int cmd,
+                               struct vtunnel_info *tinfo_curr,
+                               struct vtunnel_info *tinfo_last)
+{
+       int err;
+
+       if (tinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
+               if (tinfo_last->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN)
+                       return -EINVAL;
+               memcpy(tinfo_last, tinfo_curr, sizeof(struct vtunnel_info));
+       } else if (tinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END) {
+               int t, v;
+
+               if (!(tinfo_last->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN))
+                       return -EINVAL;
+               if ((tinfo_curr->vid - tinfo_last->vid) !=
+                   (tinfo_curr->tunid - tinfo_last->tunid))
+                       return -EINVAL;
+               t = tinfo_last->tunid;
+               for (v = tinfo_last->vid; v <= tinfo_curr->vid; v++) {
+                       err = br_vlan_tunnel_info(p, cmd, v, t);
+                       if (err)
+                               return err;
+                       t++;
+               }
+               memset(tinfo_last, 0, sizeof(struct vtunnel_info));
+               memset(tinfo_curr, 0, sizeof(struct vtunnel_info));
+       } else {
+               if (tinfo_last->flags)
+                       return -EINVAL;
+               err = br_vlan_tunnel_info(p, cmd, tinfo_curr->vid,
+                                         tinfo_curr->tunid);
+               if (err)
+                       return err;
+               memset(tinfo_last, 0, sizeof(struct vtunnel_info));
+               memset(tinfo_curr, 0, sizeof(struct vtunnel_info));
+       }
+
+       return 0;
+}
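
br_netlink_tunnel.c works in both directions: on dump, contiguous vid-to-tunnel-id mappings are compressed into one BRIDGE_VLAN_INFO_RANGE_BEGIN/RANGE_END pair; on set, br_process_vlan_tunnel_info() expands such a pair back, accepting it only when the vid delta equals the tunnel-id delta. A userspace sketch of that expansion step:

/* Userspace sketch of the RANGE_END branch of br_process_vlan_tunnel_info():
 * a BEGIN/END pair unrolls into one (vid, tunid) mapping per step.
 */
#include <stdio.h>

struct vtinfo { unsigned int tunid; unsigned short vid; };

static int expand_range(struct vtinfo last, struct vtinfo curr)
{
	unsigned int t = last.tunid;

	if (curr.vid - last.vid != curr.tunid - last.tunid)
		return -1;                       /* -EINVAL in the kernel */
	for (unsigned short v = last.vid; v <= curr.vid; v++, t++)
		printf("vid %u -> tunnel id %u\n", v, t);
	return 0;
}

int main(void)
{
	struct vtinfo begin = { 1000, 10 }, end = { 1004, 14 };

	return expand_range(begin, end);         /* 10..14 -> 1000..1004 */
}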
index 8ce621e8345c478700ee64b76e41a98b2bd81d16..2288fca7756c5103fc4e8420ad61a2f9e633c097 100644 (file)
@@ -91,6 +91,11 @@ struct br_vlan_stats {
        struct u64_stats_sync syncp;
 };
 
+struct br_tunnel_info {
+       __be64                  tunnel_id;
+       struct metadata_dst     *tunnel_dst;
+};
+
 /**
  * struct net_bridge_vlan - per-vlan entry
  *
@@ -113,6 +118,7 @@ struct br_vlan_stats {
  */
 struct net_bridge_vlan {
        struct rhash_head               vnode;
+       struct rhash_head               tnode;
        u16                             vid;
        u16                             flags;
        struct br_vlan_stats __percpu   *stats;
@@ -124,6 +130,9 @@ struct net_bridge_vlan {
                atomic_t                refcnt;
                struct net_bridge_vlan  *brvlan;
        };
+
+       struct br_tunnel_info           tinfo;
+
        struct list_head                vlist;
 
        struct rcu_head                 rcu;
@@ -145,24 +154,27 @@ struct net_bridge_vlan {
  */
 struct net_bridge_vlan_group {
        struct rhashtable               vlan_hash;
+       struct rhashtable               tunnel_hash;
        struct list_head                vlan_list;
        u16                             num_vlans;
        u16                             pvid;
 };
 
-struct net_bridge_fdb_entry
-{
+struct net_bridge_fdb_entry {
        struct hlist_node               hlist;
        struct net_bridge_port          *dst;
 
-       unsigned long                   updated;
-       unsigned long                   used;
        mac_addr                        addr;
        __u16                           vlan_id;
        unsigned char                   is_local:1,
                                        is_static:1,
                                        added_by_user:1,
                                        added_by_external_learn:1;
+
+       /* write-heavy members should not affect lookups */
+       unsigned long                   updated ____cacheline_aligned_in_smp;
+       unsigned long                   used;
+
        struct rcu_head                 rcu;
 };
 
@@ -177,6 +189,7 @@ struct net_bridge_port_group {
        struct timer_list               timer;
        struct br_ip                    addr;
        unsigned char                   flags;
+       unsigned char                   eth_addr[ETH_ALEN];
 };
 
 struct net_bridge_mdb_entry
@@ -201,12 +214,16 @@ struct net_bridge_mdb_htable
        u32                             ver;
 };
 
-struct net_bridge_port
-{
+struct net_bridge_port {
        struct net_bridge               *br;
        struct net_device               *dev;
        struct list_head                list;
 
+       unsigned long                   flags;
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+       struct net_bridge_vlan_group    __rcu *vlgrp;
+#endif
+
        /* STP */
        u8                              priority;
        u8                              state;
@@ -227,8 +244,6 @@ struct net_bridge_port
        struct kobject                  kobj;
        struct rcu_head                 rcu;
 
-       unsigned long                   flags;
-
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
        struct bridge_mcast_own_query   ip4_own_query;
 #if IS_ENABLED(CONFIG_IPV6)
@@ -248,9 +263,6 @@ struct net_bridge_port
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll                  *np;
 #endif
-#ifdef CONFIG_BRIDGE_VLAN_FILTERING
-       struct net_bridge_vlan_group    __rcu *vlgrp;
-#endif
 #ifdef CONFIG_NET_SWITCHDEV
        int                             offload_fwd_mark;
 #endif
@@ -272,14 +284,21 @@ static inline struct net_bridge_port *br_port_get_rtnl(const struct net_device *
                rtnl_dereference(dev->rx_handler_data) : NULL;
 }
 
-struct net_bridge
-{
+struct net_bridge {
        spinlock_t                      lock;
+       spinlock_t                      hash_lock;
        struct list_head                port_list;
        struct net_device               *dev;
-
        struct pcpu_sw_netstats         __percpu *stats;
-       spinlock_t                      hash_lock;
+       /* These fields are accessed on each packet */
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+       u8                              vlan_enabled;
+       u8                              vlan_stats_enabled;
+       __be16                          vlan_proto;
+       u16                             default_pvid;
+       struct net_bridge_vlan_group    __rcu *vlgrp;
+#endif
+
        struct hlist_head               hash[BR_HASH_SIZE];
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        union {
@@ -297,6 +316,9 @@ struct net_bridge
        bridge_id                       designated_root;
        bridge_id                       bridge_id;
        u32                             root_path_cost;
+       unsigned char                   topology_change;
+       unsigned char                   topology_change_detected;
+       u16                             root_port;
        unsigned long                   max_age;
        unsigned long                   hello_time;
        unsigned long                   forward_delay;
@@ -308,7 +330,6 @@ struct net_bridge
 
        u8                              group_addr[ETH_ALEN];
        bool                            group_addr_set;
-       u16                             root_port;
 
        enum {
                BR_NO_STP,              /* no spanning tree */
@@ -316,9 +337,6 @@ struct net_bridge
                BR_USER_STP,            /* new RSTP in userspace */
        } stp_enabled;
 
-       unsigned char                   topology_change;
-       unsigned char                   topology_change_detected;
-
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
        unsigned char                   multicast_router;
 
@@ -363,21 +381,13 @@ struct net_bridge
        struct timer_list               hello_timer;
        struct timer_list               tcn_timer;
        struct timer_list               topology_change_timer;
-       struct timer_list               gc_timer;
+       struct delayed_work             gc_work;
        struct kobject                  *ifobj;
        u32                             auto_cnt;
 
 #ifdef CONFIG_NET_SWITCHDEV
        int offload_fwd_mark;
 #endif
-
-#ifdef CONFIG_BRIDGE_VLAN_FILTERING
-       struct net_bridge_vlan_group    __rcu *vlgrp;
-       u8                              vlan_enabled;
-       u8                              vlan_stats_enabled;
-       __be16                          vlan_proto;
-       u16                             default_pvid;
-#endif
 };
 
 struct br_input_skb_cb {
@@ -494,11 +504,12 @@ void br_fdb_find_delete_local(struct net_bridge *br,
                              const unsigned char *addr, u16 vid);
 void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr);
 void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
-void br_fdb_cleanup(unsigned long arg);
+void br_fdb_cleanup(struct work_struct *work);
 void br_fdb_delete_by_port(struct net_bridge *br,
                           const struct net_bridge_port *p, u16 vid, int do_all);
-struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
-                                         const unsigned char *addr, __u16 vid);
+struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
+                                            const unsigned char *addr,
+                                            __u16 vid);
 int br_fdb_test_addr(struct net_device *dev, unsigned char *addr);
 int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long count,
                   unsigned long off);
@@ -520,6 +531,15 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
 int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
                              const unsigned char *addr, u16 vid);
 
+static inline bool br_hash_lock_held(struct net_bridge *br)
+{
+#ifdef CONFIG_LOCKDEP
+       return lockdep_is_held(&br->hash_lock);
+#else
+       return true;
+#endif
+}
+
 /* br_forward.c */
 enum br_pkt_type {
        BR_PKT_UNICAST,
@@ -599,7 +619,7 @@ void br_multicast_free_pg(struct rcu_head *head);
 struct net_bridge_port_group *
 br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
                            struct net_bridge_port_group __rcu *next,
-                           unsigned char flags);
+                           unsigned char flags, const unsigned char *src);
 void br_mdb_init(void);
 void br_mdb_uninit(void);
 void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
@@ -764,6 +784,7 @@ bool br_allowed_egress(struct net_bridge_vlan_group *vg,
                       const struct sk_buff *skb);
 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid);
 struct sk_buff *br_handle_vlan(struct net_bridge *br,
+                              const struct net_bridge_port *port,
                               struct net_bridge_vlan_group *vg,
                               struct sk_buff *skb);
 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
@@ -863,6 +884,7 @@ static inline bool br_should_learn(struct net_bridge_port *p,
 }
 
 static inline struct sk_buff *br_handle_vlan(struct net_bridge *br,
+                                            const struct net_bridge_port *port,
                                             struct net_bridge_vlan_group *vg,
                                             struct sk_buff *skb)
 {
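
Besides the new tunnel fields, the br_private.h hunks regroup hot data: the per-packet vlan state moves to the front of struct net_bridge, and the write-heavy updated/used members of the fdb entry are pushed onto their own cache line so they no longer dirty the line that lookups read. A compilable sketch of that split, assuming a 64-byte line (____cacheline_aligned_in_smp uses the arch's real value):

/* Sketch of the cache-line split applied to net_bridge_fdb_entry above. */
#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

struct fdb_entry {
	unsigned char addr[6];               /* read-mostly lookup key */
	unsigned short vlan_id;

	alignas(64) unsigned long updated;   /* write-heavy, own line  */
	unsigned long used;
};

int main(void)
{
	printf("addr    @ %zu\n", offsetof(struct fdb_entry, addr));    /* 0  */
	printf("updated @ %zu\n", offsetof(struct fdb_entry, updated)); /* 64 */
	return 0;
}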
diff --git a/net/bridge/br_private_tunnel.h b/net/bridge/br_private_tunnel.h
new file mode 100644 (file)
index 0000000..4a447a3
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ *     Bridge per vlan tunnels
+ *
+ *     Authors:
+ *     Roopa Prabhu            <roopa@cumulusnetworks.com>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _BR_PRIVATE_TUNNEL_H
+#define _BR_PRIVATE_TUNNEL_H
+
+struct vtunnel_info {
+       u32     tunid;
+       u16     vid;
+       u16     flags;
+};
+
+/* br_netlink_tunnel.c */
+int br_parse_vlan_tunnel_info(struct nlattr *attr,
+                             struct vtunnel_info *tinfo);
+int br_process_vlan_tunnel_info(struct net_bridge *br,
+                               struct net_bridge_port *p,
+                               int cmd,
+                               struct vtunnel_info *tinfo_curr,
+                               struct vtunnel_info *tinfo_last);
+int br_get_vlan_tunnel_info_size(struct net_bridge_vlan_group *vg);
+int br_fill_vlan_tunnel_info(struct sk_buff *skb,
+                            struct net_bridge_vlan_group *vg);
+
+#ifdef CONFIG_BRIDGE_VLAN_FILTERING
+/* br_vlan_tunnel.c */
+int vlan_tunnel_init(struct net_bridge_vlan_group *vg);
+void vlan_tunnel_deinit(struct net_bridge_vlan_group *vg);
+int nbp_vlan_tunnel_info_delete(struct net_bridge_port *port, u16 vid);
+int nbp_vlan_tunnel_info_add(struct net_bridge_port *port, u16 vid, u32 tun_id);
+void nbp_vlan_tunnel_info_flush(struct net_bridge_port *port);
+void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
+                         struct net_bridge_vlan *vlan);
+int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
+                                 struct net_bridge_port *p,
+                                 struct net_bridge_vlan_group *vg);
+int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
+                                struct net_bridge_vlan *vlan);
+#else
+static inline int vlan_tunnel_init(struct net_bridge_vlan_group *vg)
+{
+       return 0;
+}
+
+static inline int nbp_vlan_tunnel_info_delete(struct net_bridge_port *port,
+                                             u16 vid)
+{
+       return 0;
+}
+
+static inline int nbp_vlan_tunnel_info_add(struct net_bridge_port *port,
+                                          u16 vid, u32 tun_id)
+{
+       return 0;
+}
+
+static inline void nbp_vlan_tunnel_info_flush(struct net_bridge_port *port)
+{
+}
+
+static inline void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
+                                       struct net_bridge_vlan *vlan)
+{
+}
+
+static inline int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
+                                               struct net_bridge_port *p,
+                                               struct net_bridge_vlan_group *vg)
+{
+       return 0;
+}
+#endif
+
+#endif
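
The #else branch above supplies no-op inline stubs so callers stay free of #ifdef CONFIG_BRIDGE_VLAN_FILTERING. The idiom in miniature, with a hypothetical config toggle:

/* Minimal sketch of the stub idiom used above: a no-op inline takes over
 * when the feature is compiled out, and the call site never changes.
 */
#include <stdio.h>

/* #define CONFIG_BRIDGE_VLAN_FILTERING 1 */    /* toggle to compare */

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
static int tunnel_init(void) { puts("real init"); return 0; }
#else
static inline int tunnel_init(void) { return 0; }   /* compiled out */
#endif

int main(void)
{
	return tunnel_init();   /* same call either way */
}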
index 71fd1a4e63cc84ec047f56adf5fe24bb971522fa..8f56c2d1f1a7081d869d82fe8c3e2607eaf53604 100644 (file)
@@ -602,7 +602,7 @@ int br_set_ageing_time(struct net_bridge *br, clock_t ageing_time)
        br->ageing_time = t;
        spin_unlock_bh(&br->lock);
 
-       mod_timer(&br->gc_timer, jiffies);
+       mod_delayed_work(system_long_wq, &br->gc_work, 0);
 
        return 0;
 }
index 6c1e214111250ea69199d40080f96af730d78934..08341d2aa9c946d7bdd6e0d599e31ba96557a290 100644 (file)
@@ -57,7 +57,7 @@ void br_stp_enable_bridge(struct net_bridge *br)
        spin_lock_bh(&br->lock);
        if (br->stp_enabled == BR_KERNEL_STP)
                mod_timer(&br->hello_timer, jiffies + br->hello_time);
-       mod_timer(&br->gc_timer, jiffies + HZ/10);
+       mod_delayed_work(system_long_wq, &br->gc_work, HZ / 10);
 
        br_config_bpdu_generation(br);
 
@@ -88,7 +88,7 @@ void br_stp_disable_bridge(struct net_bridge *br)
        del_timer_sync(&br->hello_timer);
        del_timer_sync(&br->topology_change_timer);
        del_timer_sync(&br->tcn_timer);
-       del_timer_sync(&br->gc_timer);
+       cancel_delayed_work_sync(&br->gc_work);
 }
 
 /* called under bridge lock */
index 7ddb38e0a06ea2d67f156a25eb50277a9cfc3898..c98b3e5c140a5f30a28fd748408cf5e949a032b6 100644 (file)
@@ -153,8 +153,6 @@ void br_stp_timer_init(struct net_bridge *br)
        setup_timer(&br->topology_change_timer,
                      br_topology_change_timer_expired,
                      (unsigned long) br);
-
-       setup_timer(&br->gc_timer, br_fdb_cleanup, (unsigned long) br);
 }
 
 void br_stp_port_timer_init(struct net_bridge_port *p)
index a18148213b08dc22f8d8572607ec3a5c261340d4..0f4034934d56f707363ff5906ecabffbbc8d517e 100644 (file)
@@ -263,7 +263,7 @@ static ssize_t gc_timer_show(struct device *d, struct device_attribute *attr,
                             char *buf)
 {
        struct net_bridge *br = to_bridge(d);
-       return sprintf(buf, "%ld\n", br_timer_value(&br->gc_timer));
+       return sprintf(buf, "%ld\n", br_timer_value(&br->gc_work.timer));
 }
 static DEVICE_ATTR_RO(gc_timer);
 
index 8bd569695e76fb76112bd5c17fb1a8027c6590a5..05e8946ccc03554b8c842bd742bf6bed982f7ea6 100644 (file)
@@ -188,6 +188,7 @@ static BRPORT_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
                   store_multicast_router);
 
 BRPORT_ATTR_FLAG(multicast_fast_leave, BR_MULTICAST_FAST_LEAVE);
+BRPORT_ATTR_FLAG(multicast_to_unicast, BR_MULTICAST_TO_UNICAST);
 #endif
 
 static const struct brport_attribute *brport_attrs[] = {
@@ -214,6 +215,7 @@ static const struct brport_attribute *brport_attrs[] = {
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
        &brport_attr_multicast_router,
        &brport_attr_multicast_fast_leave,
+       &brport_attr_multicast_to_unicast,
 #endif
        &brport_attr_proxyarp,
        &brport_attr_proxyarp_wifi,
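
multicast_to_unicast is exposed per port alongside multicast_fast_leave. A hypothetical usage sketch from userspace; the brport directory is the standard location for BRPORT_ATTR files, and the port name eth0 is an assumption:

/* Hypothetical usage sketch: enabling the new per-port knob. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/net/eth0/brport/multicast_to_unicast", "w");

	if (!f)
		return 1;       /* port not enslaved, or an older kernel */
	fputs("1", f);
	fclose(f);
	return 0;
}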
index b6de4f45716184d04f84886f8fdb0481c31012ad..62e68c0dc68740bc1364204902ea7e97e44a7e92 100644 (file)
@@ -5,6 +5,7 @@
 #include <net/switchdev.h>
 
 #include "br_private.h"
+#include "br_private_tunnel.h"
 
 static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
                              const void *ptr)
@@ -310,6 +311,7 @@ static int __vlan_del(struct net_bridge_vlan *v)
        }
 
        if (masterv != v) {
+               vlan_tunnel_info_del(vg, v);
                rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
                                       br_vlan_rht_params);
                __vlan_del_list(v);
@@ -325,6 +327,7 @@ static void __vlan_group_free(struct net_bridge_vlan_group *vg)
 {
        WARN_ON(!list_empty(&vg->vlan_list));
        rhashtable_destroy(&vg->vlan_hash);
+       vlan_tunnel_deinit(vg);
        kfree(vg);
 }
 
@@ -338,6 +341,7 @@ static void __vlan_flush(struct net_bridge_vlan_group *vg)
 }
 
 struct sk_buff *br_handle_vlan(struct net_bridge *br,
+                              const struct net_bridge_port *p,
                               struct net_bridge_vlan_group *vg,
                               struct sk_buff *skb)
 {
@@ -378,6 +382,12 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
 
        if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
                skb->vlan_tci = 0;
+
+       if (p && (p->flags & BR_VLAN_TUNNEL) &&
+           br_handle_egress_vlan_tunnel(skb, v)) {
+               kfree_skb(skb);
+               return NULL;
+       }
 out:
        return skb;
 }
@@ -613,6 +623,8 @@ int br_vlan_delete(struct net_bridge *br, u16 vid)
        br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
        br_fdb_delete_by_port(br, NULL, vid, 0);
 
+       vlan_tunnel_info_del(vg, v);
+
        return __vlan_del(v);
 }
 
@@ -918,6 +930,9 @@ int br_vlan_init(struct net_bridge *br)
        ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
        if (ret)
                goto err_rhtbl;
+       ret = vlan_tunnel_init(vg);
+       if (ret)
+               goto err_tunnel_init;
        INIT_LIST_HEAD(&vg->vlan_list);
        br->vlan_proto = htons(ETH_P_8021Q);
        br->default_pvid = 1;
@@ -932,6 +947,8 @@ out:
        return ret;
 
 err_vlan_add:
+       vlan_tunnel_deinit(vg);
+err_tunnel_init:
        rhashtable_destroy(&vg->vlan_hash);
 err_rhtbl:
        kfree(vg);
@@ -961,6 +978,9 @@ int nbp_vlan_init(struct net_bridge_port *p)
        ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
        if (ret)
                goto err_rhtbl;
+       ret = vlan_tunnel_init(vg);
+       if (ret)
+               goto err_tunnel_init;
        INIT_LIST_HEAD(&vg->vlan_list);
        rcu_assign_pointer(p->vlgrp, vg);
        if (p->br->default_pvid) {
@@ -976,8 +996,10 @@ out:
 err_vlan_add:
        RCU_INIT_POINTER(p->vlgrp, NULL);
        synchronize_rcu();
-       rhashtable_destroy(&vg->vlan_hash);
+       vlan_tunnel_deinit(vg);
 err_vlan_enabled:
+err_tunnel_init:
+       rhashtable_destroy(&vg->vlan_hash);
 err_rhtbl:
        kfree(vg);
 
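
Note how the error labels in br_vlan_init()/nbp_vlan_init() are reordered so that each failure point unwinds exactly the steps that succeeded, with rhashtable_destroy() now reached from both the tunnel-init and vlan-add failure paths. The goto-unwind ordering in a standalone sketch:

/* Standalone sketch of the unwind ordering the hunks above establish:
 * labels undo completed steps in reverse order of initialization.
 */
#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("init %s\n", name);
	return fail ? -1 : 0;
}

static int init_all(int fail_at)
{
	int ret;

	ret = step("vlan_hash", fail_at == 1);
	if (ret)
		goto err_rhtbl;
	ret = step("tunnel_hash", fail_at == 2);
	if (ret)
		goto err_tunnel_init;
	ret = step("default vlan", fail_at == 3);
	if (ret)
		goto err_vlan_add;
	return 0;

err_vlan_add:
	puts("undo tunnel_hash");   /* vlan_tunnel_deinit()  */
err_tunnel_init:
	puts("undo vlan_hash");     /* rhashtable_destroy()  */
err_rhtbl:
	return ret;
}

int main(void)
{
	init_all(3);                /* exercise the full unwind */
	return 0;
}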
diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c
new file mode 100644 (file)
index 0000000..6d2c4ee
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+ *     Bridge per vlan tunnel port dst_metadata handling code
+ *
+ *     Authors:
+ *     Roopa Prabhu            <roopa@cumulusnetworks.com>
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <net/switchdev.h>
+#include <net/dst_metadata.h>
+
+#include "br_private.h"
+#include "br_private_tunnel.h"
+
+static inline int br_vlan_tunid_cmp(struct rhashtable_compare_arg *arg,
+                                   const void *ptr)
+{
+       const struct net_bridge_vlan *vle = ptr;
+       __be64 tunid = *(__be64 *)arg->key;
+
+       return vle->tinfo.tunnel_id != tunid;
+}
+
+static const struct rhashtable_params br_vlan_tunnel_rht_params = {
+       .head_offset = offsetof(struct net_bridge_vlan, tnode),
+       .key_offset = offsetof(struct net_bridge_vlan, tinfo.tunnel_id),
+       .key_len = sizeof(__be64),
+       .nelem_hint = 3,
+       .locks_mul = 1,
+       .obj_cmpfn = br_vlan_tunid_cmp,
+       .automatic_shrinking = true,
+};
+
+static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl,
+                                                    u64 tunnel_id)
+{
+       return rhashtable_lookup_fast(tbl, &tunnel_id,
+                                     br_vlan_tunnel_rht_params);
+}
+
+void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
+                         struct net_bridge_vlan *vlan)
+{
+       if (!vlan->tinfo.tunnel_dst)
+               return;
+       rhashtable_remove_fast(&vg->tunnel_hash, &vlan->tnode,
+                              br_vlan_tunnel_rht_params);
+       vlan->tinfo.tunnel_id = 0;
+       dst_release(&vlan->tinfo.tunnel_dst->dst);
+       vlan->tinfo.tunnel_dst = NULL;
+}
+
+static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
+                                 struct net_bridge_vlan *vlan, u32 tun_id)
+{
+       struct metadata_dst *metadata = NULL;
+       __be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id));
+       int err;
+
+       if (vlan->tinfo.tunnel_dst)
+               return -EEXIST;
+
+       metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
+                                   key, 0);
+       if (!metadata)
+               return -EINVAL;
+
+       metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE;
+       vlan->tinfo.tunnel_dst = metadata;
+       vlan->tinfo.tunnel_id = key;
+
+       err = rhashtable_lookup_insert_fast(&vg->tunnel_hash, &vlan->tnode,
+                                           br_vlan_tunnel_rht_params);
+       if (err)
+               goto out;
+
+       return 0;
+out:
+       dst_release(&vlan->tinfo.tunnel_dst->dst);
+       vlan->tinfo.tunnel_dst = NULL;
+       vlan->tinfo.tunnel_id = 0;
+
+       return err;
+}
+
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
+int nbp_vlan_tunnel_info_add(struct net_bridge_port *port, u16 vid, u32 tun_id)
+{
+       struct net_bridge_vlan_group *vg;
+       struct net_bridge_vlan *vlan;
+
+       ASSERT_RTNL();
+
+       vg = nbp_vlan_group(port);
+       vlan = br_vlan_find(vg, vid);
+       if (!vlan)
+               return -EINVAL;
+
+       return __vlan_tunnel_info_add(vg, vlan, tun_id);
+}
+
+/* Must be protected by RTNL.
+ * Must be called with vid in range from 1 to 4094 inclusive.
+ */
+int nbp_vlan_tunnel_info_delete(struct net_bridge_port *port, u16 vid)
+{
+       struct net_bridge_vlan_group *vg;
+       struct net_bridge_vlan *v;
+
+       ASSERT_RTNL();
+
+       vg = nbp_vlan_group(port);
+       v = br_vlan_find(vg, vid);
+       if (!v)
+               return -ENOENT;
+
+       vlan_tunnel_info_del(vg, v);
+
+       return 0;
+}
+
+static void __vlan_tunnel_info_flush(struct net_bridge_vlan_group *vg)
+{
+       struct net_bridge_vlan *vlan, *tmp;
+
+       list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
+               vlan_tunnel_info_del(vg, vlan);
+}
+
+void nbp_vlan_tunnel_info_flush(struct net_bridge_port *port)
+{
+       struct net_bridge_vlan_group *vg;
+
+       ASSERT_RTNL();
+
+       vg = nbp_vlan_group(port);
+       __vlan_tunnel_info_flush(vg);
+}
+
+int vlan_tunnel_init(struct net_bridge_vlan_group *vg)
+{
+       return rhashtable_init(&vg->tunnel_hash, &br_vlan_tunnel_rht_params);
+}
+
+void vlan_tunnel_deinit(struct net_bridge_vlan_group *vg)
+{
+       rhashtable_destroy(&vg->tunnel_hash);
+}
+
+int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
+                                 struct net_bridge_port *p,
+                                 struct net_bridge_vlan_group *vg)
+{
+       struct ip_tunnel_info *tinfo = skb_tunnel_info(skb);
+       struct net_bridge_vlan *vlan;
+
+       if (!vg || !tinfo)
+               return 0;
+
+       /* if already tagged, ignore */
+       if (skb_vlan_tagged(skb))
+               return 0;
+
+       /* lookup vid, given tunnel id */
+       vlan = br_vlan_tunnel_lookup(&vg->tunnel_hash, tinfo->key.tun_id);
+       if (!vlan)
+               return 0;
+
+       skb_dst_drop(skb);
+
+       __vlan_hwaccel_put_tag(skb, p->br->vlan_proto, vlan->vid);
+
+       return 0;
+}
+
+int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
+                                struct net_bridge_vlan *vlan)
+{
+       int err;
+
+       if (!vlan || !vlan->tinfo.tunnel_id)
+               return 0;
+
+       if (unlikely(!skb_vlan_tag_present(skb)))
+               return 0;
+
+       skb_dst_drop(skb);
+       err = skb_vlan_pop(skb);
+       if (err)
+               return err;
+
+       skb_dst_set(skb, dst_clone(&vlan->tinfo.tunnel_dst->dst));
+
+       return 0;
+}
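
The two datapath hooks above are the heart of the feature: ingress maps an incoming tunnel id to a vid and tags the frame via __vlan_hwaccel_put_tag(), while egress pops the tag and attaches the per-vlan metadata dst for the tunnel driver. A toy model of the ingress lookup, with a linear scan standing in for the tunnel_hash rhashtable:

/* Toy userspace model of br_handle_ingress_vlan_tunnel()'s lookup. */
#include <stdint.h>
#include <stdio.h>

struct vlan_tunnel { uint16_t vid; uint64_t tunnel_id; };

static const struct vlan_tunnel map[] = {
	{ 10, 1000 }, { 11, 1001 }, { 20, 2000 },
};

static int ingress_vid(uint64_t tun_id)          /* tunnel id -> vid */
{
	for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].tunnel_id == tun_id)
			return map[i].vid;
	return -1;                                   /* leave frame as-is */
}

int main(void)
{
	printf("tunnel 1001 -> vid %d\n", ingress_vid(1001));   /* 11 */
	return 0;
}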
index 517e78befcb2688a76e307ba7e7064e334700cb1..61a9f1be1263afc95dfbc43cdffe5d0a6e8e7246 100644 (file)
@@ -105,6 +105,7 @@ static struct xt_match ebt_limit_mt_reg __read_mostly = {
        .match          = ebt_limit_mt,
        .checkentry     = ebt_limit_mt_check,
        .matchsize      = sizeof(struct ebt_limit_info),
+       .usersize       = offsetof(struct ebt_limit_info, prev),
 #ifdef CONFIG_COMPAT
        .compatsize     = sizeof(struct ebt_compat_limit_info),
 #endif
index e88bd4827ac1add767973298760895d851aa4b10..98b9c8e8615ebc6e2ddefd1885a01af3ef58781b 100644 (file)
@@ -78,7 +78,7 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
        unsigned int bitmask;
 
        /* FIXME: Disabled from containers until syslog ns is supported */
-       if (!net_eq(net, &init_net))
+       if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns)
                return;
 
        spin_lock_bh(&ebt_log_lock);
index 537e3d506fc237f673af53a50109fea6935872ff..79b69917f5210c0065ab5c9c37a497ede7cc273b 100644 (file)
@@ -1346,56 +1346,72 @@ static int update_counters(struct net *net, const void __user *user,
                                hlp.num_counters, user, len);
 }
 
-static inline int ebt_make_matchname(const struct ebt_entry_match *m,
-                                    const char *base, char __user *ubase)
+static inline int ebt_obj_to_user(char __user *um, const char *_name,
+                                 const char *data, int entrysize,
+                                 int usersize, int datasize)
 {
-       char __user *hlp = ubase + ((char *)m - base);
-       char name[EBT_FUNCTION_MAXNAMELEN] = {};
+       char name[EBT_FUNCTION_MAXNAMELEN] = {0};
 
        /* ebtables expects 32 bytes long names but xt_match names are 29 bytes
         * long. Copy 29 bytes and fill remaining bytes with zeroes.
         */
-       strlcpy(name, m->u.match->name, sizeof(name));
-       if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
+       strlcpy(name, _name, sizeof(name));
+       if (copy_to_user(um, name, EBT_FUNCTION_MAXNAMELEN) ||
+           put_user(datasize, (int __user *)(um + EBT_FUNCTION_MAXNAMELEN)) ||
+           xt_data_to_user(um + entrysize, data, usersize, datasize))
                return -EFAULT;
+
        return 0;
 }
 
-static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
-                                      const char *base, char __user *ubase)
+static inline int ebt_match_to_user(const struct ebt_entry_match *m,
+                                   const char *base, char __user *ubase)
 {
-       char __user *hlp = ubase + ((char *)w - base);
-       char name[EBT_FUNCTION_MAXNAMELEN] = {};
+       return ebt_obj_to_user(ubase + ((char *)m - base),
+                              m->u.match->name, m->data, sizeof(*m),
+                              m->u.match->usersize, m->match_size);
+}
 
-       strlcpy(name, w->u.watcher->name, sizeof(name));
-       if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
-               return -EFAULT;
-       return 0;
+static inline int ebt_watcher_to_user(const struct ebt_entry_watcher *w,
+                                     const char *base, char __user *ubase)
+{
+       return ebt_obj_to_user(ubase + ((char *)w - base),
+                              w->u.watcher->name, w->data, sizeof(*w),
+                              w->u.watcher->usersize, w->watcher_size);
 }
 
-static inline int ebt_make_names(struct ebt_entry *e, const char *base,
-                                char __user *ubase)
+static inline int ebt_entry_to_user(struct ebt_entry *e, const char *base,
+                                   char __user *ubase)
 {
        int ret;
        char __user *hlp;
        const struct ebt_entry_target *t;
-       char name[EBT_FUNCTION_MAXNAMELEN] = {};
 
-       if (e->bitmask == 0)
+       if (e->bitmask == 0) {
+               /* special case !EBT_ENTRY_OR_ENTRIES */
+               if (copy_to_user(ubase + ((char *)e - base), e,
+                                sizeof(struct ebt_entries)))
+                       return -EFAULT;
                return 0;
+       }
+
+       if (copy_to_user(ubase + ((char *)e - base), e, sizeof(*e)))
+               return -EFAULT;
 
        hlp = ubase + (((char *)e + e->target_offset) - base);
        t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
 
-       ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
+       ret = EBT_MATCH_ITERATE(e, ebt_match_to_user, base, ubase);
        if (ret != 0)
                return ret;
-       ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
+       ret = EBT_WATCHER_ITERATE(e, ebt_watcher_to_user, base, ubase);
        if (ret != 0)
                return ret;
-       strlcpy(name, t->u.target->name, sizeof(name));
-       if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
-               return -EFAULT;
+       ret = ebt_obj_to_user(hlp, t->u.target->name, t->data, sizeof(*t),
+                             t->u.target->usersize, t->target_size);
+       if (ret != 0)
+               return ret;
+
        return 0;
 }
 
@@ -1475,13 +1491,9 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
        if (ret)
                return ret;
 
-       if (copy_to_user(tmp.entries, entries, entries_size)) {
-               BUGPRINT("Couldn't copy entries to userspace\n");
-               return -EFAULT;
-       }
        /* set the match/watcher/target names right */
        return EBT_ENTRY_ITERATE(entries, entries_size,
-          ebt_make_names, entries, tmp.entries);
+          ebt_entry_to_user, entries, tmp.entries);
 }
 
 static int do_ebt_set_ctl(struct sock *sk,
@@ -1630,8 +1642,10 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
        if (match->compat_to_user) {
                if (match->compat_to_user(cm->data, m->data))
                        return -EFAULT;
-       } else if (copy_to_user(cm->data, m->data, msize))
+       } else {
+               if (xt_data_to_user(cm->data, m->data, match->usersize, msize))
                        return -EFAULT;
+       }
 
        *size -= ebt_compat_entry_padsize() + off;
        *dstptr = cm->data;
@@ -1657,8 +1671,10 @@ static int compat_target_to_user(struct ebt_entry_target *t,
        if (target->compat_to_user) {
                if (target->compat_to_user(cm->data, t->data))
                        return -EFAULT;
-       } else if (copy_to_user(cm->data, t->data, tsize))
-               return -EFAULT;
+       } else {
+               if (xt_data_to_user(cm->data, t->data, target->usersize, tsize))
+                       return -EFAULT;
+       }
 
        *size -= ebt_compat_entry_padsize() + off;
        *dstptr = cm->data;
index 3408ed51b611c9a7ff8d0575580fd972e2801a7a..1816fc9f1ee779f85874e886a9ba6db721c60680 100644 (file)
@@ -44,7 +44,6 @@ enum caif_states {
 
 struct chnl_net {
        struct cflayer chnl;
-       struct net_device_stats stats;
        struct caif_connect_request conn_req;
        struct list_head list_field;
        struct net_device *netdev;
index 96c544b05b15e3287cff8905b4b6007af69322af..ba3ac722714dfda35aa766592a703a51caac80f5 100644 (file)
@@ -90,11 +90,11 @@ int get_compat_msghdr(struct msghdr *kmsg,
 #define CMSG_COMPAT_ALIGN(len) ALIGN((len), sizeof(s32))
 
 #define CMSG_COMPAT_DATA(cmsg)                         \
-       ((void __user *)((char __user *)(cmsg) + CMSG_COMPAT_ALIGN(sizeof(struct compat_cmsghdr))))
+       ((void __user *)((char __user *)(cmsg) + sizeof(struct compat_cmsghdr)))
 #define CMSG_COMPAT_SPACE(len)                         \
-       (CMSG_COMPAT_ALIGN(sizeof(struct compat_cmsghdr)) + CMSG_COMPAT_ALIGN(len))
+       (sizeof(struct compat_cmsghdr) + CMSG_COMPAT_ALIGN(len))
 #define CMSG_COMPAT_LEN(len)                           \
-       (CMSG_COMPAT_ALIGN(sizeof(struct compat_cmsghdr)) + (len))
+       (sizeof(struct compat_cmsghdr) + (len))
 
 #define CMSG_COMPAT_FIRSTHDR(msg)                      \
        (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ?     \
@@ -130,6 +130,9 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
        __kernel_size_t kcmlen, tmp;
        int err = -EFAULT;
 
+       BUILD_BUG_ON(sizeof(struct compat_cmsghdr) !=
+                    CMSG_COMPAT_ALIGN(sizeof(struct compat_cmsghdr)));
+
        kcmlen = 0;
        kcmsg_base = kcmsg = (struct cmsghdr *)stackbuf;
        ucmsg = CMSG_COMPAT_FIRSTHDR(kmsg);
@@ -141,8 +144,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
                if (!CMSG_COMPAT_OK(ucmlen, ucmsg, kmsg))
                        return -EINVAL;
 
-               tmp = ((ucmlen - CMSG_COMPAT_ALIGN(sizeof(*ucmsg))) +
-                      CMSG_ALIGN(sizeof(struct cmsghdr)));
+               tmp = ((ucmlen - sizeof(*ucmsg)) + sizeof(struct cmsghdr));
                tmp = CMSG_ALIGN(tmp);
                kcmlen += tmp;
                ucmsg = cmsg_compat_nxthdr(kmsg, ucmsg, ucmlen);
@@ -168,8 +170,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
                        goto Efault;
                if (!CMSG_COMPAT_OK(ucmlen, ucmsg, kmsg))
                        goto Einval;
-               tmp = ((ucmlen - CMSG_COMPAT_ALIGN(sizeof(*ucmsg))) +
-                      CMSG_ALIGN(sizeof(struct cmsghdr)));
+               tmp = ((ucmlen - sizeof(*ucmsg)) + sizeof(struct cmsghdr));
                if ((char *)kcmsg_base + kcmlen - (char *)kcmsg < CMSG_ALIGN(tmp))
                        goto Einval;
                kcmsg->cmsg_len = tmp;
@@ -178,7 +179,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
                    __get_user(kcmsg->cmsg_type, &ucmsg->cmsg_type) ||
                    copy_from_user(CMSG_DATA(kcmsg),
                                   CMSG_COMPAT_DATA(ucmsg),
-                                  (ucmlen - CMSG_COMPAT_ALIGN(sizeof(*ucmsg)))))
+                                  (ucmlen - sizeof(*ucmsg))))
                        goto Efault;
 
                /* Advance. */
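The CMSG_COMPAT_* simplifications above rely on sizeof(struct compat_cmsghdr) already being a multiple of the compat alignment, which the new BUILD_BUG_ON pins down at compile time. A standalone model of that invariant (the field types mirror the compat layout; COMPAT_ALIGN plays the role of CMSG_COMPAT_ALIGN):

#include <stdint.h>
#include <stdio.h>

struct compat_cmsghdr_model {
	uint32_t cmsg_len;	/* compat_size_t */
	int32_t  cmsg_level;
	int32_t  cmsg_type;
};

#define COMPAT_ALIGN(len) \
	(((len) + sizeof(int32_t) - 1) & ~(sizeof(int32_t) - 1))

/* if this holds, COMPAT_ALIGN(sizeof(hdr)) == sizeof(hdr) and the
 * alignment step can be dropped, as the hunk above does */
_Static_assert(sizeof(struct compat_cmsghdr_model) ==
	       COMPAT_ALIGN(sizeof(struct compat_cmsghdr_model)),
	       "compat cmsghdr must be self-aligned");

int main(void)
{
	printf("%zu\n", sizeof(struct compat_cmsghdr_model));	/* 12 */
	return 0;
}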
index f6761b6e3b29bc4c645585bfdcb38ca94bdace34..79f9479e965812c635b5337ff89d393a7176d9ab 100644 (file)
@@ -28,3 +28,4 @@ obj-$(CONFIG_LWTUNNEL_BPF) += lwt_bpf.o
 obj-$(CONFIG_DST_CACHE) += dst_cache.o
 obj-$(CONFIG_HWBM) += hwbm.o
 obj-$(CONFIG_NET_DEVLINK) += devlink.o
+obj-$(CONFIG_GRO_CELLS) += gro_cells.o
index 29101c98399f40b6b8e42c31a255d8f1fb6bd7a1..05d19c6acf9460e072195e185b64e19088de13da 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *     NET3    Protocol independent device support routines.
+ *      NET3    Protocol independent device support routines.
  *
  *             This program is free software; you can redistribute it and/or
  *             modify it under the terms of the GNU General Public License
@@ -7,7 +7,7 @@
  *             2 of the License, or (at your option) any later version.
  *
  *     Derived from the non IP parts of dev.c 1.0.19
- *             Authors:        Ross Biro
+ *              Authors:       Ross Biro
  *                             Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *                             Mark Evans, <evansmp@uhura.aston.ac.uk>
  *
@@ -21,9 +21,9 @@
  *
  *     Changes:
  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
- *                                     to 2 if register_netdev gets called
- *                                     before net_dev_init & also removed a
- *                                     few lines of code in the process.
+ *                                      to 2 if register_netdev gets called
+ *                                      before net_dev_init & also removed a
+ *                                      few lines of code in the process.
  *             Alan Cox        :       device private ioctl copies fields back.
  *             Alan Cox        :       Transmit queue code does relevant
  *                                     stunts to keep the queue safe.
@@ -36,7 +36,7 @@
  *             Alan Cox        :       100 backlog just doesn't cut it when
  *                                     you start doing multicast video 8)
  *             Alan Cox        :       Rewrote net_bh and list manager.
- *             Alan Cox        :       Fix ETH_P_ALL echoback lengths.
+ *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
  *             Alan Cox        :       Took out transmit every packet pass
  *                                     Saved a few bytes in the ioctl handler
  *             Alan Cox        :       Network driver sets packet type before
@@ -46,7 +46,7 @@
  *             Richard Kooijman:       Timestamp fixes.
  *             Alan Cox        :       Wrong field in SIOCGIFDSTADDR
  *             Alan Cox        :       Device lock protection.
- *             Alan Cox        :       Fixed nasty side effect of device close
+ *              Alan Cox        :       Fixed nasty side effect of device close
  *                                     changes.
  *             Rudi Cilibrasi  :       Pass the right thing to
  *                                     set_mac_address()
@@ -67,8 +67,8 @@
  *     Paul Rusty Russell      :       SIOCSIFNAME
  *              Pekka Riikonen  :      Netdev boot-time settings code
  *              Andrew Morton   :       Make unregister_netdevice wait
- *                                     indefinitely on dev->refcnt
- *             J Hadi Salim    :       - Backlog queue sampling
+ *                                      indefinitely on dev->refcnt
+ *              J Hadi Salim    :       - Backlog queue sampling
  *                                     - netif_rx() feedback
  */
 
@@ -192,7 +192,8 @@ static seqcount_t devnet_rename_seq;
 
 static inline void dev_base_seq_inc(struct net *net)
 {
-       while (++net->dev_base_seq == 0);
+       while (++net->dev_base_seq == 0)
+               ;
 }
 
 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
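The restyled loop above is behavior-preserving: the sequence counter still skips 0 on wrap so that 0 can keep serving as a sentinel; only the empty loop body moved to its own line per kernel style. A tiny model of the wrap behavior:

#include <stdio.h>

static unsigned int seq = 0xffffffffu;	/* about to wrap */

static void seq_inc(void)
{
	while (++seq == 0)
		;	/* skip 0 so it can serve as a sentinel */
}

int main(void)
{
	seq_inc();
	printf("%u\n", seq);	/* prints 1, never 0 */
	return 0;
}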
@@ -274,8 +275,8 @@ EXPORT_PER_CPU_SYMBOL(softnet_data);
  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
  * according to dev->type
  */
-static const unsigned short netdev_lock_type[] =
-       {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
+static const unsigned short netdev_lock_type[] = {
+        ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
         ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
         ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
         ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
@@ -291,22 +292,22 @@ static const unsigned short netdev_lock_type[] =
         ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
         ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
 
-static const char *const netdev_lock_name[] =
-       {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
-        "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
-        "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
-        "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
-        "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
-        "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
-        "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
-        "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
-        "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
-        "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
-        "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
-        "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
-        "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
-        "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
-        "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
+static const char *const netdev_lock_name[] = {
+       "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
+       "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
+       "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
+       "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
+       "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
+       "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
+       "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
+       "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
+       "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
+       "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
+       "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
+       "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
+       "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
+       "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
+       "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
 
 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@ -352,10 +353,11 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
 #endif
 
 /*******************************************************************************
+ *
+ *             Protocol management and registration routines
+ *
+ *******************************************************************************/
 
-               Protocol management and registration routines
-
-*******************************************************************************/
 
 /*
  *     Add a protocol ID to the list. Now that the input handler is
@@ -538,10 +540,10 @@ void dev_remove_offload(struct packet_offload *po)
 EXPORT_SYMBOL(dev_remove_offload);
 
 /******************************************************************************
-
-                   Device Boot-time Settings Routines
-
-*******************************************************************************/
+ *
+ *                  Device Boot-time Settings Routines
+ *
+ ******************************************************************************/
 
 /* Boot time configuration table */
 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
@@ -574,13 +576,13 @@ static int netdev_boot_setup_add(char *name, struct ifmap *map)
 }
 
 /**
- *     netdev_boot_setup_check - check boot time settings
- *     @dev: the netdevice
+ * netdev_boot_setup_check     - check boot time settings
+ * @dev: the netdevice
  *
- *     Check boot time settings for the device.
- *     The found settings are set for the device to be used
- *     later in the device probing.
- *     Returns 0 if no settings found, 1 if they are.
+ * Check boot time settings for the device.
+ * The found settings are set for the device to be used
+ * later in the device probing.
+ * Returns 0 if no settings found, 1 if they are.
  */
 int netdev_boot_setup_check(struct net_device *dev)
 {
@@ -590,10 +592,10 @@ int netdev_boot_setup_check(struct net_device *dev)
        for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
                if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
                    !strcmp(dev->name, s[i].name)) {
-                       dev->irq        = s[i].map.irq;
-                       dev->base_addr  = s[i].map.base_addr;
-                       dev->mem_start  = s[i].map.mem_start;
-                       dev->mem_end    = s[i].map.mem_end;
+                       dev->irq = s[i].map.irq;
+                       dev->base_addr = s[i].map.base_addr;
+                       dev->mem_start = s[i].map.mem_start;
+                       dev->mem_end = s[i].map.mem_end;
                        return 1;
                }
        }
@@ -603,14 +605,14 @@ EXPORT_SYMBOL(netdev_boot_setup_check);
 
 
 /**
- *     netdev_boot_base        - get address from boot time settings
- *     @prefix: prefix for network device
- *     @unit: id for network device
+ * netdev_boot_base    - get address from boot time settings
+ * @prefix: prefix for network device
+ * @unit: id for network device
  *
- *     Check boot time settings for the base address of device.
- *     The found settings are set for the device to be used
- *     later in the device probing.
- *     Returns 0 if no settings found.
+ * Check boot time settings for the base address of device.
+ * The found settings are set for the device to be used
+ * later in the device probing.
+ * Returns 0 if no settings found.
  */
 unsigned long netdev_boot_base(const char *prefix, int unit)
 {
@@ -663,10 +665,10 @@ int __init netdev_boot_setup(char *str)
 __setup("netdev=", netdev_boot_setup);
 
 /*******************************************************************************
-
-                         Device Interface Subroutines
-
-*******************************************************************************/
+ *
+ *                       Device Interface Subroutines
+ *
+ *******************************************************************************/
 
 /**
  *     dev_get_iflink  - get 'iflink' value of a interface
@@ -737,15 +739,15 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name)
 EXPORT_SYMBOL(__dev_get_by_name);
 
 /**
- *     dev_get_by_name_rcu     - find a device by its name
- *     @net: the applicable net namespace
- *     @name: name to find
+ * dev_get_by_name_rcu - find a device by its name
+ * @net: the applicable net namespace
+ * @name: name to find
  *
- *     Find an interface by name.
- *     If the name is found a pointer to the device is returned.
- *     If the name is not found then %NULL is returned.
- *     The reference counters are not incremented so the caller must be
- *     careful with locks. The caller must hold RCU lock.
+ * Find an interface by name.
+ * If the name is found a pointer to the device is returned.
+ * If the name is not found then %NULL is returned.
+ * The reference counters are not incremented so the caller must be
+ * careful with locks. The caller must hold RCU lock.
  */
 
 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
@@ -1289,8 +1291,8 @@ void netdev_state_change(struct net_device *dev)
 EXPORT_SYMBOL(netdev_state_change);
 
 /**
- *     netdev_notify_peers - notify network peers about existence of @dev
- *     @dev: network device
+ * netdev_notify_peers - notify network peers about existence of @dev
+ * @dev: network device
  *
  * Generate traffic such that interested network peers are aware of
  * @dev, such as by generating a gratuitous ARP. This may be used when
@@ -1518,17 +1520,17 @@ static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
 static int dev_boot_phase = 1;
 
 /**
- *     register_netdevice_notifier - register a network notifier block
- *     @nb: notifier
+ * register_netdevice_notifier - register a network notifier block
+ * @nb: notifier
  *
- *     Register a notifier to be called when network device events occur.
- *     The notifier passed is linked into the kernel structures and must
- *     not be reused until it has been unregistered. A negative errno code
- *     is returned on a failure.
+ * Register a notifier to be called when network device events occur.
+ * The notifier passed is linked into the kernel structures and must
+ * not be reused until it has been unregistered. A negative errno code
+ * is returned on a failure.
  *
- *     When registered all registration and up events are replayed
- *     to the new notifier to allow device to have a race free
- *     view of the network device list.
+ * When registered all registration and up events are replayed
+ * to the new notifier to allow device to have a race free
+ * view of the network device list.
  */
 
 int register_netdevice_notifier(struct notifier_block *nb)
@@ -1585,17 +1587,17 @@ outroll:
 EXPORT_SYMBOL(register_netdevice_notifier);
 
 /**
- *     unregister_netdevice_notifier - unregister a network notifier block
- *     @nb: notifier
+ * unregister_netdevice_notifier - unregister a network notifier block
+ * @nb: notifier
  *
- *     Unregister a notifier previously registered by
- *     register_netdevice_notifier(). The notifier is unlinked into the
- *     kernel structures and may then be reused. A negative errno code
- *     is returned on a failure.
+ * Unregister a notifier previously registered by
+ * register_netdevice_notifier(). The notifier is unlinked into the
+ * kernel structures and may then be reused. A negative errno code
+ * is returned on a failure.
  *
- *     After unregistering unregister and down device events are synthesized
- *     for all devices on the device list to the removed notifier to remove
- *     the need for special case cleanup code.
+ * After unregistering unregister and down device events are synthesized
+ * for all devices on the device list to the removed notifier to remove
+ * the need for special case cleanup code.
  */
 
 int unregister_netdevice_notifier(struct notifier_block *nb)
@@ -2403,28 +2405,6 @@ void netif_schedule_queue(struct netdev_queue *txq)
 }
 EXPORT_SYMBOL(netif_schedule_queue);
 
-/**
- *     netif_wake_subqueue - allow sending packets on subqueue
- *     @dev: network device
- *     @queue_index: sub queue index
- *
- * Resume individual transmit queue of a device with multiple transmit queues.
- */
-void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
-{
-       struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
-
-       if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
-               struct Qdisc *q;
-
-               rcu_read_lock();
-               q = rcu_dereference(txq->qdisc);
-               __netif_schedule(q);
-               rcu_read_unlock();
-       }
-}
-EXPORT_SYMBOL(netif_wake_subqueue);
-
 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 {
        if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
@@ -2518,6 +2498,7 @@ u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
 
        if (dev->num_tc) {
                u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
+
                qoffset = dev->tc_to_txq[tc].offset;
                qcount = dev->tc_to_txq[tc].count;
        }
@@ -2654,9 +2635,10 @@ EXPORT_SYMBOL(skb_mac_gso_segment);
 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
 {
        if (tx_path)
-               return skb->ip_summed != CHECKSUM_PARTIAL;
-       else
-               return skb->ip_summed == CHECKSUM_NONE;
+               return skb->ip_summed != CHECKSUM_PARTIAL &&
+                      skb->ip_summed != CHECKSUM_NONE;
+
+       return skb->ip_summed == CHECKSUM_NONE;
 }
 
 /**
@@ -2675,11 +2657,12 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                  netdev_features_t features, bool tx_path)
 {
+       struct sk_buff *segs;
+
        if (unlikely(skb_needs_check(skb, tx_path))) {
                int err;
 
-               skb_warn_bad_offload(skb);
-
+               /* We're going to init ->check field in TCP or UDP header */
                err = skb_cow_head(skb, 0);
                if (err < 0)
                        return ERR_PTR(err);
@@ -2707,7 +2690,12 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
        skb_reset_mac_header(skb);
        skb_reset_mac_len(skb);
 
-       return skb_mac_gso_segment(skb, features);
+       segs = skb_mac_gso_segment(skb, features);
+
+       if (unlikely(skb_needs_check(skb, tx_path)))
+               skb_warn_bad_offload(skb);
+
+       return segs;
 }
 EXPORT_SYMBOL(__skb_gso_segment);
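The reordering above defers skb_warn_bad_offload() until after segmentation and widens the tx-path check so CHECKSUM_NONE no longer triggers it; the warning now fires only if the skb still looks inconsistent once the GSO handler has run. A standalone model of the revised tx-path predicate (constant names mirror the kernel's, values are illustrative):

#include <stdio.h>

enum { CHECKSUM_NONE, CHECKSUM_UNNECESSARY,
       CHECKSUM_COMPLETE, CHECKSUM_PARTIAL };

/* tx path: only warn/fix up when the checksum state is neither
 * "driver will finish it" (PARTIAL) nor "no checksum needed" (NONE) */
static int needs_check_tx(int ip_summed)
{
	return ip_summed != CHECKSUM_PARTIAL && ip_summed != CHECKSUM_NONE;
}

int main(void)
{
	printf("%d\n", needs_check_tx(CHECKSUM_PARTIAL));	/* 0 */
	printf("%d\n", needs_check_tx(CHECKSUM_NONE));		/* 0: newly tolerated */
	printf("%d\n", needs_check_tx(CHECKSUM_UNNECESSARY));	/* 1 */
	return 0;
}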
 
@@ -2732,9 +2720,11 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 {
 #ifdef CONFIG_HIGHMEM
        int i;
+
        if (!(dev->features & NETIF_F_HIGHDMA)) {
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
                        if (PageHighMem(skb_frag_page(frag)))
                                return 1;
                }
@@ -2748,6 +2738,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        dma_addr_t addr = page_to_phys(skb_frag_page(frag));
+
                        if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
                                return 1;
                }
@@ -3148,9 +3139,7 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
        if (!cl)
                return skb;
 
-       /* skb->tc_verd and qdisc_skb_cb(skb)->pkt_len were already set
-        * earlier by the caller.
-        */
+       /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
        qdisc_bstats_cpu_update(cl->q, skb);
 
        switch (tc_classify(skb, cl, &cl_res, false)) {
@@ -3225,6 +3214,7 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
        if (queue_index < 0 || skb->ooo_okay ||
            queue_index >= dev->real_num_tx_queues) {
                int new_index = get_xps_queue(dev, skb);
+
                if (new_index < 0)
                        new_index = skb_tx_hash(dev, skb);
 
@@ -3254,6 +3244,7 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 
        if (dev->real_num_tx_queues != 1) {
                const struct net_device_ops *ops = dev->netdev_ops;
+
                if (ops->ndo_select_queue)
                        queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
                                                            __netdev_pick_tx);
@@ -3315,7 +3306,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 
        qdisc_pkt_len_init(skb);
 #ifdef CONFIG_NET_CLS_ACT
-       skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
+       skb->tc_at_ingress = 0;
 # ifdef CONFIG_NET_EGRESS
        if (static_key_false(&egress_needed)) {
                skb = sch_handle_egress(skb, &rc, dev);
@@ -3342,16 +3333,16 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
        }
 
        /* The device has no queue. Common case for software devices:
-          loopback, all the sorts of tunnels...
+        * loopback, all the sorts of tunnels...
 
-          Really, it is unlikely that netif_tx_lock protection is necessary
-          here.  (f.e. loopback and IP tunnels are clean ignoring statistics
-          counters.)
-          However, it is possible, that they rely on protection
-          made by us here.
+        * Really, it is unlikely that netif_tx_lock protection is necessary
+        * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
+        * counters.)
+        * However, it is possible, that they rely on protection
+        * made by us here.
 
-          Check this and shot the lock. It is not prone from deadlocks.
-          Either shot noqueue qdisc, it is even simpler 8)
+        * Check this and shot the lock. It is not prone from deadlocks.
+        * Either shot noqueue qdisc, it is even simpler 8)
         */
        if (dev->flags & IFF_UP) {
                int cpu = smp_processor_id(); /* ok because BHs are off */
@@ -3413,16 +3404,20 @@ int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
 EXPORT_SYMBOL(dev_queue_xmit_accel);
 
 
-/*=======================================================================
-                      Receiver routines
-  =======================================================================*/
+/*************************************************************************
+ *                     Receiver routines
+ *************************************************************************/
 
 int netdev_max_backlog __read_mostly = 1000;
 EXPORT_SYMBOL(netdev_max_backlog);
 
 int netdev_tstamp_prequeue __read_mostly = 1;
 int netdev_budget __read_mostly = 300;
-int weight_p __read_mostly = 64;            /* old backlog weight */
+int weight_p __read_mostly = 64;           /* old backlog weight */
+int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
+int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
+int dev_rx_weight __read_mostly = 64;
+int dev_tx_weight __read_mostly = 64;
 
 /* Called with irq disabled */
 static inline void ____napi_schedule(struct softnet_data *sd,
@@ -3779,6 +3774,7 @@ static int netif_rx_internal(struct sk_buff *skb)
 #endif
        {
                unsigned int qtail;
+
                ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
                put_cpu();
        }
@@ -3838,6 +3834,7 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
 
                while (clist) {
                        struct sk_buff *skb = clist;
+
                        clist = clist->next;
 
                        WARN_ON(atomic_read(&skb->users));
@@ -3911,7 +3908,7 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
        }
 
        qdisc_skb_cb(skb)->pkt_len = skb->len;
-       skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
+       skb->tc_at_ingress = 1;
        qdisc_bstats_cpu_update(cl->q, skb);
 
        switch (tc_classify(skb, cl, &cl_res, false)) {
@@ -3976,9 +3973,7 @@ int netdev_rx_handler_register(struct net_device *dev,
                               rx_handler_func_t *rx_handler,
                               void *rx_handler_data)
 {
-       ASSERT_RTNL();
-
-       if (dev->rx_handler)
+       if (netdev_is_rx_handler_busy(dev))
                return -EBUSY;
 
        /* Note: rx_handler_data must be set before rx_handler */
@@ -4084,12 +4079,8 @@ another_round:
                        goto out;
        }
 
-#ifdef CONFIG_NET_CLS_ACT
-       if (skb->tc_verd & TC_NCLS) {
-               skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
-               goto ncls;
-       }
-#endif
+       if (skb_skip_tc_classify(skb))
+               goto skip_classify;
 
        if (pfmemalloc)
                goto skip_taps;
@@ -4117,10 +4108,8 @@ skip_taps:
                        goto out;
        }
 #endif
-#ifdef CONFIG_NET_CLS_ACT
-       skb->tc_verd = 0;
-ncls:
-#endif
+       skb_reset_tc(skb);
+skip_classify:
        if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
                goto drop;
 
@@ -4521,6 +4510,11 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
        if (&ptype->list == head)
                goto normal;
 
+       if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
+               ret = GRO_CONSUMED;
+               goto ok;
+       }
+
        same_flow = NAPI_GRO_CB(skb)->same_flow;
        ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
 
@@ -4616,6 +4610,7 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
        case GRO_MERGED_FREE:
                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
                        skb_dst_drop(skb);
+                       secpath_reset(skb);
                        kmem_cache_free(skbuff_head_cache, skb);
                } else {
                        __kfree_skb(skb);
@@ -4624,6 +4619,7 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 
        case GRO_HELD:
        case GRO_MERGED:
+       case GRO_CONSUMED:
                break;
        }
 
@@ -4656,6 +4652,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
        skb->encapsulation = 0;
        skb_shinfo(skb)->gso_type = 0;
        skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
+       secpath_reset(skb);
 
        napi->skb = skb;
 }
@@ -4694,6 +4691,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
                break;
 
        case GRO_MERGED:
+       case GRO_CONSUMED:
                break;
        }
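The new GRO_CONSUMED result above lets a gro_receive handler keep the skb (for example, to finish work asynchronously) by returning ERR_PTR(-EINPROGRESS); the core then neither frees nor re-injects it. A userspace model of the ERR_PTR encoding used to carry that signal through a pointer return:

#include <stdio.h>

#define EINPROGRESS	115	/* illustrative; matches the common Linux value */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *pp = ERR_PTR(-EINPROGRESS);	/* what the hook would return */

	if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS)
		puts("GRO_CONSUMED: skb kept by the protocol layer");
	return 0;
}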
 
@@ -4830,7 +4828,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
                net_rps_action_and_irq_enable(sd);
        }
 
-       napi->weight = weight_p;
+       napi->weight = dev_rx_weight;
        while (again) {
                struct sk_buff *skb;
 
@@ -4897,23 +4895,6 @@ void __napi_schedule_irqoff(struct napi_struct *n)
 }
 EXPORT_SYMBOL(__napi_schedule_irqoff);
 
-bool __napi_complete(struct napi_struct *n)
-{
-       BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
-
-       /* Some drivers call us directly, instead of calling
-        * napi_complete_done().
-        */
-       if (unlikely(test_bit(NAPI_STATE_IN_BUSY_POLL, &n->state)))
-               return false;
-
-       list_del_init(&n->poll_list);
-       smp_mb__before_atomic();
-       clear_bit(NAPI_STATE_SCHED, &n->state);
-       return true;
-}
-EXPORT_SYMBOL(__napi_complete);
-
 bool napi_complete_done(struct napi_struct *n, int work_done)
 {
        unsigned long flags;
@@ -4940,14 +4921,13 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                else
                        napi_gro_flush(n, false);
        }
-       if (likely(list_empty(&n->poll_list))) {
-               WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
-       } else {
+       if (unlikely(!list_empty(&n->poll_list))) {
                /* If n->poll_list is not empty, we need to mask irqs */
                local_irq_save(flags);
-               __napi_complete(n);
+               list_del_init(&n->poll_list);
                local_irq_restore(flags);
        }
+       WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
        return true;
 }
 EXPORT_SYMBOL(napi_complete_done);
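With __napi_complete() gone, napi_complete_done() above clears NAPI_STATE_SCHED itself and takes the irq-masked path only when the poll list is actually non-empty; the WARN_ON_ONCE catches double completion. A model of the atomic test-and-clear that backs that warning:

#include <stdatomic.h>
#include <stdio.h>

#define NAPI_STATE_SCHED	1u

static atomic_uint state;

/* returns the previous value of the bit and clears it, like
 * test_and_clear_bit(); a second call signals double completion */
static int test_and_clear_sched(void)
{
	return (atomic_fetch_and(&state, ~NAPI_STATE_SCHED) &
		NAPI_STATE_SCHED) != 0;
}

int main(void)
{
	atomic_fetch_or(&state, NAPI_STATE_SCHED);	/* napi_schedule() */
	printf("%d %d\n", test_and_clear_sched(),
	       test_and_clear_sched());	/* 1 0: a second clear would warn */
	return 0;
}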
@@ -4993,7 +4973,6 @@ bool sk_busy_loop(struct sock *sk, int nonblock)
 {
        unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
        int (*napi_poll)(struct napi_struct *napi, int budget);
-       int (*busy_poll)(struct napi_struct *dev);
        void *have_poll_lock = NULL;
        struct napi_struct *napi;
        int rc;
@@ -5008,17 +4987,10 @@ restart:
        if (!napi)
                goto out;
 
-       /* Note: ndo_busy_poll method is optional in linux-4.5 */
-       busy_poll = napi->dev->netdev_ops->ndo_busy_poll;
-
        preempt_disable();
        for (;;) {
                rc = 0;
                local_bh_disable();
-               if (busy_poll) {
-                       rc = busy_poll(napi);
-                       goto count;
-               }
                if (!napi_poll) {
                        unsigned long val = READ_ONCE(napi->state);
 
@@ -5043,9 +5015,6 @@ count:
                                        LINUX_MIB_BUSYPOLLRXPACKETS, rc);
                local_bh_enable();
 
-               if (rc == LL_FLUSH_FAILED)
-                       break; /* permanent failure */
-
                if (nonblock || !skb_queue_empty(&sk->sk_receive_queue) ||
                    busy_loop_timeout(end_time))
                        break;
@@ -5706,6 +5675,7 @@ static int netdev_adjacent_sysfs_add(struct net_device *dev,
                              struct list_head *dev_list)
 {
        char linkname[IFNAMSIZ+7];
+
        sprintf(linkname, dev_list == &dev->adj_list.upper ?
                "upper_%s" : "lower_%s", adj_dev->name);
        return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
@@ -5716,6 +5686,7 @@ static void netdev_adjacent_sysfs_del(struct net_device *dev,
                               struct list_head *dev_list)
 {
        char linkname[IFNAMSIZ+7];
+
        sprintf(linkname, dev_list == &dev->adj_list.upper ?
                "upper_%s" : "lower_%s", name);
        sysfs_remove_link(&(dev->dev.kobj), linkname);
@@ -5985,6 +5956,7 @@ void netdev_upper_dev_unlink(struct net_device *dev,
                             struct net_device *upper_dev)
 {
        struct netdev_notifier_changeupper_info changeupper_info;
+
        ASSERT_RTNL();
 
        changeupper_info.upper_dev = upper_dev;
@@ -6151,50 +6123,6 @@ void netdev_lower_state_changed(struct net_device *lower_dev,
 }
 EXPORT_SYMBOL(netdev_lower_state_changed);
 
-int netdev_default_l2upper_neigh_construct(struct net_device *dev,
-                                          struct neighbour *n)
-{
-       struct net_device *lower_dev, *stop_dev;
-       struct list_head *iter;
-       int err;
-
-       netdev_for_each_lower_dev(dev, lower_dev, iter) {
-               if (!lower_dev->netdev_ops->ndo_neigh_construct)
-                       continue;
-               err = lower_dev->netdev_ops->ndo_neigh_construct(lower_dev, n);
-               if (err) {
-                       stop_dev = lower_dev;
-                       goto rollback;
-               }
-       }
-       return 0;
-
-rollback:
-       netdev_for_each_lower_dev(dev, lower_dev, iter) {
-               if (lower_dev == stop_dev)
-                       break;
-               if (!lower_dev->netdev_ops->ndo_neigh_destroy)
-                       continue;
-               lower_dev->netdev_ops->ndo_neigh_destroy(lower_dev, n);
-       }
-       return err;
-}
-EXPORT_SYMBOL_GPL(netdev_default_l2upper_neigh_construct);
-
-void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
-                                         struct neighbour *n)
-{
-       struct net_device *lower_dev;
-       struct list_head *iter;
-
-       netdev_for_each_lower_dev(dev, lower_dev, iter) {
-               if (!lower_dev->netdev_ops->ndo_neigh_destroy)
-                       continue;
-               lower_dev->netdev_ops->ndo_neigh_destroy(lower_dev, n);
-       }
-}
-EXPORT_SYMBOL_GPL(netdev_default_l2upper_neigh_destroy);
-
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -6447,8 +6375,8 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags)
        }
 
        /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
-          is important. Some (broken) drivers set IFF_PROMISC, when
-          IFF_ALLMULTI is requested not asking us and not reporting.
+        * is important. Some (broken) drivers set IFF_PROMISC, when
+        * IFF_ALLMULTI is requested not asking us and not reporting.
         */
        if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
                int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
@@ -6746,6 +6674,7 @@ EXPORT_SYMBOL(dev_change_xdp_fd);
 static int dev_new_index(struct net *net)
 {
        int ifindex = net->ifindex;
+
        for (;;) {
                if (++ifindex <= 0)
                        ifindex = 1;
@@ -6812,8 +6741,8 @@ static void rollback_registered_many(struct list_head *head)
 
 
                /* Notify protocols, that we are about to destroy
-                  this device. They should clean all the things.
-               */
+                * this device. They should clean all the things.
+                */
                call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
 
                if (!dev->rtnl_link_ops ||
@@ -6971,13 +6900,6 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
                features &= ~dev->gso_partial_features;
        }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       if (dev->netdev_ops->ndo_busy_poll)
-               features |= NETIF_F_BUSY_POLL;
-       else
-#endif
-               features &= ~NETIF_F_BUSY_POLL;
-
        return features;
 }
 
@@ -7166,6 +7088,7 @@ void netif_tx_stop_all_queues(struct net_device *dev)
 
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
                netif_tx_stop_queue(txq);
        }
 }
@@ -7640,17 +7563,17 @@ void netdev_freemem(struct net_device *dev)
 }
 
 /**
- *     alloc_netdev_mqs - allocate network device
- *     @sizeof_priv:           size of private data to allocate space for
- *     @name:                  device name format string
- *     @name_assign_type:      origin of device name
- *     @setup:                 callback to initialize device
- *     @txqs:                  the number of TX subqueues to allocate
- *     @rxqs:                  the number of RX subqueues to allocate
- *
- *     Allocates a struct net_device with private data area for driver use
- *     and performs basic initialization.  Also allocates subqueue structs
- *     for each queue on the device.
+ * alloc_netdev_mqs - allocate network device
+ * @sizeof_priv: size of private data to allocate space for
+ * @name: device name format string
+ * @name_assign_type: origin of device name
+ * @setup: callback to initialize device
+ * @txqs: the number of TX subqueues to allocate
+ * @rxqs: the number of RX subqueues to allocate
+ *
+ * Allocates a struct net_device with private data area for driver use
+ * and performs basic initialization.  Also allocates subqueue structs
+ * for each queue on the device.
  */
 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
                unsigned char name_assign_type,
@@ -7762,13 +7685,13 @@ free_dev:
 EXPORT_SYMBOL(alloc_netdev_mqs);
 
 /**
- *     free_netdev - free network device
- *     @dev: device
+ * free_netdev - free network device
+ * @dev: device
  *
- *     This function does the last stage of destroying an allocated device
- *     interface. The reference to the device object is released.
- *     If this is the last reference then it will be freed.
- *     Must be called in process context.
+ * This function does the last stage of destroying an allocated device
+ * interface. The reference to the device object is released. If this
+ * is the last reference then it will be freed. Must be called in process
+ * context.
  */
 void free_netdev(struct net_device *dev)
 {
@@ -7950,12 +7873,12 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
        dev_shutdown(dev);
 
        /* Notify protocols, that we are about to destroy
-          this device. They should clean all the things.
-
-          Note that dev->reg_state stays at NETREG_REGISTERED.
-          This is wanted because this way 8021q and macvlan know
-          the device is just moving and can keep their slaves up.
-       */
+        * this device. They should clean all the things.
+        *
+        * Note that dev->reg_state stays at NETREG_REGISTERED.
+        * This is wanted because this way 8021q and macvlan know
+        * the device is just moving and can keep their slaves up.
+        */
        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
        rcu_barrier();
        call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
index 2b5bf9efa720a9feed9d2f187dda6ab5bf59cda0..e9c1e6acfb6d196d4373dcc36bcf76577952dd32 100644 (file)
@@ -1392,9 +1392,9 @@ static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
        return -EOPNOTSUPP;
 }
 
-static int devlink_eswitch_fill(struct sk_buff *msg, struct devlink *devlink,
-                               enum devlink_command cmd, u32 portid,
-                               u32 seq, int flags)
+static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink,
+                                  enum devlink_command cmd, u32 portid,
+                                  u32 seq, int flags)
 {
        const struct devlink_ops *ops = devlink->ops;
        void *hdr;
@@ -1408,50 +1408,52 @@ static int devlink_eswitch_fill(struct sk_buff *msg, struct devlink *devlink,
 
        err = devlink_nl_put_handle(msg, devlink);
        if (err)
-               goto out;
+               goto nla_put_failure;
 
-       err = ops->eswitch_mode_get(devlink, &mode);
-       if (err)
-               goto out;
-       err = nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode);
-       if (err)
-               goto out;
+       if (ops->eswitch_mode_get) {
+               err = ops->eswitch_mode_get(devlink, &mode);
+               if (err)
+                       goto nla_put_failure;
+               err = nla_put_u16(msg, DEVLINK_ATTR_ESWITCH_MODE, mode);
+               if (err)
+                       goto nla_put_failure;
+       }
 
        if (ops->eswitch_inline_mode_get) {
                err = ops->eswitch_inline_mode_get(devlink, &inline_mode);
                if (err)
-                       goto out;
+                       goto nla_put_failure;
                err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_INLINE_MODE,
                                 inline_mode);
                if (err)
-                       goto out;
+                       goto nla_put_failure;
        }
 
        genlmsg_end(msg, hdr);
        return 0;
 
-out:
+nla_put_failure:
        genlmsg_cancel(msg, hdr);
        return err;
 }
 
-static int devlink_nl_cmd_eswitch_mode_get_doit(struct sk_buff *skb,
-                                               struct genl_info *info)
+static int devlink_nl_cmd_eswitch_get_doit(struct sk_buff *skb,
+                                          struct genl_info *info)
 {
        struct devlink *devlink = info->user_ptr[0];
        const struct devlink_ops *ops = devlink->ops;
        struct sk_buff *msg;
        int err;
 
-       if (!ops || !ops->eswitch_mode_get)
+       if (!ops)
                return -EOPNOTSUPP;
 
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
 
-       err = devlink_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_MODE_GET,
-                                  info->snd_portid, info->snd_seq, 0);
+       err = devlink_nl_eswitch_fill(msg, devlink, DEVLINK_CMD_ESWITCH_GET,
+                                     info->snd_portid, info->snd_seq, 0);
 
        if (err) {
                nlmsg_free(msg);
@@ -1461,8 +1463,8 @@ static int devlink_nl_cmd_eswitch_mode_get_doit(struct sk_buff *skb,
        return genlmsg_reply(msg, info);
 }
 
-static int devlink_nl_cmd_eswitch_mode_set_doit(struct sk_buff *skb,
-                                               struct genl_info *info)
+static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
+                                          struct genl_info *info)
 {
        struct devlink *devlink = info->user_ptr[0];
        const struct devlink_ops *ops = devlink->ops;
@@ -1629,15 +1631,15 @@ static const struct genl_ops devlink_nl_ops[] = {
                                  DEVLINK_NL_FLAG_LOCK_PORTS,
        },
        {
-               .cmd = DEVLINK_CMD_ESWITCH_MODE_GET,
-               .doit = devlink_nl_cmd_eswitch_mode_get_doit,
+               .cmd = DEVLINK_CMD_ESWITCH_GET,
+               .doit = devlink_nl_cmd_eswitch_get_doit,
                .policy = devlink_nl_policy,
                .flags = GENL_ADMIN_PERM,
                .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
        },
        {
-               .cmd = DEVLINK_CMD_ESWITCH_MODE_SET,
-               .doit = devlink_nl_cmd_eswitch_mode_set_doit,
+               .cmd = DEVLINK_CMD_ESWITCH_SET,
+               .doit = devlink_nl_cmd_eswitch_set_doit,
                .policy = devlink_nl_policy,
                .flags = GENL_ADMIN_PERM,
                .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
index b5cbbe07f78608aac0f48712160629be29a9b0a2..960e503b5a529a2c4f1866f49c150493ee98d7da 100644 (file)
@@ -190,7 +190,6 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
        dst->__use = 0;
        dst->lastuse = jiffies;
        dst->flags = flags;
-       dst->pending_confirm = 0;
        dst->next = NULL;
        if (!(flags & DST_NOCOUNT))
                dst_entries_add(ops, 1);
index d92de0a1f0a49d51ec8329c65d46a4f2ae304ebd..be7bab1adcde3d2f3228191163c55ca898d08e2f 100644 (file)
@@ -102,7 +102,6 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
        [NETIF_F_RXFCS_BIT] =            "rx-fcs",
        [NETIF_F_RXALL_BIT] =            "rx-all",
        [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
-       [NETIF_F_BUSY_POLL_BIT] =        "busy-poll",
        [NETIF_F_HW_TC_BIT] =            "hw-tc-offload",
 };
 
@@ -1820,11 +1819,13 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
        ret = __ethtool_get_sset_count(dev, gstrings.string_set);
        if (ret < 0)
                return ret;
+       if (ret > S32_MAX / ETH_GSTRING_LEN)
+               return -ENOMEM;
+       WARN_ON_ONCE(!ret);
 
        gstrings.len = ret;
-
-       data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
-       if (!data)
+       data = vzalloc(gstrings.len * ETH_GSTRING_LEN);
+       if (gstrings.len && !data)
                return -ENOMEM;
 
        __ethtool_get_strings(dev, gstrings.string_set, data);
@@ -1833,12 +1834,13 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
        if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
                goto out;
        useraddr += sizeof(gstrings);
-       if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+       if (gstrings.len &&
+           copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
                goto out;
        ret = 0;
 
 out:
-       kfree(data);
+       vfree(data);
        return ret;
 }
 
@@ -1915,14 +1917,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
        n_stats = ops->get_sset_count(dev, ETH_SS_STATS);
        if (n_stats < 0)
                return n_stats;
-       WARN_ON(n_stats == 0);
-
+       if (n_stats > S32_MAX / sizeof(u64))
+               return -ENOMEM;
+       WARN_ON_ONCE(!n_stats);
        if (copy_from_user(&stats, useraddr, sizeof(stats)))
                return -EFAULT;
 
        stats.n_stats = n_stats;
-       data = kmalloc(n_stats * sizeof(u64), GFP_USER);
-       if (!data)
+       data = vzalloc(n_stats * sizeof(u64));
+       if (n_stats && !data)
                return -ENOMEM;
 
        ops->get_ethtool_stats(dev, &stats, data);
@@ -1931,12 +1934,12 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
        if (copy_to_user(useraddr, &stats, sizeof(stats)))
                goto out;
        useraddr += sizeof(stats);
-       if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
+       if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
                goto out;
        ret = 0;
 
  out:
-       kfree(data);
+       vfree(data);
        return ret;
 }
 
@@ -1951,17 +1954,18 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
                return -EOPNOTSUPP;
 
        n_stats = phy_get_sset_count(phydev);
-
        if (n_stats < 0)
                return n_stats;
-       WARN_ON(n_stats == 0);
+       if (n_stats > S32_MAX / sizeof(u64))
+               return -ENOMEM;
+       WARN_ON_ONCE(!n_stats);
 
        if (copy_from_user(&stats, useraddr, sizeof(stats)))
                return -EFAULT;
 
        stats.n_stats = n_stats;
-       data = kmalloc_array(n_stats, sizeof(u64), GFP_USER);
-       if (!data)
+       data = vzalloc(n_stats * sizeof(u64));
+       if (n_stats && !data)
                return -ENOMEM;
 
        mutex_lock(&phydev->lock);
@@ -1972,12 +1976,12 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
        if (copy_to_user(useraddr, &stats, sizeof(stats)))
                goto out;
        useraddr += sizeof(stats);
-       if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
+       if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
                goto out;
        ret = 0;
 
  out:
-       kfree(data);
+       vfree(data);
        return ret;
 }
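The three ethtool hunks above share one pattern: the count returned by the driver is bounded before it is multiplied into an allocation size, the buffer comes from vzalloc() since string/stat arrays can grow past what kmalloc handles comfortably, and zero-length results are tolerated instead of tripping a WARN. A standalone model of the overflow guard, using calloc as a stand-in for vzalloc:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* reject counts whose byte size would overflow a 32-bit length field */
static void *alloc_stats(int64_t n_stats)
{
	if (n_stats < 0 || n_stats > INT32_MAX / (int64_t)sizeof(uint64_t))
		return NULL;	/* the kernel code returns -ENOMEM here */
	if (n_stats == 0)
		return NULL;	/* nothing to copy; callers must tolerate this */
	return calloc((size_t)n_stats, sizeof(uint64_t));
}

int main(void)
{
	void *ok = alloc_stats(4);
	void *huge = alloc_stats((int64_t)1 << 40);

	printf("%s %s\n", ok ? "alloc" : "null", huge ? "alloc" : "null");
	free(ok);
	return 0;
}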
 
index 1969b3f118c1d6e30b3edccd7c85a6caa520af5c..e466e0040137caba1c95a50df68cb2ef2af6f633 100644 (file)
@@ -76,9 +76,10 @@ int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
         * allow SOCK_MEMALLOC sockets to use it as this socket is
         * helping free memory
         */
-       if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
+       if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
                return -ENOMEM;
-
+       }
        err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
        if (err)
                return err;
@@ -1416,8 +1417,8 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
-       .arg3_type      = ARG_PTR_TO_STACK,
-       .arg4_type      = ARG_CONST_STACK_SIZE,
+       .arg3_type      = ARG_PTR_TO_MEM,
+       .arg4_type      = ARG_CONST_SIZE,
        .arg5_type      = ARG_ANYTHING,
 };
 
@@ -1447,8 +1448,8 @@ static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
-       .arg3_type      = ARG_PTR_TO_RAW_STACK,
-       .arg4_type      = ARG_CONST_STACK_SIZE,
+       .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
+       .arg4_type      = ARG_CONST_SIZE,
 };
 
 BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
@@ -1522,10 +1523,11 @@ BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
 {
        bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
        bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
+       bool do_mforce = flags & BPF_F_MARK_ENFORCE;
        __sum16 *ptr;
 
-       if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
-                              BPF_F_HDR_FIELD_MASK)))
+       if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
+                              BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
                return -EINVAL;
        if (unlikely(offset > 0xffff || offset & 1))
                return -EFAULT;
@@ -1533,7 +1535,7 @@ BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
                return -EFAULT;
 
        ptr = (__sum16 *)(skb->data + offset);
-       if (is_mmzero && !*ptr)
+       if (is_mmzero && !do_mforce && !*ptr)
                return 0;
 
        switch (flags & BPF_F_HDR_FIELD_MASK) {
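The new BPF_F_MARK_ENFORCE flag above overrides the BPF_F_MARK_MANGLED_0 shortcut: with MANGLED_0 alone, a zero checksum field means "no checksum, leave it"; adding ENFORCE forces the update anyway. A model of the skip decision (bit positions here are illustrative, not the UAPI values):

#include <stdio.h>

#define BPF_F_MARK_MANGLED_0	(1u << 0)	/* illustrative bit values */
#define BPF_F_MARK_ENFORCE	(1u << 1)

static int skip_update(unsigned int flags, unsigned short csum)
{
	int is_mmzero = !!(flags & BPF_F_MARK_MANGLED_0);
	int do_mforce = !!(flags & BPF_F_MARK_ENFORCE);

	return is_mmzero && !do_mforce && csum == 0;
}

int main(void)
{
	printf("%d\n", skip_update(BPF_F_MARK_MANGLED_0, 0));	/* 1: left alone */
	printf("%d\n", skip_update(BPF_F_MARK_MANGLED_0 |
				   BPF_F_MARK_ENFORCE, 0));	/* 0: updated */
	return 0;
}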
@@ -1601,10 +1603,10 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
        .gpl_only       = false,
        .pkt_access     = true,
        .ret_type       = RET_INTEGER,
-       .arg1_type      = ARG_PTR_TO_STACK,
-       .arg2_type      = ARG_CONST_STACK_SIZE_OR_ZERO,
-       .arg3_type      = ARG_PTR_TO_STACK,
-       .arg4_type      = ARG_CONST_STACK_SIZE_OR_ZERO,
+       .arg1_type      = ARG_PTR_TO_MEM,
+       .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
+       .arg3_type      = ARG_PTR_TO_MEM,
+       .arg4_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg5_type      = ARG_ANYTHING,
 };
 
@@ -2306,8 +2308,8 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = {
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
-       .arg4_type      = ARG_PTR_TO_STACK,
-       .arg5_type      = ARG_CONST_STACK_SIZE,
+       .arg4_type      = ARG_PTR_TO_MEM,
+       .arg5_type      = ARG_CONST_SIZE,
 };
 
 static unsigned short bpf_tunnel_key_af(u64 flags)
@@ -2377,8 +2379,8 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
-       .arg2_type      = ARG_PTR_TO_RAW_STACK,
-       .arg3_type      = ARG_CONST_STACK_SIZE,
+       .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
+       .arg3_type      = ARG_CONST_SIZE,
        .arg4_type      = ARG_ANYTHING,
 };
 
@@ -2412,8 +2414,8 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
-       .arg2_type      = ARG_PTR_TO_RAW_STACK,
-       .arg3_type      = ARG_CONST_STACK_SIZE,
+       .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
+       .arg3_type      = ARG_CONST_SIZE,
 };
 
 static struct metadata_dst __percpu *md_dst;
@@ -2483,8 +2485,8 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
-       .arg2_type      = ARG_PTR_TO_STACK,
-       .arg3_type      = ARG_CONST_STACK_SIZE,
+       .arg2_type      = ARG_PTR_TO_MEM,
+       .arg3_type      = ARG_CONST_SIZE,
        .arg4_type      = ARG_ANYTHING,
 };
 
@@ -2509,8 +2511,8 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
-       .arg2_type      = ARG_PTR_TO_STACK,
-       .arg3_type      = ARG_CONST_STACK_SIZE,
+       .arg2_type      = ARG_PTR_TO_MEM,
+       .arg3_type      = ARG_CONST_SIZE,
 };
 
 static const struct bpf_func_proto *
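The renames running through these hunks (ARG_PTR_TO_STACK becomes ARG_PTR_TO_MEM, ARG_PTR_TO_RAW_STACK becomes ARG_PTR_TO_UNINIT_MEM, and the CONST_STACK_SIZE variants become CONST_SIZE) reflect that the verifier now accepts more than stack pointers for these helper arguments, so the names describe the memory contract rather than its location. A toy table mirroring one renamed prototype (the struct and enum are standalone stand-ins, not the kernel definitions):

#include <stdio.h>

enum arg_type {
	ARG_ANYTHING,
	ARG_PTR_TO_CTX,
	ARG_PTR_TO_MEM,		/* initialized readable memory */
	ARG_PTR_TO_UNINIT_MEM,	/* helper fills it in */
	ARG_CONST_SIZE,
	ARG_CONST_SIZE_OR_ZERO,
};

struct func_proto {
	const char *name;
	enum arg_type arg[5];
};

static const struct func_proto skb_load_bytes = {
	.name = "skb_load_bytes",
	.arg = { ARG_PTR_TO_CTX, ARG_ANYTHING,
		 ARG_PTR_TO_UNINIT_MEM, ARG_CONST_SIZE },
};

int main(void)
{
	printf("%s: arg3=%d arg4=%d\n", skb_load_bytes.name,
	       skb_load_bytes.arg[2], skb_load_bytes.arg[3]);
	return 0;
}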
@@ -2593,12 +2595,12 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_CONST_MAP_PTR,
        .arg3_type      = ARG_ANYTHING,
-       .arg4_type      = ARG_PTR_TO_STACK,
-       .arg5_type      = ARG_CONST_STACK_SIZE,
+       .arg4_type      = ARG_PTR_TO_MEM,
+       .arg5_type      = ARG_CONST_SIZE,
 };
 
 static const struct bpf_func_proto *
-sk_filter_func_proto(enum bpf_func_id func_id)
+bpf_base_func_proto(enum bpf_func_id func_id)
 {
        switch (func_id) {
        case BPF_FUNC_map_lookup_elem:
@@ -2625,6 +2627,17 @@ sk_filter_func_proto(enum bpf_func_id func_id)
        }
 }
 
+static const struct bpf_func_proto *
+sk_filter_func_proto(enum bpf_func_id func_id)
+{
+       switch (func_id) {
+       case BPF_FUNC_skb_load_bytes:
+               return &bpf_skb_load_bytes_proto;
+       default:
+               return bpf_base_func_proto(func_id);
+       }
+}
+
 static const struct bpf_func_proto *
 tc_cls_act_func_proto(enum bpf_func_id func_id)
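Above, the old sk_filter_func_proto() becomes a shared bpf_base_func_proto(), and each program type's table (sk_filter, tc_cls_act, xdp, cg_skb, lwt) now falls back to it instead of to the socket-filter table, so socket filters stop inheriting helpers meant for other program types. A toy version of that fallback chain:

#include <stdio.h>
#include <stddef.h>

/* shared helpers every program type may use */
static const char *base_proto(int func_id)
{
	return func_id == 1 ? "map_lookup_elem" : NULL;
}

/* socket filters: one extra helper, then the common base */
static const char *sk_filter_proto(int func_id)
{
	return func_id == 2 ? "skb_load_bytes" : base_proto(func_id);
}

int main(void)
{
	printf("%s %s %s\n", sk_filter_proto(2), sk_filter_proto(1),
	       sk_filter_proto(3) ? sk_filter_proto(3) : "(none)");
	return 0;
}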
 {
@@ -2680,7 +2693,7 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
        case BPF_FUNC_skb_under_cgroup:
                return &bpf_skb_under_cgroup_proto;
        default:
-               return sk_filter_func_proto(func_id);
+               return bpf_base_func_proto(func_id);
        }
 }
 
@@ -2695,7 +2708,7 @@ xdp_func_proto(enum bpf_func_id func_id)
        case BPF_FUNC_xdp_adjust_head:
                return &bpf_xdp_adjust_head_proto;
        default:
-               return sk_filter_func_proto(func_id);
+               return bpf_base_func_proto(func_id);
        }
 }
 
@@ -2706,7 +2719,7 @@ cg_skb_func_proto(enum bpf_func_id func_id)
        case BPF_FUNC_skb_load_bytes:
                return &bpf_skb_load_bytes_proto;
        default:
-               return sk_filter_func_proto(func_id);
+               return bpf_base_func_proto(func_id);
        }
 }
 
@@ -2733,7 +2746,7 @@ lwt_inout_func_proto(enum bpf_func_id func_id)
        case BPF_FUNC_skb_under_cgroup:
                return &bpf_skb_under_cgroup_proto;
        default:
-               return sk_filter_func_proto(func_id);
+               return bpf_base_func_proto(func_id);
        }
 }
 
@@ -2776,11 +2789,22 @@ static bool __is_valid_access(int off, int size)
 {
        if (off < 0 || off >= sizeof(struct __sk_buff))
                return false;
+
        /* The verifier guarantees that size > 0. */
        if (off % size != 0)
                return false;
-       if (size != sizeof(__u32))
-               return false;
+
+       switch (off) {
+       case offsetof(struct __sk_buff, cb[0]) ...
+            offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
+               if (off + size >
+                   offsetof(struct __sk_buff, cb[4]) + sizeof(__u32))
+                       return false;
+               break;
+       default:
+               if (size != sizeof(__u32))
+                       return false;
+       }
 
        return true;
 }
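With the widened case range above, the verifier accepts 1-, 2-, 4- and 8-byte loads and stores anywhere inside the 20-byte __sk_buff cb[] area, as long as the access does not run past cb[4]. A sketch of what a program can now do (assumes the usual clang BPF target; loader and section annotations omitted):

#include <linux/types.h>
#include <linux/bpf.h>

int cb_demo(struct __sk_buff *skb)
{
	char *cb = (char *)skb->cb;

	cb[0] = 1;			/* 1-byte store, previously rejected */
	*(__u16 *)(cb + 2) = 2;		/* 2-byte store, previously rejected */
	*(__u32 *)(cb + 4) = 3;		/* 4-byte store, allowed before too */
	*(__u64 *)(cb + 8) = 4;		/* 8-byte store, previously rejected */
	return 0;
}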
@@ -2799,7 +2823,7 @@ static bool sk_filter_is_valid_access(int off, int size,
        if (type == BPF_WRITE) {
                switch (off) {
                case offsetof(struct __sk_buff, cb[0]) ...
-                    offsetof(struct __sk_buff, cb[4]):
+                    offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
                        break;
                default:
                        return false;
@@ -2823,7 +2847,7 @@ static bool lwt_is_valid_access(int off, int size,
                case offsetof(struct __sk_buff, mark):
                case offsetof(struct __sk_buff, priority):
                case offsetof(struct __sk_buff, cb[0]) ...
-                    offsetof(struct __sk_buff, cb[4]):
+                    offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
                        break;
                default:
                        return false;
@@ -2915,7 +2939,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
                case offsetof(struct __sk_buff, tc_index):
                case offsetof(struct __sk_buff, priority):
                case offsetof(struct __sk_buff, cb[0]) ...
-                    offsetof(struct __sk_buff, cb[4]):
+                    offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
                case offsetof(struct __sk_buff, tc_classid):
                        break;
                default:
@@ -2972,32 +2996,33 @@ void bpf_warn_invalid_xdp_action(u32 act)
 }
 EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
 
-static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
-                                       int src_reg, int ctx_off,
-                                       struct bpf_insn *insn_buf,
-                                       struct bpf_prog *prog)
+static u32 bpf_convert_ctx_access(enum bpf_access_type type,
+                                 const struct bpf_insn *si,
+                                 struct bpf_insn *insn_buf,
+                                 struct bpf_prog *prog)
 {
        struct bpf_insn *insn = insn_buf;
+       int off;
 
-       switch (ctx_off) {
+       switch (si->off) {
        case offsetof(struct __sk_buff, len):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
 
-               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+               *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                      offsetof(struct sk_buff, len));
                break;
 
        case offsetof(struct __sk_buff, protocol):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
 
-               *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+               *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
                                      offsetof(struct sk_buff, protocol));
                break;
 
        case offsetof(struct __sk_buff, vlan_proto):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);
 
-               *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+               *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
                                      offsetof(struct sk_buff, vlan_proto));
                break;
 
@@ -3005,17 +3030,17 @@ static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);
 
                if (type == BPF_WRITE)
-                       *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
+                       *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                              offsetof(struct sk_buff, priority));
                else
-                       *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+                       *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                              offsetof(struct sk_buff, priority));
                break;
 
        case offsetof(struct __sk_buff, ingress_ifindex):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4);
 
-               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+               *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                      offsetof(struct sk_buff, skb_iif));
                break;
 
@@ -3023,17 +3048,17 @@ static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
-                                     dst_reg, src_reg,
+                                     si->dst_reg, si->src_reg,
                                      offsetof(struct sk_buff, dev));
-               *insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
-               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg,
+               *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
+               *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
                                      offsetof(struct net_device, ifindex));
                break;
 
        case offsetof(struct __sk_buff, hash):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
 
-               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+               *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                      offsetof(struct sk_buff, hash));
                break;
 
@@ -3041,63 +3066,77 @@ static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
 
                if (type == BPF_WRITE)
-                       *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
+                       *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                              offsetof(struct sk_buff, mark));
                else
-                       *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+                       *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                              offsetof(struct sk_buff, mark));
                break;
 
        case offsetof(struct __sk_buff, pkt_type):
-               return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn);
+               return convert_skb_access(SKF_AD_PKTTYPE, si->dst_reg,
+                                         si->src_reg, insn);
 
        case offsetof(struct __sk_buff, queue_mapping):
-               return convert_skb_access(SKF_AD_QUEUE, dst_reg, src_reg, insn);
+               return convert_skb_access(SKF_AD_QUEUE, si->dst_reg,
+                                         si->src_reg, insn);
 
        case offsetof(struct __sk_buff, vlan_present):
                return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
-                                         dst_reg, src_reg, insn);
+                                         si->dst_reg, si->src_reg, insn);
 
        case offsetof(struct __sk_buff, vlan_tci):
                return convert_skb_access(SKF_AD_VLAN_TAG,
-                                         dst_reg, src_reg, insn);
+                                         si->dst_reg, si->src_reg, insn);
 
        case offsetof(struct __sk_buff, cb[0]) ...
-            offsetof(struct __sk_buff, cb[4]):
+            offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
                BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
+               BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
+                             offsetof(struct qdisc_skb_cb, data)) %
+                            sizeof(__u64));
 
                prog->cb_access = 1;
-               ctx_off -= offsetof(struct __sk_buff, cb[0]);
-               ctx_off += offsetof(struct sk_buff, cb);
-               ctx_off += offsetof(struct qdisc_skb_cb, data);
+               off  = si->off;
+               off -= offsetof(struct __sk_buff, cb[0]);
+               off += offsetof(struct sk_buff, cb);
+               off += offsetof(struct qdisc_skb_cb, data);
                if (type == BPF_WRITE)
-                       *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
+                       *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
+                                             si->src_reg, off);
                else
-                       *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
+                       *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
+                                             si->src_reg, off);
                break;
 
        case offsetof(struct __sk_buff, tc_classid):
-               ctx_off -= offsetof(struct __sk_buff, tc_classid);
-               ctx_off += offsetof(struct sk_buff, cb);
-               ctx_off += offsetof(struct qdisc_skb_cb, tc_classid);
+               BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);
+
+               off  = si->off;
+               off -= offsetof(struct __sk_buff, tc_classid);
+               off += offsetof(struct sk_buff, cb);
+               off += offsetof(struct qdisc_skb_cb, tc_classid);
                if (type == BPF_WRITE)
-                       *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
+                       *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
+                                             si->src_reg, off);
                else
-                       *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
+                       *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
+                                             si->src_reg, off);
                break;
 
        case offsetof(struct __sk_buff, data):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
-                                     dst_reg, src_reg,
+                                     si->dst_reg, si->src_reg,
                                      offsetof(struct sk_buff, data));
                break;
 
        case offsetof(struct __sk_buff, data_end):
-               ctx_off -= offsetof(struct __sk_buff, data_end);
-               ctx_off += offsetof(struct sk_buff, cb);
-               ctx_off += offsetof(struct bpf_skb_data_end, data_end);
-               *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), dst_reg, src_reg,
-                                     ctx_off);
+               off  = si->off;
+               off -= offsetof(struct __sk_buff, data_end);
+               off += offsetof(struct sk_buff, cb);
+               off += offsetof(struct bpf_skb_data_end, data_end);
+               *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
+                                     si->src_reg, off);
                break;
 
        case offsetof(struct __sk_buff, tc_index):
@@ -3105,110 +3144,107 @@ static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);
 
                if (type == BPF_WRITE)
-                       *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg,
+                       *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
                                              offsetof(struct sk_buff, tc_index));
                else
-                       *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+                       *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
                                              offsetof(struct sk_buff, tc_index));
-               break;
 #else
                if (type == BPF_WRITE)
-                       *insn++ = BPF_MOV64_REG(dst_reg, dst_reg);
+                       *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
                else
-                       *insn++ = BPF_MOV64_IMM(dst_reg, 0);
-               break;
+                       *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
 #endif
+               break;
        }
 
        return insn - insn_buf;
 }
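The renamed bpf_convert_ctx_access() takes the original instruction as 'const struct bpf_insn *si' instead of unpacked dst_reg/src_reg/ctx_off values, which also makes the access width available via BPF_SIZE(si->code). A minimal sketch of a callback under the new signature (demo_convert_ctx_access is illustrative):

#include <linux/bpf.h>
#include <linux/filter.h>

static u32 demo_convert_ctx_access(enum bpf_access_type type,
				   const struct bpf_insn *si,
				   struct bpf_insn *insn_buf,
				   struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	/* rewrite the access 1:1, preserving the width the program used */
	*insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
			      si->src_reg, si->off);
	return insn - insn_buf;
}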
 
 static u32 sock_filter_convert_ctx_access(enum bpf_access_type type,
-                                         int dst_reg, int src_reg,
-                                         int ctx_off,
+                                         const struct bpf_insn *si,
                                          struct bpf_insn *insn_buf,
                                          struct bpf_prog *prog)
 {
        struct bpf_insn *insn = insn_buf;
 
-       switch (ctx_off) {
+       switch (si->off) {
        case offsetof(struct bpf_sock, bound_dev_if):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);
 
                if (type == BPF_WRITE)
-                       *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
+                       *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                        offsetof(struct sock, sk_bound_dev_if));
                else
-                       *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+                       *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                      offsetof(struct sock, sk_bound_dev_if));
                break;
 
        case offsetof(struct bpf_sock, family):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_family) != 2);
 
-               *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+               *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
                                      offsetof(struct sock, sk_family));
                break;
 
        case offsetof(struct bpf_sock, type):
-               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+               *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                      offsetof(struct sock, __sk_flags_offset));
-               *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, SK_FL_TYPE_MASK);
-               *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, SK_FL_TYPE_SHIFT);
+               *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
+               *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
                break;
 
        case offsetof(struct bpf_sock, protocol):
-               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+               *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
                                      offsetof(struct sock, __sk_flags_offset));
-               *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, SK_FL_PROTO_MASK);
-               *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, SK_FL_PROTO_SHIFT);
+               *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
+               *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
                break;
        }
 
        return insn - insn_buf;
 }
 
-static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type, int dst_reg,
-                                        int src_reg, int ctx_off,
+static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
+                                        const struct bpf_insn *si,
                                         struct bpf_insn *insn_buf,
                                         struct bpf_prog *prog)
 {
        struct bpf_insn *insn = insn_buf;
 
-       switch (ctx_off) {
+       switch (si->off) {
        case offsetof(struct __sk_buff, ifindex):
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
 
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
-                                     dst_reg, src_reg,
+                                     si->dst_reg, si->src_reg,
                                      offsetof(struct sk_buff, dev));
-               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg,
+               *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
                                      offsetof(struct net_device, ifindex));
                break;
        default:
-               return sk_filter_convert_ctx_access(type, dst_reg, src_reg,
-                                                   ctx_off, insn_buf, prog);
+               return bpf_convert_ctx_access(type, si, insn_buf, prog);
        }
 
        return insn - insn_buf;
 }
 
-static u32 xdp_convert_ctx_access(enum bpf_access_type type, int dst_reg,
-                                 int src_reg, int ctx_off,
+static u32 xdp_convert_ctx_access(enum bpf_access_type type,
+                                 const struct bpf_insn *si,
                                  struct bpf_insn *insn_buf,
                                  struct bpf_prog *prog)
 {
        struct bpf_insn *insn = insn_buf;
 
-       switch (ctx_off) {
+       switch (si->off) {
        case offsetof(struct xdp_md, data):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
-                                     dst_reg, src_reg,
+                                     si->dst_reg, si->src_reg,
                                      offsetof(struct xdp_buff, data));
                break;
        case offsetof(struct xdp_md, data_end):
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
-                                     dst_reg, src_reg,
+                                     si->dst_reg, si->src_reg,
                                      offsetof(struct xdp_buff, data_end));
                break;
        }
@@ -3219,7 +3255,7 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, int dst_reg,
 static const struct bpf_verifier_ops sk_filter_ops = {
        .get_func_proto         = sk_filter_func_proto,
        .is_valid_access        = sk_filter_is_valid_access,
-       .convert_ctx_access     = sk_filter_convert_ctx_access,
+       .convert_ctx_access     = bpf_convert_ctx_access,
 };
 
 static const struct bpf_verifier_ops tc_cls_act_ops = {
@@ -3238,69 +3274,69 @@ static const struct bpf_verifier_ops xdp_ops = {
 static const struct bpf_verifier_ops cg_skb_ops = {
        .get_func_proto         = cg_skb_func_proto,
        .is_valid_access        = sk_filter_is_valid_access,
-       .convert_ctx_access     = sk_filter_convert_ctx_access,
+       .convert_ctx_access     = bpf_convert_ctx_access,
 };
 
 static const struct bpf_verifier_ops lwt_inout_ops = {
        .get_func_proto         = lwt_inout_func_proto,
        .is_valid_access        = lwt_is_valid_access,
-       .convert_ctx_access     = sk_filter_convert_ctx_access,
+       .convert_ctx_access     = bpf_convert_ctx_access,
 };
 
 static const struct bpf_verifier_ops lwt_xmit_ops = {
        .get_func_proto         = lwt_xmit_func_proto,
        .is_valid_access        = lwt_is_valid_access,
-       .convert_ctx_access     = sk_filter_convert_ctx_access,
+       .convert_ctx_access     = bpf_convert_ctx_access,
        .gen_prologue           = tc_cls_act_prologue,
 };
 
 static const struct bpf_verifier_ops cg_sock_ops = {
-       .get_func_proto         = sk_filter_func_proto,
+       .get_func_proto         = bpf_base_func_proto,
        .is_valid_access        = sock_filter_is_valid_access,
        .convert_ctx_access     = sock_filter_convert_ctx_access,
 };
 
-static struct bpf_prog_type_list sk_filter_type __read_mostly = {
+static struct bpf_prog_type_list sk_filter_type __ro_after_init = {
        .ops    = &sk_filter_ops,
        .type   = BPF_PROG_TYPE_SOCKET_FILTER,
 };
 
-static struct bpf_prog_type_list sched_cls_type __read_mostly = {
+static struct bpf_prog_type_list sched_cls_type __ro_after_init = {
        .ops    = &tc_cls_act_ops,
        .type   = BPF_PROG_TYPE_SCHED_CLS,
 };
 
-static struct bpf_prog_type_list sched_act_type __read_mostly = {
+static struct bpf_prog_type_list sched_act_type __ro_after_init = {
        .ops    = &tc_cls_act_ops,
        .type   = BPF_PROG_TYPE_SCHED_ACT,
 };
 
-static struct bpf_prog_type_list xdp_type __read_mostly = {
+static struct bpf_prog_type_list xdp_type __ro_after_init = {
        .ops    = &xdp_ops,
        .type   = BPF_PROG_TYPE_XDP,
 };
 
-static struct bpf_prog_type_list cg_skb_type __read_mostly = {
+static struct bpf_prog_type_list cg_skb_type __ro_after_init = {
        .ops    = &cg_skb_ops,
        .type   = BPF_PROG_TYPE_CGROUP_SKB,
 };
 
-static struct bpf_prog_type_list lwt_in_type __read_mostly = {
+static struct bpf_prog_type_list lwt_in_type __ro_after_init = {
        .ops    = &lwt_inout_ops,
        .type   = BPF_PROG_TYPE_LWT_IN,
 };
 
-static struct bpf_prog_type_list lwt_out_type __read_mostly = {
+static struct bpf_prog_type_list lwt_out_type __ro_after_init = {
        .ops    = &lwt_inout_ops,
        .type   = BPF_PROG_TYPE_LWT_OUT,
 };
 
-static struct bpf_prog_type_list lwt_xmit_type __read_mostly = {
+static struct bpf_prog_type_list lwt_xmit_type __ro_after_init = {
        .ops    = &lwt_xmit_ops,
        .type   = BPF_PROG_TYPE_LWT_XMIT,
 };
 
-static struct bpf_prog_type_list cg_sock_type __read_mostly = {
+static struct bpf_prog_type_list cg_sock_type __ro_after_init = {
        .ops    = &cg_sock_ops,
        .type   = BPF_PROG_TYPE_CGROUP_SOCK
 };
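The __read_mostly -> __ro_after_init conversions rely on these type lists being written only during boot-time registration; __ro_after_init data is remapped read-only once init completes. A generic sketch of the attribute's contract (demo names are illustrative):

#include <linux/cache.h>
#include <linux/init.h>

static int demo_param __ro_after_init;

static int __init demo_setup(void)
{
	demo_param = 42;	/* last legal write: before init finishes */
	return 0;
}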
index 1b7673aac59d51a5f8b5ef3f2076f1440c017fae..c35aae13c8d22680cb07222cbd9f1ee976f0bd64 100644 (file)
@@ -138,6 +138,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
        struct flow_dissector_key_control *key_control;
        struct flow_dissector_key_basic *key_basic;
        struct flow_dissector_key_addrs *key_addrs;
+       struct flow_dissector_key_arp *key_arp;
        struct flow_dissector_key_ports *key_ports;
        struct flow_dissector_key_icmp *key_icmp;
        struct flow_dissector_key_tags *key_tags;
@@ -379,6 +380,62 @@ mpls:
 
                nhoff += FCOE_HEADER_LEN;
                goto out_good;
+
+       case htons(ETH_P_ARP):
+       case htons(ETH_P_RARP): {
+               struct {
+                       unsigned char ar_sha[ETH_ALEN];
+                       unsigned char ar_sip[4];
+                       unsigned char ar_tha[ETH_ALEN];
+                       unsigned char ar_tip[4];
+               } *arp_eth, _arp_eth;
+               const struct arphdr *arp;
+               struct arphdr _arp;
+
+               arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
+                                          hlen, &_arp);
+               if (!arp)
+                       goto out_bad;
+
+               if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
+                   arp->ar_pro != htons(ETH_P_IP) ||
+                   arp->ar_hln != ETH_ALEN ||
+                   arp->ar_pln != 4 ||
+                   (arp->ar_op != htons(ARPOP_REPLY) &&
+                    arp->ar_op != htons(ARPOP_REQUEST)))
+                       goto out_bad;
+
+               arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
+                                              sizeof(_arp_eth), data,
+                                              hlen,
+                                              &_arp_eth);
+               if (!arp_eth)
+                       goto out_bad;
+
+               if (dissector_uses_key(flow_dissector,
+                                      FLOW_DISSECTOR_KEY_ARP)) {
+
+                       key_arp = skb_flow_dissector_target(flow_dissector,
+                                                           FLOW_DISSECTOR_KEY_ARP,
+                                                           target_container);
+
+                       memcpy(&key_arp->sip, arp_eth->ar_sip,
+                              sizeof(key_arp->sip));
+                       memcpy(&key_arp->tip, arp_eth->ar_tip,
+                              sizeof(key_arp->tip));
+
+                       /* Only store the lower byte of the opcode;
+                        * this covers ARPOP_REPLY and ARPOP_REQUEST.
+                        */
+                       key_arp->op = ntohs(arp->ar_op) & 0xff;
+
+                       ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
+                       ether_addr_copy(key_arp->tha, arp_eth->ar_tha);
+               }
+
+               goto out_good;
+       }
+
        default:
                goto out_bad;
        }
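The new FLOW_DISSECTOR_KEY_ARP case exposes the ARP/RARP sender and target addresses plus the opcode to dissector users. A sketch of how a consumer might request the key (the container struct and names are hypothetical):

#include <linux/stddef.h>
#include <net/flow_dissector.h>

struct demo_flow_keys {
	struct flow_dissector_key_control control;
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_arp arp;
};

static const struct flow_dissector_key demo_keys[] = {
	{ FLOW_DISSECTOR_KEY_CONTROL, offsetof(struct demo_flow_keys, control) },
	{ FLOW_DISSECTOR_KEY_BASIC,   offsetof(struct demo_flow_keys, basic) },
	{ FLOW_DISSECTOR_KEY_ARP,     offsetof(struct demo_flow_keys, arp) },
};

/* after skb_flow_dissector_init(&dissector, demo_keys, ARRAY_SIZE(demo_keys)),
 * __skb_flow_dissect() fills demo_flow_keys.arp for ARP/RARP traffic */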
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
new file mode 100644 (file)
index 0000000..c98bbfb
--- /dev/null
@@ -0,0 +1,92 @@
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <net/gro_cells.h>
+
+struct gro_cell {
+       struct sk_buff_head     napi_skbs;
+       struct napi_struct      napi;
+};
+
+int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+       struct gro_cell *cell;
+
+       if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO))
+               return netif_rx(skb);
+
+       cell = this_cpu_ptr(gcells->cells);
+
+       if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+               atomic_long_inc(&dev->rx_dropped);
+               kfree_skb(skb);
+               return NET_RX_DROP;
+       }
+
+       __skb_queue_tail(&cell->napi_skbs, skb);
+       if (skb_queue_len(&cell->napi_skbs) == 1)
+               napi_schedule(&cell->napi);
+       return NET_RX_SUCCESS;
+}
+EXPORT_SYMBOL(gro_cells_receive);
+
+/* called under BH context */
+static int gro_cell_poll(struct napi_struct *napi, int budget)
+{
+       struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
+       struct sk_buff *skb;
+       int work_done = 0;
+
+       while (work_done < budget) {
+               skb = __skb_dequeue(&cell->napi_skbs);
+               if (!skb)
+                       break;
+               napi_gro_receive(napi, skb);
+               work_done++;
+       }
+
+       if (work_done < budget)
+               napi_complete_done(napi, work_done);
+       return work_done;
+}
+
+int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
+{
+       int i;
+
+       gcells->cells = alloc_percpu(struct gro_cell);
+       if (!gcells->cells)
+               return -ENOMEM;
+
+       for_each_possible_cpu(i) {
+               struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
+
+               __skb_queue_head_init(&cell->napi_skbs);
+
+               set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);
+
+               netif_napi_add(dev, &cell->napi, gro_cell_poll,
+                              NAPI_POLL_WEIGHT);
+               napi_enable(&cell->napi);
+       }
+       return 0;
+}
+EXPORT_SYMBOL(gro_cells_init);
+
+void gro_cells_destroy(struct gro_cells *gcells)
+{
+       int i;
+
+       if (!gcells->cells)
+               return;
+       for_each_possible_cpu(i) {
+               struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
+
+               netif_napi_del(&cell->napi);
+               __skb_queue_purge(&cell->napi_skbs);
+       }
+       free_percpu(gcells->cells);
+       gcells->cells = NULL;
+}
+EXPORT_SYMBOL(gro_cells_destroy);
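This new file moves the gro_cells helpers out of line. The intended use is unchanged: a tunnel driver allocates the per-cpu cells at init, feeds decapsulated skbs through gro_cells_receive() so they get batched by GRO, and tears the cells down on uninit. A sketch with hypothetical tunnel-device names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/gro_cells.h>

struct demo_tunnel {			/* hypothetical private data */
	struct gro_cells gro_cells;
};

static int demo_tunnel_init(struct net_device *dev)
{
	struct demo_tunnel *t = netdev_priv(dev);

	return gro_cells_init(&t->gro_cells, dev);
}

static void demo_tunnel_uninit(struct net_device *dev)
{
	struct demo_tunnel *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
}

/* rx path: queue to the per-cpu cell instead of calling netif_rx() */
static int demo_tunnel_rx(struct sk_buff *skb)
{
	struct demo_tunnel *t = netdev_priv(skb->dev);

	return gro_cells_receive(&t->gro_cells, skb);
}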
index b3eef90b2df9d05b62750d3d6fafd7b096f92bd9..0cfe7b0216c3522a8d05d404423de939ad2cc2d5 100644 (file)
@@ -237,7 +237,7 @@ static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = {
        [LWT_BPF_XMIT_HEADROOM] = { .type = NLA_U32 },
 };
 
-static int bpf_build_state(struct net_device *dev, struct nlattr *nla,
+static int bpf_build_state(struct nlattr *nla,
                           unsigned int family, const void *cfg,
                           struct lwtunnel_state **ts)
 {
@@ -352,7 +352,7 @@ static int bpf_encap_nlsize(struct lwtunnel_state *lwtstate)
               0;
 }
 
-int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
+static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
 {
        /* FIXME:
         * The LWT state is currently rebuilt for delete requests which
index c23465005f2f4ced93d7bcb2754fb267c2cf00d0..6df9f8fabf0ca5d2ced3070406900b7ec28a7924 100644 (file)
@@ -101,7 +101,7 @@ int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops,
 }
 EXPORT_SYMBOL(lwtunnel_encap_del_ops);
 
-int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+int lwtunnel_build_state(u16 encap_type,
                         struct nlattr *encap, unsigned int family,
                         const void *cfg, struct lwtunnel_state **lws)
 {
@@ -116,7 +116,7 @@ int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
        rcu_read_lock();
        ops = rcu_dereference(lwtun_encaps[encap_type]);
        if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
-               ret = ops->build_state(dev, encap, family, cfg, lws);
+               ret = ops->build_state(encap, family, cfg, lws);
                if (ret)
                        module_put(ops->owner);
        }
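Dropping the net_device argument lets encapsulation state be built before any device is resolved. A sketch of an encap ops implementation against the new signature (the demo names are hypothetical):

#include <linux/module.h>
#include <net/lwtunnel.h>

static int demo_build_state(struct nlattr *nla, unsigned int family,
			    const void *cfg, struct lwtunnel_state **ts)
{
	/* parse nla, allocate *ts (e.g. via lwtunnel_state_alloc()), ... */
	return -EOPNOTSUPP;
}

static const struct lwtunnel_encap_ops demo_encap_ops = {
	.build_state	= demo_build_state,
	.owner		= THIS_MODULE,
};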
index 2ec86fc552df6e2bd07981fb2ade76305d2545b6..756637dc7a5769ea7041e3c2a67221d79668f751 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/slab.h>
 #include <linux/types.h>
+#include <linux/module.h>
 #include <linux/string.h>
 #include <linux/errno.h>
 #include <linux/skbuff.h>
index 8e69ce4722364e77f78dd3da256680df37c733c0..96947f5d41e42a9a42f91c40457b87824756510e 100644 (file)
@@ -3439,9 +3439,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
                        /* skb was 'freed' by stack, so clean few
                         * bits and reuse it
                         */
-#ifdef CONFIG_NET_CLS_ACT
-                       skb->tc_verd = 0; /* reset reclass/redir ttl */
-#endif
+                       skb_reset_tc(skb);
                } while (--burst > 0);
                goto out; /* Skips xmit_mode M_START_XMIT */
        } else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
index 5d26056b6d8f01f4db815423d2d78f6f7d25d55f..9b8727c67b58011227cc09098d96a40fee5c5d69 100644 (file)
@@ -34,8 +34,6 @@
  * and it will increase in proportion to the memory of the machine.
  * Note: Don't forget somaxconn, which may limit the backlog too.
  */
-int sysctl_max_syn_backlog = 256;
-EXPORT_SYMBOL(sysctl_max_syn_backlog);
 
 void reqsk_queue_alloc(struct request_sock_queue *queue)
 {
index 75e3ea7bda08f39e07d515768b56678842e74c40..e3286d32eca57d562e4cb79f9aec0afd202a6977 100644 (file)
@@ -837,8 +837,7 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
 static inline int rtnl_vfinfo_size(const struct net_device *dev,
                                   u32 ext_filter_mask)
 {
-       if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
-           (ext_filter_mask & RTEXT_FILTER_VF)) {
+       if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
                int num_vfs = dev_num_vf(dev->dev.parent);
                size_t size = nla_total_size(0);
                size += num_vfs *
@@ -877,8 +876,6 @@ static size_t rtnl_port_size(const struct net_device *dev,
 {
        size_t port_size = nla_total_size(4)            /* PORT_VF */
                + nla_total_size(PORT_PROFILE_MAX)      /* PORT_PROFILE */
-               + nla_total_size(sizeof(struct ifla_port_vsi))
-                                                       /* PORT_VSI_TYPE */
                + nla_total_size(PORT_UUID_MAX)         /* PORT_INSTANCE_UUID */
                + nla_total_size(PORT_UUID_MAX)         /* PORT_HOST_UUID */
                + nla_total_size(1)                     /* PROT_VDP_REQUEST */
@@ -1492,14 +1489,19 @@ static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
        [IFLA_PORT_VF]          = { .type = NLA_U32 },
        [IFLA_PORT_PROFILE]     = { .type = NLA_STRING,
                                    .len = PORT_PROFILE_MAX },
-       [IFLA_PORT_VSI_TYPE]    = { .type = NLA_BINARY,
-                                   .len = sizeof(struct ifla_port_vsi)},
        [IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
                                      .len = PORT_UUID_MAX },
        [IFLA_PORT_HOST_UUID]   = { .type = NLA_STRING,
                                    .len = PORT_UUID_MAX },
        [IFLA_PORT_REQUEST]     = { .type = NLA_U8, },
        [IFLA_PORT_RESPONSE]    = { .type = NLA_U16, },
+
+       /* Unused, but we need to keep it here since user space could
+        * fill it. It's also broken with regard to NLA_BINARY use in
+        * combination with structs.
+        */
+       [IFLA_PORT_VSI_TYPE]    = { .type = NLA_BINARY,
+                                   .len = sizeof(struct ifla_port_vsi) },
 };
 
 static const struct nla_policy ifla_xdp_policy[IFLA_XDP_MAX + 1] = {
@@ -2571,7 +2573,7 @@ replay:
                        return -ENODEV;
                }
 
-               if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
+               if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
                        return -EOPNOTSUPP;
 
                if (!ops) {
@@ -2653,6 +2655,11 @@ replay:
                        if (err < 0)
                                goto out_unregister;
                }
+               if (tb[IFLA_MASTER]) {
+                       err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
+                       if (err)
+                               goto out_unregister;
+               }
 out:
                if (link_net)
                        put_net(link_net);
@@ -3829,6 +3836,39 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
                *idxattr = 0;
        }
 
+       if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
+               struct rtnl_af_ops *af_ops;
+
+               *idxattr = IFLA_STATS_AF_SPEC;
+               attr = nla_nest_start(skb, IFLA_STATS_AF_SPEC);
+               if (!attr)
+                       goto nla_put_failure;
+
+               list_for_each_entry(af_ops, &rtnl_af_ops, list) {
+                       if (af_ops->fill_stats_af) {
+                               struct nlattr *af;
+                               int err;
+
+                               af = nla_nest_start(skb, af_ops->family);
+                               if (!af)
+                                       goto nla_put_failure;
+
+                               err = af_ops->fill_stats_af(skb, dev);
+
+                               if (err == -ENODATA)
+                                       nla_nest_cancel(skb, af);
+                               else if (err < 0)
+                                       goto nla_put_failure;
+
+                               nla_nest_end(skb, af);
+                       }
+               }
+
+               nla_nest_end(skb, attr);
+
+               *idxattr = 0;
+       }
+
        nlmsg_end(skb, nlh);
 
        return 0;
@@ -3885,6 +3925,23 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev,
        if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
                size += rtnl_get_offload_stats_size(dev);
 
+       if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
+               struct rtnl_af_ops *af_ops;
+
+               /* for IFLA_STATS_AF_SPEC */
+               size += nla_total_size(0);
+
+               list_for_each_entry(af_ops, &rtnl_af_ops, list) {
+                       if (af_ops->get_stats_af_size) {
+                               size += nla_total_size(
+                                       af_ops->get_stats_af_size(dev));
+
+                               /* for AF_* */
+                               size += nla_total_size(0);
+                       }
+               }
+       }
+
        return size;
 }
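The IFLA_STATS_AF_SPEC nest lets each address family attach per-device statistics to RTM_GETSTATS replies through two rtnl_af_ops hooks, with -ENODATA from fill_stats_af() cancelling the family's nest cleanly. A sketch of a family implementing both hooks (the attribute layout is hypothetical):

#include <linux/socket.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>

static size_t demo_get_stats_af_size(const struct net_device *dev)
{
	return nla_total_size_64bit(sizeof(u64));	/* one demo counter */
}

static int demo_fill_stats_af(struct sk_buff *skb,
			      const struct net_device *dev)
{
	return nla_put_u64_64bit(skb, 1 /* demo attr type */, 0, 0);
}

static struct rtnl_af_ops demo_af_ops = {
	.family			= AF_INET,	/* illustrative */
	.fill_stats_af		= demo_fill_stats_af,
	.get_stats_af_size	= demo_get_stats_af_size,
};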
 
index d8820438ba374be7e723512635b40695853e4807..b6d83686e1496d945ce5362908e7699fb98ea10e 100644 (file)
@@ -71,7 +71,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
        struct file **fpp;
        int i, num;
 
-       num = (cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)))/sizeof(int);
+       num = (cmsg->cmsg_len - sizeof(struct cmsghdr))/sizeof(int);
 
        if (num <= 0)
                return 0;
index 88a8e429fc3e6d5779a5fa989dfecb330acb1cbe..758f140b6bedc51669fed973b39ee317c2bf1570 100644 (file)
@@ -1,3 +1,7 @@
+/*
+ * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/cryptohash.h>
 #include <linux/ktime.h>
 #include <linux/string.h>
 #include <linux/net.h>
-
+#include <linux/siphash.h>
 #include <net/secure_seq.h>
 
 #if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET)
+#include <linux/in6.h>
 #include <net/tcp.h>
-#define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4)
 
-static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
+static siphash_key_t net_secret __read_mostly;
 
 static __always_inline void net_secret_init(void)
 {
-       net_get_random_once(net_secret, sizeof(net_secret));
+       net_get_random_once(&net_secret, sizeof(net_secret));
 }
 #endif
 
@@ -44,80 +48,70 @@ static u32 seq_scale(u32 seq)
 u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
                                 __be16 sport, __be16 dport, u32 *tsoff)
 {
-       u32 secret[MD5_MESSAGE_BYTES / 4];
-       u32 hash[MD5_DIGEST_WORDS];
-       u32 i;
-
+       const struct {
+               struct in6_addr saddr;
+               struct in6_addr daddr;
+               __be16 sport;
+               __be16 dport;
+       } __aligned(SIPHASH_ALIGNMENT) combined = {
+               .saddr = *(struct in6_addr *)saddr,
+               .daddr = *(struct in6_addr *)daddr,
+               .sport = sport,
+               .dport = dport
+       };
+       u64 hash;
        net_secret_init();
-       memcpy(hash, saddr, 16);
-       for (i = 0; i < 4; i++)
-               secret[i] = net_secret[i] + (__force u32)daddr[i];
-       secret[4] = net_secret[4] +
-               (((__force u16)sport << 16) + (__force u16)dport);
-       for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
-               secret[i] = net_secret[i];
-
-       md5_transform(hash, secret);
-
-       *tsoff = sysctl_tcp_timestamps == 1 ? hash[1] : 0;
-       return seq_scale(hash[0]);
+       hash = siphash(&combined, offsetofend(typeof(combined), dport),
+                      &net_secret);
+       *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0;
+       return seq_scale(hash);
 }
 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
 
 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
                               __be16 dport)
 {
-       u32 secret[MD5_MESSAGE_BYTES / 4];
-       u32 hash[MD5_DIGEST_WORDS];
-       u32 i;
-
+       const struct {
+               struct in6_addr saddr;
+               struct in6_addr daddr;
+               __be16 dport;
+       } __aligned(SIPHASH_ALIGNMENT) combined = {
+               .saddr = *(struct in6_addr *)saddr,
+               .daddr = *(struct in6_addr *)daddr,
+               .dport = dport
+       };
        net_secret_init();
-       memcpy(hash, saddr, 16);
-       for (i = 0; i < 4; i++)
-               secret[i] = net_secret[i] + (__force u32) daddr[i];
-       secret[4] = net_secret[4] + (__force u32)dport;
-       for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
-               secret[i] = net_secret[i];
-
-       md5_transform(hash, secret);
-
-       return hash[0];
+       return siphash(&combined, offsetofend(typeof(combined), dport),
+                      &net_secret);
 }
 EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
 #endif
 
 #ifdef CONFIG_INET
 
+/* secure_tcp_sequence_number(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
+ * but fortunately, `sport' cannot be 0 in any circumstances. If this changes,
+ * it would be easy enough to have the former function use siphash_4u32, passing
+ * the arguments as separate u32.
+ */
+
 u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
                               __be16 sport, __be16 dport, u32 *tsoff)
 {
-       u32 hash[MD5_DIGEST_WORDS];
-
+       u64 hash;
        net_secret_init();
-       hash[0] = (__force u32)saddr;
-       hash[1] = (__force u32)daddr;
-       hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
-       hash[3] = net_secret[15];
-
-       md5_transform(hash, net_secret);
-
-       *tsoff = sysctl_tcp_timestamps == 1 ? hash[1] : 0;
-       return seq_scale(hash[0]);
+       hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
+                           (__force u32)sport << 16 | (__force u32)dport,
+                           &net_secret);
+       *tsoff = sysctl_tcp_timestamps == 1 ? (hash >> 32) : 0;
+       return seq_scale(hash);
 }
 
 u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
 {
-       u32 hash[MD5_DIGEST_WORDS];
-
        net_secret_init();
-       hash[0] = (__force u32)saddr;
-       hash[1] = (__force u32)daddr;
-       hash[2] = (__force u32)dport ^ net_secret[14];
-       hash[3] = net_secret[15];
-
-       md5_transform(hash, net_secret);
-
-       return hash[0];
+       return siphash_3u32((__force u32)saddr, (__force u32)daddr,
+                           (__force u16)dport, &net_secret);
 }
 EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
 #endif
@@ -126,21 +120,13 @@ EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
 u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
                                __be16 sport, __be16 dport)
 {
-       u32 hash[MD5_DIGEST_WORDS];
        u64 seq;
-
        net_secret_init();
-       hash[0] = (__force u32)saddr;
-       hash[1] = (__force u32)daddr;
-       hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
-       hash[3] = net_secret[15];
-
-       md5_transform(hash, net_secret);
-
-       seq = hash[0] | (((u64)hash[1]) << 32);
+       seq = siphash_3u32((__force u32)saddr, (__force u32)daddr,
+                          (__force u32)sport << 16 | (__force u32)dport,
+                          &net_secret);
        seq += ktime_get_real_ns();
        seq &= (1ull << 48) - 1;
-
        return seq;
 }
 EXPORT_SYMBOL(secure_dccp_sequence_number);
@@ -149,26 +135,23 @@ EXPORT_SYMBOL(secure_dccp_sequence_number);
 u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
                                  __be16 sport, __be16 dport)
 {
-       u32 secret[MD5_MESSAGE_BYTES / 4];
-       u32 hash[MD5_DIGEST_WORDS];
+       const struct {
+               struct in6_addr saddr;
+               struct in6_addr daddr;
+               __be16 sport;
+               __be16 dport;
+       } __aligned(SIPHASH_ALIGNMENT) combined = {
+               .saddr = *(struct in6_addr *)saddr,
+               .daddr = *(struct in6_addr *)daddr,
+               .sport = sport,
+               .dport = dport
+       };
        u64 seq;
-       u32 i;
-
        net_secret_init();
-       memcpy(hash, saddr, 16);
-       for (i = 0; i < 4; i++)
-               secret[i] = net_secret[i] + (__force u32)daddr[i];
-       secret[4] = net_secret[4] +
-               (((__force u16)sport << 16) + (__force u16)dport);
-       for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
-               secret[i] = net_secret[i];
-
-       md5_transform(hash, secret);
-
-       seq = hash[0] | (((u64)hash[1]) << 32);
+       seq = siphash(&combined, offsetofend(typeof(combined), dport),
+                     &net_secret);
        seq += ktime_get_real_ns();
        seq &= (1ull << 48) - 1;
-
        return seq;
 }
 EXPORT_SYMBOL(secure_dccpv6_sequence_number);
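Throughout this file, the MD5 mixing is replaced by keyed SipHash, either over a packed struct aligned to SIPHASH_ALIGNMENT and hashed up to its last field via offsetofend(), or through the siphash_3u32() fast path for small inputs. A condensed sketch of the pattern (demo names are illustrative):

#include <linux/net.h>
#include <linux/siphash.h>

static siphash_key_t demo_secret __read_mostly;

static u32 demo_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
{
	net_get_random_once(&demo_secret, sizeof(demo_secret));
	return siphash_3u32((__force u32)saddr, (__force u32)daddr,
			    (__force u16)dport, &demo_secret);
}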
index 734c71468b013838516cfe8c744dcd0e797a6e2b..f3557958e9bf147631a90b51fef0630920acd97b 100644 (file)
@@ -271,7 +271,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                atomic_set(&fclones->fclone_ref, 1);
 
                fclones->skb2.fclone = SKB_FCLONE_CLONE;
-               fclones->skb2.pfmemalloc = pfmemalloc;
        }
 out:
        return skb;
@@ -655,7 +654,7 @@ static void skb_release_head_state(struct sk_buff *skb)
                skb->destructor(skb);
        }
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-       nf_conntrack_put(skb->nfct);
+       nf_conntrack_put(skb_nfct(skb));
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        nf_bridge_put(skb->nf_bridge);
@@ -878,9 +877,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 #endif
 #ifdef CONFIG_NET_SCHED
        CHECK_SKB_FIELD(tc_index);
-#ifdef CONFIG_NET_CLS_ACT
-       CHECK_SKB_FIELD(tc_verd);
-#endif
 #endif
 
 }
@@ -1195,10 +1191,10 @@ EXPORT_SYMBOL(__pskb_copy_fclone);
 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
                     gfp_t gfp_mask)
 {
-       int i;
-       u8 *data;
-       int size = nhead + skb_end_offset(skb) + ntail;
+       int i, osize = skb_end_offset(skb);
+       int size = osize + nhead + ntail;
        long off;
+       u8 *data;
 
        BUG_ON(nhead < 0);
 
@@ -1260,6 +1256,14 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
        skb->hdr_len  = 0;
        skb->nohdr    = 0;
        atomic_set(&skb_shinfo(skb)->dataref, 1);
+
+       /* It is not generally safe to change skb->truesize.
+        * For the moment, we only really care about the rx path, or
+        * when skb is orphaned (not attached to a socket).
+        */
+       if (!skb->sk || skb->destructor == sock_edemux)
+               skb->truesize += size - osize;
+
        return 0;
 
 nofrags:
index 4eca27dc5c9478e36120a5128a7c11d6208b45a9..b74356535559cb763f8fa40489b0936a1fea6c15 100644 (file)
@@ -222,7 +222,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
   "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_KCM"      ,
-  "sk_lock-AF_QIPCRTR", "sk_lock-AF_MAX"
+  "sk_lock-AF_QIPCRTR", "sk_lock-AF_SMC"     , "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
@@ -239,7 +239,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
   "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_KCM"       ,
-  "slock-AF_QIPCRTR", "slock-AF_MAX"
+  "slock-AF_QIPCRTR", "slock-AF_SMC"     , "slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
@@ -256,7 +256,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
   "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_KCM"      ,
-  "clock-AF_QIPCRTR", "clock-AF_MAX"
+  "clock-AF_QIPCRTR", "clock-AF_SMC"     , "clock-AF_MAX"
 };
 
 /*
@@ -502,6 +502,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 
        if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
                sk_tx_queue_clear(sk);
+               sk->sk_dst_pending_confirm = 0;
                RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
                dst_release(dst);
                return NULL;
@@ -762,11 +763,8 @@ set_rcvbuf:
                goto set_rcvbuf;
 
        case SO_KEEPALIVE:
-#ifdef CONFIG_INET
-               if (sk->sk_protocol == IPPROTO_TCP &&
-                   sk->sk_type == SOCK_STREAM)
-                       tcp_set_keepalive(sk, valbool);
-#endif
+               if (sk->sk_prot->keepalive)
+                       sk->sk_prot->keepalive(sk, valbool);
                sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
                break;
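SO_KEEPALIVE no longer special-cases TCP: any protocol may supply a keepalive hook in its struct proto, and sock_setsockopt() calls it when present. A sketch, reusing tcp_set_keepalive (the in-tree callback) purely for illustration:

#include <net/sock.h>
#include <net/tcp.h>

static struct proto demo_prot = {
	.name		= "DEMO",
	/* void (*keepalive)(struct sock *sk, int valbool); */
	.keepalive	= tcp_set_keepalive,
	/* remaining mandatory ops elided in this sketch */
};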
 
@@ -1522,6 +1520,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                                af_family_clock_key_strings[newsk->sk_family]);
 
                newsk->sk_dst_cache     = NULL;
+               newsk->sk_dst_pending_confirm = 0;
                newsk->sk_wmem_queued   = 0;
                newsk->sk_forward_alloc = 0;
                atomic_set(&newsk->sk_drops, 0);
index 2a46e4009f62d8c2ac8949789ae9626b0c016a11..4ead336e14ea0b8fc5fdcf8e679da54dfca0716b 100644 (file)
@@ -222,6 +222,21 @@ static int set_default_qdisc(struct ctl_table *table, int write,
 }
 #endif
 
+static int proc_do_dev_weight(struct ctl_table *table, int write,
+                          void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       int ret;
+
+       ret = proc_dointvec(table, write, buffer, lenp, ppos);
+       if (ret != 0)
+               return ret;
+
+       dev_rx_weight = weight_p * dev_weight_rx_bias;
+       dev_tx_weight = weight_p * dev_weight_tx_bias;
+
+       return ret;
+}
+
 static int proc_do_rss_key(struct ctl_table *table, int write,
                           void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -273,7 +288,21 @@ static struct ctl_table net_core_table[] = {
                .data           = &weight_p,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_do_dev_weight,
+       },
+       {
+               .procname       = "dev_weight_rx_bias",
+               .data           = &dev_weight_rx_bias,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_do_dev_weight,
+       },
+       {
+               .procname       = "dev_weight_tx_bias",
+               .data           = &dev_weight_tx_bias,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_do_dev_weight,
        },
        {
                .procname       = "netdev_max_backlog",
@@ -305,6 +334,13 @@ static struct ctl_table net_core_table[] = {
                .mode           = 0600,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "bpf_jit_kallsyms",
+               .data           = &bpf_jit_kallsyms,
+               .maxlen         = sizeof(int),
+               .mode           = 0600,
+               .proc_handler   = proc_dointvec,
+       },
 # endif
 #endif
        {
index d859a5c36e7065c93af8e275ed112f6601e186ec..b043ec833785c9a670315d7c960faf1e6c85ea46 100644 (file)
@@ -904,7 +904,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
        .getsockopt        = ip_getsockopt,
        .addr2sockaddr     = inet_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in),
-       .bind_conflict     = inet_csk_bind_conflict,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ip_setsockopt,
        .compat_getsockopt = compat_ip_getsockopt,
index c4e879c021868719d928f49e2ef2f5c5d1876534..cef60a4a28030d2066ba84e0ef195855d0014fbb 100644 (file)
@@ -937,7 +937,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
-       .bind_conflict     = inet6_csk_bind_conflict,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
@@ -958,7 +957,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
-       .bind_conflict     = inet6_csk_bind_conflict,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
index 96e47c539beed85ac031192f8943f91f8bb86855..9649238eef404095a89d34a006dc0b504bc47038 100644 (file)
@@ -1,12 +1,13 @@
 config HAVE_NET_DSA
        def_bool y
-       depends on NETDEVICES && !S390
+       depends on INET && NETDEVICES && !S390
 
 # Drivers must select NET_DSA and the appropriate tagging format
 
 config NET_DSA
        tristate "Distributed Switch Architecture"
-       depends on HAVE_NET_DSA && NET_SWITCHDEV
+       depends on HAVE_NET_DSA
+       select NET_SWITCHDEV
        select PHYLIB
        ---help---
          Say Y if you want to enable support for the hardware switches supported
@@ -14,17 +15,6 @@ config NET_DSA
 
 if NET_DSA
 
-config NET_DSA_HWMON
-       bool "Distributed Switch Architecture HWMON support"
-       default y
-       depends on HWMON && !(NET_DSA=y && HWMON=m)
-       ---help---
-         Say Y if you want to expose thermal sensor data on switches supported
-         by the Distributed Switch Architecture.
-
-         Some of those switches contain thermal sensors. This data is available
-         via the hwmon sysfs interface and exposes the onboard sensors.
-
 # tagging formats
 config NET_DSA_TAG_BRCM
        bool
index a3380ed0e0be56d1cc3a22be1929c63ba9090288..31d343796251da06c979b3428f275f5911dfb2ab 100644 (file)
@@ -1,6 +1,6 @@
 # the core
 obj-$(CONFIG_NET_DSA) += dsa_core.o
-dsa_core-y += dsa.o slave.o dsa2.o
+dsa_core-y += dsa.o slave.o dsa2.o switch.o
 
 # tagging formats
 dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o
index 7899919cd9f0e3435828b1cca9e2ef897a75a3b7..b6d4f6a23f06c9d794a5eedc4c9f79810d5b06e5 100644 (file)
@@ -9,9 +9,7 @@
  * (at your option) any later version.
  */
 
-#include <linux/ctype.h>
 #include <linux/device.h>
-#include <linux/hwmon.h>
 #include <linux/list.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -27,8 +25,6 @@
 #include <linux/gpio/consumer.h>
 #include "dsa_priv.h"
 
-char dsa_driver_version[] = "0.1";
-
 static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb,
                                            struct net_device *dev)
 {
@@ -64,27 +60,27 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = {
 static DEFINE_MUTEX(dsa_switch_drivers_mutex);
 static LIST_HEAD(dsa_switch_drivers);
 
-void register_switch_driver(struct dsa_switch_ops *ops)
+void register_switch_driver(struct dsa_switch_driver *drv)
 {
        mutex_lock(&dsa_switch_drivers_mutex);
-       list_add_tail(&ops->list, &dsa_switch_drivers);
+       list_add_tail(&drv->list, &dsa_switch_drivers);
        mutex_unlock(&dsa_switch_drivers_mutex);
 }
 EXPORT_SYMBOL_GPL(register_switch_driver);
 
-void unregister_switch_driver(struct dsa_switch_ops *ops)
+void unregister_switch_driver(struct dsa_switch_driver *drv)
 {
        mutex_lock(&dsa_switch_drivers_mutex);
-       list_del_init(&ops->list);
+       list_del_init(&drv->list);
        mutex_unlock(&dsa_switch_drivers_mutex);
 }
 EXPORT_SYMBOL_GPL(unregister_switch_driver);
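With the ops/driver split, legacy switch drivers now register a struct dsa_switch_driver that merely points at their const dsa_switch_ops. A registration sketch (demo names are hypothetical):

#include <linux/init.h>
#include <net/dsa.h>

static const struct dsa_switch_ops demo_ops = {
	/* .probe, .setup, ... */
};

static struct dsa_switch_driver demo_drv = {
	.ops = &demo_ops,
};

static int __init demo_dsa_init(void)
{
	register_switch_driver(&demo_drv);
	return 0;
}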
 
-static struct dsa_switch_ops *
+static const struct dsa_switch_ops *
 dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr,
                 const char **_name, void **priv)
 {
-       struct dsa_switch_ops *ret;
+       const struct dsa_switch_ops *ret;
        struct list_head *list;
        const char *name;
 
@@ -93,9 +89,11 @@ dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr,
 
        mutex_lock(&dsa_switch_drivers_mutex);
        list_for_each(list, &dsa_switch_drivers) {
-               struct dsa_switch_ops *ops;
+               const struct dsa_switch_ops *ops;
+               struct dsa_switch_driver *drv;
 
-               ops = list_entry(list, struct dsa_switch_ops, list);
+               drv = list_entry(list, struct dsa_switch_driver, list);
+               ops = drv->ops;
 
                name = ops->probe(parent, host_dev, sw_addr, priv);
                if (name != NULL) {
@@ -110,109 +108,11 @@ dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr,
        return ret;
 }
 
-/* hwmon support ************************************************************/
-
-#ifdef CONFIG_NET_DSA_HWMON
-
-static ssize_t temp1_input_show(struct device *dev,
-                               struct device_attribute *attr, char *buf)
-{
-       struct dsa_switch *ds = dev_get_drvdata(dev);
-       int temp, ret;
-
-       ret = ds->ops->get_temp(ds, &temp);
-       if (ret < 0)
-               return ret;
-
-       return sprintf(buf, "%d\n", temp * 1000);
-}
-static DEVICE_ATTR_RO(temp1_input);
-
-static ssize_t temp1_max_show(struct device *dev,
-                             struct device_attribute *attr, char *buf)
-{
-       struct dsa_switch *ds = dev_get_drvdata(dev);
-       int temp, ret;
-
-       ret = ds->ops->get_temp_limit(ds, &temp);
-       if (ret < 0)
-               return ret;
-
-       return sprintf(buf, "%d\n", temp * 1000);
-}
-
-static ssize_t temp1_max_store(struct device *dev,
-                              struct device_attribute *attr, const char *buf,
-                              size_t count)
-{
-       struct dsa_switch *ds = dev_get_drvdata(dev);
-       int temp, ret;
-
-       ret = kstrtoint(buf, 0, &temp);
-       if (ret < 0)
-               return ret;
-
-       ret = ds->ops->set_temp_limit(ds, DIV_ROUND_CLOSEST(temp, 1000));
-       if (ret < 0)
-               return ret;
-
-       return count;
-}
-static DEVICE_ATTR_RW(temp1_max);
-
-static ssize_t temp1_max_alarm_show(struct device *dev,
-                                   struct device_attribute *attr, char *buf)
-{
-       struct dsa_switch *ds = dev_get_drvdata(dev);
-       bool alarm;
-       int ret;
-
-       ret = ds->ops->get_temp_alarm(ds, &alarm);
-       if (ret < 0)
-               return ret;
-
-       return sprintf(buf, "%d\n", alarm);
-}
-static DEVICE_ATTR_RO(temp1_max_alarm);
-
-static struct attribute *dsa_hwmon_attrs[] = {
-       &dev_attr_temp1_input.attr,     /* 0 */
-       &dev_attr_temp1_max.attr,       /* 1 */
-       &dev_attr_temp1_max_alarm.attr, /* 2 */
-       NULL
-};
-
-static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj,
-                                      struct attribute *attr, int index)
-{
-       struct device *dev = container_of(kobj, struct device, kobj);
-       struct dsa_switch *ds = dev_get_drvdata(dev);
-       struct dsa_switch_ops *ops = ds->ops;
-       umode_t mode = attr->mode;
-
-       if (index == 1) {
-               if (!ops->get_temp_limit)
-                       mode = 0;
-               else if (!ops->set_temp_limit)
-                       mode &= ~S_IWUSR;
-       } else if (index == 2 && !ops->get_temp_alarm) {
-               mode = 0;
-       }
-       return mode;
-}
-
-static const struct attribute_group dsa_hwmon_group = {
-       .attrs = dsa_hwmon_attrs,
-       .is_visible = dsa_hwmon_attrs_visible,
-};
-__ATTRIBUTE_GROUPS(dsa_hwmon);
-
-#endif /* CONFIG_NET_DSA_HWMON */
-
 /* basic switch operations **************************************************/
 int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev,
-                     struct device_node *port_dn, int port)
+                     struct dsa_port *dport, int port)
 {
+       struct device_node *port_dn = dport->dn;
        struct phy_device *phydev;
        int ret, mode;
 
@@ -242,15 +142,15 @@ int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev,
 
 static int dsa_cpu_dsa_setups(struct dsa_switch *ds, struct device *dev)
 {
-       struct device_node *port_dn;
+       struct dsa_port *dport;
        int ret, port;
 
-       for (port = 0; port < DSA_MAX_PORTS; port++) {
+       for (port = 0; port < ds->num_ports; port++) {
                if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
                        continue;
 
-               port_dn = ds->ports[port].dn;
-               ret = dsa_cpu_dsa_setup(ds, dev, port_dn, port);
+               dport = &ds->ports[port];
+               ret = dsa_cpu_dsa_setup(ds, dev, dport, port);
                if (ret)
                        return ret;
        }
@@ -308,7 +208,7 @@ void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds)
 
 static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
 {
-       struct dsa_switch_ops *ops = ds->ops;
+       const struct dsa_switch_ops *ops = ds->ops;
        struct dsa_switch_tree *dst = ds->dst;
        struct dsa_chip_data *cd = ds->cd;
        bool valid_name_found = false;
@@ -318,7 +218,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
        /*
         * Validate supplied switch configuration.
         */
-       for (i = 0; i < DSA_MAX_PORTS; i++) {
+       for (i = 0; i < ds->num_ports; i++) {
                char *name;
 
                name = cd->port_names[i];
@@ -326,13 +226,12 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
                        continue;
 
                if (!strcmp(name, "cpu")) {
-                       if (dst->cpu_switch != -1) {
+                       if (dst->cpu_switch) {
                                netdev_err(dst->master_netdev,
                                           "multiple cpu ports?!\n");
-                               ret = -EINVAL;
-                               goto out;
+                               return -EINVAL;
                        }
-                       dst->cpu_switch = index;
+                       dst->cpu_switch = ds;
                        dst->cpu_port = i;
                        ds->cpu_port_mask |= 1 << i;
                } else if (!strcmp(name, "dsa")) {
@@ -343,10 +242,8 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
                valid_name_found = true;
        }
 
-       if (!valid_name_found && i == DSA_MAX_PORTS) {
-               ret = -EINVAL;
-               goto out;
-       }
+       if (!valid_name_found && i == ds->num_ports)
+               return -EINVAL;
 
        /* Make the built-in MII bus mask match the number of ports;
         * switch drivers can override this later
@@ -358,15 +255,13 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
         * tagging protocol to the preferred tagging format of this
         * switch.
         */
-       if (dst->cpu_switch == index) {
+       if (dst->cpu_switch == ds) {
                enum dsa_tag_protocol tag_protocol;
 
                tag_protocol = ops->get_tag_protocol(ds);
                dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol);
-               if (IS_ERR(dst->tag_ops)) {
-                       ret = PTR_ERR(dst->tag_ops);
-                       goto out;
-               }
+               if (IS_ERR(dst->tag_ops))
+                       return PTR_ERR(dst->tag_ops);
 
                dst->rcv = dst->tag_ops->rcv;
        }
@@ -378,85 +273,55 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
         */
        ret = ops->setup(ds);
        if (ret < 0)
-               goto out;
+               return ret;
+
+       ret = dsa_switch_register_notifier(ds);
+       if (ret)
+               return ret;
 
        if (ops->set_addr) {
                ret = ops->set_addr(ds, dst->master_netdev->dev_addr);
                if (ret < 0)
-                       goto out;
+                       return ret;
        }
 
        if (!ds->slave_mii_bus && ops->phy_read) {
                ds->slave_mii_bus = devm_mdiobus_alloc(parent);
-               if (!ds->slave_mii_bus) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
+               if (!ds->slave_mii_bus)
+                       return -ENOMEM;
                dsa_slave_mii_bus_init(ds);
 
                ret = mdiobus_register(ds->slave_mii_bus);
                if (ret < 0)
-                       goto out;
+                       return ret;
        }
 
        /*
         * Create network devices for physical switch ports.
         */
-       for (i = 0; i < DSA_MAX_PORTS; i++) {
+       for (i = 0; i < ds->num_ports; i++) {
                ds->ports[i].dn = cd->port_dn[i];
 
                if (!(ds->enabled_port_mask & (1 << i)))
                        continue;
 
                ret = dsa_slave_create(ds, parent, i, cd->port_names[i]);
-               if (ret < 0) {
+               if (ret < 0)
                        netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s): %d\n",
                                   index, i, cd->port_names[i], ret);
-                       ret = 0;
-               }
        }
 
        /* Perform configuration of the CPU and DSA ports */
        ret = dsa_cpu_dsa_setups(ds, parent);
-       if (ret < 0) {
+       if (ret < 0)
                netdev_err(dst->master_netdev, "[%d]: can't configure CPU and DSA ports\n",
                           index);
-               ret = 0;
-       }
 
        ret = dsa_cpu_port_ethtool_setup(ds);
        if (ret)
                return ret;
 
-#ifdef CONFIG_NET_DSA_HWMON
-       /* If the switch provides a temperature sensor,
-        * register with hardware monitoring subsystem.
-        * Treat registration error as non-fatal and ignore it.
-        */
-       if (ops->get_temp) {
-               const char *netname = netdev_name(dst->master_netdev);
-               char hname[IFNAMSIZ + 1];
-               int i, j;
-
-               /* Create valid hwmon 'name' attribute */
-               for (i = j = 0; i < IFNAMSIZ && netname[i]; i++) {
-                       if (isalnum(netname[i]))
-                               hname[j++] = netname[i];
-               }
-               hname[j] = '\0';
-               scnprintf(ds->hwmon_name, sizeof(ds->hwmon_name), "%s_dsa%d",
-                         hname, index);
-               ds->hwmon_dev = hwmon_device_register_with_groups(NULL,
-                                       ds->hwmon_name, ds, dsa_hwmon_groups);
-               if (IS_ERR(ds->hwmon_dev))
-                       ds->hwmon_dev = NULL;
-       }
-#endif /* CONFIG_NET_DSA_HWMON */
-
-       return ret;
-
-out:
-       return ret;
+       return 0;
 }
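
Setup and teardown now bound every port loop with ds->num_ports instead of the compile-time DSA_MAX_PORTS, and error paths return directly since the old out: label did nothing but return. The classification helpers the loops rely on are untouched; a sketch of their assumed include/net/dsa.h form:

	static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
	{
		return !!(ds->cpu_port_mask & (1 << p));
	}

	static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
	{
		return !!(ds->dsa_port_mask & (1 << p));
	}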
 
 static struct dsa_switch *
@@ -464,7 +329,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
                 struct device *parent, struct device *host_dev)
 {
        struct dsa_chip_data *cd = dst->pd->chip + index;
-       struct dsa_switch_ops *ops;
+       const struct dsa_switch_ops *ops;
        struct dsa_switch *ds;
        int ret;
        const char *name;
@@ -486,8 +351,8 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
        /*
         * Allocate and initialise switch state.
         */
-       ds = devm_kzalloc(parent, sizeof(*ds), GFP_KERNEL);
-       if (ds == NULL)
+       ds = dsa_switch_alloc(parent, DSA_MAX_PORTS);
+       if (!ds)
                return ERR_PTR(-ENOMEM);
 
        ds->dst = dst;
@@ -495,7 +360,6 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
        ds->cd = cd;
        ds->ops = ops;
        ds->priv = priv;
-       ds->dev = parent;
 
        ret = dsa_switch_setup_one(ds, parent);
        if (ret)
@@ -504,8 +368,10 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
        return ds;
 }
 
-void dsa_cpu_dsa_destroy(struct device_node *port_dn)
+void dsa_cpu_dsa_destroy(struct dsa_port *port)
 {
+       struct device_node *port_dn = port->dn;
+
        if (of_phy_is_fixed_link(port_dn))
                of_phy_deregister_fixed_link(port_dn);
 }
@@ -514,13 +380,8 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
 {
        int port;
 
-#ifdef CONFIG_NET_DSA_HWMON
-       if (ds->hwmon_dev)
-               hwmon_device_unregister(ds->hwmon_dev);
-#endif
-
        /* Destroy network devices for physical switch ports. */
-       for (port = 0; port < DSA_MAX_PORTS; port++) {
+       for (port = 0; port < ds->num_ports; port++) {
                if (!(ds->enabled_port_mask & (1 << port)))
                        continue;
 
@@ -531,10 +392,10 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
        }
 
        /* Disable configuration of the CPU and DSA ports */
-       for (port = 0; port < DSA_MAX_PORTS; port++) {
+       for (port = 0; port < ds->num_ports; port++) {
                if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
                        continue;
-               dsa_cpu_dsa_destroy(ds->ports[port].dn);
+               dsa_cpu_dsa_destroy(&ds->ports[port]);
 
                /* Clearing a bit which is not set does no harm */
                ds->cpu_port_mask &= ~(1 << port);
@@ -543,6 +404,8 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
 
        if (ds->slave_mii_bus && ds->ops->phy_read)
                mdiobus_unregister(ds->slave_mii_bus);
+
+       dsa_switch_unregister_notifier(ds);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -551,7 +414,7 @@ int dsa_switch_suspend(struct dsa_switch *ds)
        int i, ret = 0;
 
        /* Suspend slave network devices */
-       for (i = 0; i < DSA_MAX_PORTS; i++) {
+       for (i = 0; i < ds->num_ports; i++) {
                if (!dsa_is_port_initialized(ds, i))
                        continue;
 
@@ -578,7 +441,7 @@ int dsa_switch_resume(struct dsa_switch *ds)
                return ret;
 
        /* Resume slave network devices */
-       for (i = 0; i < DSA_MAX_PORTS; i++) {
+       for (i = 0; i < ds->num_ports; i++) {
                if (!dsa_is_port_initialized(ds, i))
                        continue;
 
@@ -629,7 +492,7 @@ struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dsa_host_dev_to_mii_bus);
 
-static struct net_device *dev_to_net_device(struct device *dev)
+struct net_device *dsa_dev_to_net_device(struct device *dev)
 {
        struct device *d;
 
@@ -646,6 +509,7 @@ static struct net_device *dev_to_net_device(struct device *dev)
 
        return NULL;
 }
+EXPORT_SYMBOL_GPL(dsa_dev_to_net_device);
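
Renaming and exporting dev_to_net_device() lets the platform-data path in dsa2.c resolve a master device from outside this file. An illustrative caller, modeled on the dsa_cpu_parse() hunk later in this diff (the function name here is a placeholder):

	static int example_find_master(struct dsa_switch *ds, int index)
	{
		struct net_device *ethernet_dev;

		ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]);
		if (!ethernet_dev)
			return -EPROBE_DEFER;

		/* the helper returns with a reference held; drop it once found */
		dev_put(ethernet_dev);

		return 0;
	}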
 
 #ifdef CONFIG_OF
 static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
@@ -898,7 +762,6 @@ static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
 
        dst->pd = pd;
        dst->master_netdev = dev;
-       dst->cpu_switch = -1;
        dst->cpu_port = -1;
 
        for (i = 0; i < pd->nr_chips; i++) {
@@ -940,9 +803,6 @@ static int dsa_probe(struct platform_device *pdev)
        struct dsa_switch_tree *dst;
        int ret;
 
-       pr_notice_once("Distributed Switch Architecture driver version %s\n",
-                      dsa_driver_version);
-
        if (pdev->dev.of_node) {
                ret = dsa_of_probe(&pdev->dev);
                if (ret)
@@ -958,7 +818,7 @@ static int dsa_probe(struct platform_device *pdev)
                dev = pd->of_netdev;
                dev_hold(dev);
        } else {
-               dev = dev_to_net_device(pd->netdev);
+               dev = dsa_dev_to_net_device(pd->netdev);
        }
        if (dev == NULL) {
                ret = -EPROBE_DEFER;
@@ -1013,7 +873,7 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst)
                        dsa_switch_destroy(ds);
        }
 
-       dsa_cpu_port_ethtool_restore(dst->ds[0]);
+       dsa_cpu_port_ethtool_restore(dst->cpu_switch);
 
        dev_put(dst->master_netdev);
 }
@@ -1050,10 +910,6 @@ static struct packet_type dsa_pack_type __read_mostly = {
        .func   = dsa_switch_rcv,
 };
 
-static struct notifier_block dsa_netdevice_nb __read_mostly = {
-       .notifier_call  = dsa_slave_netdevice_event,
-};
-
 #ifdef CONFIG_PM_SLEEP
 static int dsa_suspend(struct device *d)
 {
@@ -1111,7 +967,9 @@ static int __init dsa_init_module(void)
 {
        int rc;
 
-       register_netdevice_notifier(&dsa_netdevice_nb);
+       rc = dsa_slave_register_notifier();
+       if (rc)
+               return rc;
 
        rc = platform_driver_register(&dsa_driver);
        if (rc)
@@ -1125,7 +983,7 @@ module_init(dsa_init_module);
 
 static void __exit dsa_cleanup_module(void)
 {
-       unregister_netdevice_notifier(&dsa_netdevice_nb);
+       dsa_slave_unregister_notifier();
        dev_remove_pack(&dsa_pack_type);
        platform_driver_unregister(&dsa_driver);
 }
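
dsa.c no longer owns a netdevice notifier_block; slave.c registers its own through the helper pair called above. A sketch of their plausible slave.c implementation (the registration side is not among the hunks shown here):

	static struct notifier_block dsa_slave_nb __read_mostly = {
		.notifier_call	= dsa_slave_netdevice_event,
	};

	int dsa_slave_register_notifier(void)
	{
		return register_netdevice_notifier(&dsa_slave_nb);
	}

	void dsa_slave_unregister_notifier(void)
	{
		int err;

		err = unregister_netdevice_notifier(&dsa_slave_nb);
		if (err)
			pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
	}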
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 0f99297b2fb3517942bf74fee08ed3e61d65a4f0..737be6470c7f27ba032d01667e039f3c03c17ae8 100644
@@ -57,7 +57,6 @@ static struct dsa_switch_tree *dsa_add_dst(u32 tree)
        if (!dst)
                return NULL;
        dst->tree = tree;
-       dst->cpu_switch = -1;
        INIT_LIST_HEAD(&dst->list);
        list_add_tail(&dsa_switch_trees, &dst->list);
        kref_init(&dst->refcount);
@@ -79,47 +78,43 @@ static void dsa_dst_del_ds(struct dsa_switch_tree *dst,
        kref_put(&dst->refcount, dsa_free_dst);
 }
 
-static bool dsa_port_is_dsa(struct device_node *port)
+/* For platform data configurations, we need to have a valid name argument to
+ * differentiate a disabled port from an enabled one
+ */
+static bool dsa_port_is_valid(struct dsa_port *port)
 {
-       const char *name;
-
-       name = of_get_property(port, "label", NULL);
-       if (!name)
-               return false;
+       return !!(port->dn || port->name);
+}
 
-       if (!strcmp(name, "dsa"))
+static bool dsa_port_is_dsa(struct dsa_port *port)
+{
+       if (port->name && !strcmp(port->name, "dsa"))
                return true;
-
-       return false;
+       else
+               return !!of_parse_phandle(port->dn, "link", 0);
 }
 
-static bool dsa_port_is_cpu(struct device_node *port)
+static bool dsa_port_is_cpu(struct dsa_port *port)
 {
-       const char *name;
-
-       name = of_get_property(port, "label", NULL);
-       if (!name)
-               return false;
-
-       if (!strcmp(name, "cpu"))
+       if (port->name && !strcmp(port->name, "cpu"))
                return true;
-
-       return false;
+       else
+               return !!of_parse_phandle(port->dn, "ethernet", 0);
 }
 
-static bool dsa_ds_find_port(struct dsa_switch *ds,
-                            struct device_node *port)
+static bool dsa_ds_find_port_dn(struct dsa_switch *ds,
+                               struct device_node *port)
 {
        u32 index;
 
-       for (index = 0; index < DSA_MAX_PORTS; index++)
+       for (index = 0; index < ds->num_ports; index++)
                if (ds->ports[index].dn == port)
                        return true;
        return false;
 }
 
-static struct dsa_switch *dsa_dst_find_port(struct dsa_switch_tree *dst,
-                                           struct device_node *port)
+static struct dsa_switch *dsa_dst_find_port_dn(struct dsa_switch_tree *dst,
+                                              struct device_node *port)
 {
        struct dsa_switch *ds;
        u32 index;
@@ -129,7 +124,7 @@ static struct dsa_switch *dsa_dst_find_port(struct dsa_switch_tree *dst,
                if (!ds)
                        continue;
 
-               if (dsa_ds_find_port(ds, port))
+               if (dsa_ds_find_port_dn(ds, port))
                        return ds;
        }
 
@@ -138,7 +133,7 @@ static struct dsa_switch *dsa_dst_find_port(struct dsa_switch_tree *dst,
 
 static int dsa_port_complete(struct dsa_switch_tree *dst,
                             struct dsa_switch *src_ds,
-                            struct device_node *port,
+                            struct dsa_port *port,
                             u32 src_port)
 {
        struct device_node *link;
@@ -146,11 +141,11 @@ static int dsa_port_complete(struct dsa_switch_tree *dst,
        struct dsa_switch *dst_ds;
 
        for (index = 0;; index++) {
-               link = of_parse_phandle(port, "link", index);
+               link = of_parse_phandle(port->dn, "link", index);
                if (!link)
                        break;
 
-               dst_ds = dsa_dst_find_port(dst, link);
+               dst_ds = dsa_dst_find_port_dn(dst, link);
                of_node_put(link);
 
                if (!dst_ds)
@@ -169,13 +164,13 @@ static int dsa_port_complete(struct dsa_switch_tree *dst,
  */
 static int dsa_ds_complete(struct dsa_switch_tree *dst, struct dsa_switch *ds)
 {
-       struct device_node *port;
+       struct dsa_port *port;
        u32 index;
        int err;
 
-       for (index = 0; index < DSA_MAX_PORTS; index++) {
-               port = ds->ports[index].dn;
-               if (!port)
+       for (index = 0; index < ds->num_ports; index++) {
+               port = &ds->ports[index];
+               if (!dsa_port_is_valid(port))
                        continue;
 
                if (!dsa_port_is_dsa(port))
@@ -215,7 +210,7 @@ static int dsa_dst_complete(struct dsa_switch_tree *dst)
        return 0;
 }
 
-static int dsa_dsa_port_apply(struct device_node *port, u32 index,
+static int dsa_dsa_port_apply(struct dsa_port *port, u32 index,
                              struct dsa_switch *ds)
 {
        int err;
@@ -230,13 +225,13 @@ static int dsa_dsa_port_apply(struct device_node *port, u32 index,
        return 0;
 }
 
-static void dsa_dsa_port_unapply(struct device_node *port, u32 index,
+static void dsa_dsa_port_unapply(struct dsa_port *port, u32 index,
                                 struct dsa_switch *ds)
 {
        dsa_cpu_dsa_destroy(port);
 }
 
-static int dsa_cpu_port_apply(struct device_node *port, u32 index,
+static int dsa_cpu_port_apply(struct dsa_port *port, u32 index,
                              struct dsa_switch *ds)
 {
        int err;
@@ -253,7 +248,7 @@ static int dsa_cpu_port_apply(struct device_node *port, u32 index,
        return 0;
 }
 
-static void dsa_cpu_port_unapply(struct device_node *port, u32 index,
+static void dsa_cpu_port_unapply(struct dsa_port *port, u32 index,
                                 struct dsa_switch *ds)
 {
        dsa_cpu_dsa_destroy(port);
@@ -261,13 +256,16 @@ static void dsa_cpu_port_unapply(struct device_node *port, u32 index,
 
 }
 
-static int dsa_user_port_apply(struct device_node *port, u32 index,
+static int dsa_user_port_apply(struct dsa_port *port, u32 index,
                               struct dsa_switch *ds)
 {
-       const char *name;
+       const char *name = port->name;
        int err;
 
-       name = of_get_property(port, "label", NULL);
+       if (port->dn)
+               name = of_get_property(port->dn, "label", NULL);
+       if (!name)
+               name = "eth%d";
 
        err = dsa_slave_create(ds, ds->dev, index, name);
        if (err) {
@@ -280,7 +278,7 @@ static int dsa_user_port_apply(struct device_node *port, u32 index,
        return 0;
 }
 
-static void dsa_user_port_unapply(struct device_node *port, u32 index,
+static void dsa_user_port_unapply(struct dsa_port *port, u32 index,
                                  struct dsa_switch *ds)
 {
        if (ds->ports[index].netdev) {
@@ -292,7 +290,7 @@ static void dsa_user_port_unapply(struct device_node *port, u32 index,
 
 static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
 {
-       struct device_node *port;
+       struct dsa_port *port;
        u32 index;
        int err;
 
@@ -307,6 +305,10 @@ static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
        if (err < 0)
                return err;
 
+       err = dsa_switch_register_notifier(ds);
+       if (err)
+               return err;
+
        if (ds->ops->set_addr) {
                err = ds->ops->set_addr(ds, dst->master_netdev->dev_addr);
                if (err < 0)
@@ -325,9 +327,9 @@ static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
                        return err;
        }
 
-       for (index = 0; index < DSA_MAX_PORTS; index++) {
-               port = ds->ports[index].dn;
-               if (!port)
+       for (index = 0; index < ds->num_ports; index++) {
+               port = &ds->ports[index];
+               if (!dsa_port_is_valid(port))
                        continue;
 
                if (dsa_port_is_dsa(port)) {
@@ -354,12 +356,12 @@ static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
 
 static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
 {
-       struct device_node *port;
+       struct dsa_port *port;
        u32 index;
 
-       for (index = 0; index < DSA_MAX_PORTS; index++) {
-               port = ds->ports[index].dn;
-               if (!port)
+       for (index = 0; index < ds->num_ports; index++) {
+               port = &ds->ports[index];
+               if (!dsa_port_is_valid(port))
                        continue;
 
                if (dsa_port_is_dsa(port)) {
@@ -377,6 +379,8 @@ static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
 
        if (ds->slave_mii_bus && ds->ops->phy_read)
                mdiobus_unregister(ds->slave_mii_bus);
+
+       dsa_switch_unregister_notifier(ds);
 }
 
 static int dsa_dst_apply(struct dsa_switch_tree *dst)
@@ -395,8 +399,8 @@ static int dsa_dst_apply(struct dsa_switch_tree *dst)
                        return err;
        }
 
-       if (dst->ds[0]) {
-               err = dsa_cpu_port_ethtool_setup(dst->ds[0]);
+       if (dst->cpu_switch) {
+               err = dsa_cpu_port_ethtool_setup(dst->cpu_switch);
                if (err)
                        return err;
        }
@@ -436,14 +440,14 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
                dsa_ds_unapply(dst, ds);
        }
 
-       if (dst->ds[0])
-               dsa_cpu_port_ethtool_restore(dst->ds[0]);
+       if (dst->cpu_switch)
+               dsa_cpu_port_ethtool_restore(dst->cpu_switch);
 
        pr_info("DSA: tree %d unapplied\n", dst->tree);
        dst->applied = false;
 }
 
-static int dsa_cpu_parse(struct device_node *port, u32 index,
+static int dsa_cpu_parse(struct dsa_port *port, u32 index,
                         struct dsa_switch_tree *dst,
                         struct dsa_switch *ds)
 {
@@ -451,11 +455,16 @@ static int dsa_cpu_parse(struct device_node *port, u32 index,
        struct net_device *ethernet_dev;
        struct device_node *ethernet;
 
-       ethernet = of_parse_phandle(port, "ethernet", 0);
-       if (!ethernet)
-               return -EINVAL;
+       if (port->dn) {
+               ethernet = of_parse_phandle(port->dn, "ethernet", 0);
+               if (!ethernet)
+                       return -EINVAL;
+               ethernet_dev = of_find_net_device_by_node(ethernet);
+       } else {
+               ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]);
+               dev_put(ethernet_dev);
+       }
 
-       ethernet_dev = of_find_net_device_by_node(ethernet);
        if (!ethernet_dev)
                return -EPROBE_DEFER;
 
@@ -465,8 +474,8 @@ static int dsa_cpu_parse(struct device_node *port, u32 index,
        if (!dst->master_netdev)
                dst->master_netdev = ethernet_dev;
 
-       if (dst->cpu_switch == -1) {
-               dst->cpu_switch = ds->index;
+       if (!dst->cpu_switch) {
+               dst->cpu_switch = ds;
                dst->cpu_port = index;
        }
 
@@ -484,13 +493,13 @@ static int dsa_cpu_parse(struct device_node *port, u32 index,
 
 static int dsa_ds_parse(struct dsa_switch_tree *dst, struct dsa_switch *ds)
 {
-       struct device_node *port;
+       struct dsa_port *port;
        u32 index;
        int err;
 
-       for (index = 0; index < DSA_MAX_PORTS; index++) {
-               port = ds->ports[index].dn;
-               if (!port)
+       for (index = 0; index < ds->num_ports; index++) {
+               port = &ds->ports[index];
+               if (!dsa_port_is_valid(port))
                        continue;
 
                if (dsa_port_is_cpu(port)) {
@@ -542,7 +551,7 @@ static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds)
                if (err)
                        return err;
 
-               if (reg >= DSA_MAX_PORTS)
+               if (reg >= ds->num_ports)
                        return -EINVAL;
 
                ds->ports[reg].dn = port;
@@ -551,14 +560,41 @@ static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds)
                 * to have access to a correct value, just like what
                 * net/dsa/dsa.c::dsa_switch_setup_one does.
                 */
-               if (!dsa_port_is_cpu(port))
+               if (!dsa_port_is_cpu(&ds->ports[reg]))
                        ds->enabled_port_mask |= 1 << reg;
        }
 
        return 0;
 }
 
-static int dsa_parse_member(struct device_node *np, u32 *tree, u32 *index)
+static int dsa_parse_ports(struct dsa_chip_data *cd, struct dsa_switch *ds)
+{
+       bool valid_name_found = false;
+       unsigned int i;
+
+       for (i = 0; i < DSA_MAX_PORTS; i++) {
+               if (!cd->port_names[i])
+                       continue;
+
+               ds->ports[i].name = cd->port_names[i];
+
+               /* Initialize enabled_port_mask now for drv->setup()
+                * to have access to a correct value, just like what
+                * net/dsa/dsa.c::dsa_switch_setup_one does.
+                */
+               if (!dsa_port_is_cpu(&ds->ports[i]))
+                       ds->enabled_port_mask |= 1 << i;
+
+               valid_name_found = true;
+       }
+
+       if (!valid_name_found && i == DSA_MAX_PORTS)
+               return -EINVAL;
+
+       return 0;
+}
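
dsa_parse_ports() is the platform-data twin of dsa_parse_ports_dn(): with no device_node available, a port is enabled and classified purely by its name, so "cpu" and "dsa" keep their special meaning. An illustrative dsa_chip_data fragment (the port names are made up):

	#include <net/dsa.h>

	static struct dsa_chip_data example_cd = {
		.port_names[0]	= "lan1",
		.port_names[1]	= "lan2",
		.port_names[5]	= "cpu",	/* selects the CPU port */
	};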
+
+static int dsa_parse_member_dn(struct device_node *np, u32 *tree, u32 *index)
 {
        int err;
 
@@ -582,6 +618,18 @@ static int dsa_parse_member(struct device_node *np, u32 *tree, u32 *index)
        return 0;
 }
 
+static int dsa_parse_member(struct dsa_chip_data *pd, u32 *tree, u32 *index)
+{
+       if (!pd)
+               return -ENODEV;
+
+       /* We do not support complex trees with dsa_chip_data */
+       *tree = 0;
+       *index = 0;
+
+       return 0;
+}
+
 static struct device_node *dsa_get_ports(struct dsa_switch *ds,
                                         struct device_node *np)
 {
@@ -596,23 +644,36 @@ static struct device_node *dsa_get_ports(struct dsa_switch *ds,
        return ports;
 }
 
-static int _dsa_register_switch(struct dsa_switch *ds, struct device_node *np)
+static int _dsa_register_switch(struct dsa_switch *ds, struct device *dev)
 {
-       struct device_node *ports = dsa_get_ports(ds, np);
+       struct dsa_chip_data *pdata = dev->platform_data;
+       struct device_node *np = dev->of_node;
        struct dsa_switch_tree *dst;
+       struct device_node *ports;
        u32 tree, index;
        int i, err;
 
-       err = dsa_parse_member(np, &tree, &index);
-       if (err)
-               return err;
+       if (np) {
+               err = dsa_parse_member_dn(np, &tree, &index);
+               if (err)
+                       return err;
 
-       if (IS_ERR(ports))
-               return PTR_ERR(ports);
+               ports = dsa_get_ports(ds, np);
+               if (IS_ERR(ports))
+                       return PTR_ERR(ports);
 
-       err = dsa_parse_ports_dn(ports, ds);
-       if (err)
-               return err;
+               err = dsa_parse_ports_dn(ports, ds);
+               if (err)
+                       return err;
+       } else {
+               err = dsa_parse_member(pdata, &tree, &index);
+               if (err)
+                       return err;
+
+               err = dsa_parse_ports(pdata, ds);
+               if (err)
+                       return err;
+       }
 
        dst = dsa_get_dst(tree);
        if (!dst) {
@@ -628,6 +689,7 @@ static int _dsa_register_switch(struct dsa_switch *ds, struct device_node *np)
 
        ds->dst = dst;
        ds->index = index;
+       ds->cd = pdata;
 
        /* Initialize the routing table */
        for (i = 0; i < DSA_MAX_SWITCHES; ++i)
@@ -651,8 +713,14 @@ static int _dsa_register_switch(struct dsa_switch *ds, struct device_node *np)
        }
 
        err = dsa_dst_parse(dst);
-       if (err)
+       if (err) {
+               if (err == -EPROBE_DEFER) {
+                       dsa_dst_del_ds(dst, ds, ds->index);
+                       return err;
+               }
+
                goto out_del_dst;
+       }
 
        err = dsa_dst_apply(dst);
        if (err) {
@@ -671,12 +739,34 @@ out:
        return err;
 }
 
-int dsa_register_switch(struct dsa_switch *ds, struct device_node *np)
+struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n)
+{
+       size_t size = sizeof(struct dsa_switch) + n * sizeof(struct dsa_port);
+       struct dsa_switch *ds;
+       int i;
+
+       ds = devm_kzalloc(dev, size, GFP_KERNEL);
+       if (!ds)
+               return NULL;
+
+       ds->dev = dev;
+       ds->num_ports = n;
+
+       for (i = 0; i < ds->num_ports; ++i) {
+               ds->ports[i].index = i;
+               ds->ports[i].ds = ds;
+       }
+
+       return ds;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_alloc);
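
With ports now sized at allocation time, drivers are expected to obtain their dsa_switch from dsa_switch_alloc() instead of allocating it themselves. A hedged sketch of the driver-side pattern; the foo_* names are placeholders, not a driver in this tree:

	#include <linux/platform_device.h>
	#include <net/dsa.h>

	static const struct dsa_switch_ops foo_switch_ops = {
		/* .get_tag_protocol, .setup, ... driver callbacks */
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct dsa_switch *ds;

		ds = dsa_switch_alloc(&pdev->dev, DSA_MAX_PORTS);
		if (!ds)
			return -ENOMEM;

		ds->ops = &foo_switch_ops;
		ds->priv = platform_get_drvdata(pdev);	/* placeholder private data */

		return dsa_register_switch(ds, &pdev->dev);
	}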
+
+int dsa_register_switch(struct dsa_switch *ds, struct device *dev)
 {
        int err;
 
        mutex_lock(&dsa2_mutex);
-       err = _dsa_register_switch(ds, np);
+       err = _dsa_register_switch(ds, dev);
        mutex_unlock(&dsa2_mutex);
 
        return err;
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 6cfd7388834e3a118e81ece3779f56023bfca488..0706a511244e92ff0174173eb388a41ff59141f4 100644
@@ -25,12 +25,8 @@ struct dsa_slave_priv {
        struct sk_buff *        (*xmit)(struct sk_buff *skb,
                                        struct net_device *dev);
 
-       /*
-        * Which switch this port is a part of, and the port index
-        * for this port.
-        */
-       struct dsa_switch       *parent;
-       u8                      port;
+       /* DSA port data, such as switch, port index, etc. */
+       struct dsa_port         *dp;
 
        /*
         * The phylib phy_device pointer for the PHY connected
@@ -42,17 +38,18 @@ struct dsa_slave_priv {
        int                     old_pause;
        int                     old_duplex;
 
-       struct net_device       *bridge_dev;
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll          *netpoll;
 #endif
+
+       /* TC context */
+       struct list_head        mall_tc_list;
 };
 
 /* dsa.c */
-extern char dsa_driver_version[];
 int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev,
-                     struct device_node *port_dn, int port);
-void dsa_cpu_dsa_destroy(struct device_node *port_dn);
+                     struct dsa_port *dport, int port);
+void dsa_cpu_dsa_destroy(struct dsa_port *dport);
 const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol);
 int dsa_cpu_port_ethtool_setup(struct dsa_switch *ds);
 void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds);
@@ -66,8 +63,12 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
 void dsa_slave_destroy(struct net_device *slave_dev);
 int dsa_slave_suspend(struct net_device *slave_dev);
 int dsa_slave_resume(struct net_device *slave_dev);
-int dsa_slave_netdevice_event(struct notifier_block *unused,
-                             unsigned long event, void *ptr);
+int dsa_slave_register_notifier(void);
+void dsa_slave_unregister_notifier(void);
+
+/* switch.c */
+int dsa_switch_register_notifier(struct dsa_switch *ds);
+void dsa_switch_unregister_notifier(struct dsa_switch *ds);
 
 /* tag_dsa.c */
 extern const struct dsa_device_ops dsa_netdev_ops;
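
The two switch.c prototypes above are the registration half of the new notifier. A sketch of their assumed implementation, hooking each switch into the raw notifier head on its tree (this presumes struct dsa_switch gained a notifier_block nb alongside this change):

	int dsa_switch_register_notifier(struct dsa_switch *ds)
	{
		ds->nb.notifier_call = dsa_switch_event;

		return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
	}

	void dsa_switch_unregister_notifier(struct dsa_switch *ds)
	{
		int err;

		err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
		if (err)
			dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
	}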
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 7d45961108511488003380da656bf8a4f2fd7d84..c34872e1febc4b75d1b69b18a8a1189405ca30fa 100644
 #include <linux/of_net.h>
 #include <linux/of_mdio.h>
 #include <linux/mdio.h>
+#include <linux/list.h>
 #include <net/rtnetlink.h>
 #include <net/switchdev.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_mirred.h>
 #include <linux/if_bridge.h>
 #include <linux/netpoll.h>
 #include "dsa_priv.h"
 
+static bool dsa_slave_dev_check(struct net_device *dev);
+
+static int dsa_slave_notify(struct net_device *dev, unsigned long e, void *v)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct raw_notifier_head *nh = &p->dp->ds->dst->nh;
+       int err;
+
+       err = raw_notifier_call_chain(nh, e, v);
+
+       return notifier_to_errno(err);
+}
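
dsa_slave_notify() raises an event on the raw notifier chain of the whole tree (dst->nh), so a change on one port can reach every switch in the fabric. The event IDs and payload are defined elsewhere in this series; their assumed shape, inferred from the bridge join/leave call sites below:

	/* assumed dsa_priv.h definitions */
	enum {
		DSA_NOTIFIER_BRIDGE_JOIN = 1,
		DSA_NOTIFIER_BRIDGE_LEAVE,
	};

	struct dsa_notifier_bridge_info {
		struct net_device *br;
		int sw_index;
		int port;
	};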
+
 /* slave mii_bus handling ***************************************************/
 static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
 {
@@ -61,17 +77,20 @@ static int dsa_slave_get_iflink(const struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
 
-       return p->parent->dst->master_netdev->ifindex;
+       return p->dp->ds->dst->master_netdev->ifindex;
 }
 
-static inline bool dsa_port_is_bridged(struct dsa_slave_priv *p)
+static inline bool dsa_port_is_bridged(struct dsa_port *dp)
 {
-       return !!p->bridge_dev;
+       return !!dp->bridge_dev;
 }
 
-static void dsa_port_set_stp_state(struct dsa_switch *ds, int port, u8 state)
+static void dsa_slave_set_state(struct net_device *dev, u8 state)
 {
-       struct dsa_port *dp = &ds->ports[port];
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_port *dp = p->dp;
+       struct dsa_switch *ds = dp->ds;
+       int port = dp->index;
 
        if (ds->ops->port_stp_state_set)
                ds->ops->port_stp_state_set(ds, port, state);
@@ -96,9 +115,9 @@ static void dsa_port_set_stp_state(struct dsa_switch *ds, int port, u8 state)
 static int dsa_slave_open(struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct net_device *master = p->parent->dst->master_netdev;
-       struct dsa_switch *ds = p->parent;
-       u8 stp_state = dsa_port_is_bridged(p) ?
+       struct net_device *master = p->dp->ds->dst->master_netdev;
+       struct dsa_switch *ds = p->dp->ds;
+       u8 stp_state = dsa_port_is_bridged(p->dp) ?
                        BR_STATE_BLOCKING : BR_STATE_FORWARDING;
        int err;
 
@@ -123,12 +142,12 @@ static int dsa_slave_open(struct net_device *dev)
        }
 
        if (ds->ops->port_enable) {
-               err = ds->ops->port_enable(ds, p->port, p->phy);
+               err = ds->ops->port_enable(ds, p->dp->index, p->phy);
                if (err)
                        goto clear_promisc;
        }
 
-       dsa_port_set_stp_state(ds, p->port, stp_state);
+       dsa_slave_set_state(dev, stp_state);
 
        if (p->phy)
                phy_start(p->phy);
@@ -151,8 +170,8 @@ out:
 static int dsa_slave_close(struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct net_device *master = p->parent->dst->master_netdev;
-       struct dsa_switch *ds = p->parent;
+       struct net_device *master = p->dp->ds->dst->master_netdev;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (p->phy)
                phy_stop(p->phy);
@@ -168,9 +187,9 @@ static int dsa_slave_close(struct net_device *dev)
                dev_uc_del(master, dev->dev_addr);
 
        if (ds->ops->port_disable)
-               ds->ops->port_disable(ds, p->port, p->phy);
+               ds->ops->port_disable(ds, p->dp->index, p->phy);
 
-       dsa_port_set_stp_state(ds, p->port, BR_STATE_DISABLED);
+       dsa_slave_set_state(dev, BR_STATE_DISABLED);
 
        return 0;
 }
@@ -178,7 +197,7 @@ static int dsa_slave_close(struct net_device *dev)
 static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct net_device *master = p->parent->dst->master_netdev;
+       struct net_device *master = p->dp->ds->dst->master_netdev;
 
        if (change & IFF_ALLMULTI)
                dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
@@ -189,7 +208,7 @@ static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
 static void dsa_slave_set_rx_mode(struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct net_device *master = p->parent->dst->master_netdev;
+       struct net_device *master = p->dp->ds->dst->master_netdev;
 
        dev_mc_sync(master, dev);
        dev_uc_sync(master, dev);
@@ -198,7 +217,7 @@ static void dsa_slave_set_rx_mode(struct net_device *dev)
 static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct net_device *master = p->parent->dst->master_netdev;
+       struct net_device *master = p->dp->ds->dst->master_netdev;
        struct sockaddr *addr = a;
        int err;
 
@@ -228,16 +247,17 @@ static int dsa_slave_port_vlan_add(struct net_device *dev,
                                   struct switchdev_trans *trans)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_port *dp = p->dp;
+       struct dsa_switch *ds = dp->ds;
 
        if (switchdev_trans_ph_prepare(trans)) {
                if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
                        return -EOPNOTSUPP;
 
-               return ds->ops->port_vlan_prepare(ds, p->port, vlan, trans);
+               return ds->ops->port_vlan_prepare(ds, dp->index, vlan, trans);
        }
 
-       ds->ops->port_vlan_add(ds, p->port, vlan, trans);
+       ds->ops->port_vlan_add(ds, dp->index, vlan, trans);
 
        return 0;
 }
@@ -246,12 +266,12 @@ static int dsa_slave_port_vlan_del(struct net_device *dev,
                                   const struct switchdev_obj_port_vlan *vlan)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (!ds->ops->port_vlan_del)
                return -EOPNOTSUPP;
 
-       return ds->ops->port_vlan_del(ds, p->port, vlan);
+       return ds->ops->port_vlan_del(ds, p->dp->index, vlan);
 }
 
 static int dsa_slave_port_vlan_dump(struct net_device *dev,
@@ -259,10 +279,10 @@ static int dsa_slave_port_vlan_dump(struct net_device *dev,
                                    switchdev_obj_dump_cb_t *cb)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (ds->ops->port_vlan_dump)
-               return ds->ops->port_vlan_dump(ds, p->port, vlan, cb);
+               return ds->ops->port_vlan_dump(ds, p->dp->index, vlan, cb);
 
        return -EOPNOTSUPP;
 }
@@ -272,16 +292,16 @@ static int dsa_slave_port_fdb_add(struct net_device *dev,
                                  struct switchdev_trans *trans)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (switchdev_trans_ph_prepare(trans)) {
                if (!ds->ops->port_fdb_prepare || !ds->ops->port_fdb_add)
                        return -EOPNOTSUPP;
 
-               return ds->ops->port_fdb_prepare(ds, p->port, fdb, trans);
+               return ds->ops->port_fdb_prepare(ds, p->dp->index, fdb, trans);
        }
 
-       ds->ops->port_fdb_add(ds, p->port, fdb, trans);
+       ds->ops->port_fdb_add(ds, p->dp->index, fdb, trans);
 
        return 0;
 }
@@ -290,11 +310,11 @@ static int dsa_slave_port_fdb_del(struct net_device *dev,
                                  const struct switchdev_obj_port_fdb *fdb)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
        int ret = -EOPNOTSUPP;
 
        if (ds->ops->port_fdb_del)
-               ret = ds->ops->port_fdb_del(ds, p->port, fdb);
+               ret = ds->ops->port_fdb_del(ds, p->dp->index, fdb);
 
        return ret;
 }
@@ -304,10 +324,10 @@ static int dsa_slave_port_fdb_dump(struct net_device *dev,
                                   switchdev_obj_dump_cb_t *cb)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (ds->ops->port_fdb_dump)
-               return ds->ops->port_fdb_dump(ds, p->port, fdb, cb);
+               return ds->ops->port_fdb_dump(ds, p->dp->index, fdb, cb);
 
        return -EOPNOTSUPP;
 }
@@ -317,16 +337,16 @@ static int dsa_slave_port_mdb_add(struct net_device *dev,
                                  struct switchdev_trans *trans)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (switchdev_trans_ph_prepare(trans)) {
                if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
                        return -EOPNOTSUPP;
 
-               return ds->ops->port_mdb_prepare(ds, p->port, mdb, trans);
+               return ds->ops->port_mdb_prepare(ds, p->dp->index, mdb, trans);
        }
 
-       ds->ops->port_mdb_add(ds, p->port, mdb, trans);
+       ds->ops->port_mdb_add(ds, p->dp->index, mdb, trans);
 
        return 0;
 }
@@ -335,10 +355,10 @@ static int dsa_slave_port_mdb_del(struct net_device *dev,
                                  const struct switchdev_obj_port_mdb *mdb)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (ds->ops->port_mdb_del)
-               return ds->ops->port_mdb_del(ds, p->port, mdb);
+               return ds->ops->port_mdb_del(ds, p->dp->index, mdb);
 
        return -EOPNOTSUPP;
 }
@@ -348,10 +368,10 @@ static int dsa_slave_port_mdb_dump(struct net_device *dev,
                                   switchdev_obj_dump_cb_t *cb)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (ds->ops->port_mdb_dump)
-               return ds->ops->port_mdb_dump(ds, p->port, mdb, cb);
+               return ds->ops->port_mdb_dump(ds, p->dp->index, mdb, cb);
 
        return -EOPNOTSUPP;
 }
@@ -371,12 +391,12 @@ static int dsa_slave_stp_state_set(struct net_device *dev,
                                   struct switchdev_trans *trans)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (switchdev_trans_ph_prepare(trans))
                return ds->ops->port_stp_state_set ? 0 : -EOPNOTSUPP;
 
-       dsa_port_set_stp_state(ds, p->port, attr->u.stp_state);
+       dsa_slave_set_state(dev, attr->u.stp_state);
 
        return 0;
 }
@@ -386,14 +406,14 @@ static int dsa_slave_vlan_filtering(struct net_device *dev,
                                    struct switchdev_trans *trans)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        /* bridge skips -EOPNOTSUPP, so skip the prepare phase */
        if (switchdev_trans_ph_prepare(trans))
                return 0;
 
        if (ds->ops->port_vlan_filtering)
-               return ds->ops->port_vlan_filtering(ds, p->port,
+               return ds->ops->port_vlan_filtering(ds, p->dp->index,
                                                    attr->u.vlan_filtering);
 
        return 0;
@@ -404,7 +424,7 @@ static int dsa_fastest_ageing_time(struct dsa_switch *ds,
 {
        int i;
 
-       for (i = 0; i < DSA_MAX_PORTS; ++i) {
+       for (i = 0; i < ds->num_ports; ++i) {
                struct dsa_port *dp = &ds->ports[i];
 
                if (dp && dp->ageing_time && dp->ageing_time < ageing_time)
@@ -419,7 +439,7 @@ static int dsa_slave_ageing_time(struct net_device *dev,
                                 struct switchdev_trans *trans)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
        unsigned long ageing_jiffies = clock_t_to_jiffies(attr->u.ageing_time);
        unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
 
@@ -428,7 +448,7 @@ static int dsa_slave_ageing_time(struct net_device *dev,
                return 0;
 
        /* Keep the fastest ageing time in case of multiple bridges */
-       ds->ports[p->port].ageing_time = ageing_time;
+       p->dp->ageing_time = ageing_time;
        ageing_time = dsa_fastest_ageing_time(ds, ageing_time);
 
        if (ds->ops->set_ageing_time)
@@ -553,39 +573,58 @@ static int dsa_slave_bridge_port_join(struct net_device *dev,
                                      struct net_device *br)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
-       int ret = -EOPNOTSUPP;
+       struct dsa_notifier_bridge_info info = {
+               .sw_index = p->dp->ds->index,
+               .port = p->dp->index,
+               .br = br,
+       };
+       int err;
+
+       /* Here the port is already bridged. Reflect the current configuration
+        * so that drivers can program their chips accordingly.
+        */
+       p->dp->bridge_dev = br;
 
-       p->bridge_dev = br;
+       err = dsa_slave_notify(dev, DSA_NOTIFIER_BRIDGE_JOIN, &info);
 
-       if (ds->ops->port_bridge_join)
-               ret = ds->ops->port_bridge_join(ds, p->port, br);
+       /* The bridging is rolled back on error */
+       if (err)
+               p->dp->bridge_dev = NULL;
 
-       return ret == -EOPNOTSUPP ? 0 : ret;
+       return err;
 }
 
-static void dsa_slave_bridge_port_leave(struct net_device *dev)
+static void dsa_slave_bridge_port_leave(struct net_device *dev,
+                                       struct net_device *br)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
-
+       struct dsa_notifier_bridge_info info = {
+               .sw_index = p->dp->ds->index,
+               .port = p->dp->index,
+               .br = br,
+       };
+       int err;
 
-       if (ds->ops->port_bridge_leave)
-               ds->ops->port_bridge_leave(ds, p->port);
+       /* Here the port is already unbridged. Reflect the current configuration
+        * so that drivers can program their chips accordingly.
+        */
+       p->dp->bridge_dev = NULL;
 
-       p->bridge_dev = NULL;
+       err = dsa_slave_notify(dev, DSA_NOTIFIER_BRIDGE_LEAVE, &info);
+       if (err)
+               netdev_err(dev, "failed to notify DSA_NOTIFIER_BRIDGE_LEAVE\n");
 
        /* The port left the bridge and was put in BR_STATE_DISABLED by the
         * bridge layer; set it back to BR_STATE_FORWARDING so the port keeps
         * working as a standalone interface.
         */
-       dsa_port_set_stp_state(ds, p->port, BR_STATE_FORWARDING);
+       dsa_slave_set_state(dev, BR_STATE_FORWARDING);
 }
 
 static int dsa_slave_port_attr_get(struct net_device *dev,
                                   struct switchdev_attr *attr)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
@@ -633,7 +672,7 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Queue the SKB for transmission on the parent interface, but
         * do not modify its EtherType
         */
-       nskb->dev = p->parent->dst->master_netdev;
+       nskb->dev = p->dp->ds->dst->master_netdev;
        dev_queue_xmit(nskb);
 
        return NETDEV_TX_OK;
@@ -645,14 +684,10 @@ dsa_slave_get_link_ksettings(struct net_device *dev,
                             struct ethtool_link_ksettings *cmd)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       int err;
+       int err = -EOPNOTSUPP;
 
-       err = -EOPNOTSUPP;
-       if (p->phy != NULL) {
-               err = phy_read_status(p->phy);
-               if (err == 0)
-                       err = phy_ethtool_ksettings_get(p->phy, cmd);
-       }
+       if (p->phy != NULL)
+               err = phy_ethtool_ksettings_get(p->phy, cmd);
 
        return err;
 }
@@ -673,7 +708,6 @@ static void dsa_slave_get_drvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *drvinfo)
 {
        strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
-       strlcpy(drvinfo->version, dsa_driver_version, sizeof(drvinfo->version));
        strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
 }
@@ -681,10 +715,10 @@ static void dsa_slave_get_drvinfo(struct net_device *dev,
 static int dsa_slave_get_regs_len(struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (ds->ops->get_regs_len)
-               return ds->ops->get_regs_len(ds, p->port);
+               return ds->ops->get_regs_len(ds, p->dp->index);
 
        return -EOPNOTSUPP;
 }
@@ -693,10 +727,10 @@ static void
 dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (ds->ops->get_regs)
-               ds->ops->get_regs(ds, p->port, regs, _p);
+               ds->ops->get_regs(ds, p->dp->index, regs, _p);
 }
 
 static int dsa_slave_nway_reset(struct net_device *dev)
@@ -724,7 +758,7 @@ static u32 dsa_slave_get_link(struct net_device *dev)
 static int dsa_slave_get_eeprom_len(struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (ds->cd && ds->cd->eeprom_len)
                return ds->cd->eeprom_len;
@@ -739,7 +773,7 @@ static int dsa_slave_get_eeprom(struct net_device *dev,
                                struct ethtool_eeprom *eeprom, u8 *data)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (ds->ops->get_eeprom)
                return ds->ops->get_eeprom(ds, eeprom, data);
@@ -751,7 +785,7 @@ static int dsa_slave_set_eeprom(struct net_device *dev,
                                struct ethtool_eeprom *eeprom, u8 *data)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (ds->ops->set_eeprom)
                return ds->ops->set_eeprom(ds, eeprom, data);
@@ -763,7 +797,7 @@ static void dsa_slave_get_strings(struct net_device *dev,
                                  uint32_t stringset, uint8_t *data)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (stringset == ETH_SS_STATS) {
                int len = ETH_GSTRING_LEN;
@@ -773,7 +807,7 @@ static void dsa_slave_get_strings(struct net_device *dev,
                strncpy(data + 2 * len, "rx_packets", len);
                strncpy(data + 3 * len, "rx_bytes", len);
                if (ds->ops->get_strings)
-                       ds->ops->get_strings(ds, p->port, data + 4 * len);
+                       ds->ops->get_strings(ds, p->dp->index, data + 4 * len);
        }
 }
 
@@ -782,7 +816,7 @@ static void dsa_cpu_port_get_ethtool_stats(struct net_device *dev,
                                           uint64_t *data)
 {
        struct dsa_switch_tree *dst = dev->dsa_ptr;
-       struct dsa_switch *ds = dst->ds[0];
+       struct dsa_switch *ds = dst->cpu_switch;
        s8 cpu_port = dst->cpu_port;
        int count = 0;
 
@@ -799,7 +833,7 @@ static void dsa_cpu_port_get_ethtool_stats(struct net_device *dev,
 static int dsa_cpu_port_get_sset_count(struct net_device *dev, int sset)
 {
        struct dsa_switch_tree *dst = dev->dsa_ptr;
-       struct dsa_switch *ds = dst->ds[0];
+       struct dsa_switch *ds = dst->cpu_switch;
        int count = 0;
 
        if (dst->master_ethtool_ops.get_sset_count)
@@ -815,7 +849,7 @@ static void dsa_cpu_port_get_strings(struct net_device *dev,
                                     uint32_t stringset, uint8_t *data)
 {
        struct dsa_switch_tree *dst = dev->dsa_ptr;
-       struct dsa_switch *ds = dst->ds[0];
+       struct dsa_switch *ds = dst->cpu_switch;
        s8 cpu_port = dst->cpu_port;
        int len = ETH_GSTRING_LEN;
        int mcount = 0, count;
@@ -854,20 +888,20 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
                                        uint64_t *data)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        data[0] = dev->stats.tx_packets;
        data[1] = dev->stats.tx_bytes;
        data[2] = dev->stats.rx_packets;
        data[3] = dev->stats.rx_bytes;
        if (ds->ops->get_ethtool_stats)
-               ds->ops->get_ethtool_stats(ds, p->port, data + 4);
+               ds->ops->get_ethtool_stats(ds, p->dp->index, data + 4);
 }
 
 static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (sset == ETH_SS_STATS) {
                int count;
@@ -885,20 +919,20 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
 static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        if (ds->ops->get_wol)
-               ds->ops->get_wol(ds, p->port, w);
+               ds->ops->get_wol(ds, p->dp->index, w);
 }
 
 static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
        int ret = -EOPNOTSUPP;
 
        if (ds->ops->set_wol)
-               ret = ds->ops->set_wol(ds, p->port, w);
+               ret = ds->ops->set_wol(ds, p->dp->index, w);
 
        return ret;
 }
@@ -906,13 +940,13 @@ static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
 static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
        int ret;
 
        if (!ds->ops->set_eee)
                return -EOPNOTSUPP;
 
-       ret = ds->ops->set_eee(ds, p->port, p->phy, e);
+       ret = ds->ops->set_eee(ds, p->dp->index, p->phy, e);
        if (ret)
                return ret;
 
@@ -925,13 +959,13 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
 static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
        int ret;
 
        if (!ds->ops->get_eee)
                return -EOPNOTSUPP;
 
-       ret = ds->ops->get_eee(ds, p->port, e);
+       ret = ds->ops->get_eee(ds, p->dp->index, e);
        if (ret)
                return ret;
 
@@ -946,7 +980,7 @@ static int dsa_slave_netpoll_setup(struct net_device *dev,
                                   struct netpoll_info *ni)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
        struct net_device *master = ds->dst->master_netdev;
        struct netpoll *netpoll;
        int err = 0;
@@ -984,6 +1018,144 @@ static void dsa_slave_poll_controller(struct net_device *dev)
 }
 #endif
 
+static int dsa_slave_get_phys_port_name(struct net_device *dev,
+                                       char *name, size_t len)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+
+       if (snprintf(name, len, "p%d", p->dp->index) >= len)
+               return -EINVAL;
+
+       return 0;
+}
+
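The new ndo_get_phys_port_name hook above is what backs the phys_port_name
sysfs attribute, so the "p%d" string becomes visible to userspace. A minimal
reading sketch, not part of this patch; the interface name "lan0" is
illustrative:

#include <stdio.h>

int main(void)
{
	char name[32];
	FILE *f = fopen("/sys/class/net/lan0/phys_port_name", "r");

	if (f && fgets(name, sizeof(name), f))
		printf("physical port name: %s", name);	/* e.g. "p1" */
	if (f)
		fclose(f);
	return 0;
}
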
+static struct dsa_mall_tc_entry *
+dsa_slave_mall_tc_entry_find(struct dsa_slave_priv *p,
+                            unsigned long cookie)
+{
+       struct dsa_mall_tc_entry *mall_tc_entry;
+
+       list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
+               if (mall_tc_entry->cookie == cookie)
+                       return mall_tc_entry;
+
+       return NULL;
+}
+
+static int dsa_slave_add_cls_matchall(struct net_device *dev,
+                                     __be16 protocol,
+                                     struct tc_cls_matchall_offload *cls,
+                                     bool ingress)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_mall_tc_entry *mall_tc_entry;
+       struct dsa_switch *ds = p->dp->ds;
+       struct net *net = dev_net(dev);
+       struct dsa_slave_priv *to_p;
+       struct net_device *to_dev;
+       const struct tc_action *a;
+       int err = -EOPNOTSUPP;
+       LIST_HEAD(actions);
+       int ifindex;
+
+       if (!ds->ops->port_mirror_add)
+               return err;
+
+       if (!tc_single_action(cls->exts))
+               return err;
+
+       tcf_exts_to_list(cls->exts, &actions);
+       a = list_first_entry(&actions, struct tc_action, list);
+
+       if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
+               struct dsa_mall_mirror_tc_entry *mirror;
+
+               ifindex = tcf_mirred_ifindex(a);
+               to_dev = __dev_get_by_index(net, ifindex);
+               if (!to_dev)
+                       return -EINVAL;
+
+               if (!dsa_slave_dev_check(to_dev))
+                       return -EOPNOTSUPP;
+
+               mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
+               if (!mall_tc_entry)
+                       return -ENOMEM;
+
+               mall_tc_entry->cookie = cls->cookie;
+               mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
+               mirror = &mall_tc_entry->mirror;
+
+               to_p = netdev_priv(to_dev);
+
+               mirror->to_local_port = to_p->dp->index;
+               mirror->ingress = ingress;
+
+               err = ds->ops->port_mirror_add(ds, p->dp->index, mirror,
+                                              ingress);
+               if (err) {
+                       kfree(mall_tc_entry);
+                       return err;
+               }
+
+               list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
+       }
+
+       return 0;
+}
+
+static void dsa_slave_del_cls_matchall(struct net_device *dev,
+                                      struct tc_cls_matchall_offload *cls)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_mall_tc_entry *mall_tc_entry;
+       struct dsa_switch *ds = p->dp->ds;
+
+       if (!ds->ops->port_mirror_del)
+               return;
+
+       mall_tc_entry = dsa_slave_mall_tc_entry_find(p, cls->cookie);
+       if (!mall_tc_entry)
+               return;
+
+       list_del(&mall_tc_entry->list);
+
+       switch (mall_tc_entry->type) {
+       case DSA_PORT_MALL_MIRROR:
+               ds->ops->port_mirror_del(ds, p->dp->index,
+                                        &mall_tc_entry->mirror);
+               break;
+       default:
+               WARN_ON(1);
+       }
+
+       kfree(mall_tc_entry);
+}
+
+static int dsa_slave_setup_tc(struct net_device *dev, u32 handle,
+                             __be16 protocol, struct tc_to_netdev *tc)
+{
+       bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
+       int ret = -EOPNOTSUPP;
+
+       switch (tc->type) {
+       case TC_SETUP_MATCHALL:
+               switch (tc->cls_mall->command) {
+               case TC_CLSMATCHALL_REPLACE:
+                       return dsa_slave_add_cls_matchall(dev, protocol,
+                                                         tc->cls_mall,
+                                                         ingress);
+               case TC_CLSMATCHALL_DESTROY:
+                       dsa_slave_del_cls_matchall(dev, tc->cls_mall);
+                       return 0;
+               }
+       default:
+               break;
+       }
+
+       return ret;
+}
+
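The matchall handler above resolves a tc mirred target to a local switch port
and hands the driver a dsa_mall_mirror_tc_entry. A hypothetical driver hook,
to show the shape of the new port_mirror_add op; the foo_* names are made up,
only the signature and the struct fields come from this code:

#include <net/dsa.h>

static int foo_port_mirror_add(struct dsa_switch *ds, int port,
			       struct dsa_mall_mirror_tc_entry *mirror,
			       bool ingress)
{
	struct foo_priv *priv = ds->priv;

	/* mirror->to_local_port was derived from the mirred ifindex */
	return foo_set_mirror(priv, port, mirror->to_local_port, ingress);
}

From userspace this path would be exercised with something like:
tc filter add dev lan0 ingress matchall skip_sw action mirred egress mirror dev lan1
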
 void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops)
 {
        ops->get_sset_count = dsa_cpu_port_get_sset_count;
@@ -991,6 +1163,30 @@ void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops)
        ops->get_strings = dsa_cpu_port_get_strings;
 }
 
+static int dsa_slave_get_rxnfc(struct net_device *dev,
+                              struct ethtool_rxnfc *nfc, u32 *rule_locs)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_switch *ds = p->dp->ds;
+
+       if (!ds->ops->get_rxnfc)
+               return -EOPNOTSUPP;
+
+       return ds->ops->get_rxnfc(ds, p->dp->index, nfc, rule_locs);
+}
+
+static int dsa_slave_set_rxnfc(struct net_device *dev,
+                              struct ethtool_rxnfc *nfc)
+{
+       struct dsa_slave_priv *p = netdev_priv(dev);
+       struct dsa_switch *ds = p->dp->ds;
+
+       if (!ds->ops->set_rxnfc)
+               return -EOPNOTSUPP;
+
+       return ds->ops->set_rxnfc(ds, p->dp->index, nfc);
+}
+
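These two handlers simply forward ethtool's RX classification interface to the
switch driver. A hypothetical implementation of the get side, to show where
the ethtool_rxnfc commands end up (the foo_* names are made up):

#include <linux/ethtool.h>
#include <net/dsa.h>

static int foo_get_rxnfc(struct dsa_switch *ds, int port,
			 struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct foo_priv *priv = ds->priv;

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRLCNT:	/* report installed rule count */
		nfc->rule_cnt = foo_rule_count(priv, port);
		return 0;
	default:
		/* rule_locs would only be filled for ETHTOOL_GRXCLSRLALL */
		return -EOPNOTSUPP;
	}
}
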
 static const struct ethtool_ops dsa_slave_ethtool_ops = {
        .get_drvinfo            = dsa_slave_get_drvinfo,
        .get_regs_len           = dsa_slave_get_regs_len,
@@ -1009,6 +1205,8 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
        .get_eee                = dsa_slave_get_eee,
        .get_link_ksettings     = dsa_slave_get_link_ksettings,
        .set_link_ksettings     = dsa_slave_set_link_ksettings,
+       .get_rxnfc              = dsa_slave_get_rxnfc,
+       .set_rxnfc              = dsa_slave_set_rxnfc,
 };
 
 static const struct net_device_ops dsa_slave_netdev_ops = {
@@ -1031,6 +1229,8 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
        .ndo_bridge_getlink     = switchdev_port_bridge_getlink,
        .ndo_bridge_setlink     = switchdev_port_bridge_setlink,
        .ndo_bridge_dellink     = switchdev_port_bridge_dellink,
+       .ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
+       .ndo_setup_tc           = dsa_slave_setup_tc,
 };
 
 static const struct switchdev_ops dsa_slave_switchdev_ops = {
@@ -1048,7 +1248,7 @@ static struct device_type dsa_type = {
 static void dsa_slave_adjust_link(struct net_device *dev)
 {
        struct dsa_slave_priv *p = netdev_priv(dev);
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
        unsigned int status_changed = 0;
 
        if (p->old_link != p->phy->link) {
@@ -1067,7 +1267,7 @@ static void dsa_slave_adjust_link(struct net_device *dev)
        }
 
        if (ds->ops->adjust_link && status_changed)
-               ds->ops->adjust_link(ds, p->port, p->phy);
+               ds->ops->adjust_link(ds, p->dp->index, p->phy);
 
        if (status_changed)
                phy_print_status(p->phy);
@@ -1081,9 +1281,9 @@ static int dsa_slave_fixed_link_update(struct net_device *dev,
 
        if (dev) {
                p = netdev_priv(dev);
-               ds = p->parent;
+               ds = p->dp->ds;
                if (ds->ops->fixed_link_update)
-                       ds->ops->fixed_link_update(ds, p->port, status);
+                       ds->ops->fixed_link_update(ds, p->dp->index, status);
        }
 
        return 0;
@@ -1094,7 +1294,7 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
                                 struct net_device *slave_dev,
                                 int addr)
 {
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
 
        p->phy = mdiobus_get_phy(ds->slave_mii_bus, addr);
        if (!p->phy) {
@@ -1112,13 +1312,13 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
 static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
                                struct net_device *slave_dev)
 {
-       struct dsa_switch *ds = p->parent;
+       struct dsa_switch *ds = p->dp->ds;
        struct device_node *phy_dn, *port_dn;
        bool phy_is_fixed = false;
        u32 phy_flags = 0;
        int mode, ret;
 
-       port_dn = ds->ports[p->port].dn;
+       port_dn = p->dp->dn;
        mode = of_get_phy_mode(port_dn);
        if (mode < 0)
                mode = PHY_INTERFACE_MODE_NA;
@@ -1139,7 +1339,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
        }
 
        if (ds->ops->get_phy_flags)
-               phy_flags = ds->ops->get_phy_flags(ds, p->port);
+               phy_flags = ds->ops->get_phy_flags(ds, p->dp->index);
 
        if (phy_dn) {
                int phy_id = of_mdio_parse_addr(&slave_dev->dev, phy_dn);
@@ -1174,9 +1374,10 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
         * MDIO bus instead
         */
        if (!p->phy) {
-               ret = dsa_slave_phy_connect(p, slave_dev, p->port);
+               ret = dsa_slave_phy_connect(p, slave_dev, p->dp->index);
                if (ret) {
-                       netdev_err(slave_dev, "failed to connect to port %d: %d\n", p->port, ret);
+                       netdev_err(slave_dev, "failed to connect to port %d: %d\n",
+                                  p->dp->index, ret);
                        if (phy_is_fixed)
                                of_phy_deregister_fixed_link(port_dn);
                        return ret;
@@ -1246,7 +1447,8 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
        if (slave_dev == NULL)
                return -ENOMEM;
 
-       slave_dev->features = master->vlan_features;
+       slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
+       slave_dev->hw_features |= NETIF_F_HW_TC;
        slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
        eth_hw_addr_inherit(slave_dev, master);
        slave_dev->priv_flags |= IFF_NO_QUEUE;
@@ -1264,8 +1466,8 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
        slave_dev->vlan_features = master->vlan_features;
 
        p = netdev_priv(slave_dev);
-       p->parent = ds;
-       p->port = port;
+       p->dp = &ds->ports[port];
+       INIT_LIST_HEAD(&p->mall_tc_list);
        p->xmit = dst->tag_ops->xmit;
 
        p->old_pause = -1;
@@ -1298,10 +1500,9 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
 void dsa_slave_destroy(struct net_device *slave_dev)
 {
        struct dsa_slave_priv *p = netdev_priv(slave_dev);
-       struct dsa_switch *ds = p->parent;
        struct device_node *port_dn;
 
-       port_dn = ds->ports[p->port].dn;
+       port_dn = p->dp->dn;
 
        netif_carrier_off(slave_dev);
        if (p->phy) {
@@ -1319,46 +1520,52 @@ static bool dsa_slave_dev_check(struct net_device *dev)
        return dev->netdev_ops == &dsa_slave_netdev_ops;
 }
 
-static int dsa_slave_port_upper_event(struct net_device *dev,
-                                     unsigned long event, void *ptr)
+static int dsa_slave_changeupper(struct net_device *dev,
+                                struct netdev_notifier_changeupper_info *info)
 {
-       struct netdev_notifier_changeupper_info *info = ptr;
-       struct net_device *upper = info->upper_dev;
-       int err = 0;
+       int err = NOTIFY_DONE;
 
-       switch (event) {
-       case NETDEV_CHANGEUPPER:
-               if (netif_is_bridge_master(upper)) {
-                       if (info->linking)
-                               err = dsa_slave_bridge_port_join(dev, upper);
-                       else
-                               dsa_slave_bridge_port_leave(dev);
+       if (netif_is_bridge_master(info->upper_dev)) {
+               if (info->linking) {
+                       err = dsa_slave_bridge_port_join(dev, info->upper_dev);
+                       err = notifier_from_errno(err);
+               } else {
+                       dsa_slave_bridge_port_leave(dev, info->upper_dev);
+                       err = NOTIFY_OK;
                }
-
-               break;
        }
 
-       return notifier_from_errno(err);
+       return err;
 }
 
-static int dsa_slave_port_event(struct net_device *dev, unsigned long event,
-                               void *ptr)
+static int dsa_slave_netdevice_event(struct notifier_block *nb,
+                                    unsigned long event, void *ptr)
 {
-       switch (event) {
-       case NETDEV_CHANGEUPPER:
-               return dsa_slave_port_upper_event(dev, event, ptr);
-       }
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+       if (dev->netdev_ops != &dsa_slave_netdev_ops)
+               return NOTIFY_DONE;
+
+       if (event == NETDEV_CHANGEUPPER)
+               return dsa_slave_changeupper(dev, ptr);
 
        return NOTIFY_DONE;
 }
 
-int dsa_slave_netdevice_event(struct notifier_block *unused,
-                             unsigned long event, void *ptr)
+static struct notifier_block dsa_slave_nb __read_mostly = {
+       .notifier_call  = dsa_slave_netdevice_event,
+};
+
+int dsa_slave_register_notifier(void)
 {
-       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       return register_netdevice_notifier(&dsa_slave_nb);
+}
 
-       if (dsa_slave_dev_check(dev))
-               return dsa_slave_port_event(dev, event, ptr);
+void dsa_slave_unregister_notifier(void)
+{
+       int err;
 
-       return NOTIFY_DONE;
+       err = unregister_netdevice_notifier(&dsa_slave_nb);
+       if (err)
+               pr_err("DSA: failed to unregister slave notifier (%d)\n", err);
 }
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
new file mode 100644 (file)
index 0000000..6456dac
--- /dev/null
@@ -0,0 +1,85 @@
+/*
+ * Handling of a single switch chip, part of a switch fabric
+ *
+ * Copyright (c) 2017 Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <net/dsa.h>
+
+static int dsa_switch_bridge_join(struct dsa_switch *ds,
+                                 struct dsa_notifier_bridge_info *info)
+{
+       if (ds->index == info->sw_index && ds->ops->port_bridge_join)
+               return ds->ops->port_bridge_join(ds, info->port, info->br);
+
+       if (ds->index != info->sw_index)
+               dev_dbg(ds->dev, "crosschip DSA port %d.%d bridged to %s\n",
+                       info->sw_index, info->port, netdev_name(info->br));
+
+       return 0;
+}
+
+static int dsa_switch_bridge_leave(struct dsa_switch *ds,
+                                  struct dsa_notifier_bridge_info *info)
+{
+       if (ds->index == info->sw_index && ds->ops->port_bridge_leave)
+               ds->ops->port_bridge_leave(ds, info->port, info->br);
+
+       if (ds->index != info->sw_index)
+               dev_dbg(ds->dev, "crosschip DSA port %d.%d unbridged from %s\n",
+                       info->sw_index, info->port, netdev_name(info->br));
+
+       return 0;
+}
+
+static int dsa_switch_event(struct notifier_block *nb,
+                           unsigned long event, void *info)
+{
+       struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
+       int err;
+
+       switch (event) {
+       case DSA_NOTIFIER_BRIDGE_JOIN:
+               err = dsa_switch_bridge_join(ds, info);
+               break;
+       case DSA_NOTIFIER_BRIDGE_LEAVE:
+               err = dsa_switch_bridge_leave(ds, info);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       /* Non-switchdev operations cannot be rolled back. If a DSA driver
+        * returns an error during the chained call, switch chips may be in an
+        * inconsistent state.
+        */
+       if (err)
+               dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
+                       event, err);
+
+       return notifier_from_errno(err);
+}
+
+int dsa_switch_register_notifier(struct dsa_switch *ds)
+{
+       ds->nb.notifier_call = dsa_switch_event;
+
+       return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
+}
+
+void dsa_switch_unregister_notifier(struct dsa_switch *ds)
+{
+       int err;
+
+       err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
+       if (err)
+               dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
+}
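
The chain registered here is fed from the slave side on bridge join/leave. A
hedged sketch of the emitting end; the helper name is illustrative, while the
types, the event ID and the fan-out behaviour come from this file:

#include <linux/notifier.h>
#include <net/dsa.h>

static int dsa_tree_notify_bridge_join(struct dsa_switch_tree *dst,
				       int sw_index, int port,
				       struct net_device *br)
{
	struct dsa_notifier_bridge_info info = {
		.sw_index = sw_index,
		.port = port,
		.br = br,
	};
	int err;

	/* every switch in the fabric sees the event, not only the one
	 * hosting the bridged port; the others hit the dev_dbg path above
	 */
	err = raw_notifier_call_chain(&dst->nh, DSA_NOTIFIER_BRIDGE_JOIN,
				      &info);
	return notifier_to_errno(err);
}
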
index 21bffde6e4bf1043c65eeef86f206c592e987eb7..5d925b6b2bb14f78f84a06b84b4fa19bd6846e82 100644 (file)
@@ -80,9 +80,9 @@ static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev
                        ((skb->priority << BRCM_IG_TC_SHIFT) & BRCM_IG_TC_MASK);
        brcm_tag[1] = 0;
        brcm_tag[2] = 0;
-       if (p->port == 8)
+       if (p->dp->index == 8)
                brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
-       brcm_tag[3] = (1 << p->port) & BRCM_IG_DSTMAP1_MASK;
+       brcm_tag[3] = (1 << p->dp->index) & BRCM_IG_DSTMAP1_MASK;
 
        return skb;
 
@@ -102,7 +102,7 @@ static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
        if (unlikely(dst == NULL))
                goto out_drop;
 
-       ds = dst->ds[0];
+       ds = dst->cpu_switch;
 
        skb = skb_unshare(skb, GFP_ATOMIC);
        if (skb == NULL)
@@ -121,13 +121,14 @@ static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
        /* We should never see a reserved reason code without knowing how to
         * handle it
         */
-       WARN_ON(brcm_tag[2] & BRCM_EG_RC_RSVD);
+       if (unlikely(brcm_tag[2] & BRCM_EG_RC_RSVD))
+               goto out_drop;
 
        /* Locate which port this is coming from */
        source_port = brcm_tag[3] & BRCM_EG_PID_MASK;
 
        /* Validate port against switch setup: it must be known and have a slave netdev */
-       if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev)
+       if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
                goto out_drop;
 
        /* Remove Broadcom tag and update checksum */
index bce79ffe342bccad589f06b9b5402daee68998c3..72579ceea381b7e2bce99a28208810b707434f09 100644 (file)
@@ -33,8 +33,8 @@ static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
                 * Construct tagged FROM_CPU DSA tag from 802.1q tag.
                 */
                dsa_header = skb->data + 2 * ETH_ALEN;
-               dsa_header[0] = 0x60 | p->parent->index;
-               dsa_header[1] = p->port << 3;
+               dsa_header[0] = 0x60 | p->dp->ds->index;
+               dsa_header[1] = p->dp->index << 3;
 
                /*
                 * Move CFI field from byte 2 to byte 1.
@@ -54,8 +54,8 @@ static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
                 * Construct untagged FROM_CPU DSA tag.
                 */
                dsa_header = skb->data + 2 * ETH_ALEN;
-               dsa_header[0] = 0x40 | p->parent->index;
-               dsa_header[1] = p->port << 3;
+               dsa_header[0] = 0x40 | p->dp->ds->index;
+               dsa_header[1] = p->dp->index << 3;
                dsa_header[2] = 0x00;
                dsa_header[3] = 0x00;
        }
@@ -114,7 +114,7 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
        if (!ds)
                goto out_drop;
 
-       if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev)
+       if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
                goto out_drop;
 
        /*
index 6c1720e88537af73a93a744c1132db4c5f2b1873..648c051817a1b4a4e64cda67bab8c81288027a4e 100644 (file)
@@ -42,8 +42,8 @@ static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
                edsa_header[1] = ETH_P_EDSA & 0xff;
                edsa_header[2] = 0x00;
                edsa_header[3] = 0x00;
-               edsa_header[4] = 0x60 | p->parent->index;
-               edsa_header[5] = p->port << 3;
+               edsa_header[4] = 0x60 | p->dp->ds->index;
+               edsa_header[5] = p->dp->index << 3;
 
                /*
                 * Move CFI field from byte 6 to byte 5.
@@ -67,8 +67,8 @@ static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
                edsa_header[1] = ETH_P_EDSA & 0xff;
                edsa_header[2] = 0x00;
                edsa_header[3] = 0x00;
-               edsa_header[4] = 0x40 | p->parent->index;
-               edsa_header[5] = p->port << 3;
+               edsa_header[4] = 0x40 | p->dp->ds->index;
+               edsa_header[5] = p->dp->index << 3;
                edsa_header[6] = 0x00;
                edsa_header[7] = 0x00;
        }
@@ -127,7 +127,7 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
        if (!ds)
                goto out_drop;
 
-       if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev)
+       if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
                goto out_drop;
 
        /*
index 0c90cacee7aa7fe3fd309f0e693999215da3cc28..30240f343aea8450b13936159b4b9a76126a8977 100644 (file)
@@ -54,7 +54,7 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Set the version field, and set destination port information */
        hdr = QCA_HDR_VERSION << QCA_HDR_XMIT_VERSION_S |
                QCA_HDR_XMIT_FROM_CPU |
-               BIT(p->port);
+               BIT(p->dp->index);
 
        *phdr = htons(hdr);
 
@@ -104,7 +104,7 @@ static int qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
        /* This protocol doesn't support cascading multiple switches so it's
         * safe to assume the switch is first in the tree
         */
-       ds = dst->ds[0];
+       ds = dst->cpu_switch;
        if (!ds)
                goto out_drop;
 
index 5e3903eb1afa644b8b8861e60e112957346e5104..26f977176978085af9c034319c754a1ac7501d4c 100644 (file)
@@ -50,7 +50,7 @@ static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
 
        trailer = skb_put(nskb, 4);
        trailer[0] = 0x80;
-       trailer[1] = 1 << p->port;
+       trailer[1] = 1 << p->dp->index;
        trailer[2] = 0x10;
        trailer[3] = 0x00;
 
@@ -67,7 +67,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
 
        if (unlikely(dst == NULL))
                goto out_drop;
-       ds = dst->ds[0];
+       ds = dst->cpu_switch;
 
        skb = skb_unshare(skb, GFP_ATOMIC);
        if (skb == NULL)
@@ -82,7 +82,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
                goto out_drop;
 
        source_port = trailer[1] & 7;
-       if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev)
+       if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
                goto out_drop;
 
        pskb_trim_rcsum(skb, skb->len - 4);
index 516c87e75de7009e9e4f0cbdc80f8160c318fdf4..1446810047f5af7ee34732fbddb0a99c6939da67 100644 (file)
@@ -393,6 +393,34 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
 }
 EXPORT_SYMBOL(alloc_etherdev_mqs);
 
+static void devm_free_netdev(struct device *dev, void *res)
+{
+       free_netdev(*(struct net_device **)res);
+}
+
+struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
+                                          unsigned int txqs, unsigned int rxqs)
+{
+       struct net_device **dr;
+       struct net_device *netdev;
+
+       dr = devres_alloc(devm_free_netdev, sizeof(*dr), GFP_KERNEL);
+       if (!dr)
+               return NULL;
+
+       netdev = alloc_etherdev_mqs(sizeof_priv, txqs, rxqs);
+       if (!netdev) {
+               devres_free(dr);
+               return NULL;
+       }
+
+       *dr = netdev;
+       devres_add(dev, dr);
+
+       return netdev;
+}
+EXPORT_SYMBOL(devm_alloc_etherdev_mqs);
+
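The devres variant removes the free_netdev() error and remove paths from
drivers. A hedged probe sketch; the foo_* names are illustrative:

#include <linux/etherdevice.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct net_device *ndev;

	/* freed automatically when pdev->dev is released, so no
	 * free_netdev() on the error or remove paths;
	 * unregister_netdev() remains the driver's job
	 */
	ndev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct foo_priv),
				       1, 1);
	if (!ndev)
		return -ENOMEM;

	/* ... hardware setup ... */

	return register_netdev(ndev);
}
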
 ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
 {
        return scnprintf(buf, PAGE_SIZE, "%*phC\n", len, addr);
@@ -447,7 +475,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
index f5b60388d02fa255eb20362872be62eb96ce73ea..56080da4aa77ef581f4a3c23b4179d72e511980b 100644 (file)
@@ -12,6 +12,7 @@
 #include "hsr_slave.h"
 #include <linux/etherdevice.h>
 #include <linux/if_arp.h>
+#include <linux/if_vlan.h>
 #include "hsr_main.h"
 #include "hsr_device.h"
 #include "hsr_forward.h"
@@ -81,7 +82,7 @@ static int hsr_check_dev_ok(struct net_device *dev)
                return -EINVAL;
        }
 
-       if (dev->priv_flags & IFF_802_1Q_VLAN) {
+       if (is_vlan_dev(dev)) {
                netdev_info(dev, "HSR on top of VLAN is not yet supported in this driver.\n");
                return -EINVAL;
        }
diff --git a/net/ife/Kconfig b/net/ife/Kconfig
new file mode 100644 (file)
index 0000000..31e48b6
--- /dev/null
@@ -0,0 +1,16 @@
+#
+# IFE subsystem configuration
+#
+
+menuconfig NET_IFE
+       tristate "Inter-FE based on IETF ForCES InterFE LFB"
+       depends on NET
+       default n
+       help
+         Say Y here to add support for the IFE encapsulation protocol.
+         For details refer to the netdev01 paper:
+         "Distributing Linux Traffic Control Classifier-Action Subsystem"
+         Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
+
+         To compile this support as a module, choose M here: the module will
+         be called ife.
diff --git a/net/ife/Makefile b/net/ife/Makefile
new file mode 100644 (file)
index 0000000..2a90d97
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the IFE encapsulation protocol
+#
+
+obj-$(CONFIG_NET_IFE) += ife.o
diff --git a/net/ife/ife.c b/net/ife/ife.c
new file mode 100644 (file)
index 0000000..f360341
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * net/ife/ife.c - Inter-FE protocol based on ForCES WG InterFE LFB
+ * Copyright (c) 2015 Jamal Hadi Salim <jhs@mojatatu.com>
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Refer to: draft-ietf-forces-interfelfb-03 and netdev01 paper:
+ * "Distributing Linux Traffic Control Classifier-Action Subsystem"
+ * Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <net/net_namespace.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <linux/etherdevice.h>
+#include <net/ife.h>
+
+struct ifeheadr {
+       __be16 metalen;
+       u8 tlv_data[];
+};
+
+void *ife_encode(struct sk_buff *skb, u16 metalen)
+{
+       /* OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
+        * where ORIGDATA = original ethernet header ...
+        */
+       int hdrm = metalen + IFE_METAHDRLEN;
+       int total_push = hdrm + skb->dev->hard_header_len;
+       struct ifeheadr *ifehdr;
+       struct ethhdr *iethh;   /* inner ether header */
+       int skboff = 0;
+       int err;
+
+       err = skb_cow_head(skb, total_push);
+       if (unlikely(err))
+               return NULL;
+
+       iethh = (struct ethhdr *) skb->data;
+
+       __skb_push(skb, total_push);
+       memcpy(skb->data, iethh, skb->dev->hard_header_len);
+       skb_reset_mac_header(skb);
+       skboff += skb->dev->hard_header_len;
+
+       /* total metadata length */
+       ifehdr = (struct ifeheadr *) (skb->data + skboff);
+       metalen += IFE_METAHDRLEN;
+       ifehdr->metalen = htons(metalen);
+
+       return ifehdr->tlv_data;
+}
+EXPORT_SYMBOL_GPL(ife_encode);
+
+void *ife_decode(struct sk_buff *skb, u16 *metalen)
+{
+       struct ifeheadr *ifehdr;
+       int total_pull;
+       u16 ifehdrln;
+
+       ifehdr = (struct ifeheadr *) (skb->data + skb->dev->hard_header_len);
+       ifehdrln = ntohs(ifehdr->metalen);
+       total_pull = skb->dev->hard_header_len + ifehdrln;
+
+       if (unlikely(ifehdrln < 2))
+               return NULL;
+
+       if (unlikely(!pskb_may_pull(skb, total_pull)))
+               return NULL;
+
+       skb_set_mac_header(skb, total_pull);
+       __skb_pull(skb, total_pull);
+       *metalen = ifehdrln - IFE_METAHDRLEN;
+
+       return &ifehdr->tlv_data;
+}
+EXPORT_SYMBOL_GPL(ife_decode);
+
+struct meta_tlvhdr {
+       __be16 type;
+       __be16 len;
+};
+
+/* Caller takes care of presenting data in network order
+ */
+void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen)
+{
+       struct meta_tlvhdr *tlv = (struct meta_tlvhdr *) skbdata;
+
+       *dlen = ntohs(tlv->len) - NLA_HDRLEN;
+       *attrtype = ntohs(tlv->type);
+
+       if (totlen)
+               *totlen = nla_total_size(*dlen);
+
+       return skbdata + sizeof(struct meta_tlvhdr);
+}
+EXPORT_SYMBOL_GPL(ife_tlv_meta_decode);
+
+void *ife_tlv_meta_next(void *skbdata)
+{
+       struct meta_tlvhdr *tlv = (struct meta_tlvhdr *) skbdata;
+       u16 tlvlen = ntohs(tlv->len);
+
+       tlvlen = NLA_ALIGN(tlvlen);
+
+       return skbdata + tlvlen;
+}
+EXPORT_SYMBOL_GPL(ife_tlv_meta_next);
+
+/* Caller takes care of presenting data in network order
+ */
+int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
+{
+       __be32 *tlv = (__be32 *) (skbdata);
+       u16 totlen = nla_total_size(dlen);      /* alignment + hdr */
+       char *dptr = (char *) tlv + NLA_HDRLEN;
+       u32 htlv = attrtype << 16 | (dlen + NLA_HDRLEN);
+
+       *tlv = htonl(htlv);
+       memset(dptr, 0, totlen - NLA_HDRLEN);
+       memcpy(dptr, dval, dlen);
+
+       return totlen;
+}
+EXPORT_SYMBOL_GPL(ife_tlv_meta_encode);
+
+MODULE_AUTHOR("Jamal Hadi Salim <jhs@mojatatu.com>");
+MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>");
+MODULE_DESCRIPTION("Inter-FE LFB action");
+MODULE_LICENSE("GPL");
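
A hedged usage sketch, not part of this patch, tying the helpers together:
encoding a single metadatum (the skb mark) behind the IFE header.
IFE_META_SKBMARK is the metadata ID from the tc_ife uapi header; error
handling is elided:

#include <net/ife.h>
#include <net/netlink.h>
#include <linux/tc_act/tc_ife.h>

static int foo_ife_add_mark(struct sk_buff *skb)
{
	u32 mark = skb->mark;
	u16 metalen = nla_total_size(sizeof(mark));	/* one NLA-aligned TLV */
	void *tlv;

	tlv = ife_encode(skb, metalen);	/* pushes header and metalen field */
	if (!tlv)
		return -ENOMEM;

	ife_tlv_meta_encode(tlv, IFE_META_SKBMARK, sizeof(mark), &mark);
	return 0;
}
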
index 6e7baaf814c608b4e222406c42287382d4eef474..91a2557942fa8533564943f1f8e8d9df4d7df141 100644 (file)
@@ -187,6 +187,7 @@ config NET_IPGRE_DEMUX
 config NET_IP_TUNNEL
        tristate
        select DST_CACHE
+       select GRO_CELLS
        default n
 
 config NET_IPGRE
@@ -360,6 +361,19 @@ config INET_ESP
 
          If unsure, say Y.
 
+config INET_ESP_OFFLOAD
+       tristate "IP: ESP transformation offload"
+       depends on INET_ESP
+       select XFRM_OFFLOAD
+       default n
+       ---help---
+         Support for ESP transformation offload. This makes sense
+         only if this system really does IPsec and wants to do it
+         with high throughput. A typical desktop system does not
+         need it, even if it does IPsec.
+
+         If unsure, say N.
+
 config INET_IPCOMP
        tristate "IP: IPComp transformation"
        select INET_XFRM_TUNNEL
index 48af58a5686e3e9d81120217e5949d06f5ee6ce9..c6d4238ff94a8a329bf44bf59b30fb09fb07f707 100644 (file)
@@ -29,6 +29,7 @@ obj-$(CONFIG_NET_IPVTI) += ip_vti.o
 obj-$(CONFIG_SYN_COOKIES) += syncookies.o
 obj-$(CONFIG_INET_AH) += ah4.o
 obj-$(CONFIG_INET_ESP) += esp4.o
+obj-$(CONFIG_INET_ESP_OFFLOAD) += esp4_offload.o
 obj-$(CONFIG_INET_IPCOMP) += ipcomp.o
 obj-$(CONFIG_INET_XFRM_TUNNEL) += xfrm4_tunnel.o
 obj-$(CONFIG_INET_XFRM_MODE_BEET) += xfrm4_mode_beet.o
index f75069883f2b517b2121707ef753d52ab3da5d26..602d40f43687c91db7250822439bacbe85318fa3 100644 (file)
@@ -479,7 +479,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 
        snum = ntohs(addr->sin_port);
        err = -EACCES;
-       if (snum && snum < PROT_SOCK &&
+       if (snum && snum < inet_prot_sock(net) &&
            !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
                goto out;
 
@@ -570,19 +570,30 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
  *     TCP 'magic' in here.
  */
 int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
-                         int addr_len, int flags)
+                         int addr_len, int flags, int is_sendmsg)
 {
        struct sock *sk = sock->sk;
        int err;
        long timeo;
 
-       if (addr_len < sizeof(uaddr->sa_family))
-               return -EINVAL;
+       /*
+        * uaddr can be NULL and addr_len can be 0 if:
+        * sk is a TCP fastopen active socket and
+        * TCP_FASTOPEN_CONNECT sockopt is set and
+        * we already have a valid cookie for this socket.
+        * In this case, user can call write() after connect().
+        * write() will invoke tcp_sendmsg_fastopen() which calls
+        * __inet_stream_connect().
+        */
+       if (uaddr) {
+               if (addr_len < sizeof(uaddr->sa_family))
+                       return -EINVAL;
 
-       if (uaddr->sa_family == AF_UNSPEC) {
-               err = sk->sk_prot->disconnect(sk, flags);
-               sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
-               goto out;
+               if (uaddr->sa_family == AF_UNSPEC) {
+                       err = sk->sk_prot->disconnect(sk, flags);
+                       sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
+                       goto out;
+               }
        }
 
        switch (sock->state) {
@@ -593,7 +604,10 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                err = -EISCONN;
                goto out;
        case SS_CONNECTING:
-               err = -EALREADY;
+               if (inet_sk(sk)->defer_connect)
+                       err = is_sendmsg ? -EINPROGRESS : -EISCONN;
+               else
+                       err = -EALREADY;
                /* Fall out of switch with err, set for this state */
                break;
        case SS_UNCONNECTED:
@@ -607,6 +621,9 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
 
                sock->state = SS_CONNECTING;
 
+               if (!err && inet_sk(sk)->defer_connect)
+                       goto out;
+
                /* Just entered SS_CONNECTING state; the only
                 * difference is that return value in non-blocking
                 * case is EINPROGRESS, rather than EALREADY.
@@ -662,7 +679,7 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
        int err;
 
        lock_sock(sock->sk);
-       err = __inet_stream_connect(sock, uaddr, addr_len, flags);
+       err = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
        release_sock(sock->sk);
        return err;
 }
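
The defer_connect handling above is the kernel half of the TCP_FASTOPEN_CONNECT
socket option: connect() returns immediately without emitting a SYN, and the
data from the first write() rides in the SYN once a fast open cookie is cached.
A hedged userspace sketch, error handling elided (TCP_FASTOPEN_CONNECT may need
to be defined as 30 on older headers):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

static void tfo_send(const struct sockaddr *sa, socklen_t salen,
		     const void *buf, size_t len)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
	connect(fd, sa, salen);	/* defer_connect set, no SYN sent yet */
	write(fd, buf, len);	/* invokes tcp_sendmsg_fastopen() */
	close(fd);
}
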
@@ -1406,7 +1423,7 @@ out_unlock:
        rcu_read_unlock();
 
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
@@ -1700,6 +1717,9 @@ static __net_init int inet_init_net(struct net *net)
        net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
        net->ipv4.sysctl_ip_dynaddr = 0;
        net->ipv4.sysctl_ip_early_demux = 1;
+#ifdef CONFIG_SYSCTL
+       net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
+#endif
 
        return 0;
 }
@@ -1831,8 +1851,6 @@ static int __init inet_init(void)
 
        ip_init();
 
-       tcp_v4_init();
-
        /* Setup TCP slab cache for open requests. */
        tcp_init();
 
index f2a71025a7705d5c7b10dc108440ee2ea5cd03aa..22377c8ff14b72292caa5c2f72898fc60dcfa576 100644 (file)
@@ -270,6 +270,9 @@ static void ah_input_done(struct crypto_async_request *base, int err)
        int ihl = ip_hdrlen(skb);
        int ah_hlen = (ah->hdrlen + 2) << 2;
 
+       if (err)
+               goto out;
+
        work_iph = AH_SKB_CB(skb)->tmp;
        auth_data = ah_tmp_auth(work_iph, ihl);
        icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
index 4cd2ee8857d2c72d7b67ee37ca36bf5442169581..5d367b7ff542c038cb1a944595df458ae7661132 100644 (file)
@@ -65,8 +65,6 @@
 #include <net/net_namespace.h>
 #include <net/addrconf.h>
 
-#include "fib_lookup.h"
-
 static struct ipv4_devconf ipv4_devconf = {
        .data = {
                [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
index 20fb25e3027bbbf8b8c2068751caf40df939546d..b1e24446e2972444835dd85c02434f3b089ba552 100644 (file)
@@ -18,6 +18,8 @@
 #include <net/protocol.h>
 #include <net/udp.h>
 
+#include <linux/highmem.h>
+
 struct esp_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
@@ -92,11 +94,40 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
                             __alignof__(struct scatterlist));
 }
 
+static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
+{
+       struct esp_output_extra *extra = esp_tmp_extra(tmp);
+       struct crypto_aead *aead = x->data;
+       int extralen = 0;
+       u8 *iv;
+       struct aead_request *req;
+       struct scatterlist *sg;
+
+       if (x->props.flags & XFRM_STATE_ESN)
+               extralen += sizeof(*extra);
+
+       extra = esp_tmp_extra(tmp);
+       iv = esp_tmp_iv(aead, tmp, extralen);
+       req = esp_tmp_req(aead, iv);
+
+       /* Unref skb_frag_pages in the src scatterlist if necessary.
+        * Skip the first sg which comes from skb->data.
+        */
+       if (req->src != req->dst)
+               for (sg = sg_next(req->src); sg; sg = sg_next(sg))
+                       put_page(sg_page(sg));
+}
+
 static void esp_output_done(struct crypto_async_request *base, int err)
 {
        struct sk_buff *skb = base->data;
+       void *tmp;
+       struct dst_entry *dst = skb_dst(skb);
+       struct xfrm_state *x = dst->xfrm;
 
-       kfree(ESP_SKB_CB(skb)->tmp);
+       tmp = ESP_SKB_CB(skb)->tmp;
+       esp_ssg_unref(x, tmp);
+       kfree(tmp);
        xfrm_output_resume(skb, err);
 }
 
@@ -120,6 +151,29 @@ static void esp_output_restore_header(struct sk_buff *skb)
                                sizeof(__be32));
 }
 
+static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
+                                              struct ip_esp_hdr *esph,
+                                              struct esp_output_extra *extra)
+{
+       struct xfrm_state *x = skb_dst(skb)->xfrm;
+
+       /* For ESN we move the header forward by 4 bytes to
+        * accommodate the high bits.  We will move it back after
+        * encryption.
+        */
+       if ((x->props.flags & XFRM_STATE_ESN)) {
+               extra->esphoff = (unsigned char *)esph -
+                                skb_transport_header(skb);
+               esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
+               extra->seqhi = esph->spi;
+               esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+       }
+
+       esph->spi = x->id.spi;
+
+       return esph;
+}
+
 static void esp_output_done_esn(struct crypto_async_request *base, int err)
 {
        struct sk_buff *skb = base->data;
@@ -128,18 +182,36 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err)
        esp_output_done(base, err);
 }
 
+static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
+{
+       /* Fill padding... */
+       if (tfclen) {
+               memset(tail, 0, tfclen);
+               tail += tfclen;
+       }
+       do {
+               int i;
+               for (i = 0; i < plen - 2; i++)
+                       tail[i] = i + 1;
+       } while (0);
+       tail[plen - 2] = plen - 2;
+       tail[plen - 1] = proto;
+}
+
 static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
 {
-       int err;
        struct esp_output_extra *extra;
+       int err = -ENOMEM;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct aead_request *req;
-       struct scatterlist *sg;
+       struct scatterlist *sg, *dsg;
        struct sk_buff *trailer;
+       struct page *page;
        void *tmp;
        u8 *iv;
        u8 *tail;
+       u8 *vaddr;
        int blksize;
        int clen;
        int alen;
@@ -149,7 +221,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
        int nfrags;
        int assoclen;
        int extralen;
+       int tailen;
        __be64 seqno;
+       __u8 proto = *skb_mac_header(skb);
 
        /* skb is pure payload to encrypt */
 
@@ -169,12 +243,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        clen = ALIGN(skb->len + 2 + tfclen, blksize);
        plen = clen - skb->len - tfclen;
-
-       err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
-       if (err < 0)
-               goto error;
-       nfrags = err;
-
+       tailen = tfclen + plen + alen;
        assoclen = sizeof(*esph);
        extralen = 0;
 
@@ -183,35 +252,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
                assoclen += sizeof(__be32);
        }
 
-       tmp = esp_alloc_tmp(aead, nfrags, extralen);
-       if (!tmp) {
-               err = -ENOMEM;
-               goto error;
-       }
-
-       extra = esp_tmp_extra(tmp);
-       iv = esp_tmp_iv(aead, tmp, extralen);
-       req = esp_tmp_req(aead, iv);
-       sg = esp_req_sg(aead, req);
-
-       /* Fill padding... */
-       tail = skb_tail_pointer(trailer);
-       if (tfclen) {
-               memset(tail, 0, tfclen);
-               tail += tfclen;
-       }
-       do {
-               int i;
-               for (i = 0; i < plen - 2; i++)
-                       tail[i] = i + 1;
-       } while (0);
-       tail[plen - 2] = plen - 2;
-       tail[plen - 1] = *skb_mac_header(skb);
-       pskb_put(skb, trailer, clen - skb->len + alen);
-
-       skb_push(skb, -skb_network_offset(skb));
-       esph = ip_esp_hdr(skb);
        *skb_mac_header(skb) = IPPROTO_ESP;
+       esph = ip_esp_hdr(skb);
 
        /* this is non-NULL only with UDP Encapsulation */
        if (x->encap) {
@@ -230,7 +272,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
                uh = (struct udphdr *)esph;
                uh->source = sport;
                uh->dest = dport;
-               uh->len = htons(skb->len - skb_transport_offset(skb));
+               uh->len = htons(skb->len + tailen
+                               - skb_transport_offset(skb));
                uh->check = 0;
 
                switch (encap_type) {
@@ -248,31 +291,148 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
                *skb_mac_header(skb) = IPPROTO_UDP;
        }
 
-       esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+       if (!skb_cloned(skb)) {
+               if (tailen <= skb_availroom(skb)) {
+                       nfrags = 1;
+                       trailer = skb;
+                       tail = skb_tail_pointer(trailer);
 
-       aead_request_set_callback(req, 0, esp_output_done, skb);
+                       goto skip_cow;
+               } else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
+                          && !skb_has_frag_list(skb)) {
+                       int allocsize;
+                       struct sock *sk = skb->sk;
+                       struct page_frag *pfrag = &x->xfrag;
 
-       /* For ESN we move the header forward by 4 bytes to
-        * accomodate the high bits.  We will move it back after
-        * encryption.
-        */
-       if ((x->props.flags & XFRM_STATE_ESN)) {
-               extra->esphoff = (unsigned char *)esph -
-                                skb_transport_header(skb);
-               esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
-               extra->seqhi = esph->spi;
-               esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
-               aead_request_set_callback(req, 0, esp_output_done_esn, skb);
+                       allocsize = ALIGN(tailen, L1_CACHE_BYTES);
+
+                       spin_lock_bh(&x->lock);
+
+                       if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
+                               spin_unlock_bh(&x->lock);
+                               goto cow;
+                       }
+
+                       page = pfrag->page;
+                       get_page(page);
+
+                       vaddr = kmap_atomic(page);
+
+                       tail = vaddr + pfrag->offset;
+
+                       esp_output_fill_trailer(tail, tfclen, plen, proto);
+
+                       kunmap_atomic(vaddr);
+
+                       nfrags = skb_shinfo(skb)->nr_frags;
+
+                       __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
+                                            tailen);
+                       skb_shinfo(skb)->nr_frags = ++nfrags;
+
+                       pfrag->offset = pfrag->offset + allocsize;
+                       nfrags++;
+
+                       skb->len += tailen;
+                       skb->data_len += tailen;
+                       skb->truesize += tailen;
+                       if (sk)
+                               atomic_add(tailen, &sk->sk_wmem_alloc);
+
+                       skb_push(skb, -skb_network_offset(skb));
+
+                       esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+                       esph->spi = x->id.spi;
+
+                       tmp = esp_alloc_tmp(aead, nfrags + 2, extralen);
+                       if (!tmp) {
+                               spin_unlock_bh(&x->lock);
+                               err = -ENOMEM;
+                               goto error;
+                       }
+
+                       extra = esp_tmp_extra(tmp);
+                       iv = esp_tmp_iv(aead, tmp, extralen);
+                       req = esp_tmp_req(aead, iv);
+                       sg = esp_req_sg(aead, req);
+                       dsg = &sg[nfrags];
+
+                       esph = esp_output_set_extra(skb, esph, extra);
+
+                       sg_init_table(sg, nfrags);
+                       skb_to_sgvec(skb, sg,
+                                    (unsigned char *)esph - skb->data,
+                                    assoclen + ivlen + clen + alen);
+
+                       allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
+
+                       if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
+                               spin_unlock_bh(&x->lock);
+                               err = -ENOMEM;
+                               goto error;
+                       }
+
+                       skb_shinfo(skb)->nr_frags = 1;
+
+                       page = pfrag->page;
+                       get_page(page);
+                       /* replace page frags in skb with new page */
+                       __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
+                       pfrag->offset = pfrag->offset + allocsize;
+
+                       sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
+                       skb_to_sgvec(skb, dsg,
+                                    (unsigned char *)esph - skb->data,
+                                    assoclen + ivlen + clen + alen);
+
+                       spin_unlock_bh(&x->lock);
+
+                       goto skip_cow2;
+               }
        }
 
+cow:
+       err = skb_cow_data(skb, tailen, &trailer);
+       if (err < 0)
+               goto error;
+       nfrags = err;
+       tail = skb_tail_pointer(trailer);
+       esph = ip_esp_hdr(skb);
+
+skip_cow:
+       esp_output_fill_trailer(tail, tfclen, plen, proto);
+
+       pskb_put(skb, trailer, clen - skb->len + alen);
+       skb_push(skb, -skb_network_offset(skb));
+       esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
        esph->spi = x->id.spi;
 
+       tmp = esp_alloc_tmp(aead, nfrags, extralen);
+       if (!tmp) {
+               err = -ENOMEM;
+               goto error;
+       }
+
+       extra = esp_tmp_extra(tmp);
+       iv = esp_tmp_iv(aead, tmp, extralen);
+       req = esp_tmp_req(aead, iv);
+       sg = esp_req_sg(aead, req);
+       dsg = sg;
+
+       esph = esp_output_set_extra(skb, esph, extra);
+
        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg,
                     (unsigned char *)esph - skb->data,
                     assoclen + ivlen + clen + alen);
 
-       aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
+skip_cow2:
+       if ((x->props.flags & XFRM_STATE_ESN))
+               aead_request_set_callback(req, 0, esp_output_done_esn, skb);
+       else
+               aead_request_set_callback(req, 0, esp_output_done, skb);
+
+       aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv);
        aead_request_set_ad(req, assoclen);
 
        seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
@@ -298,6 +458,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
                        esp_output_restore_header(skb);
        }
 
+       if (sg != dsg)
+               esp_ssg_unref(x, tmp);
        kfree(tmp);
 
 error:
@@ -401,6 +563,23 @@ static void esp_input_restore_header(struct sk_buff *skb)
        __skb_pull(skb, 4);
 }
 
+static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
+{
+       struct xfrm_state *x = xfrm_input_state(skb);
+       struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data;
+
+       /* For ESN we move the header forward by 4 bytes to
+        * accommodate the high bits.  We will move it back after
+        * decryption.
+        */
+       if ((x->props.flags & XFRM_STATE_ESN)) {
+               esph = (void *)skb_push(skb, 4);
+               *seqhi = esph->spi;
+               esph->spi = esph->seq_no;
+               esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
+       }
+}
+
 static void esp_input_done_esn(struct crypto_async_request *base, int err)
 {
        struct sk_buff *skb = base->data;
@@ -437,12 +616,6 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
        if (elen <= 0)
                goto out;
 
-       err = skb_cow_data(skb, 0, &trailer);
-       if (err < 0)
-               goto out;
-
-       nfrags = err;
-
        assoclen = sizeof(*esph);
        seqhilen = 0;
 
@@ -451,6 +624,26 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
                assoclen += seqhilen;
        }
 
+       if (!skb_cloned(skb)) {
+               if (!skb_is_nonlinear(skb)) {
+                       nfrags = 1;
+
+                       goto skip_cow;
+               } else if (!skb_has_frag_list(skb)) {
+                       nfrags = skb_shinfo(skb)->nr_frags;
+                       nfrags++;
+
+                       goto skip_cow;
+               }
+       }
+
+       err = skb_cow_data(skb, 0, &trailer);
+       if (err < 0)
+               goto out;
+
+       nfrags = err;
+
+skip_cow:
        err = -ENOMEM;
        tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
        if (!tmp)
@@ -462,26 +655,17 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);
 
-       skb->ip_summed = CHECKSUM_NONE;
+       esp_input_set_header(skb, seqhi);
 
-       esph = (struct ip_esp_hdr *)skb->data;
+       sg_init_table(sg, nfrags);
+       skb_to_sgvec(skb, sg, 0, skb->len);
 
-       aead_request_set_callback(req, 0, esp_input_done, skb);
+       skb->ip_summed = CHECKSUM_NONE;
 
-       /* For ESN we move the header forward by 4 bytes to
-        * accomodate the high bits.  We will move it back after
-        * decryption.
-        */
-       if ((x->props.flags & XFRM_STATE_ESN)) {
-               esph = (void *)skb_push(skb, 4);
-               *seqhi = esph->spi;
-               esph->spi = esph->seq_no;
-               esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
+       if ((x->props.flags & XFRM_STATE_ESN))
                aead_request_set_callback(req, 0, esp_input_done_esn, skb);
-       }
-
-       sg_init_table(sg, nfrags);
-       skb_to_sgvec(skb, sg, 0, skb->len);
+       else
+               aead_request_set_callback(req, 0, esp_input_done, skb);
 
        aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
        aead_request_set_ad(req, assoclen);
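
A worked instance of the padding math that esp_output() applies (illustrative
numbers, assuming an AES-CBC transform):

/* blksize = ALIGN(crypto_aead_blocksize(aead), 4) = 16
 * skb->len = 100, tfclen = 0
 * clen = ALIGN(100 + 2 + 0, 16) = 112
 * plen = 112 - 100 - 0 = 12
 * esp_output_fill_trailer() then writes the 12-byte trailer
 * { 1, 2, ..., 10, 10, proto }: ten incrementing pad bytes, the
 * pad-length byte (plen - 2), and the next-header byte.
 */
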
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
new file mode 100644 (file)
index 0000000..1de4426
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * IPV4 GSO/GRO offload support
+ * Linux INET implementation
+ *
+ * Copyright (C) 2016 secunet Security Networks AG
+ * Author: Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * ESP GRO support
+ */
+
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <net/protocol.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <net/ip.h>
+#include <net/xfrm.h>
+#include <net/esp.h>
+#include <linux/scatterlist.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <net/udp.h>
+
+static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
+                                        struct sk_buff *skb)
+{
+       int offset = skb_gro_offset(skb);
+       struct xfrm_offload *xo;
+       struct xfrm_state *x;
+       __be32 seq;
+       __be32 spi;
+       int err;
+
+       skb_pull(skb, offset);
+
+       if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
+               goto out;
+
+       err = secpath_set(skb);
+       if (err)
+               goto out;
+
+       if (skb->sp->len == XFRM_MAX_DEPTH)
+               goto out;
+
+       x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
+                             (xfrm_address_t *)&ip_hdr(skb)->daddr,
+                             spi, IPPROTO_ESP, AF_INET);
+       if (!x)
+               goto out;
+
+       skb->sp->xvec[skb->sp->len++] = x;
+       skb->sp->olen++;
+
+       xo = xfrm_offload(skb);
+       if (!xo) {
+               xfrm_state_put(x);
+               goto out;
+       }
+       xo->flags |= XFRM_GRO;
+
+       XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
+       XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+       XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+       XFRM_SPI_SKB_CB(skb)->seq = seq;
+
+       /* We don't need to handle errors from xfrm_input, it does all
+        * the error handling and frees the resources on error. */
+       xfrm_input(skb, IPPROTO_ESP, spi, -2);
+
+       return ERR_PTR(-EINPROGRESS);
+out:
+       skb_push(skb, offset);
+       NAPI_GRO_CB(skb)->same_flow = 0;
+       NAPI_GRO_CB(skb)->flush = 1;
+
+       return NULL;
+}
+
+static const struct net_offload esp4_offload = {
+       .callbacks = {
+               .gro_receive = esp4_gro_receive,
+       },
+};
+
+static int __init esp4_offload_init(void)
+{
+       return inet_add_offload(&esp4_offload, IPPROTO_ESP);
+}
+
+static void __exit esp4_offload_exit(void)
+{
+       inet_del_offload(&esp4_offload, IPPROTO_ESP);
+}
+
+module_init(esp4_offload_init);
+module_exit(esp4_offload_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
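
esp4_gro_receive() never aggregates: it either feeds the packet to xfrm_input() for full ESP processing, returning ERR_PTR(-EINPROGRESS) to tell the GRO core the skb has been consumed, or pushes the header back and flags the skb so GRO flushes it down the normal receive path. The return-value contract, as a minimal sketch (the demo_* names are illustrative, not part of the patch):

        static struct sk_buff **demo_gro_receive(struct sk_buff **head,
                                                 struct sk_buff *skb)
        {
                if (demo_protocol_consumed(skb))       /* hypothetical predicate */
                        return ERR_PTR(-EINPROGRESS);  /* skb taken over, stop GRO */

                NAPI_GRO_CB(skb)->same_flow = 0;       /* never merge */
                NAPI_GRO_CB(skb)->flush = 1;           /* flush to normal path */
                return NULL;
        }
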
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 9a375b908d01fae08b46b46df2e2579a4c268027..317026a39cfa2b49bf06182d89a11af0fa2688af 100644 (file)
@@ -471,7 +471,6 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
 static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                       int remaining, struct fib_config *cfg)
 {
-       struct net *net = cfg->fc_nlinfo.nl_net;
        int ret;
 
        change_nexthops(fi) {
@@ -503,16 +502,14 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                        nla = nla_find(attrs, attrlen, RTA_ENCAP);
                        if (nla) {
                                struct lwtunnel_state *lwtstate;
-                               struct net_device *dev = NULL;
                                struct nlattr *nla_entype;
 
                                nla_entype = nla_find(attrs, attrlen,
                                                      RTA_ENCAP_TYPE);
                                if (!nla_entype)
                                        goto err_inval;
-                               if (cfg->fc_oif)
-                                       dev = __dev_get_by_index(net, cfg->fc_oif);
-                               ret = lwtunnel_build_state(dev, nla_get_u16(
+
+                               ret = lwtunnel_build_state(nla_get_u16(
                                                           nla_entype),
                                                           nla,  AF_INET, cfg,
                                                           &lwtstate);
@@ -597,21 +594,18 @@ static inline void fib_add_weight(struct fib_info *fi,
 
 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
 
-static int fib_encap_match(struct net *net, u16 encap_type,
+static int fib_encap_match(u16 encap_type,
                           struct nlattr *encap,
-                          int oif, const struct fib_nh *nh,
+                          const struct fib_nh *nh,
                           const struct fib_config *cfg)
 {
        struct lwtunnel_state *lwtstate;
-       struct net_device *dev = NULL;
        int ret, result = 0;
 
        if (encap_type == LWTUNNEL_ENCAP_NONE)
                return 0;
 
-       if (oif)
-               dev = __dev_get_by_index(net, oif);
-       ret = lwtunnel_build_state(dev, encap_type, encap,
+       ret = lwtunnel_build_state(encap_type, encap,
                                   AF_INET, cfg, &lwtstate);
        if (!ret) {
                result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
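
Both call sites in this file, plus fib_create_info() below and the tunnel build_state hooks in ip_tunnel_core.c further down, drop the struct net_device argument: encap state is now built purely from the netlink attributes, the address family and the caller's config blob, so the __dev_get_by_index() lookup of cfg->fc_oif goes away. Condensed from the hunks, the new call shape is:

        /* after this patch (condensed from the call sites; not a
         * complete declaration) */
        int lwtunnel_build_state(u16 encap_type, struct nlattr *encap,
                                 unsigned int family, const void *cfg,
                                 struct lwtunnel_state **lws);
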
@@ -623,7 +617,6 @@ static int fib_encap_match(struct net *net, u16 encap_type,
 
 int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
 {
-       struct net *net = cfg->fc_nlinfo.nl_net;
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        struct rtnexthop *rtnh;
        int remaining;
@@ -634,9 +627,8 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
 
        if (cfg->fc_oif || cfg->fc_gw) {
                if (cfg->fc_encap) {
-                       if (fib_encap_match(net, cfg->fc_encap_type,
-                                           cfg->fc_encap, cfg->fc_oif,
-                                           fi->fib_nh, cfg))
+                       if (fib_encap_match(cfg->fc_encap_type,
+                                           cfg->fc_encap, fi->fib_nh, cfg))
                            return 1;
                }
                if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
@@ -1093,13 +1085,10 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
 
                if (cfg->fc_encap) {
                        struct lwtunnel_state *lwtstate;
-                       struct net_device *dev = NULL;
 
                        if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE)
                                goto err_inval;
-                       if (cfg->fc_oif)
-                               dev = __dev_get_by_index(net, cfg->fc_oif);
-                       err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+                       err = lwtunnel_build_state(cfg->fc_encap_type,
                                                   cfg->fc_encap, AF_INET, cfg,
                                                   &lwtstate);
                        if (err)
@@ -1366,6 +1355,36 @@ int fib_sync_down_addr(struct net_device *dev, __be32 local)
        return ret;
 }
 
+static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
+                                enum fib_event_type event_type)
+{
+       struct in_device *in_dev = __in_dev_get_rtnl(fib_nh->nh_dev);
+       struct fib_nh_notifier_info info = {
+               .fib_nh = fib_nh,
+       };
+
+       switch (event_type) {
+       case FIB_EVENT_NH_ADD:
+               if (fib_nh->nh_flags & RTNH_F_DEAD)
+                       break;
+               if (IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+                   fib_nh->nh_flags & RTNH_F_LINKDOWN)
+                       break;
+               return call_fib_notifiers(dev_net(fib_nh->nh_dev), event_type,
+                                         &info.info);
+       case FIB_EVENT_NH_DEL:
+               if ((IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
+                    fib_nh->nh_flags & RTNH_F_LINKDOWN) ||
+                   (fib_nh->nh_flags & RTNH_F_DEAD))
+                       return call_fib_notifiers(dev_net(fib_nh->nh_dev),
+                                                 event_type, &info.info);
+       default:
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
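
call_fib_nh_notifiers() announces FIB_EVENT_NH_ADD only when the nexthop is actually usable (not dead, and not linkdown while ignore_routes_with_linkdown is set), and FIB_EVENT_NH_DEL only when it has just become unusable, so listeners see state transitions rather than raw flag churn. A hedged sketch of a consumer; the handler body is illustrative, while the info layout comes from the hunk above:

        static int demo_fib_event(struct notifier_block *nb,
                                  unsigned long event, void *ptr)
        {
                struct fib_notifier_info *info = ptr;
                struct fib_nh_notifier_info *nh_info;

                switch (event) {
                case FIB_EVENT_NH_ADD:
                case FIB_EVENT_NH_DEL:
                        nh_info = container_of(info,
                                               struct fib_nh_notifier_info,
                                               info);
                        /* offload or withdraw nh_info->fib_nh here */
                        break;
                }
                return NOTIFY_DONE;
        }
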
+
 /* Event              force Flags           Description
  * NETDEV_CHANGE      0     LINKDOWN        Carrier OFF, not for scope host
  * NETDEV_DOWN        0     LINKDOWN|DEAD   Link down, not for scope host
@@ -1407,6 +1426,8 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
                                        nexthop_nh->nh_flags |= RTNH_F_LINKDOWN;
                                        break;
                                }
+                               call_fib_nh_notifiers(nexthop_nh,
+                                                     FIB_EVENT_NH_DEL);
                                dead++;
                        }
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -1437,7 +1458,7 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force)
 }
 
 /* Must be invoked inside of an RCU protected region.  */
-void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
+static void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
 {
        struct fib_info *fi = NULL, *last_resort = NULL;
        struct hlist_head *fa_head = res->fa_head;
@@ -1561,6 +1582,7 @@ int fib_sync_up(struct net_device *dev, unsigned int nh_flags)
                                continue;
                        alive++;
                        nexthop_nh->nh_flags &= ~nh_flags;
+                       call_fib_nh_notifiers(nexthop_nh, FIB_EVENT_NH_ADD);
                } endfor_nexthops(fi)
 
                if (alive > 0) {
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 2919d1a10cfdd83f4c68ee12ee6ba29ce8dcb989..d8cea210af0ed939a30c345fc2169a53b9098976 100644 (file)
@@ -124,7 +124,7 @@ static void fib_notify(struct net *net, struct notifier_block *nb,
 static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net,
                                   enum fib_event_type event_type, u32 dst,
                                   int dst_len, struct fib_info *fi,
-                                  u8 tos, u8 type, u32 tb_id, u32 nlflags)
+                                  u8 tos, u8 type, u32 tb_id)
 {
        struct fib_entry_notifier_info info = {
                .dst = dst,
@@ -133,7 +133,6 @@ static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net,
                .tos = tos,
                .type = type,
                .tb_id = tb_id,
-               .nlflags = nlflags,
        };
        return call_fib_notifier(nb, net, event_type, &info.info);
 }
@@ -197,7 +196,7 @@ int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
 static int call_fib_entry_notifiers(struct net *net,
                                    enum fib_event_type event_type, u32 dst,
                                    int dst_len, struct fib_info *fi,
-                                   u8 tos, u8 type, u32 tb_id, u32 nlflags)
+                                   u8 tos, u8 type, u32 tb_id)
 {
        struct fib_entry_notifier_info info = {
                .dst = dst,
@@ -206,7 +205,6 @@ static int call_fib_entry_notifiers(struct net *net,
                .tos = tos,
                .type = type,
                .tb_id = tb_id,
-               .nlflags = nlflags,
        };
        return call_fib_notifiers(net, event_type, &info.info);
 }
@@ -1198,6 +1196,7 @@ static int fib_insert_alias(struct trie *t, struct key_vector *tp,
 int fib_table_insert(struct net *net, struct fib_table *tb,
                     struct fib_config *cfg)
 {
+       enum fib_event_type event = FIB_EVENT_ENTRY_ADD;
        struct trie *t = (struct trie *)tb->tb_data;
        struct fib_alias *fa, *new_fa;
        struct key_vector *l, *tp;
@@ -1295,6 +1294,13 @@ int fib_table_insert(struct net *net, struct fib_table *tb,
                        new_fa->tb_id = tb->tb_id;
                        new_fa->fa_default = -1;
 
+                       call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
+                                                key, plen, fi,
+                                                new_fa->fa_tos, cfg->fc_type,
+                                                tb->tb_id);
+                       rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
+                                 tb->tb_id, &cfg->fc_nlinfo, nlflags);
+
                        hlist_replace_rcu(&fa->fa_list, &new_fa->fa_list);
 
                        alias_free_mem_rcu(fa);
@@ -1303,13 +1309,6 @@ int fib_table_insert(struct net *net, struct fib_table *tb,
                        if (state & FA_S_ACCESSED)
                                rt_cache_flush(cfg->fc_nlinfo.nl_net);
 
-                       call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
-                                                key, plen, fi,
-                                                new_fa->fa_tos, cfg->fc_type,
-                                                tb->tb_id, cfg->fc_nlflags);
-                       rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen,
-                               tb->tb_id, &cfg->fc_nlinfo, nlflags);
-
                        goto succeeded;
                }
                /* Error if we find a perfect match which
@@ -1319,10 +1318,12 @@ int fib_table_insert(struct net *net, struct fib_table *tb,
                if (fa_match)
                        goto out;
 
-               if (cfg->fc_nlflags & NLM_F_APPEND)
+               if (cfg->fc_nlflags & NLM_F_APPEND) {
+                       event = FIB_EVENT_ENTRY_APPEND;
                        nlflags |= NLM_F_APPEND;
-               else
+               } else {
                        fa = fa_first;
+               }
        }
        err = -ENOENT;
        if (!(cfg->fc_nlflags & NLM_F_CREATE))
@@ -1351,8 +1352,8 @@ int fib_table_insert(struct net *net, struct fib_table *tb,
                tb->tb_num_default++;
 
        rt_cache_flush(cfg->fc_nlinfo.nl_net);
-       call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, key, plen, fi, tos,
-                                cfg->fc_type, tb->tb_id, cfg->fc_nlflags);
+       call_fib_entry_notifiers(net, event, key, plen, fi, tos, cfg->fc_type,
+                                tb->tb_id);
        rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id,
                  &cfg->fc_nlinfo, nlflags);
 succeeded:
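
Net effect of the fib_table_insert() hunks: the notifier event now matches the netlink operation, and on replace it fires before the old alias is unlinked from the trie, so listeners can still observe the previous state. Dropping nlflags from the notifier info follows from that, since the event type itself now carries the distinction:

        /* Event selection after this patch (condensed):
         *
         *   netlink operation                 notifier event
         *   --------------------------------  ------------------------
         *   new prefix (NLM_F_CREATE)         FIB_EVENT_ENTRY_ADD
         *   same prefix + NLM_F_APPEND        FIB_EVENT_ENTRY_APPEND
         *   existing alias + NLM_F_REPLACE    FIB_EVENT_ENTRY_REPLACE
         *   delete / flush                    FIB_EVENT_ENTRY_DEL
         */
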
@@ -1653,8 +1654,8 @@ int fib_table_delete(struct net *net, struct fib_table *tb,
                return -ESRCH;
 
        call_fib_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, key, plen,
-                                fa_to_delete->fa_info, tos, cfg->fc_type,
-                                tb->tb_id, 0);
+                                fa_to_delete->fa_info, tos,
+                                fa_to_delete->fa_type, tb->tb_id);
        rtmsg_fib(RTM_DELROUTE, htonl(key), fa_to_delete, plen, tb->tb_id,
                  &cfg->fc_nlinfo, 0);
 
@@ -1963,7 +1964,8 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
                hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
                        struct fib_info *fi = fa->fa_info;
 
-                       if (!fi || !(fi->fib_flags & RTNH_F_DEAD)) {
+                       if (!fi || !(fi->fib_flags & RTNH_F_DEAD) ||
+                           tb->tb_id != fa->tb_id) {
                                slen = fa->fa_slen;
                                continue;
                        }
@@ -1972,7 +1974,7 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
                                                 n->key,
                                                 KEYLENGTH - fa->fa_slen,
                                                 fi, fa->fa_tos, fa->fa_type,
-                                                tb->tb_id, 0);
+                                                tb->tb_id);
                        hlist_del_rcu(&fa->fa_list);
                        fib_release_info(fa->fa_info);
                        alias_free_mem_rcu(fa);
@@ -2012,7 +2014,7 @@ static void fib_leaf_notify(struct net *net, struct key_vector *l,
 
                call_fib_entry_notifier(nb, net, event_type, l->key,
                                        KEYLENGTH - fa->fa_slen, fi, fa->fa_tos,
-                                       fa->fa_type, fa->tb_id, 0);
+                                       fa->fa_type, fa->tb_id);
        }
 }
 
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 0777ea9492238944575b6d302ae16fb220b4f24d..fc310db2708bf6c9e96befe413e89ac931818f74 100644 (file)
@@ -209,19 +209,17 @@ static struct sock *icmp_sk(struct net *net)
        return *this_cpu_ptr(net->ipv4.icmp_sk);
 }
 
+/* Called with BH disabled */
 static inline struct sock *icmp_xmit_lock(struct net *net)
 {
        struct sock *sk;
 
-       local_bh_disable();
-
        sk = icmp_sk(net);
 
        if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
                /* This can happen if the output path signals a
                 * dst_link_failure() for an outgoing ICMP packet.
                 */
-               local_bh_enable();
                return NULL;
        }
        return sk;
@@ -229,7 +227,7 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
 
 static inline void icmp_xmit_unlock(struct sock *sk)
 {
-       spin_unlock_bh(&sk->sk_lock.slock);
+       spin_unlock(&sk->sk_lock.slock);
 }
 
 int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
@@ -282,6 +280,33 @@ bool icmp_global_allow(void)
 }
 EXPORT_SYMBOL(icmp_global_allow);
 
+static bool icmpv4_mask_allow(struct net *net, int type, int code)
+{
+       if (type > NR_ICMP_TYPES)
+               return true;
+
+       /* Don't limit PMTU discovery. */
+       if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
+               return true;
+
+       /* Limit if icmp type is enabled in ratemask. */
+       if (!((1 << type) & net->ipv4.sysctl_icmp_ratemask))
+               return true;
+
+       return false;
+}
+
+static bool icmpv4_global_allow(struct net *net, int type, int code)
+{
+       if (icmpv4_mask_allow(net, type, code))
+               return true;
+
+       if (icmp_global_allow())
+               return true;
+
+       return false;
+}
+
 /*
  *     Send an ICMP frame.
  */
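
icmpv4_mask_allow() returns true when a message type is exempt from rate limiting, so icmpv4_global_allow() only consults the global token bucket for types the ratemask covers. A worked example against the documented default icmp_ratemask of 6168 (0x1818, i.e. types 3, 4, 11 and 12), as a small standalone program:

        #include <stdio.h>

        int main(void)
        {
                /* default sysctl net.ipv4.icmp_ratemask:
                 * 6168 = 0x1818 = (1<<3)|(1<<4)|(1<<11)|(1<<12) */
                unsigned int ratemask = 6168;
                int types[] = { 0, 3, 11 }; /* echo-reply, dest-unreach, time-exceeded */

                for (int i = 0; i < 3; i++)
                        printf("type %2d rate-limited: %s\n", types[i],
                               (ratemask & (1u << types[i])) ? "yes" : "no");
                return 0;   /* prints: no, yes, yes */
        }
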
@@ -290,34 +315,22 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
                               struct flowi4 *fl4, int type, int code)
 {
        struct dst_entry *dst = &rt->dst;
+       struct inet_peer *peer;
        bool rc = true;
+       int vif;
 
-       if (type > NR_ICMP_TYPES)
-               goto out;
-
-       /* Don't limit PMTU discovery. */
-       if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
+       if (icmpv4_mask_allow(net, type, code))
                goto out;
 
        /* No rate limit on loopback */
        if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
                goto out;
 
-       /* Limit if icmp type is enabled in ratemask. */
-       if (!((1 << type) & net->ipv4.sysctl_icmp_ratemask))
-               goto out;
-
-       rc = false;
-       if (icmp_global_allow()) {
-               int vif = l3mdev_master_ifindex(dst->dev);
-               struct inet_peer *peer;
-
-               peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
-               rc = inet_peer_xrlim_allow(peer,
-                                          net->ipv4.sysctl_icmp_ratelimit);
-               if (peer)
-                       inet_putpeer(peer);
-       }
+       vif = l3mdev_master_ifindex(dst->dev);
+       peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
+       rc = inet_peer_xrlim_allow(peer, net->ipv4.sysctl_icmp_ratelimit);
+       if (peer)
+               inet_putpeer(peer);
 out:
        return rc;
 }
@@ -396,13 +409,22 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        struct inet_sock *inet;
        __be32 daddr, saddr;
        u32 mark = IP4_REPLY_MARK(net, skb->mark);
+       int type = icmp_param->data.icmph.type;
+       int code = icmp_param->data.icmph.code;
 
        if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
                return;
 
+       /* Needed by both icmp_global_allow and icmp_xmit_lock */
+       local_bh_disable();
+
+       /* global icmp_msgs_per_sec */
+       if (!icmpv4_global_allow(net, type, code))
+               goto out_bh_enable;
+
        sk = icmp_xmit_lock(net);
        if (!sk)
-               return;
+               goto out_bh_enable;
        inet = inet_sk(sk);
 
        icmp_param->data.icmph.checksum = 0;
@@ -433,12 +455,13 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
        rt = ip_route_output_key(net, &fl4);
        if (IS_ERR(rt))
                goto out_unlock;
-       if (icmpv4_xrlim_allow(net, rt, &fl4, icmp_param->data.icmph.type,
-                              icmp_param->data.icmph.code))
+       if (icmpv4_xrlim_allow(net, rt, &fl4, type, code))
                icmp_push_reply(icmp_param, &fl4, &ipc, &rt);
        ip_rt_put(rt);
 out_unlock:
        icmp_xmit_unlock(sk);
+out_bh_enable:
+       local_bh_enable();
 }
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -571,7 +594,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 {
        struct iphdr *iph;
        int room;
-       struct icmp_bxm *icmp_param;
+       struct icmp_bxm icmp_param;
        struct rtable *rt = skb_rtable(skb_in);
        struct ipcm_cookie ipc;
        struct flowi4 fl4;
@@ -648,13 +671,16 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
                }
        }
 
-       icmp_param = kmalloc(sizeof(*icmp_param), GFP_ATOMIC);
-       if (!icmp_param)
-               return;
+       /* Needed by both icmp_global_allow and icmp_xmit_lock */
+       local_bh_disable();
+
+       /* Check global sysctl_icmp_msgs_per_sec ratelimit */
+       if (!icmpv4_global_allow(net, type, code))
+               goto out_bh_enable;
 
        sk = icmp_xmit_lock(net);
        if (!sk)
-               goto out_free;
+               goto out_bh_enable;
 
        /*
         *      Construct source address and options.
@@ -681,7 +707,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
                                          iph->tos;
        mark = IP4_REPLY_MARK(net, skb_in->mark);
 
-       if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb_in))
+       if (ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in))
                goto out_unlock;
 
 
@@ -689,25 +715,26 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
         *      Prepare data for ICMP header.
         */
 
-       icmp_param->data.icmph.type      = type;
-       icmp_param->data.icmph.code      = code;
-       icmp_param->data.icmph.un.gateway = info;
-       icmp_param->data.icmph.checksum  = 0;
-       icmp_param->skb   = skb_in;
-       icmp_param->offset = skb_network_offset(skb_in);
+       icmp_param.data.icmph.type       = type;
+       icmp_param.data.icmph.code       = code;
+       icmp_param.data.icmph.un.gateway = info;
+       icmp_param.data.icmph.checksum   = 0;
+       icmp_param.skb    = skb_in;
+       icmp_param.offset = skb_network_offset(skb_in);
        inet_sk(sk)->tos = tos;
        sk->sk_mark = mark;
        ipc.addr = iph->saddr;
-       ipc.opt = &icmp_param->replyopts.opt;
+       ipc.opt = &icmp_param.replyopts.opt;
        ipc.tx_flags = 0;
        ipc.ttl = 0;
        ipc.tos = -1;
 
        rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
-                              type, code, icmp_param);
+                              type, code, &icmp_param);
        if (IS_ERR(rt))
                goto out_unlock;
 
+       /* peer icmp_ratelimit */
        if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code))
                goto ende;
 
@@ -716,21 +743,21 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
        room = dst_mtu(&rt->dst);
        if (room > 576)
                room = 576;
-       room -= sizeof(struct iphdr) + icmp_param->replyopts.opt.opt.optlen;
+       room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
        room -= sizeof(struct icmphdr);
 
-       icmp_param->data_len = skb_in->len - icmp_param->offset;
-       if (icmp_param->data_len > room)
-               icmp_param->data_len = room;
-       icmp_param->head_len = sizeof(struct icmphdr);
+       icmp_param.data_len = skb_in->len - icmp_param.offset;
+       if (icmp_param.data_len > room)
+               icmp_param.data_len = room;
+       icmp_param.head_len = sizeof(struct icmphdr);
 
-       icmp_push_reply(icmp_param, &fl4, &ipc, &rt);
+       icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
 ende:
        ip_rt_put(rt);
 out_unlock:
        icmp_xmit_unlock(sk);
-out_free:
-       kfree(icmp_param);
+out_bh_enable:
+       local_bh_enable();
 out:;
 }
 EXPORT_SYMBOL(icmp_send);
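
Two shape changes in icmp_send() and icmp_reply(): the icmp_bxm scratch area returns to the stack instead of a GFP_ATOMIC kmalloc(), removing an allocation-failure case from the error-notification path, and local_bh_disable() now brackets both the global rate-limit check and the xmit lock, because icmp_xmit_lock() no longer disables BHs itself. The resulting discipline, condensed from the hunks above:

        local_bh_disable();
        if (!icmpv4_global_allow(net, type, code))
                goto out_bh_enable;
        sk = icmp_xmit_lock(net);       /* caller must keep BHs disabled */
        if (!sk)
                goto out_bh_enable;
        /* ... build and push the reply ... */
        icmp_xmit_unlock(sk);           /* now a plain spin_unlock() */
out_bh_enable:
        local_bh_enable();
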
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 19ea045c50ed7d9bfc885e13a67f6bca6075674c..b4d5980ade3b584c444d0f0c6523f03a2f71f884 100644 (file)
@@ -31,6 +31,86 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
 EXPORT_SYMBOL(inet_csk_timer_bug_msg);
 #endif
 
+#if IS_ENABLED(CONFIG_IPV6)
+/* match_wildcard == true:  IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6
+ *                          only, and any IPv4 addresses if not IPv6 only
+ * match_wildcard == false: addresses must be exactly the same, i.e.
+ *                          IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
+ *                          and 0.0.0.0 equals to 0.0.0.0 only
+ */
+static int ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
+                               const struct in6_addr *sk2_rcv_saddr6,
+                               __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
+                               bool sk1_ipv6only, bool sk2_ipv6only,
+                               bool match_wildcard)
+{
+       int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
+       int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
+
+       /* if both are mapped, treat as IPv4 */
+       if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
+               if (!sk2_ipv6only) {
+                       if (sk1_rcv_saddr == sk2_rcv_saddr)
+                               return 1;
+                       if (!sk1_rcv_saddr || !sk2_rcv_saddr)
+                               return match_wildcard;
+               }
+               return 0;
+       }
+
+       if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
+               return 1;
+
+       if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
+           !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
+               return 1;
+
+       if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
+           !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
+               return 1;
+
+       if (sk2_rcv_saddr6 &&
+           ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
+               return 1;
+
+       return 0;
+}
+#endif
+
+/* match_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
+ * match_wildcard == false: addresses must be exactly the same, i.e.
+ *                          0.0.0.0 only equals to 0.0.0.0
+ */
+static int ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
+                               bool sk2_ipv6only, bool match_wildcard)
+{
+       if (!sk2_ipv6only) {
+               if (sk1_rcv_saddr == sk2_rcv_saddr)
+                       return 1;
+               if (!sk1_rcv_saddr || !sk2_rcv_saddr)
+                       return match_wildcard;
+       }
+       return 0;
+}
+
+int inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
+                        bool match_wildcard)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == AF_INET6)
+               return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
+                                           inet6_rcv_saddr(sk2),
+                                           sk->sk_rcv_saddr,
+                                           sk2->sk_rcv_saddr,
+                                           ipv6_only_sock(sk),
+                                           ipv6_only_sock(sk2),
+                                           match_wildcard);
+#endif
+       return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
+                                   ipv6_only_sock(sk2), match_wildcard);
+}
+EXPORT_SYMBOL(inet_rcv_saddr_equal);
+
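
These helpers, which previously lived on the UDP side, move here so TCP bind can share them through inet_rcv_saddr_equal(). The match_wildcard flag is the crux: when set, a socket bound to the wildcard address matches any address of a compatible family; when clear, addresses must be literally equal. A userspace rendering of the IPv4 case (0 stands in for INADDR_ANY and __be32; sketch only):

        #include <stdio.h>

        static int saddr_equal(unsigned int a, unsigned int b,
                               int sk2_ipv6only, int match_wildcard)
        {
                if (!sk2_ipv6only) {
                        if (a == b)
                                return 1;
                        if (!a || !b)
                                return match_wildcard;
                }
                return 0;
        }

        int main(void)
        {
                printf("%d\n", saddr_equal(0x7f000001, 0x7f000001, 0, 0)); /* 1: exact match  */
                printf("%d\n", saddr_equal(0, 0x7f000001, 0, 1));          /* 1: wildcard hit */
                printf("%d\n", saddr_equal(0, 0x7f000001, 0, 0));          /* 0: strict mode  */
                return 0;
        }
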
 void inet_get_local_port_range(struct net *net, int *low, int *high)
 {
        unsigned int seq;
@@ -44,9 +124,9 @@ void inet_get_local_port_range(struct net *net, int *low, int *high)
 }
 EXPORT_SYMBOL(inet_get_local_port_range);
 
-int inet_csk_bind_conflict(const struct sock *sk,
-                          const struct inet_bind_bucket *tb, bool relax,
-                          bool reuseport_ok)
+static int inet_csk_bind_conflict(const struct sock *sk,
+                                 const struct inet_bind_bucket *tb,
+                                 bool relax, bool reuseport_ok)
 {
        struct sock *sk2;
        bool reuse = sk->sk_reuse;
@@ -62,7 +142,6 @@ int inet_csk_bind_conflict(const struct sock *sk,
 
        sk_for_each_bound(sk2, &tb->owners) {
                if (sk != sk2 &&
-                   !inet_v6_ipv6only(sk2) &&
                    (!sk->sk_bound_dev_if ||
                     !sk2->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
@@ -72,54 +151,34 @@ int inet_csk_bind_conflict(const struct sock *sk,
                             rcu_access_pointer(sk->sk_reuseport_cb) ||
                             (sk2->sk_state != TCP_TIME_WAIT &&
                             !uid_eq(uid, sock_i_uid(sk2))))) {
-
-                               if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
-                                   sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
+                               if (inet_rcv_saddr_equal(sk, sk2, true))
                                        break;
                        }
                        if (!relax && reuse && sk2->sk_reuse &&
                            sk2->sk_state != TCP_LISTEN) {
-
-                               if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
-                                   sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
+                               if (inet_rcv_saddr_equal(sk, sk2, true))
                                        break;
                        }
                }
        }
        return sk2 != NULL;
 }
-EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
 
-/* Obtain a reference to a local port for the given sock,
- * if snum is zero it means select any available local port.
- * We try to allocate an odd port (and leave even ports for connect())
+/*
+ * Find an open port number for the socket.  Returns with the
+ * inet_bind_hashbucket lock held.
  */
-int inet_csk_get_port(struct sock *sk, unsigned short snum)
+static struct inet_bind_hashbucket *
+inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *port_ret)
 {
-       bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
        struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
-       int ret = 1, attempts = 5, port = snum;
-       int smallest_size = -1, smallest_port;
+       int port = 0;
        struct inet_bind_hashbucket *head;
        struct net *net = sock_net(sk);
        int i, low, high, attempt_half;
        struct inet_bind_bucket *tb;
-       kuid_t uid = sock_i_uid(sk);
        u32 remaining, offset;
-       bool reuseport_ok = !!snum;
 
-       if (port) {
-have_port:
-               head = &hinfo->bhash[inet_bhashfn(net, port,
-                                                 hinfo->bhash_size)];
-               spin_lock_bh(&head->lock);
-               inet_bind_bucket_for_each(tb, &head->chain)
-                       if (net_eq(ib_net(tb), net) && tb->port == port)
-                               goto tb_found;
-
-               goto tb_not_found;
-       }
-again:
        attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
 other_half_scan:
        inet_get_local_port_range(net, &low, &high);
@@ -143,8 +202,6 @@ other_half_scan:
         * We do the opposite to not pollute connect() users.
         */
        offset |= 1U;
-       smallest_size = -1;
-       smallest_port = low; /* avoid compiler warning */
 
 other_parity_scan:
        port = low + offset;
@@ -158,30 +215,17 @@ other_parity_scan:
                spin_lock_bh(&head->lock);
                inet_bind_bucket_for_each(tb, &head->chain)
                        if (net_eq(ib_net(tb), net) && tb->port == port) {
-                               if (((tb->fastreuse > 0 && reuse) ||
-                                    (tb->fastreuseport > 0 &&
-                                     sk->sk_reuseport &&
-                                     !rcu_access_pointer(sk->sk_reuseport_cb) &&
-                                     uid_eq(tb->fastuid, uid))) &&
-                                   (tb->num_owners < smallest_size || smallest_size == -1)) {
-                                       smallest_size = tb->num_owners;
-                                       smallest_port = port;
-                               }
-                               if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false,
-                                                                             reuseport_ok))
-                                       goto tb_found;
+                               if (!inet_csk_bind_conflict(sk, tb, false, false))
+                                       goto success;
                                goto next_port;
                        }
-               goto tb_not_found;
+               tb = NULL;
+               goto success;
 next_port:
                spin_unlock_bh(&head->lock);
                cond_resched();
        }
 
-       if (smallest_size != -1) {
-               port = smallest_port;
-               goto have_port;
-       }
        offset--;
        if (!(offset & 1))
                goto other_parity_scan;
@@ -191,8 +235,74 @@ next_port:
                attempt_half = 2;
                goto other_half_scan;
        }
-       return ret;
+       return NULL;
+success:
+       *port_ret = port;
+       *tb_ret = tb;
+       return head;
+}
 
+static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
+                                    struct sock *sk)
+{
+       kuid_t uid = sock_i_uid(sk);
+
+       if (tb->fastreuseport <= 0)
+               return 0;
+       if (!sk->sk_reuseport)
+               return 0;
+       if (rcu_access_pointer(sk->sk_reuseport_cb))
+               return 0;
+       if (!uid_eq(tb->fastuid, uid))
+               return 0;
+       /* We only need to check the rcv_saddr if this tb was once marked
+        * without fastreuseport and then was reset, as we can only know that
+        * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
+        * owners list.
+        */
+       if (tb->fastreuseport == FASTREUSEPORT_ANY)
+               return 1;
+#if IS_ENABLED(CONFIG_IPV6)
+       if (tb->fast_sk_family == AF_INET6)
+               return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
+                                           &sk->sk_v6_rcv_saddr,
+                                           tb->fast_rcv_saddr,
+                                           sk->sk_rcv_saddr,
+                                           tb->fast_ipv6_only,
+                                           ipv6_only_sock(sk), true);
+#endif
+       return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
+                                   ipv6_only_sock(sk), true);
+}
+
+/* Obtain a reference to a local port for the given sock,
+ * if snum is zero it means select any available local port.
+ * We try to allocate an odd port (and leave even ports for connect())
+ */
+int inet_csk_get_port(struct sock *sk, unsigned short snum)
+{
+       bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
+       struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
+       int ret = 1, port = snum;
+       struct inet_bind_hashbucket *head;
+       struct net *net = sock_net(sk);
+       struct inet_bind_bucket *tb = NULL;
+       kuid_t uid = sock_i_uid(sk);
+
+       if (!port) {
+               head = inet_csk_find_open_port(sk, &tb, &port);
+               if (!head)
+                       return ret;
+               if (!tb)
+                       goto tb_not_found;
+               goto success;
+       }
+       head = &hinfo->bhash[inet_bhashfn(net, port,
+                                         hinfo->bhash_size)];
+       spin_lock_bh(&head->lock);
+       inet_bind_bucket_for_each(tb, &head->chain)
+               if (net_eq(ib_net(tb), net) && tb->port == port)
+                       goto tb_found;
 tb_not_found:
        tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
                                     net, head, port);
@@ -203,39 +313,54 @@ tb_found:
                if (sk->sk_reuse == SK_FORCE_REUSE)
                        goto success;
 
-               if (((tb->fastreuse > 0 && reuse) ||
-                    (tb->fastreuseport > 0 &&
-                     !rcu_access_pointer(sk->sk_reuseport_cb) &&
-                     sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
-                   smallest_size == -1)
+               if ((tb->fastreuse > 0 && reuse) ||
+                   sk_reuseport_match(tb, sk))
                        goto success;
-               if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true,
-                                                            reuseport_ok)) {
-                       if ((reuse ||
-                            (tb->fastreuseport > 0 &&
-                             sk->sk_reuseport &&
-                             !rcu_access_pointer(sk->sk_reuseport_cb) &&
-                             uid_eq(tb->fastuid, uid))) &&
-                           !snum && smallest_size != -1 && --attempts >= 0) {
-                               spin_unlock_bh(&head->lock);
-                               goto again;
-                       }
+               if (inet_csk_bind_conflict(sk, tb, true, true))
                        goto fail_unlock;
+       }
+success:
+       if (hlist_empty(&tb->owners)) {
+               tb->fastreuse = reuse;
+               if (sk->sk_reuseport) {
+                       tb->fastreuseport = FASTREUSEPORT_ANY;
+                       tb->fastuid = uid;
+                       tb->fast_rcv_saddr = sk->sk_rcv_saddr;
+                       tb->fast_ipv6_only = ipv6_only_sock(sk);
+#if IS_ENABLED(CONFIG_IPV6)
+                       tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+#endif
+               } else {
+                       tb->fastreuseport = 0;
                }
+       } else {
                if (!reuse)
                        tb->fastreuse = 0;
-               if (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid))
-                       tb->fastreuseport = 0;
-       } else {
-               tb->fastreuse = reuse;
                if (sk->sk_reuseport) {
-                       tb->fastreuseport = 1;
-                       tb->fastuid = uid;
+                       /* We didn't match or we don't have fastreuseport set on
+                        * the tb, but we have sk_reuseport set on this socket
+                        * and we know that there are no bind conflicts with
+                        * this socket in this tb, so reset our tb's reuseport
+                        * settings so that any subsequent sockets that match
+                        * our current socket will be put on the fast path.
+                        *
+                        * If we reset we need to set FASTREUSEPORT_STRICT so we
+                        * do extra checking for all subsequent sk_reuseport
+                        * socks.
+                        */
+                       if (!sk_reuseport_match(tb, sk)) {
+                               tb->fastreuseport = FASTREUSEPORT_STRICT;
+                               tb->fastuid = uid;
+                               tb->fast_rcv_saddr = sk->sk_rcv_saddr;
+                               tb->fast_ipv6_only = ipv6_only_sock(sk);
+#if IS_ENABLED(CONFIG_IPV6)
+                               tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+#endif
+                       }
                } else {
                        tb->fastreuseport = 0;
                }
        }
-success:
        if (!inet_csk(sk)->icsk_bind_hash)
                inet_bind_hash(sk, tb, port);
        WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
@@ -711,9 +836,8 @@ void inet_csk_destroy_sock(struct sock *sk)
 
        sk_refcnt_debug_release(sk);
 
-       local_bh_disable();
        percpu_counter_dec(sk->sk_prot->orphan_count);
-       local_bh_enable();
+
        sock_put(sk);
 }
 EXPORT_SYMBOL(inet_csk_destroy_sock);
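
The inet_csk_get_port() rewrite drops the smallest_size heuristic and instead caches, per bind bucket, what is known about its owners. tb->fastreuseport becomes a tri-state: FASTREUSEPORT_ANY means every owner so far was a compatible SO_REUSEPORT socket of the same uid, so a uid match alone suffices; FASTREUSEPORT_STRICT means the cache was rebuilt over existing owners, so sk_reuseport_match() must also compare the cached fast_rcv_saddr. The workload this fast path serves, seen from userspace (sketch; port and backlog are placeholders, error handling trimmed):

        #include <arpa/inet.h>
        #include <netinet/in.h>
        #include <string.h>
        #include <sys/socket.h>

        /* N workers each call this; once the bucket is in the
         * FASTREUSEPORT_ANY state, binds 2..N skip the owner scan. */
        static int make_listener(void)
        {
                int one = 1;
                int fd = socket(AF_INET, SOCK_STREAM, 0);
                struct sockaddr_in sa;

                setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
                memset(&sa, 0, sizeof(sa));
                sa.sin_family = AF_INET;
                sa.sin_addr.s_addr = htonl(INADDR_ANY);
                sa.sin_port = htons(8080);      /* placeholder port */
                bind(fd, (struct sockaddr *)&sa, sizeof(sa));
                listen(fd, 128);
                return fd;
        }
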
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 4dea33e5f29572e09c29621ee8eadc4e60a9a9a2..3828b3a805cdeae87f4ad9300f35fff048e23691 100644 (file)
@@ -215,7 +215,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
        }
 
        if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
-           icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+           icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
            icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                r->idiag_timer = 1;
                r->idiag_retrans = icsk->icsk_retransmits;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index ca97835bfec4b2291446a54d7f6bb1af408afc29..8bea74298173f5198e7b3083b5afb5e1398df977 100644 (file)
@@ -73,7 +73,6 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
                tb->port      = snum;
                tb->fastreuse = 0;
                tb->fastreuseport = 0;
-               tb->num_owners = 0;
                INIT_HLIST_HEAD(&tb->owners);
                hlist_add_head(&tb->node, &head->chain);
        }
@@ -96,7 +95,6 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
 {
        inet_sk(sk)->inet_num = snum;
        sk_add_bind_node(sk, &tb->owners);
-       tb->num_owners++;
        inet_csk(sk)->icsk_bind_hash = tb;
 }
 
@@ -114,7 +112,6 @@ static void __inet_put_port(struct sock *sk)
        spin_lock(&head->lock);
        tb = inet_csk(sk)->icsk_bind_hash;
        __sk_del_bind_node(sk);
-       tb->num_owners--;
        inet_csk(sk)->icsk_bind_hash = NULL;
        inet_sk(sk)->inet_num = 0;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
@@ -435,10 +432,7 @@ bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
 EXPORT_SYMBOL_GPL(inet_ehash_nolisten);
 
 static int inet_reuseport_add_sock(struct sock *sk,
-                                  struct inet_listen_hashbucket *ilb,
-                                  int (*saddr_same)(const struct sock *sk1,
-                                                    const struct sock *sk2,
-                                                    bool match_wildcard))
+                                  struct inet_listen_hashbucket *ilb)
 {
        struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
        struct sock *sk2;
@@ -451,7 +445,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
                    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
                    inet_csk(sk2)->icsk_bind_hash == tb &&
                    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
-                   saddr_same(sk, sk2, false))
+                   inet_rcv_saddr_equal(sk, sk2, false))
                        return reuseport_add_sock(sk, sk2);
        }
 
@@ -461,10 +455,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
        return 0;
 }
 
-int __inet_hash(struct sock *sk, struct sock *osk,
-                int (*saddr_same)(const struct sock *sk1,
-                                  const struct sock *sk2,
-                                  bool match_wildcard))
+int __inet_hash(struct sock *sk, struct sock *osk)
 {
        struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
        struct inet_listen_hashbucket *ilb;
@@ -479,7 +470,7 @@ int __inet_hash(struct sock *sk, struct sock *osk,
 
        spin_lock(&ilb->lock);
        if (sk->sk_reuseport) {
-               err = inet_reuseport_add_sock(sk, ilb, saddr_same);
+               err = inet_reuseport_add_sock(sk, ilb);
                if (err)
                        goto unlock;
        }
@@ -503,7 +494,7 @@ int inet_hash(struct sock *sk)
 
        if (sk->sk_state != TCP_CLOSE) {
                local_bh_disable();
-               err = __inet_hash(sk, NULL, ipv4_rcv_saddr_equal);
+               err = __inet_hash(sk, NULL);
                local_bh_enable();
        }
 
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index ddcd56c08d14d37cec17719bb36be234a28dc35e..f8aff2c71cdee55ebb4ac6001e71a874e9eaf6bb 100644 (file)
@@ -257,8 +257,7 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
 }
 EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
 
-void inet_twsk_purge(struct inet_hashinfo *hashinfo,
-                    struct inet_timewait_death_row *twdr, int family)
+void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
 {
        struct inet_timewait_sock *tw;
        struct sock *sk;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index b67719f459537d49d958de9874414ea868c4a8e1..737ce826d7ecfa040d07d7f8e8d6dedd01ca7330 100644 (file)
@@ -222,7 +222,10 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
        if (unlikely(!neigh))
                neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
        if (!IS_ERR(neigh)) {
-               int res = dst_neigh_output(dst, neigh, skb);
+               int res;
+
+               sock_confirm_neigh(skb, neigh);
+               res = neigh_output(neigh, skb);
 
                rcu_read_unlock_bh();
                return res;
@@ -886,6 +889,9 @@ static inline int ip_ufo_append_data(struct sock *sk,
 
                skb->csum = 0;
 
+               if (flags & MSG_CONFIRM)
+                       skb_set_dst_pending_confirm(skb, 1);
+
                __skb_queue_tail(queue, skb);
        } else if (skb_is_gso(skb)) {
                goto append;
@@ -1086,6 +1092,9 @@ alloc_new_skb:
                        exthdrlen = 0;
                        csummode = CHECKSUM_NONE;
 
+                       if ((flags & MSG_CONFIRM) && !skb_prev)
+                               skb_set_dst_pending_confirm(skb, 1);
+
                        /*
                         * Put the packet on the pending queue.
                         */
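
ip_finish_output2() now calls sock_confirm_neigh() before neigh_output(), and the append paths mark skbs with skb_set_dst_pending_confirm() when userspace passed MSG_CONFIRM; together these replace the old dst_neigh_output() shortcut so neighbour confirmation follows the socket rather than the dst. From userspace the trigger is unchanged (sketch; fd and dst are assumed to be set up, error handling trimmed):

        #include <netinet/in.h>
        #include <sys/socket.h>

        /* e.g. what ping does for a responsive target: confirm forward
         * progress so the neighbour entry stays REACHABLE without
         * falling back to unicast ARP probes. */
        static ssize_t confirmed_send(int fd, const void *buf, size_t len,
                                      const struct sockaddr_in *dst)
        {
                return sendto(fd, buf, len, MSG_CONFIRM,
                              (const struct sockaddr *)dst, sizeof(*dst));
        }
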
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 900011709e3b8e4807daaa6bf537c3871a7d9306..ce1386a67e2434203fdffe126958483ffc69fabd 100644 (file)
@@ -272,7 +272,7 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
                        continue;
                switch (cmsg->cmsg_type) {
                case IP_RETOPTS:
-                       err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
+                       err = cmsg->cmsg_len - sizeof(struct cmsghdr);
 
                        /* Our caller is responsible for freeing ipc->opt */
                        err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
@@ -843,6 +843,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
        {
                struct ip_mreqn mreq;
                struct net_device *dev = NULL;
+               int midx;
 
                if (sk->sk_type == SOCK_STREAM)
                        goto e_inval;
@@ -887,11 +888,15 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                err = -EADDRNOTAVAIL;
                if (!dev)
                        break;
+
+               midx = l3mdev_master_ifindex(dev);
+
                dev_put(dev);
 
                err = -EINVAL;
                if (sk->sk_bound_dev_if &&
-                   mreq.imr_ifindex != sk->sk_bound_dev_if)
+                   mreq.imr_ifindex != sk->sk_bound_dev_if &&
+                   (!midx || midx != sk->sk_bound_dev_if))
                        break;
 
                inet->mc_index = mreq.imr_ifindex;
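
The added midx check relaxes IP_MULTICAST_IF validation for VRF: a socket bound to an L3 master device may now select a multicast interface that is enslaved to that master, not only the master itself. Userspace sketch (the device name and slave_ifindex are placeholders):

        #include <netinet/in.h>
        #include <sys/socket.h>

        /* Socket bound to the VRF master may now pick an interface
         * enslaved to it. */
        static int pick_slave_mcast_if(int fd, int slave_ifindex)
        {
                struct ip_mreqn mreq = { .imr_ifindex = slave_ifindex };

                if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                               "vrf-blue", sizeof("vrf-blue")))
                        return -1;
                return setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF,
                                  &mreq, sizeof(mreq));
        }
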
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 0fd1976ab63bbd9be357e41b9acd71dcfa507665..a31f47ccaad90deadb686b2a9091694c8c1ecb9d 100644 (file)
@@ -188,8 +188,8 @@ int iptunnel_handle_offloads(struct sk_buff *skb,
 EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
 
 /* Often modified stats are per cpu, other are shared (netdev->stats) */
-struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
-                                               struct rtnl_link_stats64 *tot)
+void ip_tunnel_get_stats64(struct net_device *dev,
+                          struct rtnl_link_stats64 *tot)
 {
        int i;
 
@@ -214,8 +214,6 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
                tot->rx_bytes   += rx_bytes;
                tot->tx_bytes   += tx_bytes;
        }
-
-       return tot;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
 
@@ -228,7 +226,7 @@ static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
        [LWTUNNEL_IP_FLAGS]     = { .type = NLA_U16 },
 };
 
-static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
+static int ip_tun_build_state(struct nlattr *attr,
                              unsigned int family, const void *cfg,
                              struct lwtunnel_state **ts)
 {
@@ -325,7 +323,7 @@ static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
        [LWTUNNEL_IP6_FLAGS]            = { .type = NLA_U16 },
 };
 
-static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr,
+static int ip6_tun_build_state(struct nlattr *attr,
                               unsigned int family, const void *cfg,
                               struct lwtunnel_state **ts)
 {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index efc1e76d49770994f065c1bbad1ef8f93fa99bc7..beacd028848c903e1d58e8d7b180d7814ca2f871 100644 (file)
@@ -299,10 +299,29 @@ static void __net_exit ipmr_rules_exit(struct net *net)
 }
 #endif
 
+static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
+                               const void *ptr)
+{
+       const struct mfc_cache_cmp_arg *cmparg = arg->key;
+       struct mfc_cache *c = (struct mfc_cache *)ptr;
+
+       return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
+              cmparg->mfc_origin != c->mfc_origin;
+}
+
+static const struct rhashtable_params ipmr_rht_params = {
+       .head_offset = offsetof(struct mfc_cache, mnode),
+       .key_offset = offsetof(struct mfc_cache, cmparg),
+       .key_len = sizeof(struct mfc_cache_cmp_arg),
+       .nelem_hint = 3,
+       .locks_mul = 1,
+       .obj_cmpfn = ipmr_hash_cmp,
+       .automatic_shrinking = true,
+};
+
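
The per-table forwarding cache moves from a fixed-size open hash (MFC_LINES buckets) to an rhltable, the resizable rhashtable variant whose keys may map to several objects; that matters here because the same (origin, group) pair can exist with different parent interfaces. Lookups therefore yield a list to walk, as in this sketch mirroring the callers below (the extra predicate is illustrative):

        struct rhlist_head *tmp, *list;
        struct mfc_cache *c;

        rcu_read_lock();
        list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
        rhl_for_each_entry_rcu(c, tmp, list, mnode)
                if (demo_entry_matches(c))      /* hypothetical filter */
                        break;
        rcu_read_unlock();
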
 static struct mr_table *ipmr_new_table(struct net *net, u32 id)
 {
        struct mr_table *mrt;
-       unsigned int i;
 
        /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
        if (id != RT_TABLE_DEFAULT && id >= 1000000000)
@@ -318,10 +337,8 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
        write_pnet(&mrt->net, net);
        mrt->id = id;
 
-       /* Forwarding cache */
-       for (i = 0; i < MFC_LINES; i++)
-               INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);
-
+       rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
+       INIT_LIST_HEAD(&mrt->mfc_cache_list);
        INIT_LIST_HEAD(&mrt->mfc_unres_queue);
 
        setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
@@ -338,6 +355,7 @@ static void ipmr_free_table(struct mr_table *mrt)
 {
        del_timer_sync(&mrt->ipmr_expire_timer);
        mroute_clean_tables(mrt, true);
+       rhltable_destroy(&mrt->mfc_hash);
        kfree(mrt);
 }
 
@@ -839,13 +857,17 @@ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
                                         __be32 origin,
                                         __be32 mcastgrp)
 {
-       int line = MFC_HASH(mcastgrp, origin);
+       struct mfc_cache_cmp_arg arg = {
+                       .mfc_mcastgrp = mcastgrp,
+                       .mfc_origin = origin
+       };
+       struct rhlist_head *tmp, *list;
        struct mfc_cache *c;
 
-       list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
-               if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
-                       return c;
-       }
+       list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
+       rhl_for_each_entry_rcu(c, tmp, list, mnode)
+               return c;
+
        return NULL;
 }
 
@@ -853,13 +875,16 @@ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
 static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
                                                    int vifi)
 {
-       int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY));
+       struct mfc_cache_cmp_arg arg = {
+                       .mfc_mcastgrp = htonl(INADDR_ANY),
+                       .mfc_origin = htonl(INADDR_ANY)
+       };
+       struct rhlist_head *tmp, *list;
        struct mfc_cache *c;
 
-       list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
-               if (c->mfc_origin == htonl(INADDR_ANY) &&
-                   c->mfc_mcastgrp == htonl(INADDR_ANY) &&
-                   c->mfc_un.res.ttls[vifi] < 255)
+       list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
+       rhl_for_each_entry_rcu(c, tmp, list, mnode)
+               if (c->mfc_un.res.ttls[vifi] < 255)
                        return c;
 
        return NULL;
@@ -869,29 +894,51 @@ static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
 static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
                                             __be32 mcastgrp, int vifi)
 {
-       int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY));
+       struct mfc_cache_cmp_arg arg = {
+                       .mfc_mcastgrp = mcastgrp,
+                       .mfc_origin = htonl(INADDR_ANY)
+       };
+       struct rhlist_head *tmp, *list;
        struct mfc_cache *c, *proxy;
 
        if (mcastgrp == htonl(INADDR_ANY))
                goto skip;
 
-       list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
-               if (c->mfc_origin == htonl(INADDR_ANY) &&
-                   c->mfc_mcastgrp == mcastgrp) {
-                       if (c->mfc_un.res.ttls[vifi] < 255)
-                               return c;
-
-                       /* It's ok if the vifi is part of the static tree */
-                       proxy = ipmr_cache_find_any_parent(mrt,
-                                                          c->mfc_parent);
-                       if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
-                               return c;
-               }
+       list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
+       rhl_for_each_entry_rcu(c, tmp, list, mnode) {
+               if (c->mfc_un.res.ttls[vifi] < 255)
+                       return c;
+
+               /* It's ok if the vifi is part of the static tree */
+               proxy = ipmr_cache_find_any_parent(mrt, c->mfc_parent);
+               if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
+                       return c;
+       }
 
 skip:
        return ipmr_cache_find_any_parent(mrt, vifi);
 }
 
+/* Look for a (S,G,iif) entry if parent != -1 */
+static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
+                                               __be32 origin, __be32 mcastgrp,
+                                               int parent)
+{
+       struct mfc_cache_cmp_arg arg = {
+                       .mfc_mcastgrp = mcastgrp,
+                       .mfc_origin = origin,
+       };
+       struct rhlist_head *tmp, *list;
+       struct mfc_cache *c;
+
+       list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
+       rhl_for_each_entry_rcu(c, tmp, list, mnode)
+               if (parent == -1 || parent == c->mfc_parent)
+                       return c;
+
+       return NULL;
+}
+
 /* Allocate a multicast cache entry */
 static struct mfc_cache *ipmr_cache_alloc(void)
 {
@@ -1028,10 +1075,10 @@ static int ipmr_cache_report(struct mr_table *mrt,
 static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
                                 struct sk_buff *skb)
 {
+       const struct iphdr *iph = ip_hdr(skb);
+       struct mfc_cache *c;
        bool found = false;
        int err;
-       struct mfc_cache *c;
-       const struct iphdr *iph = ip_hdr(skb);
 
        spin_lock_bh(&mfc_unres_lock);
        list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
@@ -1095,46 +1142,39 @@ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
 
 static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
 {
-       int line;
-       struct mfc_cache *c, *next;
+       struct mfc_cache *c;
 
-       line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
+       /* The entries are added/deleted only under RTNL */
+       rcu_read_lock();
+       c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
+                                  mfc->mfcc_mcastgrp.s_addr, parent);
+       rcu_read_unlock();
+       if (!c)
+               return -ENOENT;
+       rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
+       list_del_rcu(&c->list);
+       mroute_netlink_event(mrt, c, RTM_DELROUTE);
+       ipmr_cache_free(c);
 
-       list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
-               if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
-                   c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
-                   (parent == -1 || parent == c->mfc_parent)) {
-                       list_del_rcu(&c->list);
-                       mroute_netlink_event(mrt, c, RTM_DELROUTE);
-                       ipmr_cache_free(c);
-                       return 0;
-               }
-       }
-       return -ENOENT;
+       return 0;
 }
 
 static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
                        struct mfcctl *mfc, int mrtsock, int parent)
 {
-       bool found = false;
-       int line;
        struct mfc_cache *uc, *c;
+       bool found;
+       int ret;
 
        if (mfc->mfcc_parent >= MAXVIFS)
                return -ENFILE;
 
-       line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
-
-       list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
-               if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
-                   c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
-                   (parent == -1 || parent == c->mfc_parent)) {
-                       found = true;
-                       break;
-               }
-       }
-
-       if (found) {
+       /* The entries are added/deleted only under RTNL */
+       rcu_read_lock();
+       c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
+                                  mfc->mfcc_mcastgrp.s_addr, parent);
+       rcu_read_unlock();
+       if (c) {
                write_lock_bh(&mrt_lock);
                c->mfc_parent = mfc->mfcc_parent;
                ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
@@ -1160,8 +1200,14 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
        if (!mrtsock)
                c->mfc_flags |= MFC_STATIC;
 
-       list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);
-
+       ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->mnode,
+                                 ipmr_rht_params);
+       if (ret) {
+               pr_err("ipmr: rhtable insert error %d\n", ret);
+               ipmr_cache_free(c);
+               return ret;
+       }
+       list_add_tail_rcu(&c->list, &mrt->mfc_cache_list);
        /* Check to see if we resolved a queued list. If so we
         * need to send on the frames and tidy up.
         */
@@ -1191,9 +1237,9 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
 /* Close the multicast socket, and clear the vif tables etc */
 static void mroute_clean_tables(struct mr_table *mrt, bool all)
 {
-       int i;
+       struct mfc_cache *c, *tmp;
        LIST_HEAD(list);
-       struct mfc_cache *c, *next;
+       int i;
 
        /* Shut down all active vif entries */
        for (i = 0; i < mrt->maxvif; i++) {
@@ -1204,19 +1250,18 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
        unregister_netdevice_many(&list);
 
        /* Wipe the cache */
-       for (i = 0; i < MFC_LINES; i++) {
-               list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
-                       if (!all && (c->mfc_flags & MFC_STATIC))
-                               continue;
-                       list_del_rcu(&c->list);
-                       mroute_netlink_event(mrt, c, RTM_DELROUTE);
-                       ipmr_cache_free(c);
-               }
+       list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
+               if (!all && (c->mfc_flags & MFC_STATIC))
+                       continue;
+               rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
+               list_del_rcu(&c->list);
+               mroute_netlink_event(mrt, c, RTM_DELROUTE);
+               ipmr_cache_free(c);
        }
 
        if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
                spin_lock_bh(&mfc_unres_lock);
-               list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
+               list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
                        list_del(&c->list);
                        mroute_netlink_event(mrt, c, RTM_DELROUTE);
                        ipmr_destroy_unres(mrt, c);
@@ -1791,9 +1836,9 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
                          struct sk_buff *skb, struct mfc_cache *cache,
                          int local)
 {
+       int true_vifi = ipmr_find_vif(mrt, skb->dev);
        int psend = -1;
        int vif, ct;
-       int true_vifi = ipmr_find_vif(mrt, skb->dev);
 
        vif = cache->mfc_parent;
        cache->mfc_un.res.pkt++;
@@ -2091,8 +2136,10 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
        int ct;
 
        /* If cache is unresolved, don't try to parse IIF and OIF */
-       if (c->mfc_parent >= MAXVIFS)
+       if (c->mfc_parent >= MAXVIFS) {
+               rtm->rtm_flags |= RTNH_F_UNRESOLVED;
                return -ENOENT;
+       }
 
        if (VIF_EXISTS(mrt, c->mfc_parent) &&
            nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
@@ -2134,7 +2181,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 
 int ipmr_get_route(struct net *net, struct sk_buff *skb,
                   __be32 saddr, __be32 daddr,
-                  struct rtmsg *rtm, int nowait, u32 portid)
+                  struct rtmsg *rtm, u32 portid)
 {
        struct mfc_cache *cache;
        struct mr_table *mrt;
@@ -2158,11 +2205,6 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
                struct net_device *dev;
                int vif = -1;
 
-               if (nowait) {
-                       rcu_read_unlock();
-                       return -EAGAIN;
-               }
-
                dev = skb->dev;
                read_lock(&mrt_lock);
                if (dev)
@@ -2296,34 +2338,30 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
        struct mr_table *mrt;
        struct mfc_cache *mfc;
        unsigned int t = 0, s_t;
-       unsigned int h = 0, s_h;
        unsigned int e = 0, s_e;
 
        s_t = cb->args[0];
-       s_h = cb->args[1];
-       s_e = cb->args[2];
+       s_e = cb->args[1];
 
        rcu_read_lock();
        ipmr_for_each_table(mrt, net) {
                if (t < s_t)
                        goto next_table;
-               if (t > s_t)
-                       s_h = 0;
-               for (h = s_h; h < MFC_LINES; h++) {
-                       list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
-                               if (e < s_e)
-                                       goto next_entry;
-                               if (ipmr_fill_mroute(mrt, skb,
-                                                    NETLINK_CB(cb->skb).portid,
-                                                    cb->nlh->nlmsg_seq,
-                                                    mfc, RTM_NEWROUTE,
-                                                    NLM_F_MULTI) < 0)
-                                       goto done;
+               list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
+                       if (e < s_e)
+                               goto next_entry;
+                       if (ipmr_fill_mroute(mrt, skb,
+                                            NETLINK_CB(cb->skb).portid,
+                                            cb->nlh->nlmsg_seq,
+                                            mfc, RTM_NEWROUTE,
+                                            NLM_F_MULTI) < 0)
+                               goto done;
 next_entry:
-                               e++;
-                       }
-                       e = s_e = 0;
+                       e++;
                }
+               e = 0;
+               s_e = 0;
+
                spin_lock_bh(&mfc_unres_lock);
                list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
                        if (e < s_e)
@@ -2340,16 +2378,15 @@ next_entry2:
                        e++;
                }
                spin_unlock_bh(&mfc_unres_lock);
-               e = s_e = 0;
-               s_h = 0;
+               e = 0;
+               s_e = 0;
 next_table:
                t++;
        }
 done:
        rcu_read_unlock();
 
-       cb->args[2] = e;
-       cb->args[1] = h;
+       cb->args[1] = e;
        cb->args[0] = t;
 
        return skb->len;
@@ -2593,10 +2630,8 @@ struct ipmr_mfc_iter {
        struct seq_net_private p;
        struct mr_table *mrt;
        struct list_head *cache;
-       int ct;
 };
 
-
 static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
                                          struct ipmr_mfc_iter *it, loff_t pos)
 {
@@ -2604,12 +2639,10 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
        struct mfc_cache *mfc;
 
        rcu_read_lock();
-       for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
-               it->cache = &mrt->mfc_cache_array[it->ct];
-               list_for_each_entry_rcu(mfc, it->cache, list)
-                       if (pos-- == 0)
-                               return mfc;
-       }
+       it->cache = &mrt->mfc_cache_list;
+       list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
+               if (pos-- == 0)
+                       return mfc;
        rcu_read_unlock();
 
        spin_lock_bh(&mfc_unres_lock);
@@ -2636,17 +2669,16 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
 
        it->mrt = mrt;
        it->cache = NULL;
-       it->ct = 0;
        return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
                : SEQ_START_TOKEN;
 }
 
 static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-       struct mfc_cache *mfc = v;
        struct ipmr_mfc_iter *it = seq->private;
        struct net *net = seq_file_net(seq);
        struct mr_table *mrt = it->mrt;
+       struct mfc_cache *mfc = v;
 
        ++*pos;
 
@@ -2659,19 +2691,9 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        if (it->cache == &mrt->mfc_unres_queue)
                goto end_of_list;
 
-       BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
-
-       while (++it->ct < MFC_LINES) {
-               it->cache = &mrt->mfc_cache_array[it->ct];
-               if (list_empty(it->cache))
-                       continue;
-               return list_first_entry(it->cache, struct mfc_cache, list);
-       }
-
        /* exhausted cache_array, show unresolved */
        rcu_read_unlock();
        it->cache = &mrt->mfc_unres_queue;
-       it->ct = 0;
 
        spin_lock_bh(&mfc_unres_lock);
        if (!list_empty(it->cache))
@@ -2691,7 +2713,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
 
        if (it->cache == &mrt->mfc_unres_queue)
                spin_unlock_bh(&mfc_unres_lock);
-       else if (it->cache == &mrt->mfc_cache_array[it->ct])
+       else if (it->cache == &mrt->mfc_cache_list)
                rcu_read_unlock();
 }
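
The ipmr.c hunks above replace the fixed-size MFC_LINES hash array with an rhltable keyed on the (origin, mcastgrp) pair, plus a plain mfc_cache_list kept for ordered dumps and /proc iteration. A sketch of the table parameters this conversion presumes, consistent with the cmparg/mnode fields used above (the exact values are an assumption, not part of this excerpt):

static int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
			 const void *ptr)
{
	const struct mfc_cache_cmp_arg *cmparg = arg->key;
	const struct mfc_cache *c = ptr;

	/* non-zero means "no match" for rhashtable */
	return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
	       cmparg->mfc_origin != c->mfc_origin;
}

static const struct rhashtable_params ipmr_rht_params = {
	.head_offset	= offsetof(struct mfc_cache, mnode),
	.key_offset	= offsetof(struct mfc_cache, cmparg),
	.key_len	= sizeof(struct mfc_cache_cmp_arg),
	.nelem_hint	= 3,
	.obj_cmpfn	= ipmr_hash_cmp,
	.automatic_shrinking = true,
};

With a resizable table, lookups stay O(1) regardless of route count, and the netlink dump no longer needs a per-bucket cursor, which is why cb->args shrinks from three indices to two.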
 
index a467e1236c43ed1f3ecaa55524eb59722f1682e8..6241a81fd7f5a3df8fb3cf251bfdd407dda6a1f6 100644 (file)
@@ -677,11 +677,6 @@ static int copy_entries_to_user(unsigned int total_size,
                return PTR_ERR(counters);
 
        loc_cpu_entry = private->entries;
-       /* ... then copy entire thing ... */
-       if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
-               ret = -EFAULT;
-               goto free_counters;
-       }
 
        /* FIXME: use iterator macros --RR */
        /* ... then go back and fix counters and names */
@@ -689,6 +684,10 @@ static int copy_entries_to_user(unsigned int total_size,
                const struct xt_entry_target *t;
 
                e = (struct arpt_entry *)(loc_cpu_entry + off);
+               if (copy_to_user(userptr + off, e, sizeof(*e))) {
+                       ret = -EFAULT;
+                       goto free_counters;
+               }
                if (copy_to_user(userptr + off
                                 + offsetof(struct arpt_entry, counters),
                                 &counters[num],
@@ -698,11 +697,7 @@ static int copy_entries_to_user(unsigned int total_size,
                }
 
                t = arpt_get_target_c(e);
-               if (copy_to_user(userptr + off + e->target_offset
-                                + offsetof(struct xt_entry_target,
-                                           u.user.name),
-                                t->u.kernel.target->name,
-                                strlen(t->u.kernel.target->name)+1) != 0) {
+               if (xt_target_to_user(t, userptr + off + e->target_offset)) {
                        ret = -EFAULT;
                        goto free_counters;
                }
index 91656a1d8fbd5b15b306383fe733751fef10f27c..384b85713e062881d3d2554b0828f08ff28f443d 100644 (file)
@@ -826,10 +826,6 @@ copy_entries_to_user(unsigned int total_size,
                return PTR_ERR(counters);
 
        loc_cpu_entry = private->entries;
-       if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
-               ret = -EFAULT;
-               goto free_counters;
-       }
 
        /* FIXME: use iterator macros --RR */
        /* ... then go back and fix counters and names */
@@ -839,6 +835,10 @@ copy_entries_to_user(unsigned int total_size,
                const struct xt_entry_target *t;
 
                e = (struct ipt_entry *)(loc_cpu_entry + off);
+               if (copy_to_user(userptr + off, e, sizeof(*e))) {
+                       ret = -EFAULT;
+                       goto free_counters;
+               }
                if (copy_to_user(userptr + off
                                 + offsetof(struct ipt_entry, counters),
                                 &counters[num],
@@ -852,23 +852,14 @@ copy_entries_to_user(unsigned int total_size,
                     i += m->u.match_size) {
                        m = (void *)e + i;
 
-                       if (copy_to_user(userptr + off + i
-                                        + offsetof(struct xt_entry_match,
-                                                   u.user.name),
-                                        m->u.kernel.match->name,
-                                        strlen(m->u.kernel.match->name)+1)
-                           != 0) {
+                       if (xt_match_to_user(m, userptr + off + i)) {
                                ret = -EFAULT;
                                goto free_counters;
                        }
                }
 
                t = ipt_get_target_c(e);
-               if (copy_to_user(userptr + off + e->target_offset
-                                + offsetof(struct xt_entry_target,
-                                           u.user.name),
-                                t->u.kernel.target->name,
-                                strlen(t->u.kernel.target->name)+1) != 0) {
+               if (xt_target_to_user(t, userptr + off + e->target_offset)) {
                        ret = -EFAULT;
                        goto free_counters;
                }
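
The arp_tables and ip_tables hunks above stop bulk-copying the whole ruleset blob to userspace and instead copy each entry field by field, with xt_match_to_user()/xt_target_to_user() handling the match/target headers. A simplified sketch of what such a helper must do (an assumption; the real helpers live in net/netfilter/x_tables.c): the kernel pointer union is replaced by the user-visible name before anything crosses the boundary, so kernel addresses never leak.

static int xt_target_to_user_sketch(const struct xt_entry_target *t,
				    void __user *u)
{
	struct xt_entry_target copy = *t;	/* header only */

	/* never expose u.kernel.target, a live kernel pointer */
	memset(&copy.u, 0, sizeof(copy.u));
	copy.u.target_size = t->u.target_size;
	strlcpy(copy.u.user.name, t->u.kernel.target->name,
		sizeof(copy.u.user.name));
	copy.u.user.revision = t->u.kernel.target->revision;

	return copy_to_user(u, &copy, sizeof(copy)) ? -EFAULT : 0;
}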
index 0a783cd73faf25d9ec4d7605759038e4e0aef345..52f26459efc345a8a0c00d356306fb5fd398547e 100644 (file)
@@ -485,6 +485,7 @@ static struct xt_target clusterip_tg_reg __read_mostly = {
        .checkentry     = clusterip_tg_check,
        .destroy        = clusterip_tg_destroy,
        .targetsize     = sizeof(struct ipt_clusterip_tgt_info),
+       .usersize       = offsetof(struct ipt_clusterip_tgt_info, config),
 #ifdef CONFIG_COMPAT
        .compatsize     = sizeof(struct compat_ipt_clusterip_tgt_info),
 #endif /* CONFIG_COMPAT */
index 30c0de53e2541b45c494830bf0fc2fc36b0b9d43..3240a2614e82bd82c674aac84ab140c91670f163 100644 (file)
@@ -57,8 +57,7 @@ synproxy_send_tcp(struct net *net,
                goto free_nskb;
 
        if (nfct) {
-               nskb->nfct = nfct;
-               nskb->nfctinfo = ctinfo;
+               nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo);
                nf_conntrack_get(nfct);
        }
 
@@ -107,8 +106,8 @@ synproxy_send_client_synack(struct net *net,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
-                         niph, nth, tcp_hdr_size);
+       synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
+                         IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size);
 }
 
 static void
@@ -230,8 +229,8 @@ synproxy_send_client_ack(struct net *net,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
-                         niph, nth, tcp_hdr_size);
+       synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
+                         IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size);
 }
 
 static bool
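
Both synproxy hunks swap direct writes to nskb->nfct/nfctinfo for nf_ct_set(), and reads of skb->nfct for skb_nfct(); this is part of a series merging the two skb fields into a single skb->_nfct word that carries the conntrack pointer and the ctinfo in its low bits. A sketch of the accessors this conversion presumes (hedged; the real definitions sit in the skbuff/conntrack headers):

#define NFCT_INFOMASK	7UL
#define NFCT_PTRMASK	~(NFCT_INFOMASK)

static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
{
	return (void *)(skb->_nfct & NFCT_PTRMASK);
}

static inline void nf_ct_set(struct sk_buff *skb, struct nf_conn *ct,
			     enum ip_conntrack_info info)
{
	skb->_nfct = (unsigned long)ct | info;
}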
index d075b3cf24000c6eaba53964ce254b42214bd4b4..73c591d8a9a8e5295000f702e1b32e6a643e30e0 100644 (file)
@@ -128,16 +128,16 @@ static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
 /* Returns conntrack if it dealt with ICMP, and filled in skb fields */
 static int
 icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
-                enum ip_conntrack_info *ctinfo,
                 unsigned int hooknum)
 {
        struct nf_conntrack_tuple innertuple, origtuple;
        const struct nf_conntrack_l4proto *innerproto;
        const struct nf_conntrack_tuple_hash *h;
        const struct nf_conntrack_zone *zone;
+       enum ip_conntrack_info ctinfo;
        struct nf_conntrack_zone tmp;
 
-       NF_CT_ASSERT(skb->nfct == NULL);
+       NF_CT_ASSERT(!skb_nfct(skb));
        zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
 
        /* Are they talking about one of our connections? */
@@ -160,7 +160,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
                return -NF_ACCEPT;
        }
 
-       *ctinfo = IP_CT_RELATED;
+       ctinfo = IP_CT_RELATED;
 
        h = nf_conntrack_find_get(net, zone, &innertuple);
        if (!h) {
@@ -169,11 +169,10 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
        }
 
        if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
-               *ctinfo += IP_CT_IS_REPLY;
+               ctinfo += IP_CT_IS_REPLY;
 
        /* Update skb to refer to this connection */
-       skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
-       skb->nfctinfo = *ctinfo;
+       nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo);
        return NF_ACCEPT;
 }
 
@@ -181,7 +180,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
 static int
 icmp_error(struct net *net, struct nf_conn *tmpl,
           struct sk_buff *skb, unsigned int dataoff,
-          enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
+          u8 pf, unsigned int hooknum)
 {
        const struct icmphdr *icmph;
        struct icmphdr _ih;
@@ -225,7 +224,7 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
            icmph->type != ICMP_REDIRECT)
                return NF_ACCEPT;
 
-       return icmp_error_message(net, tmpl, skb, ctinfo, hooknum);
+       return icmp_error_message(net, tmpl, skb, hooknum);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
index 49bd6a54404f17559d31722b90bb4687b6c4abc8..346bf7ccac0881bc86c7d019987281d9dc8db62e 100644 (file)
@@ -45,7 +45,7 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
 {
        u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-       if (skb->nfct) {
+       if (skb_nfct(skb)) {
                enum ip_conntrack_info ctinfo;
                const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
@@ -75,7 +75,7 @@ static unsigned int ipv4_conntrack_defrag(void *priv,
 #if !IS_ENABLED(CONFIG_NF_NAT)
        /* Previously seen (loopback)?  Ignore.  Do this before
           fragment check. */
-       if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
+       if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb)))
                return NF_ACCEPT;
 #endif
 #endif
index cf986e1c7bbd2e2383ba37cf54fae87fccfdc72c..f0dbff05fc28174de694ecad1d1adcde63c314ec 100644 (file)
@@ -68,10 +68,9 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
 
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
        /* Avoid counting cloned packets towards the original connection. */
-       nf_conntrack_put(skb->nfct);
-       skb->nfct     = &nf_ct_untracked_get()->ct_general;
-       skb->nfctinfo = IP_CT_NEW;
-       nf_conntrack_get(skb->nfct);
+       nf_reset(skb);
+       nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW);
+       nf_conntrack_get(skb_nfct(skb));
 #endif
        /*
         * If we are in PREROUTING/INPUT, decrease the TTL to mitigate potential
index b24795e2ee6d486c871149811b7916145345f6ea..f6f713376e6e429610b070ba65f685cbfd6ae6ae 100644 (file)
@@ -87,7 +87,7 @@ static void nf_log_arp_packet(struct net *net, u_int8_t pf,
        struct nf_log_buf *m;
 
        /* FIXME: Disabled from containers until syslog ns is supported */
-       if (!net_eq(net, &init_net))
+       if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns)
                return;
 
        m = nf_log_buf_open();
index 856648966f4c209b5d01b608afb1b25a069b8b54..c83a9963269bf689d7c4285c89ef1a4228bc739b 100644 (file)
@@ -319,7 +319,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf,
        struct nf_log_buf *m;
 
        /* FIXME: Disabled from containers until syslog ns is supported */
-       if (!net_eq(net, &init_net))
+       if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns)
                return;
 
        m = nf_log_buf_open();
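
Both nf_log backends above now consult sysctl_nf_log_all_netns before suppressing log output from non-init network namespaces, so a host administrator can opt in even though syslog itself is still not namespaced (hence the surviving FIXME). A plausible sketch of the knob's definition, assuming it is registered from the shared nf_log core:

int sysctl_nf_log_all_netns __read_mostly;
EXPORT_SYMBOL(sysctl_nf_log_all_netns);

static struct ctl_table nf_log_netns_table[] = {
	{
		.procname	= "nf_log_all_netns",
		.data		= &sysctl_nf_log_all_netns,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};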
index 68d77b1f1495bb8dace1f6aa9c0e9a6ee5b2e5dd..2af6244b83e27ae384e96cf071c10c5a89674804 100644 (file)
@@ -433,9 +433,9 @@ int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                goto out;
        }
 
-       pr_debug("after bind(): num = %d, dif = %d\n",
-                (int)isk->inet_num,
-                (int)sk->sk_bound_dev_if);
+       pr_debug("after bind(): num = %hu, dif = %d\n",
+                isk->inet_num,
+                sk->sk_bound_dev_if);
 
        err = 0;
        if (sk->sk_family == AF_INET && isk->inet_rcv_saddr)
@@ -850,7 +850,8 @@ out:
        return err;
 
 do_confirm:
-       dst_confirm(&rt->dst);
+       if (msg->msg_flags & MSG_PROBE)
+               dst_confirm_neigh(&rt->dst, &fl4.daddr);
        if (!(msg->msg_flags & MSG_PROBE) || len)
                goto back_from_confirm;
        err = 0;
index 7143ca1a6af991cd7d1f35753577aae5422ccf10..69cf49e8356d0184f774840c9dc96560f2ae2f2b 100644 (file)
@@ -57,15 +57,13 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
        unsigned int frag_mem;
        int orphans, sockets;
 
-       local_bh_disable();
        orphans = percpu_counter_sum_positive(&tcp_orphan_count);
        sockets = proto_sockets_allocated_sum_positive(&tcp_prot);
-       local_bh_enable();
 
        socket_seq_show(seq);
        seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
                   sock_prot_inuse_get(net, &tcp_prot), orphans,
-                  atomic_read(&tcp_death_row.tw_count), sockets,
+                  atomic_read(&net->ipv4.tcp_death_row.tw_count), sockets,
                   proto_memory_allocated(&tcp_prot));
        seq_printf(seq, "UDP: inuse %d mem %ld\n",
                   sock_prot_inuse_get(net, &udp_prot),
@@ -264,6 +262,7 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPSackMerged", LINUX_MIB_SACKMERGED),
        SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK),
        SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
+       SNMP_MIB_ITEM("PFMemallocDrop", LINUX_MIB_PFMEMALLOCDROP),
        SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
        SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
        SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
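
sockstat now reads tw_count from net->ipv4.tcp_death_row rather than the old global, matching the move of the time-wait machinery into struct netns_ipv4 (the sysctl hunks further down relocate the corresponding knobs into the per-net table). A hypothetical excerpt of the per-netns fields these hunks presume:

struct netns_ipv4_excerpt {			/* illustrative only */
	struct inet_timewait_death_row	tcp_death_row;	/* was global */
	int	sysctl_max_syn_backlog;			/* was global */
	int	sysctl_ip_prot_sock;	/* ip_unprivileged_port_start */
	int	sysctl_udp_l3mdev_accept;
};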
index 4e49e5cb001ccd6824104af273bcbc40fc864ef7..8119e1f66e036ad2a8372bf24dd943c7d9631d8e 100644 (file)
@@ -383,6 +383,9 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
 
        sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
 
+       if (flags & MSG_CONFIRM)
+               skb_set_dst_pending_confirm(skb, 1);
+
        skb->transport_header = skb->network_header;
        err = -EFAULT;
        if (memcpy_from_msg(iph, msg, length))
@@ -666,7 +669,8 @@ out:
        return len;
 
 do_confirm:
-       dst_confirm(&rt->dst);
+       if (msg->msg_flags & MSG_PROBE)
+               dst_confirm_neigh(&rt->dst, &fl4.daddr);
        if (!(msg->msg_flags & MSG_PROBE) || len)
                goto back_from_confirm;
        err = 0;
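
In both ping.c and raw.c above, the unconditional dst_confirm() in the do_confirm path becomes a dst_confirm_neigh() taken only for MSG_PROBE (which sends nothing), while a real MSG_CONFIRM send now marks the skb with skb_set_dst_pending_confirm() so the neighbour is confirmed on actual transmit. A userspace sketch of the flag these paths serve; nothing here beyond the standard socket API:

#include <sys/types.h>
#include <sys/socket.h>

/* Refresh the neighbour entry for a peer we just heard from, piggy-
 * backed on a normal datagram send instead of fresh ARP traffic. */
static ssize_t send_confirming(int fd, const void *buf, size_t len,
			       const struct sockaddr *dst, socklen_t dlen)
{
	return sendto(fd, buf, len, MSG_CONFIRM, dst, dlen);
}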
index 709ffe67d1de1609be7d3e4a98d9314b01e5f265..cb494a5050f7a9d7cbfc7d961ee5ae0d66196be7 100644 (file)
@@ -154,6 +154,7 @@ static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
                                           struct sk_buff *skb,
                                           const void *daddr);
+static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
 
 static struct dst_ops ipv4_dst_ops = {
        .family =               AF_INET,
@@ -168,6 +169,7 @@ static struct dst_ops ipv4_dst_ops = {
        .redirect =             ip_do_redirect,
        .local_out =            __ip_local_out,
        .neigh_lookup =         ipv4_neigh_lookup,
+       .confirm_neigh =        ipv4_confirm_neigh,
 };
 
 #define ECN_OR_COST(class)     TC_PRIO_##class
@@ -461,6 +463,23 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
        return neigh_create(&arp_tbl, pkey, dev);
 }
 
+static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
+{
+       struct net_device *dev = dst->dev;
+       const __be32 *pkey = daddr;
+       const struct rtable *rt;
+
+       rt = (const struct rtable *)dst;
+       if (rt->rt_gateway)
+               pkey = (const __be32 *)&rt->rt_gateway;
+       else if (!daddr ||
+                (rt->rt_flags &
+                 (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL)))
+               return;
+
+       __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
+}
+
 #define IP_IDENTS_SZ 2048u
 
 static atomic_t *ip_idents __read_mostly;
@@ -1758,7 +1777,6 @@ standard_hash:
 
 static int ip_mkroute_input(struct sk_buff *skb,
                            struct fib_result *res,
-                           const struct flowi4 *fl4,
                            struct in_device *in_dev,
                            __be32 daddr, __be32 saddr, u32 tos)
 {
@@ -1883,7 +1901,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        if (res.type != RTN_UNICAST)
                goto martian_destination;
 
-       err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
+       err = ip_mkroute_input(skb, &res, in_dev, daddr, saddr, tos);
 out:   return err;
 
 brd_input:
@@ -2454,7 +2472,7 @@ EXPORT_SYMBOL_GPL(ip_route_output_flow);
 
 static int rt_fill_info(struct net *net,  __be32 dst, __be32 src, u32 table_id,
                        struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
-                       u32 seq, int event, int nowait, unsigned int flags)
+                       u32 seq, int event)
 {
        struct rtable *rt = skb_rtable(skb);
        struct rtmsg *r;
@@ -2463,7 +2481,7 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src, u32 table_id,
        u32 error;
        u32 metrics[RTAX_MAX];
 
-       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
+       nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), 0);
        if (!nlh)
                return -EMSGSIZE;
 
@@ -2541,18 +2559,12 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src, u32 table_id,
                    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
                        int err = ipmr_get_route(net, skb,
                                                 fl4->saddr, fl4->daddr,
-                                                r, nowait, portid);
+                                                r, portid);
 
                        if (err <= 0) {
-                               if (!nowait) {
-                                       if (err == 0)
-                                               return 0;
-                                       goto nla_put_failure;
-                               } else {
-                                       if (err == -EMSGSIZE)
-                                               goto nla_put_failure;
-                                       error = err;
-                               }
+                               if (err == 0)
+                                       return 0;
+                               goto nla_put_failure;
                        }
                } else
 #endif
@@ -2638,9 +2650,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
                skb->protocol   = htons(ETH_P_IP);
                skb->dev        = dev;
                skb->mark       = mark;
-               local_bh_disable();
                err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
-               local_bh_enable();
 
                rt = skb_rtable(skb);
                if (err == 0 && rt->dst.error)
@@ -2665,7 +2675,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
 
        err = rt_fill_info(net, dst, src, table_id, &fl4, skb,
                           NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
-                          RTM_NEWROUTE, 0, 0);
+                          RTM_NEWROUTE);
        if (err < 0)
                goto errout_free;
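
ipv4_confirm_neigh() above is the IPv4 backend for the new confirm_neigh dst operation: it picks the gateway address when one exists (falling back to the destination) and refreshes the matching ARP entry. A sketch of the generic dispatcher the .confirm_neigh assignment presumes, assumed to sit beside dst_neigh_lookup() in include/net/dst.h:

static inline void dst_confirm_neigh(const struct dst_entry *dst,
				     const void *daddr)
{
	if (dst->ops->confirm_neigh)
		dst->ops->confirm_neigh(dst, daddr);
}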
 
index 3e88467d70eec498e0a167474084c98c89069574..496b97e17aaf7ed2cf41cef303cb0696927f66ac 100644 (file)
 #include <linux/tcp.h>
 #include <linux/slab.h>
 #include <linux/random.h>
-#include <linux/cryptohash.h>
+#include <linux/siphash.h>
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <net/tcp.h>
 #include <net/route.h>
 
-static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly;
+static siphash_key_t syncookie_secret[2] __read_mostly;
 
 #define COOKIEBITS 24  /* Upper bits store count */
 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
@@ -48,24 +48,13 @@ static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly;
 #define TSBITS 6
 #define TSMASK (((__u32)1 << TSBITS) - 1)
 
-static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], ipv4_cookie_scratch);
-
 static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
                       u32 count, int c)
 {
-       __u32 *tmp;
-
        net_get_random_once(syncookie_secret, sizeof(syncookie_secret));
-
-       tmp  = this_cpu_ptr(ipv4_cookie_scratch);
-       memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
-       tmp[0] = (__force u32)saddr;
-       tmp[1] = (__force u32)daddr;
-       tmp[2] = ((__force u32)sport << 16) + (__force u32)dport;
-       tmp[3] = count;
-       sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5);
-
-       return tmp[17];
+       return siphash_4u32((__force u32)saddr, (__force u32)daddr,
+                           (__force u32)sport << 16 | (__force u32)dport,
+                           count, &syncookie_secret[c]);
 }
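
cookie_hash() above drops the per-cpu SHA1 scratch buffer for SipHash: a single keyed PRF call over four u32 inputs, with a 128-bit secret per direction and no workspace to manage. A minimal usage sketch of the same API (hypothetical names; truncation to u32 keeps the cookie math unchanged):

static siphash_key_t demo_key __read_mostly;

static u32 demo_hash(__be32 saddr, __be32 daddr)
{
	net_get_random_once(&demo_key, sizeof(demo_key));
	/* siphash_2u32() returns u64; the caller keeps the low bits */
	return siphash_2u32((__force u32)saddr, (__force u32)daddr,
			    &demo_key);
}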
 
 
index b2fa498b15d173739d0ebc5b6dd0577bf8dc4c08..d6880a6149ee80c6c75f4fe75b46a9d18d204d5d 100644 (file)
@@ -35,6 +35,8 @@ static int ip_local_port_range_min[] = { 1, 1 };
 static int ip_local_port_range_max[] = { 65535, 65535 };
 static int tcp_adv_win_scale_min = -31;
 static int tcp_adv_win_scale_max = 31;
+static int ip_privileged_port_min;
+static int ip_privileged_port_max = 65535;
 static int ip_ttl_min = 1;
 static int ip_ttl_max = 255;
 static int tcp_syn_retries_min = 1;
@@ -79,7 +81,12 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
        ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
 
        if (write && ret == 0) {
-               if (range[1] < range[0])
+               /* Ensure that the upper limit is not smaller than the lower,
+                * and that the lower does not encroach upon the privileged
+                * port limit.
+                */
+               if ((range[1] < range[0]) ||
+                   (range[0] < net->ipv4.sysctl_ip_prot_sock))
                        ret = -EINVAL;
                else
                        set_local_port_range(net, range);
@@ -88,6 +95,40 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
        return ret;
 }
 
+/* Validate changes from /proc interface. */
+static int ipv4_privileged_ports(struct ctl_table *table, int write,
+                               void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       struct net *net = container_of(table->data, struct net,
+           ipv4.sysctl_ip_prot_sock);
+       int ret;
+       int pports;
+       int range[2];
+       struct ctl_table tmp = {
+               .data = &pports,
+               .maxlen = sizeof(pports),
+               .mode = table->mode,
+               .extra1 = &ip_privileged_port_min,
+               .extra2 = &ip_privileged_port_max,
+       };
+
+       pports = net->ipv4.sysctl_ip_prot_sock;
+
+       ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+
+       if (write && ret == 0) {
+               inet_get_local_port_range(net, &range[0], &range[1]);
+               /* Ensure that the local port range doesn't overlap with the
+                * privileged port range.
+                */
+               if (range[0] < pports)
+                       ret = -EINVAL;
+               else
+                       net->ipv4.sysctl_ip_prot_sock = pports;
+       }
+
+       return ret;
+}
 
 static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
 {
@@ -289,13 +330,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-       {
-               .procname       = "tcp_max_tw_buckets",
-               .data           = &tcp_death_row.sysctl_max_tw_buckets,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {
                .procname       = "tcp_fastopen",
                .data           = &sysctl_tcp_fastopen,
@@ -309,13 +343,6 @@ static struct ctl_table ipv4_table[] = {
                .maxlen         = ((TCP_FASTOPEN_KEY_LENGTH * 2) + 10),
                .proc_handler   = proc_tcp_fastopen_key,
        },
-       {
-               .procname       = "tcp_tw_recycle",
-               .data           = &tcp_death_row.sysctl_tw_recycle,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {
                .procname       = "tcp_abort_on_overflow",
                .data           = &sysctl_tcp_abort_on_overflow,
@@ -337,13 +364,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-       {
-               .procname       = "tcp_max_syn_backlog",
-               .data           = &sysctl_max_syn_backlog,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {
                .procname       = "inet_peer_threshold",
                .data           = &inet_peer_threshold,
@@ -557,13 +577,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-       {
-               .procname       = "tcp_thin_dupack",
-               .data           = &sysctl_tcp_thin_dupack,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {
                .procname       = "tcp_early_retrans",
                .data           = &sysctl_tcp_early_retrans,
@@ -960,6 +973,27 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "tcp_max_tw_buckets",
+               .data           = &init_net.ipv4.tcp_death_row.sysctl_max_tw_buckets,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "tcp_tw_recycle",
+               .data           = &init_net.ipv4.tcp_death_row.sysctl_tw_recycle,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "tcp_max_syn_backlog",
+               .data           = &init_net.ipv4.sysctl_max_syn_backlog,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        {
                .procname       = "fib_multipath_use_neigh",
@@ -970,6 +1004,24 @@ static struct ctl_table ipv4_net_table[] = {
                .extra1         = &zero,
                .extra2         = &one,
        },
+#endif
+       {
+               .procname       = "ip_unprivileged_port_start",
+               .maxlen         = sizeof(int),
+               .data           = &init_net.ipv4.sysctl_ip_prot_sock,
+               .mode           = 0644,
+               .proc_handler   = ipv4_privileged_ports,
+       },
+#ifdef CONFIG_NET_L3_MASTER_DEV
+       {
+               .procname       = "udp_l3mdev_accept",
+               .data           = &init_net.ipv4.sysctl_udp_l3mdev_accept,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &one,
+       },
 #endif
        { }
 };
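
The two proc handlers above cross-validate so that ip_local_port_range can never dip below ip_unprivileged_port_start, and vice versa. The consumer side of the new knob is a bind-time capability check; a hedged sketch with a hypothetical helper name:

/* Ports below the per-netns threshold still demand
 * CAP_NET_BIND_SERVICE; writing 0 to the sysctl disables the notion
 * of privileged ports entirely. */
static inline bool port_is_privileged(struct net *net, unsigned short port)
{
	return port < net->ipv4.sysctl_ip_prot_sock;
}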
index 0efb4c7f6704f662b6c762e48698a41564add2a4..da385ae997a3d61f0217a2e585088a82e6d50cd3 100644 (file)
@@ -406,7 +406,6 @@ void tcp_init_sock(struct sock *sk)
        tp->mss_cache = TCP_MSS_DEFAULT;
 
        tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
-       tcp_enable_early_retrans(tp);
        tcp_assign_congestion_control(sk);
 
        tp->tsoffset = 0;
@@ -421,15 +420,13 @@ void tcp_init_sock(struct sock *sk)
        sk->sk_sndbuf = sysctl_tcp_wmem[1];
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
-       local_bh_disable();
        sk_sockets_allocated_inc(sk);
-       local_bh_enable();
 }
 EXPORT_SYMBOL(tcp_init_sock);
 
 static void tcp_tx_timestamp(struct sock *sk, u16 tsflags, struct sk_buff *skb)
 {
-       if (tsflags) {
+       if (tsflags && skb) {
                struct skb_shared_info *shinfo = skb_shinfo(skb);
                struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 
@@ -536,6 +533,12 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 
                if (tp->urg_data & TCP_URG_VALID)
                        mask |= POLLPRI;
+       } else if (sk->sk_state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
+               /* Active TCP fastopen socket with defer_connect.
+                * Return POLLOUT so the application can call write()
+                * in order for the kernel to generate SYN+data.
+                */
+               mask |= POLLOUT | POLLWRNORM;
        }
        /* This barrier is coupled with smp_wmb() in tcp_reset() */
        smp_rmb();
@@ -964,10 +967,8 @@ new_segment:
                copied += copy;
                offset += copy;
                size -= copy;
-               if (!size) {
-                       tcp_tx_timestamp(sk, sk->sk_tsflags, skb);
+               if (!size)
                        goto out;
-               }
 
                if (skb->len < size_goal || (flags & MSG_OOB))
                        continue;
@@ -993,8 +994,11 @@ wait_for_memory:
        }
 
 out:
-       if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
-               tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
+       if (copied) {
+               tcp_tx_timestamp(sk, sk->sk_tsflags, tcp_write_queue_tail(sk));
+               if (!(flags & MSG_SENDPAGE_NOTLAST))
+                       tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
+       }
        return copied;
 
 do_error:
@@ -1079,6 +1083,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
                                int *copied, size_t size)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       struct inet_sock *inet = inet_sk(sk);
        int err, flags;
 
        if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
@@ -1093,9 +1098,19 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
        tp->fastopen_req->data = msg;
        tp->fastopen_req->size = size;
 
+       if (inet->defer_connect) {
+               err = tcp_connect(sk);
+               /* Same failure procedure as in tcp_v4/6_connect */
+               if (err) {
+                       tcp_set_state(sk, TCP_CLOSE);
+                       inet->inet_dport = 0;
+                       sk->sk_route_caps = 0;
+               }
+       }
        flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
        err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
-                                   msg->msg_namelen, flags);
+                                   msg->msg_namelen, flags, 1);
+       inet->defer_connect = 0;
        *copied = tp->fastopen_req->copied;
        tcp_free_fastopen_req(tp);
        return err;
@@ -1115,7 +1130,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        lock_sock(sk);
 
        flags = msg->msg_flags;
-       if (flags & MSG_FASTOPEN) {
+       if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
                err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
                if (err == -EINPROGRESS && copied_syn > 0)
                        goto out;
@@ -1273,7 +1288,7 @@ new_segment:
                        } else {
                                skb_fill_page_desc(skb, i, pfrag->page,
                                                   pfrag->offset, copy);
-                               get_page(pfrag->page);
+                               page_ref_inc(pfrag->page);
                        }
                        pfrag->offset += copy;
                }
@@ -1287,7 +1302,6 @@ new_segment:
 
                copied += copy;
                if (!msg_data_left(msg)) {
-                       tcp_tx_timestamp(sk, sockc.tsflags, skb);
                        if (unlikely(flags & MSG_EOR))
                                TCP_SKB_CB(skb)->eor = 1;
                        goto out;
@@ -1318,8 +1332,10 @@ wait_for_memory:
        }
 
 out:
-       if (copied)
+       if (copied) {
+               tcp_tx_timestamp(sk, sockc.tsflags, tcp_write_queue_tail(sk));
                tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
+       }
 out_nopush:
        release_sock(sk);
        return copied + copied_syn;
@@ -2479,11 +2495,6 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
        case TCP_THIN_DUPACK:
                if (val < 0 || val > 1)
                        err = -EINVAL;
-               else {
-                       tp->thin_dupack = val;
-                       if (tp->thin_dupack)
-                               tcp_disable_early_retrans(tp);
-               }
                break;
 
        case TCP_REPAIR:
@@ -2668,6 +2679,18 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                        err = -EINVAL;
                }
                break;
+       case TCP_FASTOPEN_CONNECT:
+               if (val > 1 || val < 0) {
+                       err = -EINVAL;
+               } else if (sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) {
+                       if (sk->sk_state == TCP_CLOSE)
+                               tp->fastopen_connect = val;
+                       else
+                               err = -EINVAL;
+               } else {
+                       err = -EOPNOTSUPP;
+               }
+               break;
        case TCP_TIMESTAMP:
                if (!tp->repair)
                        err = -EPERM;
@@ -2770,6 +2793,9 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
                info->tcpi_sacked = sk->sk_max_ack_backlog;
                return;
        }
+
+       slow = lock_sock_fast(sk);
+
        info->tcpi_ca_state = icsk->icsk_ca_state;
        info->tcpi_retransmits = icsk->icsk_retransmits;
        info->tcpi_probes = icsk->icsk_probes_out;
@@ -2820,15 +2846,11 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 
        info->tcpi_total_retrans = tp->total_retrans;
 
-       slow = lock_sock_fast(sk);
-
        info->tcpi_bytes_acked = tp->bytes_acked;
        info->tcpi_bytes_received = tp->bytes_received;
        info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt);
        tcp_get_info_chrono_stats(tp, info);
 
-       unlock_sock_fast(sk, slow);
-
        info->tcpi_segs_out = tp->segs_out;
        info->tcpi_segs_in = tp->segs_in;
 
@@ -2844,6 +2866,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
                do_div(rate64, intv);
                info->tcpi_delivery_rate = rate64;
        }
+       unlock_sock_fast(sk, slow);
 }
 EXPORT_SYMBOL_GPL(tcp_get_info);
 
@@ -2853,7 +2876,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
        struct sk_buff *stats;
        struct tcp_info info;
 
-       stats = alloc_skb(3 * nla_total_size_64bit(sizeof(u64)), GFP_ATOMIC);
+       stats = alloc_skb(5 * nla_total_size_64bit(sizeof(u64)), GFP_ATOMIC);
        if (!stats)
                return NULL;
 
@@ -2864,6 +2887,10 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
                          info.tcpi_rwnd_limited, TCP_NLA_PAD);
        nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED,
                          info.tcpi_sndbuf_limited, TCP_NLA_PAD);
+       nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT,
+                         tp->data_segs_out, TCP_NLA_PAD);
+       nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS,
+                         tp->total_retrans, TCP_NLA_PAD);
        return stats;
 }
 
@@ -2973,8 +3000,9 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
        case TCP_THIN_LINEAR_TIMEOUTS:
                val = tp->thin_lto;
                break;
+
        case TCP_THIN_DUPACK:
-               val = tp->thin_dupack;
+               val = 0;
                break;
 
        case TCP_REPAIR:
@@ -3027,6 +3055,10 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                val = icsk->icsk_accept_queue.fastopenq.max_qlen;
                break;
 
+       case TCP_FASTOPEN_CONNECT:
+               val = tp->fastopen_connect;
+               break;
+
        case TCP_TIMESTAMP:
                val = tcp_time_stamp + tp->tsoffset;
                break;
@@ -3340,6 +3372,7 @@ void __init tcp_init(void)
 
        percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
        percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
+       inet_hashinfo_init(&tcp_hashinfo);
        tcp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("tcp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
@@ -3383,10 +3416,7 @@ void __init tcp_init(void)
 
 
        cnt = tcp_hashinfo.ehash_mask + 1;
-
-       tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
        sysctl_tcp_max_orphans = cnt / 2;
-       sysctl_max_syn_backlog = max(128, cnt / 256);
 
        tcp_init_mem();
        /* Set per-socket limits to no more than 1/128 the pressure threshold */
@@ -3405,6 +3435,7 @@ void __init tcp_init(void)
        pr_info("Hash tables configured (established %u bind %u)\n",
                tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
 
+       tcp_v4_init();
        tcp_metrics_init();
        BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
        tcp_tasklet_init();
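
The new tcp_poll() branch above reports POLLOUT for a TCP_SYN_SENT socket with defer_connect armed, since such a socket is deliberately waiting for the application to write before any SYN goes out. A userspace sketch of what that buys an event loop (a fuller deferred-connect example follows the tcp_fastopen.c hunks below):

#include <poll.h>

/* After connect() on a deferred-fastopen socket, the fd polls writable
 * immediately; the subsequent write() generates the SYN (+data). */
static int wait_writable(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };

	return poll(&pfd, 1, -1);
}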
index dd2560c83a8592359af70919bddb4a628630b0b3..8ea4e9787f82ba65cd07b4c2b663df76fe4eb143 100644 (file)
@@ -326,3 +326,57 @@ fastopen:
        *foc = valid_foc;
        return NULL;
 }
+
+bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
+                              struct tcp_fastopen_cookie *cookie)
+{
+       unsigned long last_syn_loss = 0;
+       int syn_loss = 0;
+
+       tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss);
+
+       /* Recurring FO SYN losses: no cookie or data in SYN */
+       if (syn_loss > 1 &&
+           time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
+               cookie->len = -1;
+               return false;
+       }
+       if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE) {
+               cookie->len = -1;
+               return true;
+       }
+       return cookie->len > 0;
+}
+
+/* This function checks if we want to defer sending SYN until the first
+ * write().  We defer under the following conditions:
+ * 1. fastopen_connect sockopt is set
+ * 2. we have a valid cookie
+ * Return value: return true if we want to defer until application writes data
+ *               return false if we want to send out SYN immediately
+ */
+bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
+{
+       struct tcp_fastopen_cookie cookie = { .len = 0 };
+       struct tcp_sock *tp = tcp_sk(sk);
+       u16 mss;
+
+       if (tp->fastopen_connect && !tp->fastopen_req) {
+               if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
+                       inet_sk(sk)->defer_connect = 1;
+                       return true;
+               }
+
+               /* Alloc fastopen_req in order for FO option to be included
+                * in SYN
+                */
+               tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
+                                          sk->sk_allocation);
+               if (tp->fastopen_req)
+                       tp->fastopen_req->cookie = cookie;
+               else
+                       *err = -ENOBUFS;
+       }
+       return false;
+}
+EXPORT_SYMBOL(tcp_fastopen_defer_connect);
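
tcp_fastopen_defer_connect() above, together with the TCP_FASTOPEN_CONNECT socket option added in tcp.c, lets a client connect() without blocking on the handshake and have the first write() carry the SYN, plus data whenever a Fast Open cookie is already cached for the peer. A userspace sketch of the flow, error handling omitted; only the option name comes from this series:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef TCP_FASTOPEN_CONNECT
#define TCP_FASTOPEN_CONNECT 30		/* introduced by this series */
#endif

static void fastopen_client(const struct sockaddr_in *daddr)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
	connect(fd, (const struct sockaddr *)daddr, sizeof(*daddr));
	/* no SYN has been sent yet; the next write emits SYN (+data) */
	write(fd, "GET / HTTP/1.0\r\n\r\n", 18);
	close(fd);
}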
index 41dcbd568cbe2403f2a9e659669afe462a42e228..2c0ff327b6dfe6919f22bf52687816e19c2c0444 100644 (file)
@@ -79,7 +79,7 @@
 int sysctl_tcp_timestamps __read_mostly = 1;
 int sysctl_tcp_window_scaling __read_mostly = 1;
 int sysctl_tcp_sack __read_mostly = 1;
-int sysctl_tcp_fack __read_mostly = 1;
+int sysctl_tcp_fack __read_mostly;
 int sysctl_tcp_max_reordering __read_mostly = 300;
 int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
@@ -95,9 +95,6 @@ int sysctl_tcp_rfc1337 __read_mostly;
 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 int sysctl_tcp_frto __read_mostly = 2;
 int sysctl_tcp_min_rtt_wlen __read_mostly = 300;
-
-int sysctl_tcp_thin_dupack __read_mostly;
-
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
 int sysctl_tcp_early_retrans __read_mostly = 3;
 int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
@@ -904,8 +901,6 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
                tcp_disable_fack(tp);
        }
 
-       if (metric > 0)
-               tcp_disable_early_retrans(tp);
        tp->rack.reord = 1;
 }
 
@@ -916,10 +911,6 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
            before(TCP_SKB_CB(skb)->seq,
                   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
                tp->retransmit_skb_hint = skb;
-
-       if (!tp->lost_out ||
-           after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high))
-               tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
 }
 
 /* Sum the number of packets on the wire we have marked as lost.
@@ -1135,6 +1126,7 @@ struct tcp_sacktag_state {
         */
        struct skb_mstamp first_sackt;
        struct skb_mstamp last_sackt;
+       struct skb_mstamp ack_time; /* Timestamp when the S/ACK was received */
        struct rate_sample *rate;
        int     flag;
 };
@@ -1217,7 +1209,8 @@ static u8 tcp_sacktag_one(struct sock *sk,
                return sacked;
 
        if (!(sacked & TCPCB_SACKED_ACKED)) {
-               tcp_rack_advance(tp, xmit_time, sacked);
+               tcp_rack_advance(tp, sacked, end_seq,
+                                xmit_time, &state->ack_time);
 
                if (sacked & TCPCB_SACKED_RETRANS) {
                        /* If the segment is not tagged as lost,
@@ -1937,7 +1930,6 @@ void tcp_enter_loss(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct sk_buff *skb;
-       bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
        bool is_reneg;                  /* is receiver reneging on SACKs? */
        bool mark_lost;
 
@@ -1982,7 +1974,6 @@ void tcp_enter_loss(struct sock *sk)
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
                        TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
                        tp->lost_out += tcp_skb_pcount(skb);
-                       tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
                }
        }
        tcp_verify_left_out(tp);
@@ -1998,13 +1989,15 @@ void tcp_enter_loss(struct sock *sk)
        tp->high_seq = tp->snd_nxt;
        tcp_ecn_queue_cwr(tp);
 
-       /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
-        * loss recovery is underway except recurring timeout(s) on
-        * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
+       /* F-RTO RFC5682 sec 3.1 step 1 mandates disabling F-RTO
+        * if a previous recovery is underway, otherwise it may incorrectly
+        * declare a timeout spurious if some previously retransmitted packets
+        * are s/acked (sec 3.2). We do not apply that restriction since
+        * retransmitted skbs are permanently tagged with TCPCB_EVER_RETRANS,
+        * so FLAG_ORIG_SACK_ACKED is always correct. But we do disable F-RTO
+        * on PMTU discovery to avoid sending new data.
         */
-       tp->frto = sysctl_tcp_frto &&
-                  (new_recovery || icsk->icsk_retransmits) &&
-                  !inet_csk(sk)->icsk_mtup.probe_size;
+       tp->frto = sysctl_tcp_frto && !inet_csk(sk)->icsk_mtup.probe_size;
 }
 
 /* If ACK arrived pointing to a remembered SACK, it means that our
@@ -2056,30 +2049,6 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
        return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
 }
 
-static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       unsigned long delay;
-
-       /* Delay early retransmit and entering fast recovery for
-        * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples
-        * available, or RTO is scheduled to fire first.
-        */
-       if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 ||
-           (flag & FLAG_ECE) || !tp->srtt_us)
-               return false;
-
-       delay = max(usecs_to_jiffies(tp->srtt_us >> 5),
-                   msecs_to_jiffies(2));
-
-       if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay)))
-               return false;
-
-       inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay,
-                                 TCP_RTO_MAX);
-       return true;
-}
-
 /* Linux NewReno/SACK/FACK/ECN state machine.
  * --------------------------------------
  *
@@ -2127,10 +2096,26 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
  *     F.e. after RTO, when all the queue is considered as lost,
  *     lost_out = packets_out and in_flight = retrans_out.
  *
- *             Essentially, we have now two algorithms counting
+ *             Essentially, we now have a few algorithms detecting
  *             lost packets.
  *
- *             FACK: It is the simplest heuristics. As soon as we decided
+ *             If the receiver supports SACK:
+ *
+ *             RFC6675/3517: It is the conventional algorithm. A packet is
+ *             considered lost if the number of higher sequence packets
+ *             SACKed is greater than or equal to the DUPACK threshold
+ *             (reordering). This is implemented in tcp_mark_head_lost and
+ *             tcp_update_scoreboard.
+ *
+ *             RACK (draft-ietf-tcpm-rack-01): it is a newer algorithm
+ *             (2017-) that checks timing instead of counting DUPACKs.
+ *             Essentially a packet is considered lost if it's not S/ACKed
+ *             after RTT + reordering_window, where both metrics are
+ *             dynamically measured and adjusted. This is implemented in
+ *             tcp_rack_mark_lost.
+ *
+ *             FACK (Disabled by default. Subsumed by RACK):
+ *             It is the simplest heuristics. As soon as we decided
  *             that something is lost, we decide that _all_ not SACKed
  *             packets until the most forward SACK are lost. I.e.
  *             lost_out = fackets_out - sacked_out and left_out = fackets_out.
@@ -2139,16 +2124,14 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
  *             takes place. We use FACK by default until reordering
  *             is suspected on the path to this destination.
  *
- *             NewReno: when Recovery is entered, we assume that one segment
+ *             If the receiver does not support SACK:
+ *
+ *             NewReno (RFC6582): in Recovery we assume that one segment
  *             is lost (classic Reno). While we are in Recovery and
  *             a partial ACK arrives, we assume that one more packet
 *             is lost (NewReno). These heuristics are the same in NewReno
  *             and SACK.
  *
- *  Imagine, that's all! Forget about all this shamanism about CWND inflation
- *  deflation etc. CWND is real congestion window, never inflated, changes
- *  only according to classic VJ rules.
- *
  * Really tricky (and requiring careful tuning) part of algorithm
  * is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
  * The first determines the moment _when_ we should reduce CWND and,
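
As a rough side-by-side of the two SACK-era detectors described in the comment above, a minimal userspace C sketch follows; the struct, field names and microsecond arithmetic are illustrative stand-ins for the kernel's skb_mstamp/tcp_sock state, not the real API:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical per-packet view, reduced to what the two rules need. */
struct pkt {
        uint64_t xmit_time_us;  /* when the packet was last (re)sent */
        bool     sacked;        /* has a SACK covered it yet? */
};

/* RFC6675/3517-style: lost once at least dupthresh higher-sequence
 * packets have been SACKed past this one.
 */
static bool rfc6675_lost(unsigned int higher_seq_sacked, unsigned int dupthresh)
{
        return higher_seq_sacked >= dupthresh;
}

/* RACK-style: lost once RTT + reordering window has elapsed since the
 * last (re)send without a (s)ack, however many DUPACKs arrived.
 */
static bool rack_lost(const struct pkt *p, uint64_t now_us,
                      uint64_t rtt_us, uint64_t reo_wnd_us)
{
        return !p->sacked && now_us - p->xmit_time_us > rtt_us + reo_wnd_us;
}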
@@ -2176,8 +2159,6 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
 static bool tcp_time_to_recover(struct sock *sk, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       __u32 packets_out;
-       int tcp_reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
 
        /* Trick#1: The loss is proven. */
        if (tp->lost_out)
@@ -2187,39 +2168,6 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
        if (tcp_dupack_heuristics(tp) > tp->reordering)
                return true;
 
-       /* Trick#4: It is still not OK... But will it be useful to delay
-        * recovery more?
-        */
-       packets_out = tp->packets_out;
-       if (packets_out <= tp->reordering &&
-           tp->sacked_out >= max_t(__u32, packets_out/2, tcp_reordering) &&
-           !tcp_may_send_now(sk)) {
-               /* We have nothing to send. This connection is limited
-                * either by receiver window or by application.
-                */
-               return true;
-       }
-
-       /* If a thin stream is detected, retransmit after first
-        * received dupack. Employ only if SACK is supported in order
-        * to avoid possible corner-case series of spurious retransmissions
-        * Use only if there are no unsent data.
-        */
-       if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
-           tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
-           tcp_is_sack(tp) && !tcp_send_head(sk))
-               return true;
-
-       /* Trick#6: TCP early retransmit, per RFC5827.  To avoid spurious
-        * retransmissions due to small network reorderings, we implement
-        * Mitigation A.3 in the RFC and delay the retransmission for a short
-        * interval if appropriate.
-        */
-       if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out &&
-           (tp->packets_out >= (tp->sacked_out + 1) && tp->packets_out < 4) &&
-           !tcp_may_send_now(sk))
-               return !tcp_pause_early_retransmit(sk, flag);
-
        return false;
 }
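
For readability: with the three removed heuristics gone (this series replaces delayed early retransmit with the RACK reordering timer), the function after this hunk reduces to roughly:

static bool tcp_time_to_recover(struct sock *sk, int flag)
{
        struct tcp_sock *tp = tcp_sk(sk);

        /* Trick#1: The loss is proven. */
        if (tp->lost_out)
                return true;

        /* Classic dupack-counting rule, unchanged by this patch. */
        if (tcp_dupack_heuristics(tp) > tp->reordering)
                return true;

        return false;
}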
 
@@ -2521,8 +2469,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
        tcp_ecn_queue_cwr(tp);
 }
 
-static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked,
-                              int flag)
+void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int sndcnt = 0;
@@ -2690,7 +2637,7 @@ void tcp_simple_retransmit(struct sock *sk)
 }
 EXPORT_SYMBOL(tcp_simple_retransmit);
 
-static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
+void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int mib_idx;
@@ -2726,14 +2673,18 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
            tcp_try_undo_loss(sk, false))
                return;
 
-       if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
-               /* Step 3.b. A timeout is spurious if not all data are
-                * lost, i.e., never-retransmitted data are (s)acked.
-                */
-               if ((flag & FLAG_ORIG_SACK_ACKED) &&
-                   tcp_try_undo_loss(sk, true))
-                       return;
+       /* The ACK (s)acks some never-retransmitted data meaning not all
+        * the data packets before the timeout were lost. Therefore we
+        * undo the congestion window and state. This is essentially
+        * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since
+        * a retransmitted skb is permanently marked, we can apply such an
+        * operation even if F-RTO was not used.
+        */
+       if ((flag & FLAG_ORIG_SACK_ACKED) &&
+           tcp_try_undo_loss(sk, tp->undo_marker))
+               return;
 
+       if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
                if (after(tp->snd_nxt, tp->high_seq)) {
                        if (flag & FLAG_DATA_SACKED || is_dupack)
                                tp->frto = 0; /* Step 3.a. loss was real */
@@ -2800,6 +2751,21 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked)
        return false;
 }
 
+static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag,
+                                  const struct skb_mstamp *ack_time)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       /* Use RACK to detect loss */
+       if (sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION) {
+               u32 prior_retrans = tp->retrans_out;
+
+               tcp_rack_mark_lost(sk, ack_time);
+               if (prior_retrans > tp->retrans_out)
+                       *ack_flag |= FLAG_LOST_RETRANS;
+       }
+}
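
tcp_rack_identify_loss() is then wired into each live branch of tcp_fastretrans_alert() (the Recovery, Loss and default Open/Disorder paths), as the following hunks show, so RACK can flag losses before the dupack-counting checks run.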
+
 /* Process an event, which can update packets-in-flight not trivially.
  * Main goal of this function is to calculate new estimate for left_out,
  * taking into account both packets sitting in receiver's buffer and
@@ -2813,7 +2779,8 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked)
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, const int acked,
-                                 bool is_dupack, int *ack_flag, int *rexmit)
+                                 bool is_dupack, int *ack_flag, int *rexmit,
+                                 const struct skb_mstamp *ack_time)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
@@ -2864,13 +2831,6 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                }
        }
 
-       /* Use RACK to detect loss */
-       if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS &&
-           tcp_rack_mark_lost(sk)) {
-               flag |= FLAG_LOST_RETRANS;
-               *ack_flag |= FLAG_LOST_RETRANS;
-       }
-
        /* E. Process state. */
        switch (icsk->icsk_ca_state) {
        case TCP_CA_Recovery:
@@ -2888,11 +2848,13 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                        tcp_try_keep_open(sk);
                        return;
                }
+               tcp_rack_identify_loss(sk, ack_flag, ack_time);
                break;
        case TCP_CA_Loss:
                tcp_process_loss(sk, flag, is_dupack, rexmit);
-               if (icsk->icsk_ca_state != TCP_CA_Open &&
-                   !(flag & FLAG_LOST_RETRANS))
+               tcp_rack_identify_loss(sk, ack_flag, ack_time);
+               if (!(icsk->icsk_ca_state == TCP_CA_Open ||
+                     (*ack_flag & FLAG_LOST_RETRANS)))
                        return;
                /* Change state if cwnd is undone or retransmits are lost */
        default:
@@ -2906,6 +2868,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                if (icsk->icsk_ca_state <= TCP_CA_Disorder)
                        tcp_try_undo_dsack(sk);
 
+               tcp_rack_identify_loss(sk, ack_flag, ack_time);
                if (!tcp_time_to_recover(sk, flag)) {
                        tcp_try_to_open(sk, flag);
                        return;
@@ -3024,7 +2987,7 @@ void tcp_rearm_rto(struct sock *sk)
        } else {
                u32 rto = inet_csk(sk)->icsk_rto;
                /* Offset the time elapsed after installing regular RTO */
-               if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+               if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
                    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                        struct sk_buff *skb = tcp_write_queue_head(sk);
                        const u32 rto_time_stamp =
@@ -3041,24 +3004,6 @@ void tcp_rearm_rto(struct sock *sk)
        }
 }
 
-/* This function is called when the delayed ER timer fires. TCP enters
- * fast recovery and performs fast-retransmit.
- */
-void tcp_resume_early_retransmit(struct sock *sk)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-
-       tcp_rearm_rto(sk);
-
-       /* Stop if ER is disabled after the delayed ER timer is scheduled */
-       if (!tp->do_early_retrans)
-               return;
-
-       tcp_enter_recovery(sk, false);
-       tcp_update_scoreboard(sk, 1);
-       tcp_xmit_retransmit_queue(sk);
-}
-
 /* If we get here, the whole TSO packet has not been acked. */
 static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
 {
@@ -3101,11 +3046,11 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                               u32 prior_snd_una, int *acked,
-                              struct tcp_sacktag_state *sack,
-                              struct skb_mstamp *now)
+                              struct tcp_sacktag_state *sack)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct skb_mstamp first_ackt, last_ackt;
+       struct skb_mstamp *now = &sack->ack_time;
        struct tcp_sock *tp = tcp_sk(sk);
        u32 prior_sacked = tp->sacked_out;
        u32 reord = tp->packets_out;
@@ -3165,7 +3110,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                } else if (tcp_is_sack(tp)) {
                        tp->delivered += acked_pcount;
                        if (!tcp_skb_spurious_retrans(tp, skb))
-                               tcp_rack_advance(tp, &skb->skb_mstamp, sacked);
+                               tcp_rack_advance(tp, sacked, scb->end_seq,
+                                                &skb->skb_mstamp,
+                                                &sack->ack_time);
                }
                if (sacked & TCPCB_LOST)
                        tp->lost_out -= acked_pcount;
@@ -3595,7 +3542,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        u32 lost = tp->lost;
        int acked = 0; /* Number of packets newly acked */
        int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
-       struct skb_mstamp now;
 
        sack_state.first_sackt.v64 = 0;
        sack_state.rate = &rs;
@@ -3621,10 +3567,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        if (after(ack, tp->snd_nxt))
                goto invalid_ack;
 
-       skb_mstamp_get(&now);
+       skb_mstamp_get(&sack_state.ack_time);
 
-       if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
-           icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
+       if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
                tcp_rearm_rto(sk);
 
        if (after(ack, prior_snd_una)) {
@@ -3689,34 +3634,34 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
        /* See if we can take anything off of the retransmit queue. */
        flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
-                                   &sack_state, &now);
+                                   &sack_state);
 
        if (tcp_ack_is_dubious(sk, flag)) {
                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
-               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
+               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit,
+                                     &sack_state.ack_time);
        }
        if (tp->tlp_high_seq)
                tcp_process_tlp_ack(sk, ack, flag);
 
-       if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
-               struct dst_entry *dst = __sk_dst_get(sk);
-               if (dst)
-                       dst_confirm(dst);
-       }
+       if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
+               sk_dst_confirm(sk);
 
        if (icsk->icsk_pending == ICSK_TIME_RETRANS)
                tcp_schedule_loss_probe(sk);
        delivered = tp->delivered - delivered;  /* freshly ACKed or SACKed */
        lost = tp->lost - lost;                 /* freshly marked lost */
-       tcp_rate_gen(sk, delivered, lost, &now, &rs);
-       tcp_cong_control(sk, ack, delivered, flag, &rs);
+       tcp_rate_gen(sk, delivered, lost, &sack_state.ack_time,
+                    sack_state.rate);
+       tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
        tcp_xmit_recovery(sk, rexmit);
        return 1;
 
 no_queue:
        /* If data was DSACKed, see if we can undo a cwnd reduction. */
        if (flag & FLAG_DSACKING_ACK)
-               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
+               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit,
+                                     &sack_state.ack_time);
        /* If this ack opens up a zero window, clear backoff.  It was
         * being used to time the probes, and is probably far higher than
         * it needs to be for normal retransmission.
@@ -3737,9 +3682,11 @@ old_ack:
         * If data was DSACKed, see if we can undo a cwnd reduction.
         */
        if (TCP_SKB_CB(skb)->sacked) {
+               skb_mstamp_get(&sack_state.ack_time);
                flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
                                                &sack_state);
-               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
+               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit,
+                                     &sack_state.ack_time);
                tcp_xmit_recovery(sk, rexmit);
        }
 
@@ -4557,6 +4504,7 @@ add_sack:
 end:
        if (skb) {
                tcp_grow_window(sk, skb);
+               skb_condense(skb);
                skb_set_owner_r(skb, sk);
        }
 }
@@ -5249,6 +5197,23 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
        return err;
 }
 
+/* Accept RST for rcv_nxt - 1 after a FIN.
+ * When tcp connections are abruptly terminated from Mac OSX (via ^C), a
+ * FIN is sent followed by a RST packet. The RST is sent with the same
+ * sequence number as the FIN, and thus according to RFC 5961 a challenge
+ * ACK should be sent. However, Mac OSX rate limits replies to challenge
+ * ACKs on the closed socket. In addition middleboxes can drop either the
+ * challenge ACK or a subsequent RST.
+ */
+static bool tcp_reset_check(const struct sock *sk, const struct sk_buff *skb)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) &&
+                       (1 << sk->sk_state) & (TCPF_CLOSE_WAIT | TCPF_LAST_ACK |
+                                              TCPF_CLOSING));
+}
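
The (1 << sk->sk_state) & (TCPF_...) expression is the usual idiom for testing membership of a small integer state in a set; a self-contained sketch, with state values chosen for illustration rather than copied from tcp_states.h:

#include <stdbool.h>
#include <stdio.h>

enum state { ST_ESTABLISHED = 1, ST_CLOSE_WAIT = 8, ST_LAST_ACK = 9, ST_CLOSING = 11 };

#define STF(s)  (1 << (s))      /* mirrors the TCPF_* = 1 << TCP_* convention */

/* True in the states where the peer's FIN has already been received. */
static bool fin_seen(enum state s)
{
        return STF(s) & (STF(ST_CLOSE_WAIT) | STF(ST_LAST_ACK) | STF(ST_CLOSING));
}

int main(void)
{
        printf("CLOSE_WAIT: %d, ESTABLISHED: %d\n",
               fin_seen(ST_CLOSE_WAIT), fin_seen(ST_ESTABLISHED));
        return 0;
}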
+
 /* Does PAWS and seqno based validation of an incoming segment, flags will
  * play significant role here.
  */
@@ -5287,20 +5252,25 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
                                                  LINUX_MIB_TCPACKSKIPPEDSEQ,
                                                  &tp->last_oow_ack_time))
                                tcp_send_dupack(sk, skb);
+               } else if (tcp_reset_check(sk, skb)) {
+                       tcp_reset(sk);
                }
                goto discard;
        }
 
        /* Step 2: check RST bit */
        if (th->rst) {
-               /* RFC 5961 3.2 (extend to match against SACK too if available):
-                * If seq num matches RCV.NXT or the right-most SACK block,
+               /* RFC 5961 3.2 (extend to match against (RCV.NXT - 1) after a
+                * FIN and SACK too if available):
+                * If seq num matches RCV.NXT or (RCV.NXT - 1) after a FIN, or
+                * the right-most SACK block,
                 * then
                 *     RESET the connection
                 * else
                 *     Send a challenge ACK
                 */
-               if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
+               if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt ||
+                   tcp_reset_check(sk, skb)) {
                        rst_seq_match = true;
                } else if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) {
                        struct tcp_sack_block *sp = &tp->selective_acks[0];
@@ -6022,7 +5992,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
                break;
 
        case TCP_FIN_WAIT1: {
-               struct dst_entry *dst;
                int tmo;
 
                /* If we enter the TCP_FIN_WAIT1 state and we are a
@@ -6049,9 +6018,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
                tcp_set_state(sk, TCP_FIN_WAIT2);
                sk->sk_shutdown |= SEND_SHUTDOWN;
 
-               dst = __sk_dst_get(sk);
-               if (dst)
-                       dst_confirm(dst);
+               sk_dst_confirm(sk);
 
                if (!sock_flag(sk, SOCK_DEAD)) {
                        /* Wake up lingering close() */
@@ -6363,7 +6330,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                 * timewait bucket, so that all the necessary checks
                 * are made in the function processing timewait state.
                 */
-               if (tcp_death_row.sysctl_tw_recycle) {
+               if (net->ipv4.tcp_death_row.sysctl_tw_recycle) {
                        bool strict;
 
                        dst = af_ops->route_req(sk, &fl, req, &strict);
@@ -6377,8 +6344,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                }
                /* Kill the following clause, if you dislike this way. */
                else if (!net->ipv4.sysctl_tcp_syncookies &&
-                        (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
-                         (sysctl_max_syn_backlog >> 2)) &&
+                        (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+                         (net->ipv4.sysctl_max_syn_backlog >> 2)) &&
                         !tcp_peer_is_proven(req, dst, false,
                                             tmp_opt.saw_tstamp)) {
                        /* Without syncookies last quarter of
index fe9da4fb96bf967ab27f5179fc5a9ac3576cc693..8c124d4ef4b72eb18df3fb2515ee141df9a521f3 100644 (file)
@@ -146,6 +146,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        struct rtable *rt;
        int err;
        struct ip_options_rcu *inet_opt;
+       struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
 
        if (addr_len < sizeof(struct sockaddr_in))
                return -EINVAL;
@@ -196,7 +197,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                        tp->write_seq      = 0;
        }
 
-       if (tcp_death_row.sysctl_tw_recycle &&
+       if (tcp_death_row->sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
                tcp_fetch_timewait_stamp(sk, &rt->dst);
 
@@ -215,7 +216,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
         * complete initialization after this.
         */
        tcp_set_state(sk, TCP_SYN_SENT);
-       err = inet_hash_connect(&tcp_death_row, sk);
+       err = inet_hash_connect(tcp_death_row, sk);
        if (err)
                goto failure;
 
@@ -231,6 +232,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        /* OK, now commit destination to socket.  */
        sk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(sk, &rt->dst);
+       rt = NULL;
 
        if (!tp->write_seq && likely(!tp->repair))
                tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
@@ -241,9 +243,13 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
        inet->inet_id = tp->write_seq ^ jiffies;
 
+       if (tcp_fastopen_defer_connect(sk, &err))
+               return err;
+       if (err)
+               goto failure;
+
        err = tcp_connect(sk);
 
-       rt = NULL;
        if (err)
                goto failure;
 
@@ -1318,10 +1324,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
        tcp_ca_openreq_child(newsk, dst);
 
        tcp_sync_mss(newsk, dst_mtu(dst));
-       newtp->advmss = dst_metric_advmss(dst);
-       if (tcp_sk(sk)->rx_opt.user_mss &&
-           tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
-               newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
+       newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
 
        tcp_initialize_rcv_mss(newsk);
 
@@ -1555,8 +1558,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
         * It has been noticed pure SACK packets were sometimes dropped
         * (if cooked by drivers without copybreak feature).
         */
-       if (!skb->data_len)
-               skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
+       skb_condense(skb);
 
        if (unlikely(sk_add_backlog(sk, skb, limit))) {
                bh_unlock_sock(sk);
@@ -1816,7 +1818,6 @@ const struct inet_connection_sock_af_ops ipv4_specific = {
        .getsockopt        = ip_getsockopt,
        .addr2sockaddr     = inet_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in),
-       .bind_conflict     = inet_csk_bind_conflict,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ip_setsockopt,
        .compat_getsockopt = compat_ip_getsockopt,
@@ -1887,9 +1888,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
        tcp_free_fastopen_req(tp);
        tcp_saved_syn_free(tp);
 
-       local_bh_disable();
        sk_sockets_allocated_dec(sk);
-       local_bh_enable();
 }
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
 
@@ -2228,7 +2227,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
        int state;
 
        if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
-           icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+           icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
            icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                timer_active    = 1;
                timer_expires   = icsk->icsk_timeout;
@@ -2375,6 +2374,7 @@ struct proto tcp_prot = {
        .shutdown               = tcp_shutdown,
        .setsockopt             = tcp_setsockopt,
        .getsockopt             = tcp_getsockopt,
+       .keepalive              = tcp_set_keepalive,
        .recvmsg                = tcp_recvmsg,
        .sendmsg                = tcp_sendmsg,
        .sendpage               = tcp_sendpage,
@@ -2418,7 +2418,7 @@ static void __net_exit tcp_sk_exit(struct net *net)
 
 static int __net_init tcp_sk_init(struct net *net)
 {
-       int res, cpu;
+       int res, cpu, cnt;
 
        net->ipv4.tcp_sk = alloc_percpu(struct sock *);
        if (!net->ipv4.tcp_sk)
@@ -2457,6 +2457,13 @@ static int __net_init tcp_sk_init(struct net *net)
        net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
        net->ipv4.sysctl_tcp_tw_reuse = 0;
 
+       cnt = tcp_hashinfo.ehash_mask + 1;
+       net->ipv4.tcp_death_row.sysctl_tw_recycle = 0;
+       net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
+       net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
+
+       net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
+
        return 0;
 fail:
        tcp_sk_exit(net);
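
To put numbers on the new per-netns defaults: cnt is the established-hash size, so a host with tcp_hashinfo.ehash_mask + 1 == 65536 ends up with sysctl_max_tw_buckets = 32768 and sysctl_max_syn_backlog = max(128, 65536 / 256) = 256, while a small machine with 4096 ehash slots stays at the backlog floor of 128.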
@@ -2466,7 +2473,7 @@ fail:
 
 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
 {
-       inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
+       inet_twsk_purge(&tcp_hashinfo, AF_INET);
 }
 
 static struct pernet_operations __net_initdata tcp_sk_ops = {
@@ -2477,7 +2484,6 @@ static struct pernet_operations __net_initdata tcp_sk_ops = {
 
 void __init tcp_v4_init(void)
 {
-       inet_hashinfo_init(&tcp_hashinfo);
        if (register_pernet_subsys(&tcp_sk_ops))
                panic("Failed to create the TCP control socket.\n");
 }
index ba8f02d0f283c6eaaf14ed89103adea135093353..0f46e5fe31ad1b6809ada1f70bce7b63df4f8c9c 100644 (file)
@@ -375,12 +375,10 @@ void tcp_update_metrics(struct sock *sk)
        u32 val;
        int m;
 
+       sk_dst_confirm(sk);
        if (sysctl_tcp_nometrics_save || !dst)
                return;
 
-       if (dst->flags & DST_HOST)
-               dst_confirm(dst);
-
        rcu_read_lock();
        if (icsk->icsk_backoff || !tp->srtt_us) {
                /* This session failed to estimate rtt. Why?
@@ -493,11 +491,10 @@ void tcp_init_metrics(struct sock *sk)
        struct tcp_metrics_block *tm;
        u32 val, crtt = 0; /* cached RTT scaled by 8 */
 
+       sk_dst_confirm(sk);
        if (!dst)
                goto reset;
 
-       dst_confirm(dst);
-
        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (!tm) {
@@ -522,7 +519,6 @@ void tcp_init_metrics(struct sock *sk)
        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
        if (val && tp->reordering != val) {
                tcp_disable_fack(tp);
-               tcp_disable_early_retrans(tp);
                tp->reordering = val;
        }
 
index 28ce5ee831f59d0a66d49b27c766b396b3e62ff9..dff7d2aaf8611e96da094097e1e153541b3e0119 100644 (file)
 
 int sysctl_tcp_abort_on_overflow __read_mostly;
 
-struct inet_timewait_death_row tcp_death_row = {
-       .sysctl_max_tw_buckets = NR_FILE * 2,
-       .hashinfo       = &tcp_hashinfo,
-};
-EXPORT_SYMBOL_GPL(tcp_death_row);
-
 static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
 {
        if (seq == s_win)
@@ -100,6 +94,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
        struct tcp_options_received tmp_opt;
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        bool paws_reject = false;
+       struct inet_timewait_death_row *tcp_death_row = &sock_net((struct sock *)tw)->ipv4.tcp_death_row;
 
        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
@@ -153,7 +148,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
                }
 
-               if (tcp_death_row.sysctl_tw_recycle &&
+               if (tcp_death_row->sysctl_tw_recycle &&
                    tcptw->tw_ts_recent_stamp &&
                    tcp_tw_remember_stamp(tw))
                        inet_twsk_reschedule(tw, tw->tw_timeout);
@@ -264,11 +259,12 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
        const struct tcp_sock *tp = tcp_sk(sk);
        struct inet_timewait_sock *tw;
        bool recycle_ok = false;
+       struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
 
-       if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
+       if (tcp_death_row->sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
                recycle_ok = tcp_remember_stamp(sk);
 
-       tw = inet_twsk_alloc(sk, &tcp_death_row, state);
+       tw = inet_twsk_alloc(sk, tcp_death_row, state);
 
        if (tw) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
@@ -364,15 +360,12 @@ void tcp_openreq_init_rwin(struct request_sock *req,
 {
        struct inet_request_sock *ireq = inet_rsk(req);
        const struct tcp_sock *tp = tcp_sk(sk_listener);
-       u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);
        int full_space = tcp_full_space(sk_listener);
-       int mss = dst_metric_advmss(dst);
        u32 window_clamp;
        __u8 rcv_wscale;
+       int mss;
 
-       if (user_mss && user_mss < mss)
-               mss = user_mss;
-
+       mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
        window_clamp = READ_ONCE(tp->window_clamp);
        /* Set this up on the first call only */
        req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
@@ -472,7 +465,6 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
                newtp->sacked_out = 0;
                newtp->fackets_out = 0;
                newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
-               tcp_enable_early_retrans(newtp);
                newtp->tlp_high_seq = 0;
                newtp->lsndtime = treq->snt_synack.stamp_jiffies;
                newsk->sk_txhash = treq->txhash;
index 8ce50dc3ab8cac821b8a2c3e0d31f0aa42f5c9d5..22548b5f05cbe5a655e0c53df2d31c5cc2e8a702 100644 (file)
@@ -76,16 +76,15 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
        tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
 
        tp->packets_out += tcp_skb_pcount(skb);
-       if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
-           icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
+       if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
                tcp_rearm_rto(sk);
-       }
 
        NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
                      tcp_skb_pcount(skb));
 }
 
-/* SND.NXT, if window was not shrunk.
+/* SND.NXT, if window was not shrunk or the amount shrunk was less than one
+ * window scaling factor due to loss of precision.
  * If window has been shrunk, what should we make? It is not clear at all.
  * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
  * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
@@ -95,7 +94,9 @@ static inline __u32 tcp_acceptable_seq(const struct sock *sk)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
 
-       if (!before(tcp_wnd_end(tp), tp->snd_nxt))
+       if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
+           (tp->rx_opt.wscale_ok &&
+            ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
                return tp->snd_nxt;
        else
                return tcp_wnd_end(tp);
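
A worked example of the added tolerance: with rcv_wscale == 7 the peer advertises its window in 128-byte units, so snd_nxt may exceed tcp_wnd_end() by up to 127 bytes simply because the receiver rounded its advertisement down; the new clause keeps returning SND.NXT in that case instead of treating the window as genuinely shrunk.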
@@ -966,6 +967,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
         */
        skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);
 
+       /* If we had to use memory reserve to allocate this skb,
+        * this might cause drops if packet is looped back :
+        * Other socket might not have SOCK_MEMALLOC.
+        * Packets not looped back do not care about pfmemalloc.
+        */
+       skb->pfmemalloc = 0;
+
        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);
 
@@ -975,6 +983,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        skb_set_hash_from_sk(skb, sk);
        atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 
+       skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
+
        /* Build TCP header and checksum it. */
        th = (struct tcphdr *)skb->data;
        th->source              = inet->inet_sport;
@@ -2289,8 +2299,6 @@ bool tcp_schedule_loss_probe(struct sock *sk)
        u32 timeout, tlp_time_stamp, rto_time_stamp;
        u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);
 
-       if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
-               return false;
        /* No consecutive loss probes. */
        if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
                tcp_rearm_rto(sk);
@@ -2309,8 +2317,9 @@ bool tcp_schedule_loss_probe(struct sock *sk)
        /* Schedule a loss probe in 2*RTT for SACK capable connections
         * in Open state, that are either limited by cwnd or application.
         */
-       if (sysctl_tcp_early_retrans < 3 || !tp->packets_out ||
-           !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
+       if ((sysctl_tcp_early_retrans != 3 && sysctl_tcp_early_retrans != 4) ||
+           !tp->packets_out || !tcp_is_sack(tp) ||
+           icsk->icsk_ca_state != TCP_CA_Open)
                return false;
 
        if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
@@ -2776,6 +2785,13 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
        if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
                tcp_ecn_clear_syn(sk, skb);
 
+       /* Update global and local TCP statistics. */
+       segs = tcp_skb_pcount(skb);
+       TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
+       if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+       tp->total_retrans += segs;
+
        /* make sure skb->data is aligned on arches that require it
         * and check if ack-trimming & collapsing extended the headroom
         * beyond what csum_start can cover.
@@ -2793,14 +2809,9 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
        }
 
        if (likely(!err)) {
-               segs = tcp_skb_pcount(skb);
-
                TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
-               /* Update global TCP statistics. */
-               TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
-               if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
-                       __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
-               tp->total_retrans += segs;
+       } else if (err != -EBUSY) {
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
        }
        return err;
 }
@@ -2823,8 +2834,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
                if (!tp->retrans_stamp)
                        tp->retrans_stamp = tcp_skb_timestamp(skb);
 
-       } else if (err != -EBUSY) {
-               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
        }
 
        if (tp->undo_retrans < 0)
@@ -2833,36 +2842,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
        return err;
 }
 
-/* Check if we forward retransmits are possible in the current
- * window/congestion state.
- */
-static bool tcp_can_forward_retransmit(struct sock *sk)
-{
-       const struct inet_connection_sock *icsk = inet_csk(sk);
-       const struct tcp_sock *tp = tcp_sk(sk);
-
-       /* Forward retransmissions are possible only during Recovery. */
-       if (icsk->icsk_ca_state != TCP_CA_Recovery)
-               return false;
-
-       /* No forward retransmissions in Reno are possible. */
-       if (tcp_is_reno(tp))
-               return false;
-
-       /* Yeah, we have to make difficult choice between forward transmission
-        * and retransmission... Both ways have their merits...
-        *
-        * For now we do not retransmit anything, while we have some new
-        * segments to send. In the other cases, follow rule 3 for
-        * NextSeg() specified in RFC3517.
-        */
-
-       if (tcp_may_send_now(sk))
-               return false;
-
-       return true;
-}
-
 /* This gets called after a retransmit timeout, and the initially
  * retransmitted data is acknowledged.  It tries to continue
  * resending the rest of the retransmit queue, until either
@@ -2877,24 +2856,16 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        struct sk_buff *hole = NULL;
-       u32 max_segs, last_lost;
+       u32 max_segs;
        int mib_idx;
-       int fwd_rexmitting = 0;
 
        if (!tp->packets_out)
                return;
 
-       if (!tp->lost_out)
-               tp->retransmit_high = tp->snd_una;
-
        if (tp->retransmit_skb_hint) {
                skb = tp->retransmit_skb_hint;
-               last_lost = TCP_SKB_CB(skb)->end_seq;
-               if (after(last_lost, tp->retransmit_high))
-                       last_lost = tp->retransmit_high;
        } else {
                skb = tcp_write_queue_head(sk);
-               last_lost = tp->snd_una;
        }
 
        max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
@@ -2917,31 +2888,14 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
                 */
                segs = min_t(int, segs, max_segs);
 
-               if (fwd_rexmitting) {
-begin_fwd:
-                       if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
-                               break;
-                       mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
-
-               } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
-                       tp->retransmit_high = last_lost;
-                       if (!tcp_can_forward_retransmit(sk))
-                               break;
-                       /* Backtrack if necessary to non-L'ed skb */
-                       if (hole) {
-                               skb = hole;
-                               hole = NULL;
-                       }
-                       fwd_rexmitting = 1;
-                       goto begin_fwd;
-
+               if (tp->retrans_out >= tp->lost_out) {
+                       break;
                } else if (!(sacked & TCPCB_LOST)) {
                        if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
                                hole = skb;
                        continue;
 
                } else {
-                       last_lost = TCP_SKB_CB(skb)->end_seq;
                        if (icsk->icsk_ca_state != TCP_CA_Loss)
                                mib_idx = LINUX_MIB_TCPFASTRETRANS;
                        else
@@ -2962,7 +2916,8 @@ begin_fwd:
                if (tcp_in_cwnd_reduction(sk))
                        tp->prr_out += tcp_skb_pcount(skb);
 
-               if (skb == tcp_write_queue_head(sk))
+               if (skb == tcp_write_queue_head(sk) &&
+                   icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                  inet_csk(sk)->icsk_rto,
                                                  TCP_RTO_MAX);
@@ -3119,7 +3074,6 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
        struct sk_buff *skb;
        int tcp_header_size;
        struct tcphdr *th;
-       u16 user_mss;
        int mss;
 
        skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
@@ -3149,10 +3103,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
        }
        skb_dst_set(skb, dst);
 
-       mss = dst_metric_advmss(dst);
-       user_mss = READ_ONCE(tp->rx_opt.user_mss);
-       if (user_mss && user_mss < mss)
-               mss = user_mss;
+       mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
 
        memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES
@@ -3258,9 +3209,7 @@ static void tcp_connect_init(struct sock *sk)
 
        if (!tp->window_clamp)
                tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
-       tp->advmss = dst_metric_advmss(dst);
-       if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
-               tp->advmss = tp->rx_opt.user_mss;
+       tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
 
        tcp_initialize_rcv_mss(sk);
 
@@ -3326,31 +3275,19 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_fastopen_request *fo = tp->fastopen_req;
-       int syn_loss = 0, space, err = 0;
-       unsigned long last_syn_loss = 0;
+       int space, err = 0;
        struct sk_buff *syn_data;
 
        tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
-       tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
-                              &syn_loss, &last_syn_loss);
-       /* Recurring FO SYN losses: revert to regular handshake temporarily */
-       if (syn_loss > 1 &&
-           time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
-               fo->cookie.len = -1;
-               goto fallback;
-       }
-
-       if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
-               fo->cookie.len = -1;
-       else if (fo->cookie.len <= 0)
+       if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
                goto fallback;
 
        /* MSS for SYN-data is based on cached MSS and bounded by PMTU and
         * user-MSS. Reserve maximum option space for middleboxes that add
         * private TCP options. The cost is reduced data space in SYN :(
         */
-       if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
-               tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
+       tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
+
        space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
                MAX_TCP_OPTION_SPACE;
 
index e36df4fcfeba3042f3d84337a18311427f68a418..4ecb38ae85042db7fa59e1aa6c74c9c3da0b1099 100644 (file)
@@ -1,9 +1,32 @@
 #include <linux/tcp.h>
 #include <net/tcp.h>
 
-int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOST_RETRANS;
+int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOSS_DETECTION;
 
-/* Marks a packet lost, if some packet sent later has been (s)acked.
+static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       tcp_skb_mark_lost_uncond_verify(tp, skb);
+       if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
+               /* Account for retransmits that are lost again */
+               TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
+               tp->retrans_out -= tcp_skb_pcount(skb);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
+       }
+}
+
+static bool tcp_rack_sent_after(const struct skb_mstamp *t1,
+                               const struct skb_mstamp *t2,
+                               u32 seq1, u32 seq2)
+{
+       return skb_mstamp_after(t1, t2) ||
+              (t1->v64 == t2->v64 && after(seq1, seq2));
+}
+
+/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
+ *
+ * Marks a packet lost, if some packet sent later has been (s)acked.
  * The underlying idea is similar to the traditional dupthresh and FACK
  * but they look at different metrics:
  *
@@ -16,31 +39,26 @@ int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOST_RETRANS;
  * is being more resilient to reordering by simply allowing some
  * "settling delay", instead of tweaking the dupthresh.
  *
- * The current version is only used after recovery starts but can be
- * easily extended to detect the first loss.
+ * When tcp_rack_detect_loss() detects some packets are lost and we
+ * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
+ * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
+ * make us enter the CA_Recovery state.
  */
-int tcp_rack_mark_lost(struct sock *sk)
+static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now,
+                                u32 *reo_timeout)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       u32 reo_wnd, prior_retrans = tp->retrans_out;
-
-       if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced)
-               return 0;
-
-       /* Reset the advanced flag to avoid unnecessary queue scanning */
-       tp->rack.advanced = 0;
+       u32 reo_wnd;
 
+       *reo_timeout = 0;
        /* To be more reordering resilient, allow min_rtt/4 settling delay
         * (lower-bounded to 1000us). We use min_rtt instead of the smoothed
         * RTT because reordering is often a path property and less related
         * to queuing or delayed ACKs.
-        *
-        * TODO: measure and adapt to the observed reordering delay, and
-        * use a timer to retransmit like the delayed early retransmit.
         */
        reo_wnd = 1000;
-       if (tp->rack.reord && tcp_min_rtt(tp) != ~0U)
+       if ((tp->rack.reord || !tp->lost_out) && tcp_min_rtt(tp) != ~0U)
                reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
 
        tcp_for_write_queue(skb, sk) {
@@ -54,20 +72,29 @@ int tcp_rack_mark_lost(struct sock *sk)
                    scb->sacked & TCPCB_SACKED_ACKED)
                        continue;
 
-               if (skb_mstamp_after(&tp->rack.mstamp, &skb->skb_mstamp)) {
+               if (tcp_rack_sent_after(&tp->rack.mstamp, &skb->skb_mstamp,
+                                       tp->rack.end_seq, scb->end_seq)) {
+                       /* Step 3 in draft-cheng-tcpm-rack-00.txt:
+                        * A packet is lost if its elapsed time is beyond
+                        * the recent RTT plus the reordering window.
+                        */
+                       u32 elapsed = skb_mstamp_us_delta(now,
+                                                         &skb->skb_mstamp);
+                       s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;
 
-                       if (skb_mstamp_us_delta(&tp->rack.mstamp,
-                                               &skb->skb_mstamp) <= reo_wnd)
+                       if (remaining < 0) {
+                               tcp_rack_mark_skb_lost(sk, skb);
                                continue;
-
-                       /* skb is lost if packet sent later is sacked */
-                       tcp_skb_mark_lost_uncond_verify(tp, skb);
-                       if (scb->sacked & TCPCB_SACKED_RETRANS) {
-                               scb->sacked &= ~TCPCB_SACKED_RETRANS;
-                               tp->retrans_out -= tcp_skb_pcount(skb);
-                               NET_INC_STATS(sock_net(sk),
-                                             LINUX_MIB_TCPLOSTRETRANSMIT);
                        }
+
+                       /* Skip ones marked lost but not yet retransmitted */
+                       if ((scb->sacked & TCPCB_LOST) &&
+                           !(scb->sacked & TCPCB_SACKED_RETRANS))
+                               continue;
+
+                       /* Record maximum wait time (+1 to avoid 0) */
+                       *reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);
+
                } else if (!(scb->sacked & TCPCB_RETRANS)) {
                        /* Original data are sent sequentially so stop early
                         * b/c the rest are all sent after rack_sent
@@ -75,20 +102,43 @@ int tcp_rack_mark_lost(struct sock *sk)
                        break;
                }
        }
-       return prior_retrans - tp->retrans_out;
 }
 
-/* Record the most recently (re)sent time among the (s)acked packets */
-void tcp_rack_advance(struct tcp_sock *tp,
-                     const struct skb_mstamp *xmit_time, u8 sacked)
+void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       u32 timeout;
+
+       if (!tp->rack.advanced)
+               return;
+
+       /* Reset the advanced flag to avoid unnecessary queue scanning */
+       tp->rack.advanced = 0;
+       tcp_rack_detect_loss(sk, now, &timeout);
+       if (timeout) {
+               timeout = usecs_to_jiffies(timeout + TCP_REO_TIMEOUT_MIN);
+               inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
+                                         timeout, inet_csk(sk)->icsk_rto);
+       }
+}
+
+/* Record the most recently (re)sent time among the (s)acked packets.
+ * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
+ * draft-cheng-tcpm-rack-00.txt
+ */
+void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
+                     const struct skb_mstamp *xmit_time,
+                     const struct skb_mstamp *ack_time)
 {
+       u32 rtt_us;
+
        if (tp->rack.mstamp.v64 &&
-           !skb_mstamp_after(xmit_time, &tp->rack.mstamp))
+           !tcp_rack_sent_after(xmit_time, &tp->rack.mstamp,
+                                end_seq, tp->rack.end_seq))
                return;
 
+       rtt_us = skb_mstamp_us_delta(ack_time, xmit_time);
        if (sacked & TCPCB_RETRANS) {
-               struct skb_mstamp now;
-
                /* If the sacked packet was retransmitted, it's ambiguous
                 * whether the retransmission or the original (or the prior
                 * retransmission) was sacked.
@@ -99,11 +149,35 @@ void tcp_rack_advance(struct tcp_sock *tp,
                 * so it's at least one RTT (i.e., retransmission is at least
                 * an RTT later).
                 */
-               skb_mstamp_get(&now);
-               if (skb_mstamp_us_delta(&now, xmit_time) < tcp_min_rtt(tp))
+               if (rtt_us < tcp_min_rtt(tp))
                        return;
        }
-
+       tp->rack.rtt_us = rtt_us;
        tp->rack.mstamp = *xmit_time;
+       tp->rack.end_seq = end_seq;
        tp->rack.advanced = 1;
 }
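
The min_rtt guard above is easiest to read with concrete numbers: if tcp_min_rtt() is 20000us and a (s)ack lands only 5000us after a retransmission, the ack almost certainly covers the original transmission, so RACK ignores it rather than adopt a bogus 5ms rack.rtt_us.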
+
+/* We have waited long enough to accommodate reordering. Mark the expired
+ * packets lost and retransmit them.
+ */
+void tcp_rack_reo_timeout(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct skb_mstamp now;
+       u32 timeout, prior_inflight;
+
+       skb_mstamp_get(&now);
+       prior_inflight = tcp_packets_in_flight(tp);
+       tcp_rack_detect_loss(sk, &now, &timeout);
+       if (prior_inflight != tcp_packets_in_flight(tp)) {
+               if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
+                       tcp_enter_recovery(sk, false);
+                       if (!inet_csk(sk)->icsk_ca_ops->cong_control)
+                               tcp_cwnd_reduction(sk, 1, 0);
+               }
+               tcp_xmit_retransmit_queue(sk);
+       }
+       if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
+               tcp_rearm_rto(sk);
+}
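
Tracing the timeout path with illustrative numbers: suppose rack.rtt_us == 40000 and reo_wnd == 1000 (all microseconds). A packet whose elapsed time since (re)transmission is 42000 gives remaining = 40000 + 1000 - 42000 = -1000, so tcp_rack_detect_loss() marks it lost immediately; one at 39000 gives remaining = 2000, so tcp_rack_mark_lost() arms ICSK_TIME_REO_TIMEOUT for about 2ms plus TCP_REO_TIMEOUT_MIN, and tcp_rack_reo_timeout() rescans the queue when it fires.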
index 3705075f42c34b4ce5102bd858d5ed55ed7dd4cc..40d893556e6701ace6a02903e53c45822d6fa56d 100644 (file)
@@ -563,8 +563,8 @@ void tcp_write_timer_handler(struct sock *sk)
        event = icsk->icsk_pending;
 
        switch (event) {
-       case ICSK_TIME_EARLY_RETRANS:
-               tcp_resume_early_retransmit(sk);
+       case ICSK_TIME_REO_TIMEOUT:
+               tcp_rack_reo_timeout(sk);
                break;
        case ICSK_TIME_LOSS_PROBE:
                tcp_send_loss_probe(sk);
@@ -617,6 +617,7 @@ void tcp_set_keepalive(struct sock *sk, int val)
        else if (!val)
                inet_csk_delete_keepalive_timer(sk);
 }
+EXPORT_SYMBOL_GPL(tcp_set_keepalive);
 
 
 static void tcp_keepalive_timer (unsigned long data)
index 8aab7d78d25bc6eaa42dcc960cdbd5086f614cad..ea6e4cff9fafe99af23fd8ea666cd979d5af9104 100644 (file)
@@ -134,14 +134,21 @@ EXPORT_SYMBOL(udp_memory_allocated);
 #define MAX_UDP_PORTS 65536
 #define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
 
+/* IPCB reference means this cannot be used from early demux */
+static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+       if (!net->ipv4.sysctl_udp_l3mdev_accept &&
+           skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
+               return true;
+#endif
+       return false;
+}
+
 static int udp_lib_lport_inuse(struct net *net, __u16 num,
                               const struct udp_hslot *hslot,
                               unsigned long *bitmap,
-                              struct sock *sk,
-                              int (*saddr_comp)(const struct sock *sk1,
-                                                const struct sock *sk2,
-                                                bool match_wildcard),
-                              unsigned int log)
+                              struct sock *sk, unsigned int log)
 {
        struct sock *sk2;
        kuid_t uid = sock_i_uid(sk);
@@ -153,13 +160,18 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
                    (!sk2->sk_reuse || !sk->sk_reuse) &&
                    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
                     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
-                   (!sk2->sk_reuseport || !sk->sk_reuseport ||
-                    rcu_access_pointer(sk->sk_reuseport_cb) ||
-                    !uid_eq(uid, sock_i_uid(sk2))) &&
-                   saddr_comp(sk, sk2, true)) {
-                       if (!bitmap)
-                               return 1;
-                       __set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap);
+                   inet_rcv_saddr_equal(sk, sk2, true)) {
+                       if (sk2->sk_reuseport && sk->sk_reuseport &&
+                           !rcu_access_pointer(sk->sk_reuseport_cb) &&
+                           uid_eq(uid, sock_i_uid(sk2))) {
+                               if (!bitmap)
+                                       return 0;
+                       } else {
+                               if (!bitmap)
+                                       return 1;
+                               __set_bit(udp_sk(sk2)->udp_port_hash >> log,
+                                         bitmap);
+                       }
                }
        }
        return 0;
@@ -171,10 +183,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
  */
 static int udp_lib_lport_inuse2(struct net *net, __u16 num,
                                struct udp_hslot *hslot2,
-                               struct sock *sk,
-                               int (*saddr_comp)(const struct sock *sk1,
-                                                 const struct sock *sk2,
-                                                 bool match_wildcard))
+                               struct sock *sk)
 {
        struct sock *sk2;
        kuid_t uid = sock_i_uid(sk);
@@ -188,11 +197,14 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
                    (!sk2->sk_reuse || !sk->sk_reuse) &&
                    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
                     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
-                   (!sk2->sk_reuseport || !sk->sk_reuseport ||
-                    rcu_access_pointer(sk->sk_reuseport_cb) ||
-                    !uid_eq(uid, sock_i_uid(sk2))) &&
-                   saddr_comp(sk, sk2, true)) {
-                       res = 1;
+                   inet_rcv_saddr_equal(sk, sk2, true)) {
+                       if (sk2->sk_reuseport && sk->sk_reuseport &&
+                           !rcu_access_pointer(sk->sk_reuseport_cb) &&
+                           uid_eq(uid, sock_i_uid(sk2))) {
+                               res = 0;
+                       } else {
+                               res = 1;
+                       }
                        break;
                }
        }
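
For each already-bound socket that matches on address and device, the rewritten reuseport handling in both helpers boils down to one decision, sketched here in simplified form (not the kernel code):

/* Does an already-bound sk2 block sk from taking the port? (sketch) */
static bool port_conflict(bool both_reuseport, bool same_uid,
                          bool sk_already_in_group)
{
        if (both_reuseport && same_uid && !sk_already_in_group)
                return false;   /* the pair can share via SO_REUSEPORT */
        return true;            /* otherwise the port counts as in use */
}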
@@ -200,10 +212,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
        return res;
 }
 
-static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot,
-                                 int (*saddr_same)(const struct sock *sk1,
-                                                   const struct sock *sk2,
-                                                   bool match_wildcard))
+static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
 {
        struct net *net = sock_net(sk);
        kuid_t uid = sock_i_uid(sk);
@@ -217,7 +226,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot,
                    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
                    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
                    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
-                   (*saddr_same)(sk, sk2, false)) {
+                   inet_rcv_saddr_equal(sk, sk2, false)) {
                        return reuseport_add_sock(sk, sk2);
                }
        }
@@ -233,14 +242,10 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot,
  *
  *  @sk:          socket struct in question
  *  @snum:        port number to look up
- *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
  *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
  *                   with NULL address
  */
 int udp_lib_get_port(struct sock *sk, unsigned short snum,
-                    int (*saddr_comp)(const struct sock *sk1,
-                                      const struct sock *sk2,
-                                      bool match_wildcard),
                     unsigned int hash2_nulladdr)
 {
        struct udp_hslot *hslot, *hslot2;
@@ -269,7 +274,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
                        bitmap_zero(bitmap, PORTS_PER_CHAIN);
                        spin_lock_bh(&hslot->lock);
                        udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
-                                           saddr_comp, udptable->log);
+                                           udptable->log);
 
                        snum = first;
                        /*
@@ -285,6 +290,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
                                snum += rand;
                        } while (snum != first);
                        spin_unlock_bh(&hslot->lock);
+                       cond_resched();
                } while (++first != last);
                goto fail;
        } else {
@@ -301,12 +307,11 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
                        if (hslot->count < hslot2->count)
                                goto scan_primary_hash;
 
-                       exist = udp_lib_lport_inuse2(net, snum, hslot2,
-                                                    sk, saddr_comp);
+                       exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
                        if (!exist && (hash2_nulladdr != slot2)) {
                                hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
                                exist = udp_lib_lport_inuse2(net, snum, hslot2,
-                                                            sk, saddr_comp);
+                                                            sk);
                        }
                        if (exist)
                                goto fail_unlock;
@@ -314,8 +319,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
                                goto found;
                }
 scan_primary_hash:
-               if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
-                                       saddr_comp, 0))
+               if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
                        goto fail_unlock;
        }
 found:
@@ -324,7 +328,7 @@ found:
        udp_sk(sk)->udp_portaddr_hash ^= snum;
        if (sk_unhashed(sk)) {
                if (sk->sk_reuseport &&
-                   udp_reuseport_add_sock(sk, hslot, saddr_comp)) {
+                   udp_reuseport_add_sock(sk, hslot)) {
                        inet_sk(sk)->inet_num = 0;
                        udp_sk(sk)->udp_port_hash = 0;
                        udp_sk(sk)->udp_portaddr_hash ^= snum;
@@ -356,24 +360,6 @@ fail:
 }
 EXPORT_SYMBOL(udp_lib_get_port);
 
-/* match_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
- * match_wildcard == false: addresses must be exactly the same, i.e.
- *                          0.0.0.0 only equals to 0.0.0.0
- */
-int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2,
-                        bool match_wildcard)
-{
-       struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
-
-       if (!ipv6_only_sock(sk2)) {
-               if (inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)
-                       return 1;
-               if (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr)
-                       return match_wildcard;
-       }
-       return 0;
-}
-
 static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
                              unsigned int port)
 {
@@ -389,12 +375,13 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum)
 
        /* precompute partial secondary hash */
        udp_sk(sk)->udp_portaddr_hash = hash2_partial;
-       return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
+       return udp_lib_get_port(sk, snum, hash2_nulladdr);
 }
 
 static int compute_score(struct sock *sk, struct net *net,
                         __be32 saddr, __be16 sport,
-                        __be32 daddr, unsigned short hnum, int dif)
+                        __be32 daddr, unsigned short hnum, int dif,
+                        bool exact_dif)
 {
        int score;
        struct inet_sock *inet;
@@ -425,7 +412,7 @@ static int compute_score(struct sock *sk, struct net *net,
                score += 4;
        }
 
-       if (sk->sk_bound_dev_if) {
+       if (sk->sk_bound_dev_if || exact_dif) {
                if (sk->sk_bound_dev_if != dif)
                        return -1;
                score += 4;
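The new exact_dif flag (udp_lib_exact_dif_match(), i.e. the l3mdev/VRF case) forces a device match even for sockets that never bound to a device. Reduced to its decision, the rule is (illustrative names):

#include <stdbool.h>

/* Returns the score delta, or -1 to disqualify the socket. */
static int dev_match_score(int bound_dev_if, int dif, bool exact_dif)
{
	if (bound_dev_if || exact_dif) {
		if (bound_dev_if != dif)
			return -1;
		return 4;
	}
	return 0;
}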
@@ -450,7 +437,7 @@ static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
 /* called with rcu_read_lock() */
 static struct sock *udp4_lib_lookup2(struct net *net,
                __be32 saddr, __be16 sport,
-               __be32 daddr, unsigned int hnum, int dif,
+               __be32 daddr, unsigned int hnum, int dif, bool exact_dif,
                struct udp_hslot *hslot2,
                struct sk_buff *skb)
 {
@@ -462,7 +449,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
        badness = 0;
        udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
                score = compute_score(sk, net, saddr, sport,
-                                     daddr, hnum, dif);
+                                     daddr, hnum, dif, exact_dif);
                if (score > badness) {
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
@@ -497,6 +484,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
        unsigned short hnum = ntohs(dport);
        unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
        struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
+       bool exact_dif = udp_lib_exact_dif_match(net, skb);
        int score, badness, matches = 0, reuseport = 0;
        u32 hash = 0;
 
@@ -509,7 +497,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 
                result = udp4_lib_lookup2(net, saddr, sport,
                                          daddr, hnum, dif,
-                                         hslot2, skb);
+                                         exact_dif, hslot2, skb);
                if (!result) {
                        unsigned int old_slot2 = slot2;
                        hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
@@ -524,7 +512,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 
                        result = udp4_lib_lookup2(net, saddr, sport,
                                                  daddr, hnum, dif,
-                                                 hslot2, skb);
+                                                 exact_dif, hslot2, skb);
                }
                return result;
        }
@@ -533,7 +521,7 @@ begin:
        badness = 0;
        sk_for_each_rcu(sk, &hslot->head) {
                score = compute_score(sk, net, saddr, sport,
-                                     daddr, hnum, dif);
+                                     daddr, hnum, dif, exact_dif);
                if (score > badness) {
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
@@ -1113,7 +1101,8 @@ out:
        return err;
 
 do_confirm:
-       dst_confirm(&rt->dst);
+       if (msg->msg_flags & MSG_PROBE)
+               dst_confirm_neigh(&rt->dst, &fl4->daddr);
        if (!(msg->msg_flags&MSG_PROBE) || len)
                goto back_from_confirm;
        err = 0;
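Neighbour confirmation becomes explicit here: dst_confirm_neigh() is called with the destination address, and only for MSG_PROBE sends that carry no payload; ordinary confirming sends are instead flagged on the skb (see the skb_set_dst_pending_confirm() hunks later in this series) so confirmation happens at actual transmit. For context, userspace reaches this path via the MSG_CONFIRM send flag; a sketch, not from the patch:

#include <sys/types.h>
#include <sys/socket.h>

/* Tell the stack forward progress was made toward the peer of a
 * connected datagram socket, so its neighbour entry can be
 * confirmed without a fresh ARP/ND probe.
 */
static ssize_t send_confirming(int fd, const void *buf, size_t len)
{
	return send(fd, buf, len, MSG_CONFIRM);
}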
index 62e1e72db4612d0aa5c912ac7ec7aeea24123a35..1fc684111ce6afe6798bb21f5dcbcbcab53b9f6b 100644 (file)
@@ -40,6 +40,7 @@ drop:
 
 int xfrm4_transport_finish(struct sk_buff *skb, int async)
 {
+       struct xfrm_offload *xo = xfrm_offload(skb);
        struct iphdr *iph = ip_hdr(skb);
 
        iph->protocol = XFRM_MODE_SKB_CB(skb)->protocol;
@@ -53,6 +54,11 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
 
+       if (xo && (xo->flags & XFRM_GRO)) {
+               skb_mac_header_rebuild(skb);
+               return 0;
+       }
+
        NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
                dev_net(skb->dev), NULL, skb, skb->dev, NULL,
                xfrm4_rcv_encap_finish);
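Packets resumed from the GRO codepath (XFRM_GRO set in their offload state) now rebuild the MAC header and return instead of looping back through NF_INET_PRE_ROUTING. The guard pattern, reduced to a sketch (flag value illustrative, the real one lives in net/xfrm.h):

#include <stdbool.h>
#include <stddef.h>

#define XFRM_GRO 0x1	/* illustrative value */

struct offload_state { unsigned int flags; };

static bool resume_via_gro(const struct offload_state *xo)
{
	/* xo is NULL when no offload state was attached to the skb */
	return xo && (xo->flags & XFRM_GRO);
}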
index fd840c7d75ea9bc0214bb14dd238e3cee820521a..4acc0508c5ebc65dc392de50a207901b2ea8d305 100644 (file)
@@ -43,6 +43,7 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
 {
        int ihl = skb->data - skb_transport_header(skb);
+       struct xfrm_offload *xo = xfrm_offload(skb);
 
        if (skb->transport_header != skb->network_header) {
                memmove(skb_transport_header(skb),
@@ -50,7 +51,8 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
                skb->network_header = skb->transport_header;
        }
        ip_hdr(skb)->tot_len = htons(skb->len + ihl);
-       skb_reset_transport_header(skb);
+       if (!xo || !(xo->flags & XFRM_GRO))
+               skb_reset_transport_header(skb);
        return 0;
 }
 
index 6a7ff69575353f5242aa800023999bcc12823fee..71b4ecc195c707b3e2ce9dab974f6d68a5e5c5eb 100644 (file)
@@ -17,8 +17,6 @@
 #include <net/ip.h>
 #include <net/l3mdev.h>
 
-static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
-
 static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
                                            int tos, int oif,
                                            const xfrm_address_t *saddr,
@@ -219,7 +217,7 @@ static inline int xfrm4_garbage_collect(struct dst_ops *ops)
 {
        struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
 
-       xfrm4_policy_afinfo.garbage_collect(net);
+       xfrm_garbage_collect_deferred(net);
        return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
 }
 
@@ -271,8 +269,7 @@ static struct dst_ops xfrm4_dst_ops_template = {
        .gc_thresh =            INT_MAX,
 };
 
-static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
-       .family =               AF_INET,
+static const struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
        .dst_ops =              &xfrm4_dst_ops_template,
        .dst_lookup =           xfrm4_dst_lookup,
        .get_saddr =            xfrm4_get_saddr,
@@ -376,7 +373,7 @@ static struct pernet_operations __net_initdata xfrm4_net_ops = {
 
 static void __init xfrm4_policy_init(void)
 {
-       xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
+       xfrm_policy_register_afinfo(&xfrm4_policy_afinfo, AF_INET);
 }
 
 void __init xfrm4_init(void)
index dccefa9d84cfd93d7de0f86bc904ab6c37bc5a92..8dd0e6ab86065f4e5e17bcd6d119def4677a2b86 100644 (file)
@@ -188,9 +188,8 @@ static const struct net_protocol ipcomp4_protocol = {
        .netns_ok       =       1,
 };
 
-static struct xfrm_input_afinfo xfrm4_input_afinfo = {
+static const struct xfrm_input_afinfo xfrm4_input_afinfo = {
        .family         =       AF_INET,
-       .owner          =       THIS_MODULE,
        .callback       =       xfrm4_rcv_cb,
 };
 
index 542074c00c78e34ede22013e568623fb0311b18d..d6660a8c0ea58c971c62e4e888e6256610c011ca 100644 (file)
@@ -90,11 +90,3 @@ void __init xfrm4_state_init(void)
 {
        xfrm_state_register_afinfo(&xfrm4_state_afinfo);
 }
-
-#if 0
-void __exit xfrm4_state_fini(void)
-{
-       xfrm_state_unregister_afinfo(&xfrm4_state_afinfo);
-}
-#endif  /*  0  */
-
index ec1267e2bd1f8c9168687513010cec7a7271ba71..e2afe677a9d944a2c6c27a2e7b2d06227712cf89 100644 (file)
@@ -75,6 +75,19 @@ config INET6_ESP
 
          If unsure, say Y.
 
+config INET6_ESP_OFFLOAD
+       tristate "IPv6: ESP transformation offload"
+       depends on INET6_ESP
+       select XFRM_OFFLOAD
+       default n
+       ---help---
+         Support for ESP transformation offload. This makes sense
+         only if this system really does IPsec and wants to do it
+         with high throughput. A typical desktop system does not
+         need it, even if it does IPsec.
+
+         If unsure, say N.
+
 config INET6_IPCOMP
        tristate "IPv6: IPComp transformation"
        select INET6_XFRM_TUNNEL
@@ -208,6 +221,7 @@ config IPV6_TUNNEL
        tristate "IPv6: IP-in-IPv6 tunnel (RFC2473)"
        select INET6_TUNNEL
        select DST_CACHE
+       select GRO_CELLS
        ---help---
          Support for IPv6-in-IPv6 and IPv4-in-IPv6 tunnels described in
          RFC 2473.
index a9e9fec387ce828be30af69e49bd31b71604652f..217e9ff0e24b6db62307d27ddc50f1a88c2da73e 100644 (file)
@@ -30,6 +30,7 @@ ipv6-objs += $(ipv6-y)
 
 obj-$(CONFIG_INET6_AH) += ah6.o
 obj-$(CONFIG_INET6_ESP) += esp6.o
+obj-$(CONFIG_INET6_ESP_OFFLOAD) += esp6_offload.o
 obj-$(CONFIG_INET6_IPCOMP) += ipcomp6.o
 obj-$(CONFIG_INET6_XFRM_TUNNEL) += xfrm6_tunnel.o
 obj-$(CONFIG_INET6_TUNNEL) += tunnel6.o
index a7bcc0ab5e99543a08410abe6ff3dbfc9b3753b7..3a2025f5bf2c333a37d18329cdec88fdc1827870 100644 (file)
@@ -243,6 +243,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .seg6_require_hmac      = 0,
 #endif
        .enhanced_dad           = 1,
+       .addr_gen_mode          = IN6_ADDR_GEN_MODE_EUI64,
 };
 
 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -294,6 +295,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
        .seg6_require_hmac      = 0,
 #endif
        .enhanced_dad           = 1,
+       .addr_gen_mode          = IN6_ADDR_GEN_MODE_EUI64,
 };
 
 /* Check if a valid qdisc is available */
@@ -386,9 +388,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
        memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
 
        if (ndev->cnf.stable_secret.initialized)
-               ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+               ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
        else
-               ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64;
+               ndev->cnf.addr_gen_mode = ipv6_devconf_dflt.addr_gen_mode;
 
        ndev->cnf.mtu6 = dev->mtu;
        ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
@@ -2144,12 +2146,14 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
        case ARPHRD_SIT:
                return addrconf_ifid_sit(eui, dev);
        case ARPHRD_IPGRE:
+       case ARPHRD_TUNNEL:
                return addrconf_ifid_gre(eui, dev);
        case ARPHRD_6LOWPAN:
                return addrconf_ifid_eui64(eui, dev);
        case ARPHRD_IEEE1394:
                return addrconf_ifid_ieee1394(eui, dev);
        case ARPHRD_TUNNEL6:
+       case ARPHRD_IP6GRE:
                return addrconf_ifid_ip6tnl(eui, dev);
        }
        return -1;
@@ -2387,8 +2391,8 @@ static void manage_tempaddrs(struct inet6_dev *idev,
 
 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
 {
-       return idev->addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
-              idev->addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
+       return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
+              idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
 }
 
 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
@@ -3152,7 +3156,7 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
 
        ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
 
-       switch (idev->addr_gen_mode) {
+       switch (idev->cnf.addr_gen_mode) {
        case IN6_ADDR_GEN_MODE_RANDOM:
                ipv6_gen_mode_random_init(idev);
                /* fallthrough */
@@ -3193,6 +3197,9 @@ static void addrconf_dev_config(struct net_device *dev)
            (dev->type != ARPHRD_IEEE1394) &&
            (dev->type != ARPHRD_TUNNEL6) &&
            (dev->type != ARPHRD_6LOWPAN) &&
+           (dev->type != ARPHRD_IP6GRE) &&
+           (dev->type != ARPHRD_IPGRE) &&
+           (dev->type != ARPHRD_TUNNEL) &&
            (dev->type != ARPHRD_NONE)) {
                /* Alas, we support only Ethernet autoconfiguration. */
                return;
@@ -3204,8 +3211,8 @@ static void addrconf_dev_config(struct net_device *dev)
 
        /* this device type has no EUI support */
        if (dev->type == ARPHRD_NONE &&
-           idev->addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
-               idev->addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
+           idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
+               idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
 
        addrconf_addr_gen(idev, false);
 }
@@ -4900,6 +4907,13 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
        struct net *net = dev_net(ifa->idev->dev);
        int err = -ENOBUFS;
 
+       /* Don't send DELADDR notification for TENTATIVE address,
+        * since NEWADDR notification is sent only after removing
+        * TENTATIVE flag.
+        */
+       if (ifa->flags & IFA_F_TENTATIVE && event == RTM_DELADDR)
+               return;
+
        skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
        if (!skb)
                goto errout;
@@ -4987,6 +5001,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
 #endif
        array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
+       array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
 }
 
 static inline size_t inet6_ifla6_size(void)
@@ -5098,7 +5113,7 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
        if (!nla)
                goto nla_put_failure;
 
-       if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->addr_gen_mode))
+       if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
                goto nla_put_failure;
 
        read_lock_bh(&idev->lock);
@@ -5216,6 +5231,26 @@ static int inet6_validate_link_af(const struct net_device *dev,
        return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy);
 }
 
+static int check_addr_gen_mode(int mode)
+{
+       if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
+           mode != IN6_ADDR_GEN_MODE_NONE &&
+           mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
+           mode != IN6_ADDR_GEN_MODE_RANDOM)
+               return -EINVAL;
+       return 1;
+}
+
+static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
+                               int mode)
+{
+       if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
+           !idev->cnf.stable_secret.initialized &&
+           !net->ipv6.devconf_dflt->stable_secret.initialized)
+               return -EINVAL;
+       return 1;
+}
+
 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
 {
        int err = -EINVAL;
@@ -5237,18 +5272,11 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
        if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
                u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
 
-               if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
-                   mode != IN6_ADDR_GEN_MODE_NONE &&
-                   mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
-                   mode != IN6_ADDR_GEN_MODE_RANDOM)
-                       return -EINVAL;
-
-               if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
-                   !idev->cnf.stable_secret.initialized &&
-                   !dev_net(dev)->ipv6.devconf_dflt->stable_secret.initialized)
+               if (check_addr_gen_mode(mode) < 0 ||
+                   check_stable_privacy(idev, dev_net(dev), mode) < 0)
                        return -EINVAL;
 
-               idev->addr_gen_mode = mode;
+               idev->cnf.addr_gen_mode = mode;
                err = 0;
        }
 
@@ -5655,6 +5683,47 @@ int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
        return ret;
 }
 
+static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
+                                        void __user *buffer, size_t *lenp,
+                                        loff_t *ppos)
+{
+       int ret = 0;
+       int new_val;
+       struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
+       struct net *net = (struct net *)ctl->extra2;
+
+       ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+
+       if (write) {
+               new_val = *((int *)ctl->data);
+
+               if (check_addr_gen_mode(new_val) < 0)
+                       return -EINVAL;
+
+               /* request for default */
+               if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) {
+                       ipv6_devconf_dflt.addr_gen_mode = new_val;
+
+               /* request for individual net device */
+               } else {
+                       if (!idev)
+                               return ret;
+
+                       if (check_stable_privacy(idev, net, new_val) < 0)
+                               return -EINVAL;
+
+                       if (idev->cnf.addr_gen_mode != new_val) {
+                               idev->cnf.addr_gen_mode = new_val;
+                               rtnl_lock();
+                               addrconf_dev_config(idev->dev);
+                               rtnl_unlock();
+                       }
+               }
+       }
+
+       return ret;
+}
+
 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
                                         void __user *buffer, size_t *lenp,
                                         loff_t *ppos)
@@ -5705,14 +5774,14 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
                        struct inet6_dev *idev = __in6_dev_get(dev);
 
                        if (idev) {
-                               idev->addr_gen_mode =
+                               idev->cnf.addr_gen_mode =
                                        IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
                        }
                }
        } else {
                struct inet6_dev *idev = ctl->extra1;
 
-               idev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+               idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
        }
 
 out:
@@ -6099,6 +6168,13 @@ static const struct ctl_table addrconf_sysctl[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname               = "addr_gen_mode",
+               .data                   = &ipv6_devconf.addr_gen_mode,
+               .maxlen                 = sizeof(int),
+               .mode                   = 0644,
+               .proc_handler   = addrconf_sysctl_addr_gen_mode,
+       },
        {
                /* sentinel */
        }
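The new addr_gen_mode sysctl can be driven from userspace like any other per-interface conf knob. A minimal sketch, assuming the usual /proc/sys/net/ipv6/conf/<dev>/ layout and the uapi mode values (EUI64=0, NONE=1, STABLE_PRIVACY=2, RANDOM=3):

#include <stdio.h>

int main(void)
{
	/* switch newly created devices to stable-privacy addresses */
	FILE *f = fopen("/proc/sys/net/ipv6/conf/default/addr_gen_mode", "w");

	if (!f)
		return 1;
	fprintf(f, "2\n");	/* IN6_ADDR_GEN_MODE_STABLE_PRIVACY */
	return fclose(f) ? 1 : 0;
}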
index aa42123bc301f9c3877db49a05e6379c715aa384..04db40620ea65c1f369ef63490383e92def722ff 100644 (file)
@@ -302,7 +302,8 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                return -EINVAL;
 
        snum = ntohs(addr->sin6_port);
-       if (snum && snum < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
+       if (snum && snum < inet_prot_sock(net) &&
+           !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
                return -EACCES;
 
        lock_sock(sk);
index 189eb10b742d02fa5b39ac7206703e31e30c3cf7..dda6035e3b8480d9f2640f99f15f19e54b4f6b02 100644 (file)
@@ -474,6 +474,9 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
        int hdr_len = skb_network_header_len(skb);
        int ah_hlen = (ah->hdrlen + 2) << 2;
 
+       if (err)
+               goto out;
+
        work_iph = AH_SKB_CB(skb)->tmp;
        auth_data = ah_tmp_auth(work_iph, hdr_len);
        icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
index cbcdd5db31f473f75011c2346345dd752c9a7424..ff54faa756317047af5af661d070ba9a5eb65429 100644 (file)
@@ -44,6 +44,8 @@
 #include <net/protocol.h>
 #include <linux/icmpv6.h>
 
+#include <linux/highmem.h>
+
 struct esp_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
@@ -114,11 +116,40 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
                             __alignof__(struct scatterlist));
 }
 
+static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
+{
+       __be32 *seqhi;
+       struct crypto_aead *aead = x->data;
+       int seqhilen = 0;
+       u8 *iv;
+       struct aead_request *req;
+       struct scatterlist *sg;
+
+       if (x->props.flags & XFRM_STATE_ESN)
+               seqhilen += sizeof(__be32);
+
+       seqhi = esp_tmp_seqhi(tmp);
+       iv = esp_tmp_iv(aead, tmp, seqhilen);
+       req = esp_tmp_req(aead, iv);
+
+       /* Unref skb_frag_pages in the src scatterlist if necessary.
+        * Skip the first sg which comes from skb->data.
+        */
+       if (req->src != req->dst)
+               for (sg = sg_next(req->src); sg; sg = sg_next(sg))
+                       put_page(sg_page(sg));
+}
+
 static void esp_output_done(struct crypto_async_request *base, int err)
 {
        struct sk_buff *skb = base->data;
+       void *tmp;
+       struct dst_entry *dst = skb_dst(skb);
+       struct xfrm_state *x = dst->xfrm;
 
-       kfree(ESP_SKB_CB(skb)->tmp);
+       tmp = ESP_SKB_CB(skb)->tmp;
+       esp_ssg_unref(x, tmp);
+       kfree(tmp);
        xfrm_output_resume(skb, err);
 }
 
@@ -138,6 +169,27 @@ static void esp_output_restore_header(struct sk_buff *skb)
        esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
 }
 
+static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
+                                            struct ip_esp_hdr *esph,
+                                            __be32 *seqhi)
+{
+       struct xfrm_state *x = skb_dst(skb)->xfrm;
+
+       /* For ESN we move the header forward by 4 bytes to
+        * accommodate the high bits.  We will move it back after
+        * encryption.
+        */
+       if ((x->props.flags & XFRM_STATE_ESN)) {
+               esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
+               *seqhi = esph->spi;
+               esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+       }
+
+       esph->spi = x->id.spi;
+
+       return esph;
+}
+
 static void esp_output_done_esn(struct crypto_async_request *base, int err)
 {
        struct sk_buff *skb = base->data;
@@ -146,14 +198,31 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err)
        esp_output_done(base, err);
 }
 
+static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
+{
+       /* Fill padding... */
+       if (tfclen) {
+               memset(tail, 0, tfclen);
+               tail += tfclen;
+       }
+       do {
+               int i;
+               for (i = 0; i < plen - 2; i++)
+                       tail[i] = i + 1;
+       } while (0);
+       tail[plen - 2] = plen - 2;
+       tail[plen - 1] = proto;
+}
+
 static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
 {
        int err;
        struct ip_esp_hdr *esph;
        struct crypto_aead *aead;
        struct aead_request *req;
-       struct scatterlist *sg;
+       struct scatterlist *sg, *dsg;
        struct sk_buff *trailer;
+       struct page *page;
        void *tmp;
        int blksize;
        int clen;
@@ -164,10 +233,13 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
        int nfrags;
        int assoclen;
        int seqhilen;
+       int tailen;
        u8 *iv;
        u8 *tail;
+       u8 *vaddr;
        __be32 *seqhi;
        __be64 seqno;
+       __u8 proto = *skb_mac_header(skb);
 
        /* skb is pure payload to encrypt */
        aead = x->data;
@@ -186,11 +258,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);
        clen = ALIGN(skb->len + 2 + tfclen, blksize);
        plen = clen - skb->len - tfclen;
-
-       err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
-       if (err < 0)
-               goto error;
-       nfrags = err;
+       tailen = tfclen + plen + alen;
 
        assoclen = sizeof(*esph);
        seqhilen = 0;
@@ -200,59 +268,152 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
                assoclen += seqhilen;
        }
 
-       tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
-       if (!tmp) {
-               err = -ENOMEM;
-               goto error;
+       *skb_mac_header(skb) = IPPROTO_ESP;
+       esph = ip_esp_hdr(skb);
+
+       if (!skb_cloned(skb)) {
+               if (tailen <= skb_availroom(skb)) {
+                       nfrags = 1;
+                       trailer = skb;
+                       tail = skb_tail_pointer(trailer);
+
+                       goto skip_cow;
+               } else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
+                          && !skb_has_frag_list(skb)) {
+                       int allocsize;
+                       struct sock *sk = skb->sk;
+                       struct page_frag *pfrag = &x->xfrag;
+
+                       allocsize = ALIGN(tailen, L1_CACHE_BYTES);
+
+                       spin_lock_bh(&x->lock);
+
+                       if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
+                               spin_unlock_bh(&x->lock);
+                               goto cow;
+                       }
+
+                       page = pfrag->page;
+                       get_page(page);
+
+                       vaddr = kmap_atomic(page);
+
+                       tail = vaddr + pfrag->offset;
+
+                       esp_output_fill_trailer(tail, tfclen, plen, proto);
+
+                       kunmap_atomic(vaddr);
+
+                       nfrags = skb_shinfo(skb)->nr_frags;
+
+                       __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
+                                            tailen);
+                       skb_shinfo(skb)->nr_frags = ++nfrags;
+
+                       pfrag->offset = pfrag->offset + allocsize;
+                       nfrags++;
+
+                       skb->len += tailen;
+                       skb->data_len += tailen;
+                       skb->truesize += tailen;
+                       if (sk)
+                               atomic_add(tailen, &sk->sk_wmem_alloc);
+
+                       skb_push(skb, -skb_network_offset(skb));
+
+                       esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+                       esph->spi = x->id.spi;
+
+                       tmp = esp_alloc_tmp(aead, nfrags + 2, seqhilen);
+                       if (!tmp) {
+                               spin_unlock_bh(&x->lock);
+                               err = -ENOMEM;
+                               goto error;
+                       }
+                       seqhi = esp_tmp_seqhi(tmp);
+                       iv = esp_tmp_iv(aead, tmp, seqhilen);
+                       req = esp_tmp_req(aead, iv);
+                       sg = esp_req_sg(aead, req);
+                       dsg = &sg[nfrags];
+
+                       esph = esp_output_set_esn(skb, esph, seqhi);
+
+                       sg_init_table(sg, nfrags);
+                       skb_to_sgvec(skb, sg,
+                                    (unsigned char *)esph - skb->data,
+                                    assoclen + ivlen + clen + alen);
+
+                       allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
+
+                       if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
+                               spin_unlock_bh(&x->lock);
+                               err = -ENOMEM;
+                               goto error;
+                       }
+
+                       skb_shinfo(skb)->nr_frags = 1;
+
+                       page = pfrag->page;
+                       get_page(page);
+                       /* replace page frags in skb with new page */
+                       __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
+                       pfrag->offset = pfrag->offset + allocsize;
+
+                       sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
+                       skb_to_sgvec(skb, dsg,
+                                    (unsigned char *)esph - skb->data,
+                                    assoclen + ivlen + clen + alen);
+
+                       spin_unlock_bh(&x->lock);
+
+                       goto skip_cow2;
+               }
        }
 
-       seqhi = esp_tmp_seqhi(tmp);
-       iv = esp_tmp_iv(aead, tmp, seqhilen);
-       req = esp_tmp_req(aead, iv);
-       sg = esp_req_sg(aead, req);
+cow:
+       err = skb_cow_data(skb, tailen, &trailer);
+       if (err < 0)
+               goto error;
+       nfrags = err;
 
-       /* Fill padding... */
        tail = skb_tail_pointer(trailer);
-       if (tfclen) {
-               memset(tail, 0, tfclen);
-               tail += tfclen;
-       }
-       do {
-               int i;
-               for (i = 0; i < plen - 2; i++)
-                       tail[i] = i + 1;
-       } while (0);
-       tail[plen - 2] = plen - 2;
-       tail[plen - 1] = *skb_mac_header(skb);
-       pskb_put(skb, trailer, clen - skb->len + alen);
+       esph = ip_esp_hdr(skb);
 
+skip_cow:
+       esp_output_fill_trailer(tail, tfclen, plen, proto);
+
+       pskb_put(skb, trailer, clen - skb->len + alen);
        skb_push(skb, -skb_network_offset(skb));
-       esph = ip_esp_hdr(skb);
-       *skb_mac_header(skb) = IPPROTO_ESP;
 
        esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+       esph->spi = x->id.spi;
 
-       aead_request_set_callback(req, 0, esp_output_done, skb);
-
-       /* For ESN we move the header forward by 4 bytes to
-        * accomodate the high bits.  We will move it back after
-        * encryption.
-        */
-       if ((x->props.flags & XFRM_STATE_ESN)) {
-               esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
-               *seqhi = esph->spi;
-               esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
-               aead_request_set_callback(req, 0, esp_output_done_esn, skb);
+       tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
+       if (!tmp) {
+               err = -ENOMEM;
+               goto error;
        }
 
-       esph->spi = x->id.spi;
+       seqhi = esp_tmp_seqhi(tmp);
+       iv = esp_tmp_iv(aead, tmp, seqhilen);
+       req = esp_tmp_req(aead, iv);
+       sg = esp_req_sg(aead, req);
+       dsg = sg;
+
+       esph = esp_output_set_esn(skb, esph, seqhi);
 
        sg_init_table(sg, nfrags);
        skb_to_sgvec(skb, sg,
                     (unsigned char *)esph - skb->data,
                     assoclen + ivlen + clen + alen);
 
-       aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
+skip_cow2:
+       if ((x->props.flags & XFRM_STATE_ESN))
+               aead_request_set_callback(req, 0, esp_output_done_esn, skb);
+       else
+               aead_request_set_callback(req, 0, esp_output_done, skb);
+
+       aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv);
        aead_request_set_ad(req, assoclen);
 
        seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
@@ -278,6 +439,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
                        esp_output_restore_header(skb);
        }
 
+       if (sg != dsg)
+               esp_ssg_unref(x, tmp);
        kfree(tmp);
 
 error:
@@ -343,6 +506,23 @@ static void esp_input_restore_header(struct sk_buff *skb)
        __skb_pull(skb, 4);
 }
 
+static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
+{
+       struct xfrm_state *x = xfrm_input_state(skb);
+       struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data;
+
+       /* For ESN we move the header forward by 4 bytes to
+        * accommodate the high bits.  We will move it back after
+        * decryption.
+        */
+       if ((x->props.flags & XFRM_STATE_ESN)) {
+               esph = (void *)skb_push(skb, 4);
+               *seqhi = esph->spi;
+               esph->spi = esph->seq_no;
+               esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
+       }
+}
+
 static void esp_input_done_esn(struct crypto_async_request *base, int err)
 {
        struct sk_buff *skb = base->data;
@@ -378,14 +558,6 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
                goto out;
        }
 
-       nfrags = skb_cow_data(skb, 0, &trailer);
-       if (nfrags < 0) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       ret = -ENOMEM;
-
        assoclen = sizeof(*esph);
        seqhilen = 0;
 
@@ -394,6 +566,27 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
                assoclen += seqhilen;
        }
 
+       if (!skb_cloned(skb)) {
+               if (!skb_is_nonlinear(skb)) {
+                       nfrags = 1;
+
+                       goto skip_cow;
+               } else if (!skb_has_frag_list(skb)) {
+                       nfrags = skb_shinfo(skb)->nr_frags;
+                       nfrags++;
+
+                       goto skip_cow;
+               }
+       }
+
+       nfrags = skb_cow_data(skb, 0, &trailer);
+       if (nfrags < 0) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+skip_cow:
+       ret = -ENOMEM;
        tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
        if (!tmp)
                goto out;
@@ -404,26 +597,17 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
        req = esp_tmp_req(aead, iv);
        sg = esp_req_sg(aead, req);
 
-       skb->ip_summed = CHECKSUM_NONE;
+       esp_input_set_header(skb, seqhi);
 
-       esph = (struct ip_esp_hdr *)skb->data;
+       sg_init_table(sg, nfrags);
+       skb_to_sgvec(skb, sg, 0, skb->len);
 
-       aead_request_set_callback(req, 0, esp_input_done, skb);
+       skb->ip_summed = CHECKSUM_NONE;
 
-       /* For ESN we move the header forward by 4 bytes to
-        * accomodate the high bits.  We will move it back after
-        * decryption.
-        */
-       if ((x->props.flags & XFRM_STATE_ESN)) {
-               esph = (void *)skb_push(skb, 4);
-               *seqhi = esph->spi;
-               esph->spi = esph->seq_no;
-               esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
+       if ((x->props.flags & XFRM_STATE_ESN))
                aead_request_set_callback(req, 0, esp_input_done_esn, skb);
-       }
-
-       sg_init_table(sg, nfrags);
-       skb_to_sgvec(skb, sg, 0, skb->len);
+       else
+               aead_request_set_callback(req, 0, esp_input_done, skb);
 
        aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
        aead_request_set_ad(req, assoclen);
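The rewrite above splits esp6_output() into three paths for placing the ESP trailer: write it into existing tailroom, append it as a fresh page fragment drawn from the per-state page_frag, or fall back to skb_cow_data(). The branch structure, boiled down to a sketch (illustrative, not kernel code):

#include <stdbool.h>

enum tail_strategy { TAIL_IN_PLACE, TAIL_PAGE_FRAG, TAIL_COW };

static enum tail_strategy pick_tail_strategy(bool cloned, int tailroom,
					     int tailen, int nr_frags,
					     int max_frags,
					     bool has_frag_list)
{
	if (!cloned) {
		if (tailen <= tailroom)
			return TAIL_IN_PLACE;
		if (nr_frags < max_frags && !has_frag_list)
			return TAIL_PAGE_FRAG;
	}
	return TAIL_COW;	/* linearize/cow, then fill the trailer */
}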
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
new file mode 100644 (file)
index 0000000..d914eb9
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * IPV6 GSO/GRO offload support
+ * Linux INET implementation
+ *
+ * Copyright (C) 2016 secunet Security Networks AG
+ * Author: Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * ESP GRO support
+ */
+
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <net/protocol.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <net/ip.h>
+#include <net/xfrm.h>
+#include <net/esp.h>
+#include <linux/scatterlist.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <net/ip6_route.h>
+#include <net/ipv6.h>
+#include <linux/icmpv6.h>
+
+static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
+                                        struct sk_buff *skb)
+{
+       int offset = skb_gro_offset(skb);
+       struct xfrm_offload *xo;
+       struct xfrm_state *x;
+       __be32 seq;
+       __be32 spi;
+       int err;
+
+       skb_pull(skb, offset);
+
+       if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
+               goto out;
+
+       err = secpath_set(skb);
+       if (err)
+               goto out;
+
+       if (skb->sp->len == XFRM_MAX_DEPTH)
+               goto out;
+
+       x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
+                             (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
+                             spi, IPPROTO_ESP, AF_INET6);
+       if (!x)
+               goto out;
+
+       skb->sp->xvec[skb->sp->len++] = x;
+       skb->sp->olen++;
+
+       xo = xfrm_offload(skb);
+       if (!xo) {
+               xfrm_state_put(x);
+               goto out;
+       }
+       xo->flags |= XFRM_GRO;
+
+       XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
+       XFRM_SPI_SKB_CB(skb)->family = AF_INET6;
+       XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
+       XFRM_SPI_SKB_CB(skb)->seq = seq;
+
+       /* We don't need to handle errors from xfrm_input, it does all
+        * the error handling and frees the resources on error. */
+       xfrm_input(skb, IPPROTO_ESP, spi, -2);
+
+       return ERR_PTR(-EINPROGRESS);
+out:
+       skb_push(skb, offset);
+       NAPI_GRO_CB(skb)->same_flow = 0;
+       NAPI_GRO_CB(skb)->flush = 1;
+
+       return NULL;
+}
+
+static const struct net_offload esp6_offload = {
+       .callbacks = {
+               .gro_receive = esp6_gro_receive,
+       },
+};
+
+static int __init esp6_offload_init(void)
+{
+       return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
+}
+
+static void __exit esp6_offload_exit(void)
+{
+       inet6_del_offload(&esp6_offload, IPPROTO_ESP);
+}
+
+module_init(esp6_offload_init);
+module_exit(esp6_offload_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
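A gro_receive handler reports its result through the returned pointer: NULL lets normal GRO processing continue, while esp6_gro_receive() hands the packet to xfrm_input() and returns ERR_PTR(-EINPROGRESS) to mark it consumed. The encoding is just a negative errno stuffed into a pointer value; a userspace illustration:

#include <errno.h>
#include <stdio.h>

static void *err_ptr(long err)		{ return (void *)err; }
static long  ptr_err(const void *ptr)	{ return (long)ptr; }

int main(void)
{
	void *ret = err_ptr(-EINPROGRESS);	/* "consumed by xfrm_input" */

	printf("handler signalled errno %ld\n", -ptr_err(ret));
	return 0;
}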
index 3036f665e6c87f700a7e8fde5518d649413f9940..230b5aac9f03eadb775eea9cb3d9b4cce571cc32 100644 (file)
@@ -110,19 +110,17 @@ static const struct inet6_protocol icmpv6_protocol = {
        .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
+/* Called with BH disabled */
 static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
 {
        struct sock *sk;
 
-       local_bh_disable();
-
        sk = icmpv6_sk(net);
        if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
                /* This can happen if the output path (f.e. SIT or
                 * ip6ip6 tunnel) signals dst_link_failure() for an
                 * outgoing ICMP6 packet.
                 */
-               local_bh_enable();
                return NULL;
        }
        return sk;
@@ -130,7 +128,7 @@ static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
 
 static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
 {
-       spin_unlock_bh(&sk->sk_lock.slock);
+       spin_unlock(&sk->sk_lock.slock);
 }
 
 /*
@@ -168,6 +166,30 @@ static bool is_ineligible(const struct sk_buff *skb)
        return false;
 }
 
+static bool icmpv6_mask_allow(int type)
+{
+       /* Informational messages are not limited. */
+       if (type & ICMPV6_INFOMSG_MASK)
+               return true;
+
+       /* Do not limit pmtu discovery, it would break it. */
+       if (type == ICMPV6_PKT_TOOBIG)
+               return true;
+
+       return false;
+}
+
+static bool icmpv6_global_allow(int type)
+{
+       if (icmpv6_mask_allow(type))
+               return true;
+
+       if (icmp_global_allow())
+               return true;
+
+       return false;
+}
+
 /*
  * Check the ICMP output rate limit
  */
@@ -178,12 +200,7 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
        struct dst_entry *dst;
        bool res = false;
 
-       /* Informational messages are not limited. */
-       if (type & ICMPV6_INFOMSG_MASK)
-               return true;
-
-       /* Do not limit pmtu discovery, it would break it. */
-       if (type == ICMPV6_PKT_TOOBIG)
+       if (icmpv6_mask_allow(type))
                return true;
 
        /*
@@ -200,20 +217,16 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
        } else {
                struct rt6_info *rt = (struct rt6_info *)dst;
                int tmo = net->ipv6.sysctl.icmpv6_time;
+               struct inet_peer *peer;
 
                /* Give more bandwidth to wider prefixes. */
                if (rt->rt6i_dst.plen < 128)
                        tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
 
-               if (icmp_global_allow()) {
-                       struct inet_peer *peer;
-
-                       peer = inet_getpeer_v6(net->ipv6.peers,
-                                              &fl6->daddr, 1);
-                       res = inet_peer_xrlim_allow(peer, tmo);
-                       if (peer)
-                               inet_putpeer(peer);
-               }
+               peer = inet_getpeer_v6(net->ipv6.peers, &fl6->daddr, 1);
+               res = inet_peer_xrlim_allow(peer, tmo);
+               if (peer)
+                       inet_putpeer(peer);
        }
        dst_release(dst);
        return res;
@@ -474,6 +487,13 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
                return;
        }
 
+       /* Needed by both icmp_global_allow and icmpv6_xmit_lock */
+       local_bh_disable();
+
+       /* Check global sysctl_icmp_msgs_per_sec ratelimit */
+       if (!icmpv6_global_allow(type))
+               goto out_bh_enable;
+
        mip6_addr_swap(skb);
 
        memset(&fl6, 0, sizeof(fl6));
@@ -492,7 +512,8 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
 
        sk = icmpv6_xmit_lock(net);
        if (!sk)
-               return;
+               goto out_bh_enable;
+
        sk->sk_mark = mark;
        np = inet6_sk(sk);
 
@@ -552,6 +573,8 @@ out_dst_release:
        dst_release(dst);
 out:
        icmpv6_xmit_unlock(sk);
+out_bh_enable:
+       local_bh_enable();
 }
 
 /* Slightly more convenient version of icmp6_send.
@@ -665,9 +688,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        fl6.flowi6_uid = sock_net_uid(net, NULL);
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
+       local_bh_disable();
        sk = icmpv6_xmit_lock(net);
        if (!sk)
-               return;
+               goto out_bh_enable;
        sk->sk_mark = mark;
        np = inet6_sk(sk);
 
@@ -709,6 +733,8 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        dst_release(dst);
 out:
        icmpv6_xmit_unlock(sk);
+out_bh_enable:
+       local_bh_enable();
 }
 
 void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
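The reordering above makes icmp6_send() consult the cheap global sysctl_icmp_msgs_per_sec budget before doing any routing work, and only then apply the per-destination inet_peer limit; BH stays disabled around the whole section because both icmp_global_allow() and the xmit lock require it. The two-stage check as a standalone sketch with stubbed token buckets:

#include <stdbool.h>

/* stubs standing in for the kernel's token buckets */
static bool global_tokens_left(void) { return true; }
static bool peer_tokens_left(void)   { return true; }

static bool exempt(int type)
{
	return (type & 0x80) ||	/* informational messages */
	       type == 2;	/* ICMPV6_PKT_TOOBIG */
}

static bool may_send_icmpv6(int type)
{
	if (exempt(type))
		return true;
	if (!global_tokens_left())	/* cheap global check first */
		return false;
	return peer_tokens_left();	/* per-destination check */
}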
index 13b5e85fe0d56471ab792b1e75801def3800ee9c..ce1aae4a7fc8fa9daf9f1502d0ac77d2be2aee31 100644 (file)
@@ -115,7 +115,7 @@ static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
        [ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, },
 };
 
-static int ila_build_state(struct net_device *dev, struct nlattr *nla,
+static int ila_build_state(struct nlattr *nla,
                           unsigned int family, const void *cfg,
                           struct lwtunnel_state **ts)
 {
index 75c308239243305a508e62d01814d88da7248018..9a31d13bf180d1e58a4a4b0a65750f377f963c52 100644 (file)
 #include <net/inet6_connection_sock.h>
 #include <net/sock_reuseport.h>
 
-int inet6_csk_bind_conflict(const struct sock *sk,
-                           const struct inet_bind_bucket *tb, bool relax,
-                           bool reuseport_ok)
-{
-       const struct sock *sk2;
-       bool reuse = !!sk->sk_reuse;
-       bool reuseport = !!sk->sk_reuseport && reuseport_ok;
-       kuid_t uid = sock_i_uid((struct sock *)sk);
-
-       /* We must walk the whole port owner list in this case. -DaveM */
-       /*
-        * See comment in inet_csk_bind_conflict about sock lookup
-        * vs net namespaces issues.
-        */
-       sk_for_each_bound(sk2, &tb->owners) {
-               if (sk != sk2 &&
-                   (!sk->sk_bound_dev_if ||
-                    !sk2->sk_bound_dev_if ||
-                    sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
-                       if ((!reuse || !sk2->sk_reuse ||
-                            sk2->sk_state == TCP_LISTEN) &&
-                           (!reuseport || !sk2->sk_reuseport ||
-                            rcu_access_pointer(sk->sk_reuseport_cb) ||
-                            (sk2->sk_state != TCP_TIME_WAIT &&
-                             !uid_eq(uid,
-                                     sock_i_uid((struct sock *)sk2))))) {
-                               if (ipv6_rcv_saddr_equal(sk, sk2, true))
-                                       break;
-                       }
-                       if (!relax && reuse && sk2->sk_reuse &&
-                           sk2->sk_state != TCP_LISTEN &&
-                           ipv6_rcv_saddr_equal(sk, sk2, true))
-                               break;
-               }
-       }
-
-       return sk2 != NULL;
-}
-EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
-
 struct dst_entry *inet6_csk_route_req(const struct sock *sk,
                                      struct flowi6 *fl6,
                                      const struct request_sock *req,
index 02761c9fe43eb306fa1887e577130e5abd2aa2b8..d0900918a19e5e5cec30831b64c764057892162d 100644 (file)
@@ -268,54 +268,10 @@ int inet6_hash(struct sock *sk)
 
        if (sk->sk_state != TCP_CLOSE) {
                local_bh_disable();
-               err = __inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
+               err = __inet_hash(sk, NULL);
                local_bh_enable();
        }
 
        return err;
 }
 EXPORT_SYMBOL_GPL(inet6_hash);
-
-/* match_wildcard == true:  IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6
- *                          only, and any IPv4 addresses if not IPv6 only
- * match_wildcard == false: addresses must be exactly the same, i.e.
- *                          IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
- *                          and 0.0.0.0 equals to 0.0.0.0 only
- */
-int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
-                        bool match_wildcard)
-{
-       const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
-       int sk2_ipv6only = inet_v6_ipv6only(sk2);
-       int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
-       int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
-
-       /* if both are mapped, treat as IPv4 */
-       if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
-               if (!sk2_ipv6only) {
-                       if (sk->sk_rcv_saddr == sk2->sk_rcv_saddr)
-                               return 1;
-                       if (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr)
-                               return match_wildcard;
-               }
-               return 0;
-       }
-
-       if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
-               return 1;
-
-       if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
-           !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
-               return 1;
-
-       if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
-           !(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED))
-               return 1;
-
-       if (sk2_rcv_saddr6 &&
-           ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6))
-               return 1;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(ipv6_rcv_saddr_equal);
index ef548520452253dd5a39f6c24388b8fa1ddaa580..e4266746e4a2af67562bb05dd50ace54e55d3edd 100644 (file)
@@ -318,6 +318,16 @@ static int fib6_dump_node(struct fib6_walker *w)
                        w->leaf = rt;
                        return 1;
                }
+
+               /* Multipath routes are dumped in one route with the
+                * RTA_MULTIPATH attribute. Jump 'rt' to point to the
+                * last sibling of this route (no need to dump the
+                * sibling routes again)
+                */
+               if (rt->rt6i_nsiblings)
+                       rt = list_last_entry(&rt->rt6i_siblings,
+                                            struct rt6_info,
+                                            rt6i_siblings);
        }
        w->leaf = NULL;
        return 0;
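Since all members of a multipath route now go out in a single RTA_MULTIPATH message, the dump walker advances past the sibling chain after emitting it once. With the circular sibling list flattened to a count-and-next view, the jump amounts to (illustrative):

struct rt { struct rt *next; int nsiblings; };

static struct rt *skip_siblings(struct rt *rt)
{
	int n = rt->nsiblings;

	/* land on the last sibling; the caller's loop resumes after it */
	while (n-- > 0 && rt->next)
		rt = rt->next;
	return rt;
}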
@@ -746,6 +756,9 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
        u16 nlflags = NLM_F_EXCL;
        int err;
 
+       if (info->nlh && (info->nlh->nlmsg_flags & NLM_F_APPEND))
+               nlflags |= NLM_F_APPEND;
+
        ins = &fn->leaf;
 
        for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) {
@@ -868,7 +881,8 @@ add:
                *ins = rt;
                rt->rt6i_node = fn;
                atomic_inc(&rt->rt6i_ref);
-               inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
+               if (!info->skip_notify)
+                       inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
                info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
 
                if (!(fn->fn_flags & RTN_RTINFO)) {
@@ -894,7 +908,8 @@ add:
                rt->rt6i_node = fn;
                rt->dst.rt6_next = iter->dst.rt6_next;
                atomic_inc(&rt->rt6i_ref);
-               inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
+               if (!info->skip_notify)
+                       inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
                if (!(fn->fn_flags & RTN_RTINFO)) {
                        info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
                        fn->fn_flags |= RTN_RTINFO;
@@ -1439,7 +1454,8 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
 
        fib6_purge_rt(rt, fn, net);
 
-       inet6_rt_notify(RTM_DELROUTE, rt, info, 0);
+       if (!info->skip_notify)
+               inet6_rt_notify(RTM_DELROUTE, rt, info, 0);
        rt6_release(rt);
 }
 
index 630b73be599977599c0021849fc6eb689cfefad7..6fcb7cb49bb20dcfe518177177e3e0ac1c7d1091 100644 (file)
@@ -486,11 +486,6 @@ drop:
        return 0;
 }
 
-struct ipv6_tel_txoption {
-       struct ipv6_txoptions ops;
-       __u8 dst_opt[8];
-};
-
 static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 {
        return iptunnel_handle_offloads(skb,
@@ -1003,6 +998,9 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
        dev->flags |= IFF_NOARP;
        dev->addr_len = sizeof(struct in6_addr);
        netif_keep_dst(dev);
+       /* This perm addr will be used as interface identifier by IPv6 */
+       dev->addr_assign_type = NET_ADDR_RANDOM;
+       eth_random_addr(dev->perm_addr);
 }
 
 static int ip6gre_tunnel_init_common(struct net_device *dev)
index fc7b4017ba241f9dd39d49bd6258ecd4a16e3a3a..0838e6d01d2e4979559cae63a20ca339a3e2c22c 100644 (file)
@@ -253,7 +253,7 @@ out_unlock:
        rcu_read_unlock();
 
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
index 7cebee58e55b7f6f23279ac2515a69df936b5712..528b3c1f3fdee4314e1c23007ae76333b4af7505 100644 (file)
@@ -119,7 +119,8 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
        if (unlikely(!neigh))
                neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
        if (!IS_ERR(neigh)) {
-               ret = dst_neigh_output(dst, neigh, skb);
+               sock_confirm_neigh(skb, neigh);
+               ret = neigh_output(neigh, skb);
                rcu_read_unlock_bh();
                return ret;
        }
@@ -1149,6 +1150,9 @@ static inline int ip6_ufo_append_data(struct sock *sk,
                skb->protocol = htons(ETH_P_IPV6);
                skb->csum = 0;
 
+               if (flags & MSG_CONFIRM)
+                       skb_set_dst_pending_confirm(skb, 1);
+
                __skb_queue_tail(queue, skb);
        } else if (skb_is_gso(skb)) {
                goto append;
@@ -1521,6 +1525,9 @@ alloc_new_skb:
                        exthdrlen = 0;
                        dst_exthdrlen = 0;
 
+                       if ((flags & MSG_CONFIRM) && !skb_prev)
+                               skb_set_dst_pending_confirm(skb, 1);
+
                        /*
                         * Put the packet on the pending queue
                         */
index d82042c8d8fd4b38eac12a58eb634438aab726a7..c795fee372c4992cf6c391330a85d7d017a7a3a5 100644 (file)
@@ -49,6 +49,7 @@
 #include <net/xfrm.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
+#include <linux/etherdevice.h>
 
 #define IP6_VTI_HASH_SIZE_SHIFT  5
 #define IP6_VTI_HASH_SIZE (1 << IP6_VTI_HASH_SIZE_SHIFT)
@@ -842,6 +843,9 @@ static void vti6_dev_setup(struct net_device *dev)
        dev->flags |= IFF_NOARP;
        dev->addr_len = sizeof(struct in6_addr);
        netif_keep_dst(dev);
+       /* This perm addr will be used as interface identifier by IPv6 */
+       dev->addr_assign_type = NET_ADDR_RANDOM;
+       eth_random_addr(dev->perm_addr);
 }
 
 /**
index 604d8953c775966872969a9a5d828d1d9d99067a..babaf3ec2742bd9ba4a16ad0ec71211a5ed700e1 100644 (file)
@@ -2243,8 +2243,10 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
        int ct;
 
        /* If cache is unresolved, don't try to parse IIF and OIF */
-       if (c->mf6c_parent >= MAXMIFS)
+       if (c->mf6c_parent >= MAXMIFS) {
+               rtm->rtm_flags |= RTNH_F_UNRESOLVED;
                return -ENOENT;
+       }
 
        if (MIF_EXISTS(mrt, c->mf6c_parent) &&
            nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
@@ -2286,7 +2288,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
 }
 
 int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
-                   int nowait, u32 portid)
+                   u32 portid)
 {
        int err;
        struct mr6_table *mrt;
@@ -2313,11 +2315,6 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
                struct net_device *dev;
                int vif;
 
-               if (nowait) {
-                       read_unlock(&mrt_lock);
-                       return -EAGAIN;
-               }
-
                dev = skb->dev;
                if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
                        read_unlock(&mrt_lock);
@@ -2355,7 +2352,7 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
                return err;
        }
 
-       if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
+       if (rtm->rtm_flags & RTM_F_NOTIFY)
                cache->mfc_flags |= MFC_NOTIFY;
 
        err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
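
The ip6mr hunks drop the nowait argument: as the rt6_fill_node() hunk further down shows, every remaining caller passed nowait=0, so the -EAGAIN escape hatch was dead code, and unresolved cache entries now surface to userspace as RTNH_F_UNRESOLVED in rtm_flags rather than a bare error. The caller contract after the change, as route.c applies it:

    int err = ip6mr_get_route(net, skb, rtm, portid);

    if (err == 0)       /* route fully filled in by ip6mr */
            return 0;
    if (err < 0)        /* includes -ENOENT: rtm_flags now carries
                         * RTNH_F_UNRESOLVED for unresolved entries */
            goto nla_put_failure;
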
index ee97c44e2aa0074df4bf22790aaf759e3a1390b6..a531ba032b85da42d3b5eefcbd9e2e624b5a2868 100644 (file)
@@ -595,16 +595,24 @@ done:
 
                if (val) {
                        struct net_device *dev;
+                       int midx;
 
-                       if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
-                               goto e_inval;
+                       rcu_read_lock();
 
-                       dev = dev_get_by_index(net, val);
+                       dev = dev_get_by_index_rcu(net, val);
                        if (!dev) {
+                               rcu_read_unlock();
                                retv = -ENODEV;
                                break;
                        }
-                       dev_put(dev);
+                       midx = l3mdev_master_ifindex_rcu(dev);
+
+                       rcu_read_unlock();
+
+                       if (sk->sk_bound_dev_if &&
+                           sk->sk_bound_dev_if != val &&
+                           (!midx || midx != sk->sk_bound_dev_if))
+                               goto e_inval;
                }
                np->mcast_oif = val;
                retv = 0;
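
The setsockopt hunk above relaxes IPV6_MULTICAST_IF for VRF setups: a socket bound to an l3mdev master may now select one of that master's slave interfaces as its multicast egress, and the device lookup moves under RCU instead of taking and dropping a reference. The accepted combinations, restated as a sketch (mcast_oif_allowed is a hypothetical helper, not kernel code):

    #include <stdbool.h>

    static bool mcast_oif_allowed(int bound_if, int oif, int oif_master)
    {
            if (!bound_if)
                    return true;    /* socket not bound to any device */
            if (bound_if == oif)
                    return true;    /* same device it is bound to */
            /* new case: oif is a slave of the VRF the socket is bound to */
            return oif_master && oif_master == bound_if;
    }
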
index 25a022d41a7035f5350339b17c3cf68517496f85..1e15c54fd5e27dbafaf4d4b0b3de9008a724559f 100644 (file)
@@ -855,10 +855,6 @@ copy_entries_to_user(unsigned int total_size,
                return PTR_ERR(counters);
 
        loc_cpu_entry = private->entries;
-       if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
-               ret = -EFAULT;
-               goto free_counters;
-       }
 
        /* FIXME: use iterator macros --RR */
        /* ... then go back and fix counters and names */
@@ -868,6 +864,10 @@ copy_entries_to_user(unsigned int total_size,
                const struct xt_entry_target *t;
 
                e = (struct ip6t_entry *)(loc_cpu_entry + off);
+               if (copy_to_user(userptr + off, e, sizeof(*e))) {
+                       ret = -EFAULT;
+                       goto free_counters;
+               }
                if (copy_to_user(userptr + off
                                 + offsetof(struct ip6t_entry, counters),
                                 &counters[num],
@@ -881,23 +881,14 @@ copy_entries_to_user(unsigned int total_size,
                     i += m->u.match_size) {
                        m = (void *)e + i;
 
-                       if (copy_to_user(userptr + off + i
-                                        + offsetof(struct xt_entry_match,
-                                                   u.user.name),
-                                        m->u.kernel.match->name,
-                                        strlen(m->u.kernel.match->name)+1)
-                           != 0) {
+                       if (xt_match_to_user(m, userptr + off + i)) {
                                ret = -EFAULT;
                                goto free_counters;
                        }
                }
 
                t = ip6t_get_target_c(e);
-               if (copy_to_user(userptr + off + e->target_offset
-                                + offsetof(struct xt_entry_target,
-                                           u.user.name),
-                                t->u.kernel.target->name,
-                                strlen(t->u.kernel.target->name)+1) != 0) {
+               if (xt_target_to_user(t, userptr + off + e->target_offset)) {
                        ret = -EFAULT;
                        goto free_counters;
                }
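
The copy_entries_to_user() rework above stops bulk-copying the whole rule blob to userspace. That blob interleaves user-visible configuration with kernel-private state (pointers, computed fields), so the old pattern of copying everything and then patching names back left kernel data exposed in between. The new xt_match_to_user()/xt_target_to_user() helpers, paired with the ->usersize annotations below, copy only each extension's declared user-visible prefix. A conceptual sketch of that boundary-respecting copy (copy_one_to_user is illustrative, not the actual helper):

    #include <linux/uaccess.h>

    static int copy_one_to_user(void __user *dst, const void *src,
                                size_t usersize, size_t totalsize)
    {
            /* copy the user-visible prefix ... */
            if (copy_to_user(dst, src, usersize))
                    return -EFAULT;
            /* ... and zero the kernel-private tail instead of leaking it */
            if (usersize < totalsize &&
                clear_user(dst + usersize, totalsize - usersize))
                    return -EFAULT;
            return 0;
    }
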
index 590f767db5d4df887c201597ee22f395d8a6869b..a379d2f79b19f00b1053dc4bc21bbd1638546d4f 100644 (file)
@@ -112,6 +112,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
                .table          = "mangle",
                .target         = ip6t_snpt_tg,
                .targetsize     = sizeof(struct ip6t_npt_tginfo),
+               .usersize       = offsetof(struct ip6t_npt_tginfo, adjustment),
                .checkentry     = ip6t_npt_checkentry,
                .family         = NFPROTO_IPV6,
                .hooks          = (1 << NF_INET_LOCAL_IN) |
@@ -123,6 +124,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
                .table          = "mangle",
                .target         = ip6t_dnpt_tg,
                .targetsize     = sizeof(struct ip6t_npt_tginfo),
+               .usersize       = offsetof(struct ip6t_npt_tginfo, adjustment),
                .checkentry     = ip6t_npt_checkentry,
                .family         = NFPROTO_IPV6,
                .hooks          = (1 << NF_INET_PRE_ROUTING) |
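
Here ->usersize marks where the user-supplied configuration ends: adjustment is computed by the kernel at checkentry time, so cutting the copy-back off at offsetof(..., adjustment) keeps it kernel-private. The layout this refers to, as I recall it from the uapi header (verify against include/uapi/linux/netfilter_ipv6/ip6t_NPT.h):

    struct ip6t_npt_tginfo {
            union nf_inet_addr  src_pfx;      /* user-supplied */
            union nf_inet_addr  dst_pfx;      /* user-supplied */
            __u8                src_pfx_len;  /* user-supplied */
            __u8                dst_pfx_len;  /* user-supplied */
            __sum16             adjustment;   /* kernel-computed checksum
                                               * delta: not copied back */
    };
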
index 98c8dd38575a35c0c3d243378cd516f23ca8d62d..4ef1ddd4bbbd813ff8e6ed46275ecdf48d5ff9a8 100644 (file)
@@ -71,8 +71,7 @@ synproxy_send_tcp(struct net *net,
        skb_dst_set(nskb, dst);
 
        if (nfct) {
-               nskb->nfct = nfct;
-               nskb->nfctinfo = ctinfo;
+               nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo);
                nf_conntrack_get(nfct);
        }
 
@@ -121,8 +120,8 @@ synproxy_send_client_synack(struct net *net,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
-                         niph, nth, tcp_hdr_size);
+       synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
+                         IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size);
 }
 
 static void
@@ -244,8 +243,8 @@ synproxy_send_client_ack(struct net *net,
 
        synproxy_build_options(nth, opts);
 
-       synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
-                         niph, nth, tcp_hdr_size);
+       synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
+                         IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size);
 }
 
 static bool
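
The SYNPROXY hunks above, and the conntrack/defrag/dup files that follow, replace direct skb->nfct / skb->nfctinfo pokes with the new skb_nfct()/nf_ct_set() accessors. At this point the helpers are thin wrappers; the payoff is that conntrack state in the skb gets a single owner for its layout, so a later patch can pack pointer and ctinfo into one word without touching every caller again. Roughly the shape they were introduced with (a sketch from memory, not verbatim):

    static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
    {
            return skb->nfct;
    }

    static inline void nf_ct_set(struct sk_buff *skb, struct nf_conn *ct,
                                 enum ip_conntrack_info info)
    {
            skb->nfct     = ct ? &ct->ct_general : NULL;
            skb->nfctinfo = info;
    }
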
index f5a61bc3ec2b3b5bfd9ce75c3646a37598b9b71f..d2c2ccbfbe728f8ff2d7bc42c5e7feb2ae4ad97b 100644 (file)
@@ -145,15 +145,15 @@ static int
 icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
                     struct sk_buff *skb,
                     unsigned int icmp6off,
-                    enum ip_conntrack_info *ctinfo,
                     unsigned int hooknum)
 {
        struct nf_conntrack_tuple intuple, origtuple;
        const struct nf_conntrack_tuple_hash *h;
        const struct nf_conntrack_l4proto *inproto;
+       enum ip_conntrack_info ctinfo;
        struct nf_conntrack_zone tmp;
 
-       NF_CT_ASSERT(skb->nfct == NULL);
+       NF_CT_ASSERT(!skb_nfct(skb));
 
        /* Are they talking about one of our connections? */
        if (!nf_ct_get_tuplepr(skb,
@@ -176,7 +176,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
                return -NF_ACCEPT;
        }
 
-       *ctinfo = IP_CT_RELATED;
+       ctinfo = IP_CT_RELATED;
 
        h = nf_conntrack_find_get(net, nf_ct_zone_tmpl(tmpl, skb, &tmp),
                                  &intuple);
@@ -185,19 +185,18 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
                return -NF_ACCEPT;
        } else {
                if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
-                       *ctinfo += IP_CT_IS_REPLY;
+                       ctinfo += IP_CT_IS_REPLY;
        }
 
        /* Update skb to refer to this connection */
-       skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
-       skb->nfctinfo = *ctinfo;
+       nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo);
        return NF_ACCEPT;
 }
 
 static int
 icmpv6_error(struct net *net, struct nf_conn *tmpl,
             struct sk_buff *skb, unsigned int dataoff,
-            enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
+            u8 pf, unsigned int hooknum)
 {
        const struct icmp6hdr *icmp6h;
        struct icmp6hdr _ih;
@@ -222,9 +221,8 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
        type = icmp6h->icmp6_type - 130;
        if (type >= 0 && type < sizeof(noct_valid_new) &&
            noct_valid_new[type]) {
-               skb->nfct = &nf_ct_untracked_get()->ct_general;
-               skb->nfctinfo = IP_CT_NEW;
-               nf_conntrack_get(skb->nfct);
+               nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW);
+               nf_conntrack_get(skb_nfct(skb));
                return NF_ACCEPT;
        }
 
@@ -232,7 +230,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
        if (icmp6h->icmp6_type >= 128)
                return NF_ACCEPT;
 
-       return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum);
+       return icmpv6_error_message(net, tmpl, skb, dataoff, hooknum);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
index 8e0bdd0587871f03aa949b0eabc4d6ec9740b0cc..ada60d1a991b7c2e421884f0a8c4361d6f81f10a 100644 (file)
@@ -37,7 +37,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
 {
        u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-       if (skb->nfct) {
+       if (skb_nfct(skb)) {
                enum ip_conntrack_info ctinfo;
                const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
@@ -61,7 +61,7 @@ static unsigned int ipv6_defrag(void *priv,
 
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
        /* Previously seen (loopback)?  */
-       if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
+       if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb)))
                return NF_ACCEPT;
 #endif
 
index 4a84b5ad9ecbb74b29ef509d00fbda60868833cd..888ecd106e5f37ba6c9e93bf1d1f161ddbaa280c 100644 (file)
@@ -57,10 +57,9 @@ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
                return;
 
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-       nf_conntrack_put(skb->nfct);
-       skb->nfct     = &nf_ct_untracked_get()->ct_general;
-       skb->nfctinfo = IP_CT_NEW;
-       nf_conntrack_get(skb->nfct);
+       nf_reset(skb);
+       nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW);
+       nf_conntrack_get(skb_nfct(skb));
 #endif
        if (hooknum == NF_INET_PRE_ROUTING ||
            hooknum == NF_INET_LOCAL_IN) {
index 57d86066a13bc567d0526da14b13173657acac9c..055c51b80f5dd1fa37873a665bbe3810d3f17c3e 100644 (file)
@@ -351,7 +351,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
        struct nf_log_buf *m;
 
        /* FIXME: Disabled from containers until syslog ns is supported */
-       if (!net_eq(net, &init_net))
+       if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns)
                return;
 
        m = nf_log_buf_open();
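
The one-line change above wires this log backend to the new nf_log_all_netns switch: packet logging from non-initial network namespaces stays disabled by default (log lines land in the host's syslog, which a container could spam or spoof), but an init-namespace admin can now opt in via the net.netfilter.nf_log_all_netns sysctl. The gate the log backends now share, in sketch form:

    #include <net/net_namespace.h>

    static bool log_allowed(const struct net *net)
    {
            /* init_net may always log; other namespaces only when the
             * (init-ns-only) nf_log_all_netns sysctl is enabled */
            return net_eq(net, &init_net) || sysctl_nf_log_all_netns;
    }
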
index e1f8b34d7a2ef8fb232826747d2ffe6652c1cb06..9b522fa90e6d8f4a87ebed7cf574a36ceea89c61 100644 (file)
@@ -126,12 +126,6 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                return PTR_ERR(dst);
        rt = (struct rt6_info *) dst;
 
-       np = inet6_sk(sk);
-       if (!np) {
-               err = -EBADF;
-               goto dst_err_out;
-       }
-
        if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
                fl6.flowi6_oif = np->mcast_oif;
        else if (!fl6.flowi6_oif)
@@ -166,7 +160,6 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        }
        release_sock(sk);
 
-dst_err_out:
        dst_release(dst);
 
        if (err)
index ea89073c824747f185beb7da0f4aab6b74832149..f174e76e6505d4045e940c9fceef765d2aaa937d 100644 (file)
@@ -654,6 +654,9 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
 
        skb->ip_summed = CHECKSUM_NONE;
 
+       if (flags & MSG_CONFIRM)
+               skb_set_dst_pending_confirm(skb, 1);
+
        skb->transport_header = skb->network_header;
        err = memcpy_from_msg(iph, msg, length);
        if (err)
@@ -934,7 +937,8 @@ out:
        txopt_put(opt_to_free);
        return err < 0 ? err : len;
 do_confirm:
-       dst_confirm(dst);
+       if (msg->msg_flags & MSG_PROBE)
+               dst_confirm_neigh(dst, &fl6.daddr);
        if (!(msg->msg_flags & MSG_PROBE) || len)
                goto back_from_confirm;
        err = 0;
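
Same theme as the ip6_output.c changes earlier: dst_confirm() used to mark the dst entry, which could end up confirming the wrong neighbour after a route change, so confirmation is now aimed at a concrete destination address. Payload-carrying MSG_CONFIRM sends defer it via the skb flag; an explicit MSG_PROBE confirms immediately via dst_confirm_neigh(). The two paths recombined into one hypothetical helper (schematic only, not a kernel function):

    static void confirm_on_send(struct sk_buff *skb, struct dst_entry *dst,
                                const struct in6_addr *daddr, int flags)
    {
            if (!(flags & MSG_CONFIRM))
                    return;
            if (flags & MSG_PROBE)
                    dst_confirm_neigh(dst, daddr);          /* confirm now */
            else
                    skb_set_dst_pending_confirm(skb, 1);    /* at output */
    }
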
index 7ea85370c11c81e8743e6a3086a32a0011f35d17..f54f4265b37f291ea10c8f67a45a243d2095074c 100644 (file)
@@ -98,6 +98,12 @@ static void          rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
                                        struct sk_buff *skb);
 static void            rt6_dst_from_metrics_check(struct rt6_info *rt);
 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
+static size_t rt6_nlmsg_size(struct rt6_info *rt);
+static int rt6_fill_node(struct net *net,
+                        struct sk_buff *skb, struct rt6_info *rt,
+                        struct in6_addr *dst, struct in6_addr *src,
+                        int iif, int type, u32 portid, u32 seq,
+                        unsigned int flags);
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
 static struct rt6_info *rt6_add_route_info(struct net *net,
@@ -217,6 +223,21 @@ static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
        return neigh_create(&nd_tbl, daddr, dst->dev);
 }
 
+static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
+{
+       struct net_device *dev = dst->dev;
+       struct rt6_info *rt = (struct rt6_info *)dst;
+
+       daddr = choose_neigh_daddr(rt, NULL, daddr);
+       if (!daddr)
+               return;
+       if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
+               return;
+       if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
+               return;
+       __ipv6_confirm_neigh(dev, daddr);
+}
+
 static struct dst_ops ip6_dst_ops_template = {
        .family                 =       AF_INET6,
        .gc                     =       ip6_dst_gc,
@@ -233,6 +254,7 @@ static struct dst_ops ip6_dst_ops_template = {
        .redirect               =       rt6_do_redirect,
        .local_out              =       __ip6_local_out,
        .neigh_lookup           =       ip6_neigh_lookup,
+       .confirm_neigh          =       ip6_confirm_neigh,
 };
 
 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
@@ -1359,6 +1381,7 @@ static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
                                 const struct ipv6hdr *iph, u32 mtu)
 {
+       const struct in6_addr *daddr, *saddr;
        struct rt6_info *rt6 = (struct rt6_info *)dst;
 
        if (rt6->rt6i_flags & RTF_LOCAL)
@@ -1367,26 +1390,26 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
        if (dst_metric_locked(dst, RTAX_MTU))
                return;
 
-       dst_confirm(dst);
+       if (iph) {
+               daddr = &iph->daddr;
+               saddr = &iph->saddr;
+       } else if (sk) {
+               daddr = &sk->sk_v6_daddr;
+               saddr = &inet6_sk(sk)->saddr;
+       } else {
+               daddr = NULL;
+               saddr = NULL;
+       }
+       dst_confirm_neigh(dst, daddr);
        mtu = max_t(u32, mtu, IPV6_MIN_MTU);
        if (mtu >= dst_mtu(dst))
                return;
 
        if (!rt6_cache_allowed_for_pmtu(rt6)) {
                rt6_do_update_pmtu(rt6, mtu);
-       } else {
-               const struct in6_addr *daddr, *saddr;
+       } else if (daddr) {
                struct rt6_info *nrt6;
 
-               if (iph) {
-                       daddr = &iph->daddr;
-                       saddr = &iph->saddr;
-               } else if (sk) {
-                       daddr = &sk->sk_v6_daddr;
-                       saddr = &inet6_sk(sk)->saddr;
-               } else {
-                       return;
-               }
                nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
                if (nrt6) {
                        rt6_do_update_pmtu(nrt6, mtu);
@@ -1897,7 +1920,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
        if (cfg->fc_encap) {
                struct lwtunnel_state *lwtstate;
 
-               err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+               err = lwtunnel_build_state(cfg->fc_encap_type,
                                           cfg->fc_encap, AF_INET6, cfg,
                                           &lwtstate);
                if (err)
@@ -2143,6 +2166,54 @@ int ip6_del_rt(struct rt6_info *rt)
        return __ip6_del_rt(rt, &info);
 }
 
+static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
+{
+       struct nl_info *info = &cfg->fc_nlinfo;
+       struct sk_buff *skb = NULL;
+       struct fib6_table *table;
+       int err;
+
+       table = rt->rt6i_table;
+       write_lock_bh(&table->tb6_lock);
+
+       if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) {
+               struct rt6_info *sibling, *next_sibling;
+
+               /* prefer to send a single notification with all hops */
+               skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
+               if (skb) {
+                       u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
+
+                       if (rt6_fill_node(info->nl_net, skb, rt,
+                                         NULL, NULL, 0, RTM_DELROUTE,
+                                         info->portid, seq, 0) < 0) {
+                               kfree_skb(skb);
+                               skb = NULL;
+                       } else
+                               info->skip_notify = 1;
+               }
+
+               list_for_each_entry_safe(sibling, next_sibling,
+                                        &rt->rt6i_siblings,
+                                        rt6i_siblings) {
+                       err = fib6_del(sibling, info);
+                       if (err)
+                               goto out;
+               }
+       }
+
+       err = fib6_del(rt, info);
+out:
+       write_unlock_bh(&table->tb6_lock);
+       ip6_rt_put(rt);
+
+       if (skb) {
+               rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV6_ROUTE,
+                           info->nlh, gfp_any());
+       }
+       return err;
+}
+
 static int ip6_route_del(struct fib6_config *cfg)
 {
        struct fib6_table *table;
@@ -2179,7 +2250,11 @@ static int ip6_route_del(struct fib6_config *cfg)
                        dst_hold(&rt->dst);
                        read_unlock_bh(&table->tb6_lock);
 
-                       return __ip6_del_rt(rt, &cfg->fc_nlinfo);
+                       /* if gateway was specified only delete the one hop */
+                       if (cfg->fc_flags & RTF_GATEWAY)
+                               return __ip6_del_rt(rt, &cfg->fc_nlinfo);
+
+                       return __ip6_del_rt_siblings(rt, cfg);
                }
        }
        read_unlock_bh(&table->tb6_lock);
@@ -2258,7 +2333,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
         * Look, redirects are sent only in response to data packets,
         * so that this nexthop apparently is reachable. --ANK
         */
-       dst_confirm(&rt->dst);
+       dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
 
        neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
        if (!neigh)
@@ -2634,6 +2709,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
        rt->dst.output = ip6_output;
        rt->rt6i_idev = idev;
 
+       rt->rt6i_protocol = RTPROT_KERNEL;
        rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
        if (anycast)
                rt->rt6i_flags |= RTF_ANYCAST;
@@ -2711,13 +2787,16 @@ struct arg_dev_net {
        struct net *net;
 };
 
+/* called with write lock held for table with rt */
 static int fib6_ifdown(struct rt6_info *rt, void *arg)
 {
        const struct arg_dev_net *adn = arg;
        const struct net_device *dev = adn->dev;
 
        if ((rt->dst.dev == dev || !dev) &&
-           rt != adn->net->ipv6.ip6_null_entry)
+           rt != adn->net->ipv6.ip6_null_entry &&
+           (rt->rt6i_nsiblings == 0 ||
+            !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
                return -1;
 
        return 0;
@@ -2948,7 +3027,7 @@ static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
        struct rt6_nh *nh;
 
        list_for_each_entry(nh, rt6_nh_list, next) {
-               pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6 nexthop %pI6 ifi %d\n",
+               pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
                        &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
                        nh->r_cfg.fc_ifindex);
        }
@@ -2987,13 +3066,37 @@ static int ip6_route_info_append(struct list_head *rt6_nh_list,
        return 0;
 }
 
+static void ip6_route_mpath_notify(struct rt6_info *rt,
+                                  struct rt6_info *rt_last,
+                                  struct nl_info *info,
+                                  __u16 nlflags)
+{
+       /* if this is an APPEND route, then rt points to the first route
+        * inserted and rt_last points to last route inserted. Userspace
+        * wants a consistent dump of the route which starts at the first
+        * nexthop. Since sibling routes are always added at the end of
+        * the list, find the first sibling of the last route appended
+        */
+       if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) {
+               rt = list_first_entry(&rt_last->rt6i_siblings,
+                                     struct rt6_info,
+                                     rt6i_siblings);
+       }
+
+       if (rt)
+               inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
+}
+
 static int ip6_route_multipath_add(struct fib6_config *cfg)
 {
+       struct rt6_info *rt_notif = NULL, *rt_last = NULL;
+       struct nl_info *info = &cfg->fc_nlinfo;
        struct fib6_config r_cfg;
        struct rtnexthop *rtnh;
        struct rt6_info *rt;
        struct rt6_nh *err_nh;
        struct rt6_nh *nh, *nh_safe;
+       __u16 nlflags;
        int remaining;
        int attrlen;
        int err = 1;
@@ -3002,6 +3105,10 @@ static int ip6_route_multipath_add(struct fib6_config *cfg)
                       (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
        LIST_HEAD(rt6_nh_list);
 
+       nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
+       if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
+               nlflags |= NLM_F_APPEND;
+
        remaining = cfg->fc_mp_len;
        rtnh = (struct rtnexthop *)cfg->fc_mp;
 
@@ -3044,9 +3151,20 @@ static int ip6_route_multipath_add(struct fib6_config *cfg)
                rtnh = rtnh_next(rtnh, &remaining);
        }
 
+       /* for add and replace send one notification with all nexthops.
+        * Skip the notification in fib6_add_rt2node and send one with
+        * the full route when done
+        */
+       info->skip_notify = 1;
+
        err_nh = NULL;
        list_for_each_entry(nh, &rt6_nh_list, next) {
-               err = __ip6_ins_rt(nh->rt6_info, &cfg->fc_nlinfo, &nh->mxc);
+               rt_last = nh->rt6_info;
+               err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc);
+               /* save reference to first route for notification */
+               if (!rt_notif && !err)
+                       rt_notif = nh->rt6_info;
+
                /* nh->rt6_info is used or freed at this point, reset to NULL*/
                nh->rt6_info = NULL;
                if (err) {
@@ -3068,9 +3186,18 @@ static int ip6_route_multipath_add(struct fib6_config *cfg)
                nhn++;
        }
 
+       /* success ... tell user about new route */
+       ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
        goto cleanup;
 
 add_errout:
+       /* send notification for routes that were added so that
+        * the delete notifications sent by ip6_route_del are
+        * coherent
+        */
+       if (rt_notif)
+               ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
+
        /* Delete routes that were already added */
        list_for_each_entry(nh, &rt6_nh_list, next) {
                if (err_nh == nh)
@@ -3138,8 +3265,10 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
 
        if (cfg.fc_mp)
                return ip6_route_multipath_del(&cfg);
-       else
+       else {
+               cfg.fc_delete_all_nh = 1;
                return ip6_route_del(&cfg);
+       }
 }
 
 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -3157,8 +3286,20 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
                return ip6_route_add(&cfg);
 }
 
-static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
+static size_t rt6_nlmsg_size(struct rt6_info *rt)
 {
+       int nexthop_len = 0;
+
+       if (rt->rt6i_nsiblings) {
+               nexthop_len = nla_total_size(0)  /* RTA_MULTIPATH */
+                           + NLA_ALIGN(sizeof(struct rtnexthop))
+                           + nla_total_size(16) /* RTA_GATEWAY */
+                           + nla_total_size(4)  /* RTA_OIF */
+                           + lwtunnel_get_encap_size(rt->dst.lwtstate);
+
+               nexthop_len *= rt->rt6i_nsiblings;
+       }
+
        return NLMSG_ALIGN(sizeof(struct rtmsg))
               + nla_total_size(16) /* RTA_SRC */
               + nla_total_size(16) /* RTA_DST */
@@ -3172,14 +3313,69 @@ static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
               + nla_total_size(sizeof(struct rta_cacheinfo))
               + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
               + nla_total_size(1) /* RTA_PREF */
-              + lwtunnel_get_encap_size(rt->dst.lwtstate);
+              + lwtunnel_get_encap_size(rt->dst.lwtstate)
+              + nexthop_len;
+}
+
+static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
+                           unsigned int *flags)
+{
+       if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
+               *flags |= RTNH_F_LINKDOWN;
+               if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
+                       *flags |= RTNH_F_DEAD;
+       }
+
+       if (rt->rt6i_flags & RTF_GATEWAY) {
+               if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
+                       goto nla_put_failure;
+       }
+
+       if (rt->dst.dev &&
+           nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
+               goto nla_put_failure;
+
+       if (rt->dst.lwtstate &&
+           lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
+               goto nla_put_failure;
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
+}
+
+static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
+{
+       struct rtnexthop *rtnh;
+       unsigned int flags = 0;
+
+       rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
+       if (!rtnh)
+               goto nla_put_failure;
+
+       rtnh->rtnh_hops = 0;
+       rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
+
+       if (rt6_nexthop_info(skb, rt, &flags) < 0)
+               goto nla_put_failure;
+
+       rtnh->rtnh_flags = flags;
+
+       /* length of rtnetlink header + attributes */
+       rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
+
+       return 0;
+
+nla_put_failure:
+       return -EMSGSIZE;
 }
 
 static int rt6_fill_node(struct net *net,
                         struct sk_buff *skb, struct rt6_info *rt,
                         struct in6_addr *dst, struct in6_addr *src,
                         int iif, int type, u32 portid, u32 seq,
-                        int prefix, int nowait, unsigned int flags)
+                        unsigned int flags)
 {
        u32 metrics[RTAX_MAX];
        struct rtmsg *rtm;
@@ -3187,13 +3383,6 @@ static int rt6_fill_node(struct net *net,
        long expires;
        u32 table;
 
-       if (prefix) {   /* user wants prefix routes only */
-               if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
-                       /* success since this is not a prefix route */
-                       return 1;
-               }
-       }
-
        nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
        if (!nlh)
                return -EMSGSIZE;
@@ -3233,11 +3422,6 @@ static int rt6_fill_node(struct net *net,
        else
                rtm->rtm_type = RTN_UNICAST;
        rtm->rtm_flags = 0;
-       if (!netif_carrier_ok(rt->dst.dev)) {
-               rtm->rtm_flags |= RTNH_F_LINKDOWN;
-               if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
-                       rtm->rtm_flags |= RTNH_F_DEAD;
-       }
        rtm->rtm_scope = RT_SCOPE_UNIVERSE;
        rtm->rtm_protocol = rt->rt6i_protocol;
        if (rt->rt6i_flags & RTF_DYNAMIC)
@@ -3271,19 +3455,12 @@ static int rt6_fill_node(struct net *net,
        if (iif) {
 #ifdef CONFIG_IPV6_MROUTE
                if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
-                       int err = ip6mr_get_route(net, skb, rtm, nowait,
-                                                 portid);
-
-                       if (err <= 0) {
-                               if (!nowait) {
-                                       if (err == 0)
-                                               return 0;
-                                       goto nla_put_failure;
-                               } else {
-                                       if (err == -EMSGSIZE)
-                                               goto nla_put_failure;
-                               }
-                       }
+                       int err = ip6mr_get_route(net, skb, rtm, portid);
+
+                       if (err == 0)
+                               return 0;
+                       if (err < 0)
+                               goto nla_put_failure;
                } else
 #endif
                        if (nla_put_u32(skb, RTA_IIF, iif))
@@ -3308,17 +3485,35 @@ static int rt6_fill_node(struct net *net,
        if (rtnetlink_put_metrics(skb, metrics) < 0)
                goto nla_put_failure;
 
-       if (rt->rt6i_flags & RTF_GATEWAY) {
-               if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
-                       goto nla_put_failure;
-       }
-
-       if (rt->dst.dev &&
-           nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
-               goto nla_put_failure;
        if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
                goto nla_put_failure;
 
+       /* For multipath routes, walk the siblings list and add
+        * each as a nexthop within RTA_MULTIPATH.
+        */
+       if (rt->rt6i_nsiblings) {
+               struct rt6_info *sibling, *next_sibling;
+               struct nlattr *mp;
+
+               mp = nla_nest_start(skb, RTA_MULTIPATH);
+               if (!mp)
+                       goto nla_put_failure;
+
+               if (rt6_add_nexthop(skb, rt) < 0)
+                       goto nla_put_failure;
+
+               list_for_each_entry_safe(sibling, next_sibling,
+                                        &rt->rt6i_siblings, rt6i_siblings) {
+                       if (rt6_add_nexthop(skb, sibling) < 0)
+                               goto nla_put_failure;
+               }
+
+               nla_nest_end(skb, mp);
+       } else {
+               if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags) < 0)
+                       goto nla_put_failure;
+       }
+
        expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
 
        if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
@@ -3327,8 +3522,6 @@ static int rt6_fill_node(struct net *net,
        if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
                goto nla_put_failure;
 
-       if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
-               goto nla_put_failure;
 
        nlmsg_end(skb, nlh);
        return 0;
@@ -3341,18 +3534,26 @@ nla_put_failure:
 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
 {
        struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
-       int prefix;
+       struct net *net = arg->net;
+
+       if (rt == net->ipv6.ip6_null_entry)
+               return 0;
 
        if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
                struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
-               prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
-       } else
-               prefix = 0;
 
-       return rt6_fill_node(arg->net,
+               /* user wants prefix routes only */
+               if (rtm->rtm_flags & RTM_F_PREFIX &&
+                   !(rt->rt6i_flags & RTF_PREFIX_RT)) {
+                       /* success since this is not a prefix route */
+                       return 1;
+               }
+       }
+
+       return rt6_fill_node(net,
                     arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
                     NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
-                    prefix, 0, NLM_F_MULTI);
+                    NLM_F_MULTI);
 }
 
 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
@@ -3433,17 +3634,11 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
                goto errout;
        }
 
-       /* Reserve room for dummy headers, this skb can pass
-          through good chunk of routing engine.
-        */
-       skb_reset_mac_header(skb);
-       skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
-
        skb_dst_set(skb, &rt->dst);
 
        err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
                            RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
-                           nlh->nlmsg_seq, 0, 0, 0);
+                           nlh->nlmsg_seq, 0);
        if (err < 0) {
                kfree_skb(skb);
                goto errout;
@@ -3470,7 +3665,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
                goto errout;
 
        err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
-                               event, info->portid, seq, 0, 0, nlm_flags);
+                               event, info->portid, seq, nlm_flags);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
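
The bulk of this route.c diff makes multipath routes first-class in the netlink API: adds, replaces, and deletes of an ECMP route now emit a single RTM_NEWROUTE/RTM_DELROUTE notification carrying every nexthop inside an RTA_MULTIPATH attribute (hence the forward declarations of rt6_nlmsg_size()/rt6_fill_node(), the skip_notify dance, and the per-sibling size accounting), dumps walk the sibling list through the new rt6_add_nexthop(), and the dst_confirm() call sites move to the address-aware dst_confirm_neigh()/.confirm_neigh op. On the receiving side, a dump consumer walks the packed nexthop array with the standard rtnetlink macros (sketch only):

    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>

    static void walk_multipath(const struct rtattr *mp)
    {
            const struct rtnexthop *nh = RTA_DATA(mp);
            int len = RTA_PAYLOAD(mp);

            while (RTNH_OK(nh, len)) {
                    /* nh->rtnh_ifindex and nh->rtnh_flags live here;
                     * nested attributes (RTA_GATEWAY, RTA_ENCAP, ...)
                     * trail each rtnexthop within rtnh_len */
                    len -= NLMSG_ALIGN(nh->rtnh_len);
                    nh = RTNH_NEXT(nh);
            }
    }
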
index 6ef3dfb6e811642f1fc9b680e0b255a9399bb024..f950cb53d5e3c9b460bff5f72f108af4f1b2d29a 100644 (file)
@@ -45,7 +45,7 @@
 #include <net/seg6_hmac.h>
 #include <linux/random.h>
 
-static char * __percpu *hmac_ring;
+static DEFINE_PER_CPU(char [SEG6_HMAC_RING_SIZE], hmac_ring);
 
 static int seg6_hmac_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
 {
@@ -192,7 +192,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
         */
 
        local_bh_disable();
-       ring = *this_cpu_ptr(hmac_ring);
+       ring = this_cpu_ptr(hmac_ring);
        off = ring;
 
        /* source address */
@@ -353,27 +353,6 @@ out:
 }
 EXPORT_SYMBOL(seg6_push_hmac);
 
-static int seg6_hmac_init_ring(void)
-{
-       int i;
-
-       hmac_ring = alloc_percpu(char *);
-
-       if (!hmac_ring)
-               return -ENOMEM;
-
-       for_each_possible_cpu(i) {
-               char *ring = kzalloc(SEG6_HMAC_RING_SIZE, GFP_KERNEL);
-
-               if (!ring)
-                       return -ENOMEM;
-
-               *per_cpu_ptr(hmac_ring, i) = ring;
-       }
-
-       return 0;
-}
-
 static int seg6_hmac_init_algo(void)
 {
        struct seg6_hmac_algo *algo;
@@ -410,7 +389,8 @@ static int seg6_hmac_init_algo(void)
                        return -ENOMEM;
 
                for_each_possible_cpu(cpu) {
-                       shash = kzalloc(shsize, GFP_KERNEL);
+                       shash = kzalloc_node(shsize, GFP_KERNEL,
+                                            cpu_to_node(cpu));
                        if (!shash)
                                return -ENOMEM;
                        *per_cpu_ptr(algo->shashs, cpu) = shash;
@@ -422,16 +402,7 @@ static int seg6_hmac_init_algo(void)
 
 int __init seg6_hmac_init(void)
 {
-       int ret;
-
-       ret = seg6_hmac_init_ring();
-       if (ret < 0)
-               goto out;
-
-       ret = seg6_hmac_init_algo();
-
-out:
-       return ret;
+       return seg6_hmac_init_algo();
 }
 EXPORT_SYMBOL(seg6_hmac_init);
 
@@ -450,13 +421,6 @@ void seg6_hmac_exit(void)
        struct seg6_hmac_algo *algo = NULL;
        int i, alg_count, cpu;
 
-       for_each_possible_cpu(i) {
-               char *ring = *per_cpu_ptr(hmac_ring, i);
-
-               kfree(ring);
-       }
-       free_percpu(hmac_ring);
-
        alg_count = sizeof(hmac_algos) / sizeof(struct seg6_hmac_algo);
        for (i = 0; i < alg_count; i++) {
                algo = &hmac_algos[i];
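
The seg6_hmac conversion swaps a dynamically allocated per-CPU array of kmalloc'd buffers for a static DEFINE_PER_CPU array: no allocation, no partial-failure cleanup (the old init could leak already-allocated rings on a mid-loop ENOMEM), and no exit-time teardown, at the cost of SEG6_HMAC_RING_SIZE bytes per possible CPU whether or not SR-HMAC is in use. The shash buffers also move to kzalloc_node() so each CPU's scratch state is NUMA-local. The per-CPU pattern in miniature (names and size are illustrative):

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(char [256], scratch_ring);

    static void use_scratch(void)
    {
            char *buf;

            local_bh_disable();              /* stay on this CPU */
            buf = this_cpu_ptr(scratch_ring);
            /* ... use buf as a 256-byte per-CPU scratch area ... */
            local_bh_enable();
    }
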
index c46f8cbf5ab5aa4031d4080d70079e99859d4eb4..85582257d3af88146d435ef6c2e98f0bbef94a41 100644 (file)
@@ -55,8 +55,8 @@ static const struct nla_policy seg6_iptunnel_policy[SEG6_IPTUNNEL_MAX + 1] = {
        [SEG6_IPTUNNEL_SRH]     = { .type = NLA_BINARY },
 };
 
-int nla_put_srh(struct sk_buff *skb, int attrtype,
-               struct seg6_iptunnel_encap *tuninfo)
+static int nla_put_srh(struct sk_buff *skb, int attrtype,
+                      struct seg6_iptunnel_encap *tuninfo)
 {
        struct seg6_iptunnel_encap *data;
        struct nlattr *nla;
@@ -235,7 +235,7 @@ static int seg6_do_srh(struct sk_buff *skb)
        return 0;
 }
 
-int seg6_input(struct sk_buff *skb)
+static int seg6_input(struct sk_buff *skb)
 {
        int err;
 
@@ -251,7 +251,7 @@ int seg6_input(struct sk_buff *skb)
        return dst_input(skb);
 }
 
-int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
        struct dst_entry *orig_dst = skb_dst(skb);
        struct dst_entry *dst = NULL;
@@ -303,7 +303,7 @@ drop:
        return err;
 }
 
-static int seg6_build_state(struct net_device *dev, struct nlattr *nla,
+static int seg6_build_state(struct nlattr *nla,
                            unsigned int family, const void *cfg,
                            struct lwtunnel_state **ts)
 {
index a4d49760bf434e0800fb92cf10cdd6e6ce22f5e5..895ff650db43017ef39344679771d94ad6eaaf00 100644 (file)
@@ -16,7 +16,7 @@
 
 #include <linux/tcp.h>
 #include <linux/random.h>
-#include <linux/cryptohash.h>
+#include <linux/siphash.h>
 #include <linux/kernel.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
@@ -24,7 +24,7 @@
 #define COOKIEBITS 24  /* Upper bits store count */
 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
 
-static u32 syncookie6_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly;
+static siphash_key_t syncookie6_secret[2] __read_mostly;
 
 /* RFC 2460, Section 8.3:
  * [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
@@ -41,30 +41,27 @@ static __u16 const msstab[] = {
        9000 - 60,
 };
 
-static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], ipv6_cookie_scratch);
-
-static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *daddr,
+static u32 cookie_hash(const struct in6_addr *saddr,
+                      const struct in6_addr *daddr,
                       __be16 sport, __be16 dport, u32 count, int c)
 {
-       __u32 *tmp;
+       const struct {
+               struct in6_addr saddr;
+               struct in6_addr daddr;
+               u32 count;
+               __be16 sport;
+               __be16 dport;
+       } __aligned(SIPHASH_ALIGNMENT) combined = {
+               .saddr = *saddr,
+               .daddr = *daddr,
+               .count = count,
+               .sport = sport,
+               .dport = dport
+       };
 
        net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret));
-
-       tmp  = this_cpu_ptr(ipv6_cookie_scratch);
-
-       /*
-        * we have 320 bits of information to hash, copy in the remaining
-        * 192 bits required for sha_transform, from the syncookie6_secret
-        * and overwrite the digest with the secret
-        */
-       memcpy(tmp + 10, syncookie6_secret[c], 44);
-       memcpy(tmp, saddr, 16);
-       memcpy(tmp + 4, daddr, 16);
-       tmp[8] = ((__force u32)sport << 16) + (__force u32)dport;
-       tmp[9] = count;
-       sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5);
-
-       return tmp[17];
+       return siphash(&combined, offsetofend(typeof(combined), dport),
+                      &syncookie6_secret[c]);
 }
 
 static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
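
cookie_hash() moves from SHA-1 (sha_transform() plus a per-CPU scratch buffer) to SipHash, a keyed PRF designed for exactly this short-input use: faster, no scratch state, and no hand-copying of the secret into digest words. Packing the inputs into a struct and hashing offsetofend(..., last_member) bytes covers all members while excluding any trailing padding. The idiom reduced to essentials (kernel context assumed):

    #include <linux/siphash.h>

    static siphash_key_t secret __read_mostly;

    static u32 hash_tuple(u32 count, __be16 sport, __be16 dport)
    {
            const struct {
                    u32    count;
                    __be16 sport;
                    __be16 dport;
            } __aligned(SIPHASH_ALIGNMENT) combined = {
                    .count = count, .sport = sport, .dport = dport,
            };

            /* hash up to the end of the last member: padding excluded;
             * the u64 result is truncated to u32, as in the hunk above */
            return siphash(&combined, offsetofend(typeof(combined), dport),
                           &secret);
    }
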
index 4c60c6f71cd30bf18f270c3d994f193ad13045ae..21c719965b6be69bb04d79f240ca8c638643b592 100644 (file)
@@ -123,6 +123,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        struct dst_entry *dst;
        int addr_type;
        int err;
+       struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
 
        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;
@@ -263,7 +264,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        sk->sk_gso_type = SKB_GSO_TCPV6;
        ip6_dst_store(sk, dst, NULL, NULL);
 
-       if (tcp_death_row.sysctl_tw_recycle &&
+       if (tcp_death_row->sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp &&
            ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
                tcp_fetch_timewait_stamp(sk, dst);
@@ -278,7 +279,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        inet->inet_dport = usin->sin6_port;
 
        tcp_set_state(sk, TCP_SYN_SENT);
-       err = inet6_hash_connect(&tcp_death_row, sk);
+       err = inet6_hash_connect(tcp_death_row, sk);
        if (err)
                goto late_failure;
 
@@ -291,6 +292,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                                                             inet->inet_dport,
                                                             &tp->tsoffset);
 
+       if (tcp_fastopen_defer_connect(sk, &err))
+               return err;
+       if (err)
+               goto late_failure;
+
        err = tcp_connect(sk);
        if (err)
                goto late_failure;
@@ -299,7 +305,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
 late_failure:
        tcp_set_state(sk, TCP_CLOSE);
-       __sk_dst_reset(sk);
 failure:
        inet->inet_dport = 0;
        sk->sk_route_caps = 0;
@@ -1157,10 +1162,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
        tcp_ca_openreq_child(newsk, dst);
 
        tcp_sync_mss(newsk, dst_mtu(dst));
-       newtp->advmss = dst_metric_advmss(dst);
-       if (tcp_sk(sk)->rx_opt.user_mss &&
-           tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
-               newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
+       newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
 
        tcp_initialize_rcv_mss(newsk);
 
@@ -1627,7 +1629,6 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
-       .bind_conflict     = inet6_csk_bind_conflict,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
@@ -1658,7 +1659,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
-       .bind_conflict     = inet6_csk_bind_conflict,
 #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
@@ -1751,7 +1751,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
        srcp  = ntohs(inet->inet_sport);
 
        if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
-           icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+           icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
            icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                timer_active    = 1;
                timer_expires   = icsk->icsk_timeout;
@@ -1895,6 +1895,7 @@ struct proto tcpv6_prot = {
        .shutdown               = tcp_shutdown,
        .setsockopt             = tcp_setsockopt,
        .getsockopt             = tcp_getsockopt,
+       .keepalive              = tcp_set_keepalive,
        .recvmsg                = tcp_recvmsg,
        .sendmsg                = tcp_sendmsg,
        .sendpage               = tcp_sendpage,
@@ -1955,7 +1956,7 @@ static void __net_exit tcpv6_net_exit(struct net *net)
 
 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
 {
-       inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
+       inet_twsk_purge(&tcp_hashinfo, AF_INET6);
 }
 
 static struct pernet_operations tcpv6_net_ops = {
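
Several independent TCP changes meet in this file: tcp_death_row becomes per-netns state (hence the pointer through sock_net(sk)->ipv4.tcp_death_row and the simplified inet_twsk_purge()), the per-af .bind_conflict hook moves into generic code, a .keepalive proto op appears, the retransmit-timer check follows the RACK rename to ICSK_TIME_REO_TIMEOUT, and tcp_v6_connect() gains the TCP Fast Open defer-connect path, where connect() returns immediately and the SYN rides out with the first data. The userspace side of that last one, assuming TCP_FASTOPEN_CONNECT from the same patch series is in your headers:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    #ifndef TCP_FASTOPEN_CONNECT
    #define TCP_FASTOPEN_CONNECT 30   /* assumed uapi value; verify */
    #endif

    static int fastopen_connect(int fd, const struct sockaddr_in6 *dst)
    {
            int one = 1;

            /* connect() returns at once; the SYN (carrying data, and a
             * cookie if cached) is sent by the first write()/send() */
            if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT,
                           &one, sizeof(one)) < 0)
                    return -1;
            return connect(fd, (const struct sockaddr *)dst, sizeof(*dst));
    }
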
index 221825a9407afebba47106f60729f91e2992158c..4e4c401e3bc69020deaa4af1c10633288faedf13 100644 (file)
 #include <trace/events/skb.h>
 #include "udp_impl.h"
 
+static bool udp6_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if defined(CONFIG_NET_L3_MASTER_DEV)
+       if (!net->ipv4.sysctl_udp_l3mdev_accept &&
+           skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
+               return true;
+#endif
+       return false;
+}
+
 static u32 udp6_ehashfn(const struct net *net,
                        const struct in6_addr *laddr,
                        const u16 lport,
@@ -103,7 +113,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
 
        /* precompute partial secondary hash */
        udp_sk(sk)->udp_portaddr_hash = hash2_partial;
-       return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
+       return udp_lib_get_port(sk, snum, hash2_nulladdr);
 }
 
 static void udp_v6_rehash(struct sock *sk)
@@ -118,7 +128,7 @@ static void udp_v6_rehash(struct sock *sk)
 static int compute_score(struct sock *sk, struct net *net,
                         const struct in6_addr *saddr, __be16 sport,
                         const struct in6_addr *daddr, unsigned short hnum,
-                        int dif)
+                        int dif, bool exact_dif)
 {
        int score;
        struct inet_sock *inet;
@@ -149,7 +159,7 @@ static int compute_score(struct sock *sk, struct net *net,
                score++;
        }
 
-       if (sk->sk_bound_dev_if) {
+       if (sk->sk_bound_dev_if || exact_dif) {
                if (sk->sk_bound_dev_if != dif)
                        return -1;
                score++;
@@ -165,7 +175,7 @@ static int compute_score(struct sock *sk, struct net *net,
 static struct sock *udp6_lib_lookup2(struct net *net,
                const struct in6_addr *saddr, __be16 sport,
                const struct in6_addr *daddr, unsigned int hnum, int dif,
-               struct udp_hslot *hslot2,
+               bool exact_dif, struct udp_hslot *hslot2,
                struct sk_buff *skb)
 {
        struct sock *sk, *result;
@@ -176,7 +186,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
        badness = -1;
        udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
                score = compute_score(sk, net, saddr, sport,
-                                     daddr, hnum, dif);
+                                     daddr, hnum, dif, exact_dif);
                if (score > badness) {
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
@@ -212,6 +222,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
        unsigned short hnum = ntohs(dport);
        unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
        struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
+       bool exact_dif = udp6_lib_exact_dif_match(net, skb);
        int score, badness, matches = 0, reuseport = 0;
        u32 hash = 0;
 
@@ -223,7 +234,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
                        goto begin;
 
                result = udp6_lib_lookup2(net, saddr, sport,
-                                         daddr, hnum, dif,
+                                         daddr, hnum, dif, exact_dif,
                                          hslot2, skb);
                if (!result) {
                        unsigned int old_slot2 = slot2;
@@ -239,7 +250,8 @@ struct sock *__udp6_lib_lookup(struct net *net,
 
                        result = udp6_lib_lookup2(net, saddr, sport,
                                                  daddr, hnum, dif,
-                                                 hslot2, skb);
+                                                 exact_dif, hslot2,
+                                                 skb);
                }
                return result;
        }
@@ -247,7 +259,8 @@ begin:
        result = NULL;
        badness = -1;
        sk_for_each_rcu(sk, &hslot->head) {
-               score = compute_score(sk, net, saddr, sport, daddr, hnum, dif);
+               score = compute_score(sk, net, saddr, sport, daddr, hnum, dif,
+                                     exact_dif);
                if (score > badness) {
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
@@ -1299,7 +1312,8 @@ out:
        return err;
 
 do_confirm:
-       dst_confirm(dst);
+       if (msg->msg_flags & MSG_PROBE)
+               dst_confirm_neigh(dst, &fl6.daddr);
        if (!(msg->msg_flags&MSG_PROBE) || len)
                goto back_from_confirm;
        err = 0;
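
udp6_lib_exact_dif_match() mirrors the IPv4 change from the same series: when a packet arrived through an l3mdev (VRF) device and udp_l3mdev_accept is off, socket lookup must treat the incoming device as a hard constraint, so even unbound sockets fail the dif comparison. Restated as a sketch (dev_match is hypothetical):

    #include <stdbool.h>

    static bool dev_match(int sk_bound_dev_if, int dif, bool exact_dif)
    {
            if (!sk_bound_dev_if && !exact_dif)
                    return true;      /* wildcard socket, no VRF rule */
            /* bound socket, or VRF demanding an exact match: an unbound
             * socket (sk_bound_dev_if == 0) can no longer match */
            return sk_bound_dev_if == dif;
    }
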
index b5789562aded9274f706225df3f69e45efac5008..08a807b29298f5a2c14c30eaedcba87f3057431d 100644 (file)
@@ -33,6 +33,8 @@ EXPORT_SYMBOL(xfrm6_rcv_spi);
 
 int xfrm6_transport_finish(struct sk_buff *skb, int async)
 {
+       struct xfrm_offload *xo = xfrm_offload(skb);
+
        skb_network_header(skb)[IP6CB(skb)->nhoff] =
                XFRM_MODE_SKB_CB(skb)->protocol;
 
@@ -44,6 +46,11 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
        ipv6_hdr(skb)->payload_len = htons(skb->len);
        __skb_push(skb, skb->data - skb_network_header(skb));
 
+       if (xo && (xo->flags & XFRM_GRO)) {
+               skb_mac_header_rebuild(skb);
+               return -1;
+       }
+
        NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
                dev_net(skb->dev), NULL, skb, skb->dev, NULL,
                ip6_rcv_finish);
@@ -69,18 +76,9 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
        struct xfrm_state *x = NULL;
        int i = 0;
 
-       /* Allocate new secpath or COW existing one. */
-       if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
-               struct sec_path *sp;
-
-               sp = secpath_dup(skb->sp);
-               if (!sp) {
-                       XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
-                       goto drop;
-               }
-               if (skb->sp)
-                       secpath_put(skb->sp);
-               skb->sp = sp;
+       if (secpath_set(skb)) {
+               XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
+               goto drop;
        }
 
        if (1 + skb->sp->len == XFRM_MAX_DEPTH) {
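
secpath_set() centralizes the "allocate or COW the secpath" boilerplate this hunk deletes, returning nonzero only on allocation failure; the XFRM_GRO branch added to xfrm6_transport_finish() (and the matching header handling in xfrm6_mode_transport.c below) lets the new GRO-for-IPsec path resume processing instead of re-entering the netfilter hook. From the removed lines, the helper is essentially:

    static int secpath_set(struct sk_buff *skb)
    {
            /* allocate a new secpath, or copy-on-write a shared one */
            if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
                    struct sec_path *sp = secpath_dup(skb->sp);

                    if (!sp)
                            return -ENOMEM;
                    if (skb->sp)
                            secpath_put(skb->sp);
                    skb->sp = sp;
            }
            return 0;
    }
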
index 4e344105b3fddeacfc79ca7492b62f223de2c02e..4439ee44c8b05461b8a66190c7800379ca5f105c 100644 (file)
@@ -47,6 +47,7 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
 {
        int ihl = skb->data - skb_transport_header(skb);
+       struct xfrm_offload *xo = xfrm_offload(skb);
 
        if (skb->transport_header != skb->network_header) {
                memmove(skb_transport_header(skb),
@@ -55,7 +56,8 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
        }
        ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
                                           sizeof(struct ipv6hdr));
-       skb_reset_transport_header(skb);
+       if (!xo || !(xo->flags & XFRM_GRO))
+               skb_reset_transport_header(skb);
        return 0;
 }
 
index e0f71c01d7289d7ba6213af131d647c8bae322e6..79651bc71bf0d48d54e47d75cef0cd3f686e16ab 100644 (file)
@@ -25,8 +25,6 @@
 #include <net/mip6.h>
 #endif
 
-static struct xfrm_policy_afinfo xfrm6_policy_afinfo;
-
 static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
                                          const xfrm_address_t *saddr,
                                          const xfrm_address_t *daddr)
@@ -220,7 +218,7 @@ static inline int xfrm6_garbage_collect(struct dst_ops *ops)
 {
        struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
 
-       xfrm6_policy_afinfo.garbage_collect(net);
+       xfrm_garbage_collect_deferred(net);
        return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
 }
 
@@ -291,8 +289,7 @@ static struct dst_ops xfrm6_dst_ops_template = {
        .gc_thresh =            INT_MAX,
 };
 
-static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
-       .family =               AF_INET6,
+static const struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
        .dst_ops =              &xfrm6_dst_ops_template,
        .dst_lookup =           xfrm6_dst_lookup,
        .get_saddr =            xfrm6_get_saddr,
@@ -305,7 +302,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
 
 static int __init xfrm6_policy_init(void)
 {
-       return xfrm_policy_register_afinfo(&xfrm6_policy_afinfo);
+       return xfrm_policy_register_afinfo(&xfrm6_policy_afinfo, AF_INET6);
 }
 
 static void xfrm6_policy_fini(void)
index 54d13f8dbbae10670756eee0b16b898423d06060..b2dc8ce493784c7f824d7a0db4b16947ad7512c5 100644 (file)
@@ -162,9 +162,8 @@ static const struct inet6_protocol ipcomp6_protocol = {
        .flags          =       INET6_PROTO_NOPOLICY,
 };
 
-static struct xfrm_input_afinfo xfrm6_input_afinfo = {
+static const struct xfrm_input_afinfo xfrm6_input_afinfo = {
        .family         =       AF_INET6,
-       .owner          =       THIS_MODULE,
        .callback       =       xfrm6_rcv_cb,
 };
 
index e2c6ae0245652521bbe96bd8227443b5f0267589..8bf18a5f66e0c465ef3640ae4168c875c4c9e1ed 100644 (file)
@@ -106,8 +106,8 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
-static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
-                                                     struct rtnl_link_stats64 *stats)
+static void l2tp_eth_get_stats64(struct net_device *dev,
+                                struct rtnl_link_stats64 *stats)
 {
        struct l2tp_eth *priv = netdev_priv(dev);
 
@@ -117,10 +117,8 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
        stats->rx_bytes   = atomic_long_read(&priv->rx_bytes);
        stats->rx_packets = atomic_long_read(&priv->rx_packets);
        stats->rx_errors  = atomic_long_read(&priv->rx_errors);
-       return stats;
 }
 
-
 static const struct net_device_ops l2tp_eth_netdev_ops = {
        .ndo_init               = l2tp_eth_dev_init,
        .ndo_uninit             = l2tp_eth_dev_uninit,
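
The get_stats64 change here is one instance of a tree-wide cleanup in this merge: ndo_get_stats64 loses its return value, since every implementation just returned the stats pointer it was handed. The resulting contract is simply "fill in the caller's buffer" (trimmed sketch using this driver's own counters):

    static void example_get_stats64(struct net_device *dev,
                                    struct rtnl_link_stats64 *stats)
    {
            struct l2tp_eth *priv = netdev_priv(dev);

            /* copy driver counters into the caller-provided struct;
             * nothing to return any more */
            stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
    }
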
index 28c21546d5b60dcd07bbf6347389e97c918bf40f..c59712057dc838170f47f917cc3676f9f06f958b 100644 (file)
@@ -54,19 +54,26 @@ static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
        struct sock *sk;
 
        sk_for_each_bound(sk, &l2tp_ip_bind_table) {
-               struct inet_sock *inet = inet_sk(sk);
-               struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
+               const struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
+               const struct inet_sock *inet = inet_sk(sk);
 
-               if (l2tp == NULL)
+               if (!net_eq(sock_net(sk), net))
                        continue;
 
-               if ((l2tp->conn_id == tunnel_id) &&
-                   net_eq(sock_net(sk), net) &&
-                   !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
-                   (!inet->inet_daddr || !raddr || inet->inet_daddr == raddr) &&
-                   (!sk->sk_bound_dev_if || !dif ||
-                    sk->sk_bound_dev_if == dif))
-                       goto found;
+               if (sk->sk_bound_dev_if && dif && sk->sk_bound_dev_if != dif)
+                       continue;
+
+               if (inet->inet_rcv_saddr && laddr &&
+                   inet->inet_rcv_saddr != laddr)
+                       continue;
+
+               if (inet->inet_daddr && raddr && inet->inet_daddr != raddr)
+                       continue;
+
+               if (l2tp->conn_id != tunnel_id)
+                       continue;
+
+               goto found;
        }
 
        sk = NULL;
@@ -259,7 +266,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        if (!sock_flag(sk, SOCK_ZAPPED))
                goto out;
 
-       if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
+       if (sk->sk_state != TCP_CLOSE)
                goto out;
 
        chk_addr_ret = inet_addr_type(net, addr->l2tp_addr.s_addr);
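Rewriting the single compound condition as a chain of early continues makes each filter readable on its own. Every address check follows the same rule, sketched standalone below (my reading of the diff, not code taken from it):

#include <stdbool.h>
#include <stdint.h>

/* A field disqualifies a socket only when both the bound value and the
 * requested value are set and they disagree; zero acts as a wildcard. */
static bool field_matches(uint32_t bound, uint32_t wanted)
{
        return bound == 0 || wanted == 0 || bound == wanted;
}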
index f47c45250f86c9189e0a6bbfd92b21cbe2069406..a4abcbc4c09ae65424a701a1200b7535fa3635ac 100644 (file)
@@ -57,8 +57,8 @@ static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
        return (struct l2tp_ip6_sock *)sk;
 }
 
-static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
-                                          struct in6_addr *laddr,
+static struct sock *__l2tp_ip6_bind_lookup(const struct net *net,
+                                          const struct in6_addr *laddr,
                                           const struct in6_addr *raddr,
                                           int dif, u32 tunnel_id)
 {
@@ -67,18 +67,26 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
        sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
                const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
                const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
-               struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
+               const struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
 
-               if (l2tp == NULL)
+               if (!net_eq(sock_net(sk), net))
                        continue;
 
-               if ((l2tp->conn_id == tunnel_id) &&
-                   net_eq(sock_net(sk), net) &&
-                   (!sk_laddr || ipv6_addr_any(sk_laddr) || ipv6_addr_equal(sk_laddr, laddr)) &&
-                   (!raddr || ipv6_addr_any(sk_raddr) || ipv6_addr_equal(sk_raddr, raddr)) &&
-                   (!sk->sk_bound_dev_if || !dif ||
-                    sk->sk_bound_dev_if == dif))
-                       goto found;
+               if (sk->sk_bound_dev_if && dif && sk->sk_bound_dev_if != dif)
+                       continue;
+
+               if (sk_laddr && !ipv6_addr_any(sk_laddr) &&
+                   !ipv6_addr_any(laddr) && !ipv6_addr_equal(sk_laddr, laddr))
+                       continue;
+
+               if (!ipv6_addr_any(sk_raddr) && raddr &&
+                   !ipv6_addr_any(raddr) && !ipv6_addr_equal(sk_raddr, raddr))
+                       continue;
+
+               if (l2tp->conn_id != tunnel_id)
+                       continue;
+
+               goto found;
        }
 
        sk = NULL;
@@ -650,7 +658,8 @@ out:
        return err < 0 ? err : len;
 
 do_confirm:
-       dst_confirm(dst);
+       if (msg->msg_flags & MSG_PROBE)
+               dst_confirm_neigh(dst, &fl6.daddr);
        if (!(msg->msg_flags & MSG_PROBE) || len)
                goto back_from_confirm;
        err = 0;
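dst_confirm_neigh() confirms the specific next-hop neighbour entry for the destination instead of setting a pending-confirm flag on the shared dst. Its shape is roughly the following (a simplified sketch of the dst API as I read it at this point, not quoted from this merge):

static inline void dst_confirm_neigh(struct dst_entry *dst,
                                     const void *daddr)
{
        if (dst->ops->confirm_neigh)
                dst->ops->confirm_neigh(dst, daddr);
}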
index 3891cbd2adeab7bdcb062be3dfb36926cf91d23c..76e30f4797fbd5e4b4ff1f379decf0c3d891cf88 100644 (file)
@@ -6,6 +6,7 @@ config MAC80211
        select CRYPTO_AES
        select CRYPTO_CCM
        select CRYPTO_GCM
+       select CRYPTO_CMAC
        select CRC32
        ---help---
          This option enables the hardware independent IEEE 802.11
index d0bd5fff5f0a6241cd057183ed701de938c60757..2fb65588490c3ddbd7fe69b56ce0e7fb0a8f0312 100644 (file)
 #define CMAC_TLEN_256 16 /* CMAC TLen = 128 bits (16 octets) */
 #define AAD_LEN 20
 
+static const u8 zero[CMAC_TLEN_256];
 
-void gf_mulx(u8 *pad)
-{
-       int i, carry;
-
-       carry = pad[0] & 0x80;
-       for (i = 0; i < AES_BLOCK_SIZE - 1; i++)
-               pad[i] = (pad[i] << 1) | (pad[i + 1] >> 7);
-       pad[AES_BLOCK_SIZE - 1] <<= 1;
-       if (carry)
-               pad[AES_BLOCK_SIZE - 1] ^= 0x87;
-}
-
-void aes_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
-                    const u8 *addr[], const size_t *len, u8 *mac,
-                    size_t mac_len)
-{
-       u8 cbc[AES_BLOCK_SIZE], pad[AES_BLOCK_SIZE];
-       const u8 *pos, *end;
-       size_t i, e, left, total_len;
-
-       memset(cbc, 0, AES_BLOCK_SIZE);
-
-       total_len = 0;
-       for (e = 0; e < num_elem; e++)
-               total_len += len[e];
-       left = total_len;
-
-       e = 0;
-       pos = addr[0];
-       end = pos + len[0];
-
-       while (left >= AES_BLOCK_SIZE) {
-               for (i = 0; i < AES_BLOCK_SIZE; i++) {
-                       cbc[i] ^= *pos++;
-                       if (pos >= end) {
-                               e++;
-                               pos = addr[e];
-                               end = pos + len[e];
-                       }
-               }
-               if (left > AES_BLOCK_SIZE)
-                       crypto_cipher_encrypt_one(tfm, cbc, cbc);
-               left -= AES_BLOCK_SIZE;
-       }
-
-       memset(pad, 0, AES_BLOCK_SIZE);
-       crypto_cipher_encrypt_one(tfm, pad, pad);
-       gf_mulx(pad);
-
-       if (left || total_len == 0) {
-               for (i = 0; i < left; i++) {
-                       cbc[i] ^= *pos++;
-                       if (pos >= end) {
-                               e++;
-                               pos = addr[e];
-                               end = pos + len[e];
-                       }
-               }
-               cbc[left] ^= 0x80;
-               gf_mulx(pad);
-       }
-
-       for (i = 0; i < AES_BLOCK_SIZE; i++)
-               pad[i] ^= cbc[i];
-       crypto_cipher_encrypt_one(tfm, pad, pad);
-       memcpy(mac, pad, mac_len);
-}
-
-
-void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
+void ieee80211_aes_cmac(struct crypto_shash *tfm, const u8 *aad,
                        const u8 *data, size_t data_len, u8 *mic)
 {
-       const u8 *addr[3];
-       size_t len[3];
-       u8 zero[CMAC_TLEN];
+       SHASH_DESC_ON_STACK(desc, tfm);
+       u8 out[AES_BLOCK_SIZE];
 
-       memset(zero, 0, CMAC_TLEN);
-       addr[0] = aad;
-       len[0] = AAD_LEN;
-       addr[1] = data;
-       len[1] = data_len - CMAC_TLEN;
-       addr[2] = zero;
-       len[2] = CMAC_TLEN;
+       desc->tfm = tfm;
 
-       aes_cmac_vector(tfm, 3, addr, len, mic, CMAC_TLEN);
+       crypto_shash_init(desc);
+       crypto_shash_update(desc, aad, AAD_LEN);
+       crypto_shash_update(desc, data, data_len - CMAC_TLEN);
+       crypto_shash_finup(desc, zero, CMAC_TLEN, out);
+
+       memcpy(mic, out, CMAC_TLEN);
 }
 
-void ieee80211_aes_cmac_256(struct crypto_cipher *tfm, const u8 *aad,
+void ieee80211_aes_cmac_256(struct crypto_shash *tfm, const u8 *aad,
                            const u8 *data, size_t data_len, u8 *mic)
 {
-       const u8 *addr[3];
-       size_t len[3];
-       u8 zero[CMAC_TLEN_256];
+       SHASH_DESC_ON_STACK(desc, tfm);
 
-       memset(zero, 0, CMAC_TLEN_256);
-       addr[0] = aad;
-       len[0] = AAD_LEN;
-       addr[1] = data;
-       len[1] = data_len - CMAC_TLEN_256;
-       addr[2] = zero;
-       len[2] = CMAC_TLEN_256;
+       desc->tfm = tfm;
 
-       aes_cmac_vector(tfm, 3, addr, len, mic, CMAC_TLEN_256);
+       crypto_shash_init(desc);
+       crypto_shash_update(desc, aad, AAD_LEN);
+       crypto_shash_update(desc, data, data_len - CMAC_TLEN_256);
+       crypto_shash_finup(desc, zero, CMAC_TLEN_256, mic);
 }
 
-struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[],
-                                                  size_t key_len)
+struct crypto_shash *ieee80211_aes_cmac_key_setup(const u8 key[],
+                                                 size_t key_len)
 {
-       struct crypto_cipher *tfm;
+       struct crypto_shash *tfm;
 
-       tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
+       tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
        if (!IS_ERR(tfm))
-               crypto_cipher_setkey(tfm, key, key_len);
+               crypto_shash_setkey(tfm, key, key_len);
 
        return tfm;
 }
 
-
-void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm)
+void ieee80211_aes_cmac_key_free(struct crypto_shash *tfm)
 {
-       crypto_free_cipher(tfm);
+       crypto_free_shash(tfm);
 }
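The same "cmac(aes)" transform can also be driven as a one-shot digest; a minimal usage sketch (hypothetical helper, error handling trimmed to the essentials):

static int example_cmac_digest(const u8 *key, unsigned int keylen,
                               const u8 *msg, unsigned int msglen, u8 *mac)
{
        struct crypto_shash *tfm;
        int err;

        tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_shash_setkey(tfm, key, keylen);
        if (!err) {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                err = crypto_shash_digest(desc, msg, msglen, mac);
        }
        crypto_free_shash(tfm);
        return err;
}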
index c827e1d5de8b11a2b9cb8d0d17a7385e83bdd8bc..fef531f420030b994fccde4bca81ce0d150b1708 100644 (file)
 #define AES_CMAC_H
 
 #include <linux/crypto.h>
+#include <crypto/hash.h>
 
-void gf_mulx(u8 *pad);
-void aes_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
-                    const u8 *addr[], const size_t *len, u8 *mac,
-                    size_t mac_len);
-struct crypto_cipher *ieee80211_aes_cmac_key_setup(const u8 key[],
-                                                  size_t key_len);
-void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
+struct crypto_shash *ieee80211_aes_cmac_key_setup(const u8 key[],
+                                                 size_t key_len);
+void ieee80211_aes_cmac(struct crypto_shash *tfm, const u8 *aad,
                        const u8 *data, size_t data_len, u8 *mic);
-void ieee80211_aes_cmac_256(struct crypto_cipher *tfm, const u8 *aad,
+void ieee80211_aes_cmac_256(struct crypto_shash *tfm, const u8 *aad,
                            const u8 *data, size_t data_len, u8 *mic);
-void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm);
+void ieee80211_aes_cmac_key_free(struct crypto_shash *tfm);
 
 #endif /* AES_CMAC_H */
index e91e503bf99257d7e8f3945c1500eebaa91f5e81..ac879bb17870d4602fd151504f68d26c21eeac9b 100644 (file)
@@ -208,8 +208,8 @@ static int ieee80211_nan_change_conf(struct wiphy *wiphy,
        if (changes & CFG80211_NAN_CONF_CHANGED_PREF)
                new_conf.master_pref = conf->master_pref;
 
-       if (changes & CFG80211_NAN_CONF_CHANGED_DUAL)
-               new_conf.dual = conf->dual;
+       if (changes & CFG80211_NAN_CONF_CHANGED_BANDS)
+               new_conf.bands = conf->bands;
 
        ret = drv_nan_change_conf(sdata->local, sdata, &new_conf, changes);
        if (!ret)
@@ -3563,6 +3563,17 @@ void ieee80211_nan_func_match(struct ieee80211_vif *vif,
 }
 EXPORT_SYMBOL(ieee80211_nan_func_match);
 
+static int ieee80211_set_multicast_to_unicast(struct wiphy *wiphy,
+                                             struct net_device *dev,
+                                             const bool enabled)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+       sdata->u.ap.multicast_to_unicast = enabled;
+
+       return 0;
+}
+
 const struct cfg80211_ops mac80211_config_ops = {
        .add_virtual_intf = ieee80211_add_iface,
        .del_virtual_intf = ieee80211_del_iface,
@@ -3653,4 +3664,5 @@ const struct cfg80211_ops mac80211_config_ops = {
        .nan_change_conf = ieee80211_nan_change_conf,
        .add_nan_func = ieee80211_add_nan_func,
        .del_nan_func = ieee80211_del_nan_func,
+       .set_multicast_to_unicast = ieee80211_set_multicast_to_unicast,
 };
index a0d901d8992ea892bbb44890e08d6f782c68b4e3..89178b46b32fab38f9c7dfc4359237ecd3fd9a8c 100644 (file)
@@ -1267,7 +1267,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
        struct ieee80211_sub_if_data *sdata, *sdata_tmp;
        struct ieee80211_chanctx *ctx, *ctx_tmp, *old_ctx;
        struct ieee80211_chanctx *new_ctx = NULL;
-       int i, err, n_assigned, n_reserved, n_ready;
+       int err, n_assigned, n_reserved, n_ready;
        int n_ctx = 0, n_vifs_switch = 0, n_vifs_assign = 0, n_vifs_ctxless = 0;
 
        lockdep_assert_held(&local->mtx);
@@ -1388,8 +1388,6 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
         * Update all structures, values and pointers to point to new channel
         * context(s).
         */
-
-       i = 0;
        list_for_each_entry(ctx, &local->chanctx_list, list) {
                if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
                        continue;
index e02ba42ca827501fdefc155d0ad71f2d4ffc2f36..5fae001f286ceb255f48236d297e3b05c6e8eac4 100644 (file)
@@ -243,6 +243,38 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
        return rv;
 }
 
+static ssize_t misc_read(struct file *file, char __user *user_buf,
+                        size_t count, loff_t *ppos)
+{
+       struct ieee80211_local *local = file->private_data;
+       /* Max len of each line is 16 characters, plus 9 for 'pending:\n' */
+       size_t bufsz = IEEE80211_MAX_QUEUES * 16 + 9;
+       char *buf;
+       char *pos, *end;
+       ssize_t rv;
+       int i;
+       int ln;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       pos = buf;
+       end = buf + bufsz - 1;
+
+       pos += scnprintf(pos, end - pos, "pending:\n");
+
+       for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
+               ln = skb_queue_len(&local->pending[i]);
+               pos += scnprintf(pos, end - pos, "[%i] %d\n",
+                                i, ln);
+       }
+
+       rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+       kfree(buf);
+       return rv;
+}
+
 static ssize_t queues_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
 {
@@ -263,6 +295,7 @@ static ssize_t queues_read(struct file *file, char __user *user_buf,
 
 DEBUGFS_READONLY_FILE_OPS(hwflags);
 DEBUGFS_READONLY_FILE_OPS(queues);
+DEBUGFS_READONLY_FILE_OPS(misc);
 
 /* statistics stuff */
 
@@ -330,7 +363,9 @@ void debugfs_hw_add(struct ieee80211_local *local)
 
        DEBUGFS_ADD(total_ps_buffered);
        DEBUGFS_ADD(wep_iv);
+       DEBUGFS_ADD(rate_ctrl_alg);
        DEBUGFS_ADD(queues);
+       DEBUGFS_ADD(misc);
 #ifdef CONFIG_PM
        DEBUGFS_ADD_MODE(reset, 0200);
 #endif
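Given the format strings above, reading the new debugfs file yields a header followed by one line per hardware queue; illustrative output (the queue lengths are invented):

pending:
[0] 0
[1] 3
[2] 0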
index 1a05f85cb1f0610b41ea212df2624a9078ea16cd..8f5fff8b20409e6ea5ed011fb1770b71f225fa1d 100644 (file)
@@ -519,6 +519,8 @@ static ssize_t ieee80211_if_fmt_aqm(
 }
 IEEE80211_IF_FILE_R(aqm);
 
+IEEE80211_IF_FILE(multicast_to_unicast, u.ap.multicast_to_unicast, HEX);
+
 /* IBSS attributes */
 static ssize_t ieee80211_if_fmt_tsf(
        const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
@@ -683,6 +685,7 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
        DEBUGFS_ADD(dtim_count);
        DEBUGFS_ADD(num_buffered_multicast);
        DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
+       DEBUGFS_ADD_MODE(multicast_to_unicast, 0600);
 }
 
 static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
index f6003b8c2c3335925d605d46d0072a8a96fed930..42601820db20e6c338f919dbc3c09481669d4f6e 100644 (file)
@@ -522,6 +522,7 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
                return;
 
        DEBUGFS_ADD(flags);
+       DEBUGFS_ADD(aid);
        DEBUGFS_ADD(num_ps_buf_frames);
        DEBUGFS_ADD(last_seq_ctrl);
        DEBUGFS_ADD(agg_status);
index 5c3af5eb405232167bbd62a13b4d1f37370d6bc0..3cfb1e2ab7ac7c183efe250f1a7096627b88e969 100644 (file)
@@ -9,66 +9,58 @@
 
 #include <crypto/aes.h>
 #include <crypto/algapi.h>
+#include <crypto/hash.h>
 #include <crypto/skcipher.h>
 
 #include "ieee80211_i.h"
 #include "aes_cmac.h"
 #include "fils_aead.h"
 
-static int aes_s2v(struct crypto_cipher *tfm,
+static void gf_mulx(u8 *pad)
+{
+       u64 a = get_unaligned_be64(pad);
+       u64 b = get_unaligned_be64(pad + 8);
+
+       put_unaligned_be64((a << 1) | (b >> 63), pad);
+       put_unaligned_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0), pad + 8);
+}
+
+static int aes_s2v(struct crypto_shash *tfm,
                   size_t num_elem, const u8 *addr[], size_t len[], u8 *v)
 {
-       u8 d[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE];
+       u8 d[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE] = {};
+       SHASH_DESC_ON_STACK(desc, tfm);
        size_t i;
-       const u8 *data[2];
-       size_t data_len[2], data_elems;
+
+       desc->tfm = tfm;
 
        /* D = AES-CMAC(K, <zero>) */
-       memset(tmp, 0, AES_BLOCK_SIZE);
-       data[0] = tmp;
-       data_len[0] = AES_BLOCK_SIZE;
-       aes_cmac_vector(tfm, 1, data, data_len, d, AES_BLOCK_SIZE);
+       crypto_shash_digest(desc, tmp, AES_BLOCK_SIZE, d);
 
        for (i = 0; i < num_elem - 1; i++) {
                /* D = dbl(D) xor AES_CMAC(K, Si) */
                gf_mulx(d); /* dbl */
-               aes_cmac_vector(tfm, 1, &addr[i], &len[i], tmp,
-                               AES_BLOCK_SIZE);
+               crypto_shash_digest(desc, addr[i], len[i], tmp);
                crypto_xor(d, tmp, AES_BLOCK_SIZE);
        }
 
+       crypto_shash_init(desc);
+
        if (len[i] >= AES_BLOCK_SIZE) {
                /* len(Sn) >= 128 */
-               size_t j;
-               const u8 *pos;
-
                /* T = Sn xorend D */
-
-               /* Use a temporary buffer to perform xorend on Sn (addr[i]) to
-                * avoid modifying the const input argument.
-                */
-               data[0] = addr[i];
-               data_len[0] = len[i] - AES_BLOCK_SIZE;
-               pos = addr[i] + data_len[0];
-               for (j = 0; j < AES_BLOCK_SIZE; j++)
-                       tmp[j] = pos[j] ^ d[j];
-               data[1] = tmp;
-               data_len[1] = AES_BLOCK_SIZE;
-               data_elems = 2;
+               crypto_shash_update(desc, addr[i], len[i] - AES_BLOCK_SIZE);
+               crypto_xor(d, addr[i] + len[i] - AES_BLOCK_SIZE,
+                          AES_BLOCK_SIZE);
        } else {
                /* len(Sn) < 128 */
                /* T = dbl(D) xor pad(Sn) */
                gf_mulx(d); /* dbl */
-               memset(tmp, 0, AES_BLOCK_SIZE);
-               memcpy(tmp, addr[i], len[i]);
-               tmp[len[i]] = 0x80;
-               crypto_xor(d, tmp, AES_BLOCK_SIZE);
-               data[0] = d;
-               data_len[0] = sizeof(d);
-               data_elems = 1;
+               crypto_xor(d, addr[i], len[i]);
+               d[len[i]] ^= 0x80;
        }
        /* V = AES-CMAC(K, T) */
-       aes_cmac_vector(tfm, data_elems, data, data_len, v, AES_BLOCK_SIZE);
+       crypto_shash_finup(desc, d, AES_BLOCK_SIZE, v);
 
        return 0;
 }
@@ -80,7 +72,7 @@ static int aes_siv_encrypt(const u8 *key, size_t key_len,
                           size_t len[], u8 *out)
 {
        u8 v[AES_BLOCK_SIZE];
-       struct crypto_cipher *tfm;
+       struct crypto_shash *tfm;
        struct crypto_skcipher *tfm2;
        struct skcipher_request *req;
        int res;
@@ -95,14 +87,14 @@ static int aes_siv_encrypt(const u8 *key, size_t key_len,
 
        /* S2V */
 
-       tfm = crypto_alloc_cipher("aes", 0, 0);
+       tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
        /* K1 for S2V */
-       res = crypto_cipher_setkey(tfm, key, key_len);
+       res = crypto_shash_setkey(tfm, key, key_len);
        if (!res)
                res = aes_s2v(tfm, num_elem, addr, len, v);
-       crypto_free_cipher(tfm);
+       crypto_free_shash(tfm);
        if (res)
                return res;
 
@@ -157,7 +149,7 @@ static int aes_siv_decrypt(const u8 *key, size_t key_len,
                           size_t num_elem, const u8 *addr[], size_t len[],
                           u8 *out)
 {
-       struct crypto_cipher *tfm;
+       struct crypto_shash *tfm;
        struct crypto_skcipher *tfm2;
        struct skcipher_request *req;
        struct scatterlist src[1], dst[1];
@@ -210,14 +202,14 @@ static int aes_siv_decrypt(const u8 *key, size_t key_len,
 
        /* S2V */
 
-       tfm = crypto_alloc_cipher("aes", 0, 0);
+       tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
        /* K1 for S2V */
-       res = crypto_cipher_setkey(tfm, key, key_len);
+       res = crypto_shash_setkey(tfm, key, key_len);
        if (!res)
                res = aes_s2v(tfm, num_elem, addr, len, check);
-       crypto_free_cipher(tfm);
+       crypto_free_shash(tfm);
        if (res)
                return res;
        if (memcmp(check, frame_iv, AES_BLOCK_SIZE) != 0)
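The 64-bit gf_mulx() above is the dbl() primitive from RFC 5297 S2V: shift the 128-bit block left by one bit and, if the top bit was set, fold in 0x87, the reduction constant of x^128 + x^7 + x^2 + x + 1. It is bit-for-bit equivalent to the byte-wise loop this merge removes from aes_cmac.c, reproduced here for reference:

static void dbl128(u8 pad[AES_BLOCK_SIZE])
{
        int i, carry = pad[0] & 0x80;

        /* shift the whole block left by one bit */
        for (i = 0; i < AES_BLOCK_SIZE - 1; i++)
                pad[i] = (pad[i] << 1) | (pad[i + 1] >> 7);
        pad[AES_BLOCK_SIZE - 1] <<= 1;
        /* reduce modulo x^128 + x^7 + x^2 + x + 1 */
        if (carry)
                pad[AES_BLOCK_SIZE - 1] ^= 0x87;
}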
index a31d30713d0897e7b41f71b444443abdd8c94645..98999d3d5262743cf32ef92480772f034e70baf1 100644 (file)
@@ -487,14 +487,14 @@ int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
        struct beacon_data *presp, *old_presp;
        struct cfg80211_bss *cbss;
        const struct cfg80211_bss_ies *ies;
-       u16 capability = 0;
+       u16 capability = WLAN_CAPABILITY_IBSS;
        u64 tsf;
        int ret = 0;
 
        sdata_assert_lock(sdata);
 
        if (ifibss->privacy)
-               capability = WLAN_CAPABILITY_PRIVACY;
+               capability |= WLAN_CAPABILITY_PRIVACY;
 
        cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan,
                                ifibss->bssid, ifibss->ssid,
index b2069fbd60f9e13a7888a3daed3fba789cd3593b..159a1a733725069417631f9c6386dc8e49450947 100644 (file)
@@ -297,6 +297,7 @@ struct ieee80211_if_ap {
                         driver_smps_mode; /* smps mode request */
 
        struct work_struct request_smps_work;
+       bool multicast_to_unicast;
 };
 
 struct ieee80211_if_wds {
@@ -624,8 +625,8 @@ struct ieee80211_mesh_sync_ops {
                             struct ieee80211_rx_status *rx_status);
 
        /* should be called with beacon_data under RCU read lock */
-       void (*adjust_tbtt)(struct ieee80211_sub_if_data *sdata,
-                           struct beacon_data *beacon);
+       void (*adjust_tsf)(struct ieee80211_sub_if_data *sdata,
+                          struct beacon_data *beacon);
        /* add other framework functions here */
 };
 
@@ -688,7 +689,6 @@ struct ieee80211_if_mesh {
        const struct ieee80211_mesh_sync_ops *sync_ops;
        s64 sync_offset_clockdrift_max;
        spinlock_t sync_offset_lock;
-       bool adjusting_tbtt;
        /* mesh power save */
        enum nl80211_mesh_power_mode nonpeer_pm;
        int ps_peers_light_sleep;
index d37ae7dc114b2c2eb5b8dc4773c6a688b047e0a2..40813dd3301c600978374e259953ca5d661022ce 100644 (file)
@@ -1123,7 +1123,7 @@ static u16 ieee80211_netdev_select_queue(struct net_device *dev,
        return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
 }
 
-static struct rtnl_link_stats64 *
+static void
 ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        int i;
@@ -1148,8 +1148,6 @@ ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
                stats->rx_bytes   += rx_bytes;
                stats->tx_bytes   += tx_bytes;
        }
-
-       return stats;
 }
 
 static const struct net_device_ops ieee80211_dataif_ops = {
index 4aa20cef08595955702cd0aa5746d2dde1e56647..ebdb80b85dc3aa3b12da1f8925df3552a51674b3 100644 (file)
@@ -93,7 +93,7 @@ struct ieee80211_key {
                } ccmp;
                struct {
                        u8 rx_pn[IEEE80211_CMAC_PN_LEN];
-                       struct crypto_cipher *tfm;
+                       struct crypto_shash *tfm;
                        u32 replays; /* dot11RSNAStatsCMACReplays */
                        u32 icverrors; /* dot11RSNAStatsCMACICVErrors */
                } aes_cmac;
index 50e1b7f78bd49605d2dbca4c215befecc1d8d001..c28b0af9c1f21735915433aecd632165a4a82580 100644 (file)
@@ -279,10 +279,6 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
        /* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
        *pos |= ifmsh->ps_peers_deep_sleep ?
                        IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00;
-       *pos++ |= ifmsh->adjusting_tbtt ?
-                       IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
-       *pos++ = 0x00;
-
        return 0;
 }
 
@@ -850,7 +846,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
        ifmsh->mesh_cc_id = 0;  /* Disabled */
        /* register sync ops from extensible synchronization framework */
        ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id);
-       ifmsh->adjusting_tbtt = false;
        ifmsh->sync_offset_clockdrift_max = 0;
        set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
        ieee80211_mesh_root_setup(ifmsh);
@@ -1349,7 +1344,7 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
                ieee80211_mesh_rootpath(sdata);
 
        if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags))
-               mesh_sync_adjust_tbtt(sdata);
+               mesh_sync_adjust_tsf(sdata);
 
        if (test_and_clear_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags))
                mesh_bss_info_changed(sdata);
index 26b9ccbe1fce7cc5125c84419cf47e60c0201d55..7e5f271e3c30dbf960b78aecb87afeaff9d347c3 100644 (file)
@@ -341,7 +341,7 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
 }
 
 void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
-void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
+void mesh_sync_adjust_tsf(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_stop(void);
 #else
 static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
index 7fcdcf622655286b01eb965acea7d631faf43c24..fcba70e57073f372793d69d247e78ea11f8c2597 100644 (file)
@@ -505,12 +505,14 @@ mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
 
        /* Userspace handles station allocation */
        if (sdata->u.mesh.user_mpm ||
-           sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
-               cfg80211_notify_new_peer_candidate(sdata->dev, addr,
-                                                  elems->ie_start,
-                                                  elems->total_len,
-                                                  GFP_KERNEL);
-       else
+           sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
+               if (mesh_peer_accepts_plinks(elems) &&
+                   mesh_plink_availables(sdata))
+                       cfg80211_notify_new_peer_candidate(sdata->dev, addr,
+                                                          elems->ie_start,
+                                                          elems->total_len,
+                                                          GFP_KERNEL);
+       } else
                sta = __mesh_sta_info_alloc(sdata, addr);
 
        return sta;
index faca22cd02b59cf9c79e20307fe47f36a562ad60..a435f094a82e30ef1494342463556c0b669ddd00 100644 (file)
@@ -12,7 +12,7 @@
 #include "mesh.h"
 #include "driver-ops.h"
 
-/* This is not in the standard.  It represents a tolerable tbtt drift below
+/* This is not in the standard.  It represents a tolerable tsf drift below
  * which we do no TSF adjustment.
  */
 #define TOFFSET_MINIMUM_ADJUSTMENT 10
@@ -46,7 +46,7 @@ static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie)
                        IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING) != 0;
 }
 
-void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
+void mesh_sync_adjust_tsf(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
@@ -57,12 +57,12 @@ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
 
        spin_lock_bh(&ifmsh->sync_offset_lock);
        if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) {
-               msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting\n",
+               msync_dbg(sdata, "TSF : max clockdrift=%lld; adjusting\n",
                          (long long) ifmsh->sync_offset_clockdrift_max);
                tsfdelta = -ifmsh->sync_offset_clockdrift_max;
                ifmsh->sync_offset_clockdrift_max = 0;
        } else {
-               msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting by %llu\n",
+               msync_dbg(sdata, "TSF : max clockdrift=%lld; adjusting by %llu\n",
                          (long long) ifmsh->sync_offset_clockdrift_max,
                          (unsigned long long) beacon_int_fraction);
                tsfdelta = -beacon_int_fraction;
@@ -123,7 +123,6 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
         */
 
        if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
-               clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
                msync_dbg(sdata, "STA %pM : is adjusting TBTT\n",
                          sta->sta.addr);
                goto no_sync;
@@ -168,15 +167,13 @@ no_sync:
        rcu_read_unlock();
 }
 
-static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
+static void mesh_sync_offset_adjust_tsf(struct ieee80211_sub_if_data *sdata,
                                         struct beacon_data *beacon)
 {
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-       u8 cap;
 
        WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
        WARN_ON(!rcu_read_lock_held());
-       cap = beacon->meshconf->meshconf_cap;
 
        spin_lock_bh(&ifmsh->sync_offset_lock);
 
@@ -187,24 +184,16 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
                 * the tsf adjustment to the mesh tasklet
                 */
                msync_dbg(sdata,
-                         "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n",
+                         "TSF : kicking off TSF adjustment with clockdrift_max=%lld\n",
                          ifmsh->sync_offset_clockdrift_max);
                set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags);
-
-               ifmsh->adjusting_tbtt = true;
        } else {
                msync_dbg(sdata,
-                         "TBTT : max clockdrift=%lld; too small to adjust\n",
+                         "TSF : max clockdrift=%lld; too small to adjust\n",
                          (long long)ifmsh->sync_offset_clockdrift_max);
                ifmsh->sync_offset_clockdrift_max = 0;
-
-               ifmsh->adjusting_tbtt = false;
        }
        spin_unlock_bh(&ifmsh->sync_offset_lock);
-
-       beacon->meshconf->meshconf_cap = ifmsh->adjusting_tbtt ?
-                       IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING | cap :
-                       ~IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING & cap;
 }
 
 static const struct sync_method sync_methods[] = {
@@ -212,7 +201,7 @@ static const struct sync_method sync_methods[] = {
                .method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
                .ops = {
                        .rx_bcn_presp = &mesh_sync_offset_rx_bcn_presp,
-                       .adjust_tbtt = &mesh_sync_offset_adjust_tbtt,
+                       .adjust_tsf = &mesh_sync_offset_adjust_tsf,
                }
        },
 };
index 098ce9b179ee3753410eda69b9267c5a34b7b9b7..6e90301154d5a6ba4a7e9bb612ac8243cf09a23c 100644 (file)
@@ -1486,10 +1486,6 @@ void ieee80211_recalc_ps(struct ieee80211_local *local)
 
        if (count == 1 && ieee80211_powersave_allowed(found)) {
                u8 dtimper = found->u.mgd.dtim_period;
-               s32 beaconint_us;
-
-               beaconint_us = ieee80211_tu_to_usec(
-                                       found->vif.bss_conf.beacon_int);
 
                timeout = local->dynamic_ps_forced_timeout;
                if (timeout < 0)
@@ -3423,14 +3419,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                        ieee80211_cqm_rssi_notify(
                                &sdata->vif,
                                NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
-                               GFP_KERNEL);
+                               sig, GFP_KERNEL);
                } else if (sig > thold &&
                           (last_event == 0 || sig > last_event + hyst)) {
                        ifmgd->last_cqm_event_signal = sig;
                        ieee80211_cqm_rssi_notify(
                                &sdata->vif,
                                NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
-                               GFP_KERNEL);
+                               sig, GFP_KERNEL);
                }
        }
 
@@ -5045,13 +5041,14 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
 
 void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
                               enum nl80211_cqm_rssi_threshold_event rssi_event,
+                              s32 rssi_level,
                               gfp_t gfp)
 {
        struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 
-       trace_api_cqm_rssi_notify(sdata, rssi_event);
+       trace_api_cqm_rssi_notify(sdata, rssi_event, rssi_level);
 
-       cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp);
+       cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, rssi_level, gfp);
 }
 EXPORT_SYMBOL(ieee80211_cqm_rssi_notify);
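Drivers now report the measured signal level alongside the threshold event; a hypothetical call site under the new signature (the -70 dBm figure is illustrative):

ieee80211_cqm_rssi_notify(vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
                          -70, GFP_ATOMIC);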
 
index 14c5ba3a1b1c6c3b139729f8921ec01ef1b03d3d..3ebe4405a2d43d66f76bdef3fd4f10cd3fa82b8f 100644 (file)
@@ -159,21 +159,23 @@ minstrel_update_rates(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
 void
 minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs)
 {
+       unsigned int cur_prob;
+
        if (unlikely(mrs->attempts > 0)) {
                mrs->sample_skipped = 0;
-               mrs->cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
+               cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
                if (unlikely(!mrs->att_hist)) {
-                       mrs->prob_ewma = mrs->cur_prob;
+                       mrs->prob_ewma = cur_prob;
                } else {
                        /* update exponential weighted moving variance */
-                       mrs->prob_ewmsd = minstrel_ewmsd(mrs->prob_ewmsd,
-                                                        mrs->cur_prob,
-                                                        mrs->prob_ewma,
-                                                        EWMA_LEVEL);
+                       mrs->prob_ewmv = minstrel_ewmv(mrs->prob_ewmv,
+                                                       cur_prob,
+                                                       mrs->prob_ewma,
+                                                       EWMA_LEVEL);
 
                        /* update exponential weighted moving average */
                        mrs->prob_ewma = minstrel_ewma(mrs->prob_ewma,
-                                                      mrs->cur_prob,
+                                                      cur_prob,
                                                       EWMA_LEVEL);
                }
                mrs->att_hist += mrs->attempts;
@@ -365,6 +367,11 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
                return;
 #endif
 
+       /* Don't use EAPOL frames for sampling on non-mrr hw */
+       if (mp->hw->max_rates == 1 &&
+           (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
+               return;
+
        delta = (mi->total_packets * sampling_ratio / 100) -
                        (mi->sample_packets + mi->sample_deferred / 2);
 
index c230bbe93262b0affc476c01b8d1db9ab7dfe054..be6c3f35f48b30b2cb17a2ceb398fe37f6f593f3 100644 (file)
@@ -14,7 +14,7 @@
 #define SAMPLE_COLUMNS 10      /* number of columns in sample table */
 
 /* scaled fraction values */
-#define MINSTREL_SCALE  16
+#define MINSTREL_SCALE  12
 #define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / div)
 #define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE)
 
@@ -36,21 +36,16 @@ minstrel_ewma(int old, int new, int weight)
 }
 
 /*
- * Perform EWMSD (Exponentially Weighted Moving Standard Deviation) calculation
+ * Perform EWMV (Exponentially Weighted Moving Variance) calculation
  */
 static inline int
-minstrel_ewmsd(int old_ewmsd, int cur_prob, int prob_ewma, int weight)
+minstrel_ewmv(int old_ewmv, int cur_prob, int prob_ewma, int weight)
 {
-       int diff, incr, tmp_var;
+       int diff, incr;
 
-       /* calculate exponential weighted moving variance */
-       diff = MINSTREL_TRUNC((cur_prob - prob_ewma) * 1000000);
+       diff = cur_prob - prob_ewma;
        incr = (EWMA_DIV - weight) * diff / EWMA_DIV;
-       tmp_var = old_ewmsd * old_ewmsd;
-       tmp_var = weight * (tmp_var + diff * incr / 1000000) / EWMA_DIV;
-
-       /* return standard deviation */
-       return (u16) int_sqrt(tmp_var);
+       return weight * (old_ewmv + MINSTREL_TRUNC(diff * incr)) / EWMA_DIV;
 }
 
 struct minstrel_rate_stats {
@@ -59,15 +54,13 @@ struct minstrel_rate_stats {
        u16 success, last_success;
 
        /* total attempts/success counters */
-       u64 att_hist, succ_hist;
+       u32 att_hist, succ_hist;
 
        /* statistics of packet delivery probability
-        *  cur_prob  - current prob within last update intervall
         *  prob_ewma - exponential weighted moving average of prob
         *  prob_ewmv - exp. weighted moving variance of prob */
-       unsigned int cur_prob;
-       unsigned int prob_ewma;
-       u16 prob_ewmsd;
+       u16 prob_ewma;
+       u16 prob_ewmv;
 
        /* maximum retry counts */
        u8 retry_count;
@@ -153,6 +146,14 @@ struct minstrel_debugfs_info {
        char buf[];
 };
 
+/* Get EWMSD (Exponentially Weighted Moving Standard Deviation) * 10 */
+static inline int
+minstrel_get_ewmsd10(struct minstrel_rate_stats *mrs)
+{
+       unsigned int ewmv = mrs->prob_ewmv;
+       return int_sqrt(MINSTREL_TRUNC(ewmv * 1000 * 1000));
+}
+
 extern const struct rate_control_ops mac80211_minstrel;
 void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
 void minstrel_remove_sta_debugfs(void *priv, void *priv_sta);
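A quick sanity check of the new fixed-point scheme (my arithmetic, not from the patch): with MINSTREL_SCALE at 12, a probability of 1.0 is stored as 4096, and the product diff * incr keeps one residual scale factor after MINSTREL_TRUNC, so prob_ewmv is roughly the true variance times 4096. A standard deviation of 10% means a variance of 0.01, hence ewmv ~= 0.01 * 4096 ~= 41, and minstrel_get_ewmsd10() returns int_sqrt((41 * 1000 * 1000) >> 12) ~= int_sqrt(10009) ~= 100, which the debugfs code prints as 10.0.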
index 820b0abc9c0d6f7662cf7a98cc497b310d1281e3..36fc971deb860e5ee3b3e70e9de2b5fd91f7ea95 100644 (file)
@@ -75,7 +75,7 @@ minstrel_stats_open(struct inode *inode, struct file *file)
 {
        struct minstrel_sta_info *mi = inode->i_private;
        struct minstrel_debugfs_info *ms;
-       unsigned int i, tp_max, tp_avg, prob, eprob;
+       unsigned int i, tp_max, tp_avg, eprob;
        char *p;
 
        ms = kmalloc(2048, GFP_KERNEL);
@@ -86,13 +86,14 @@ minstrel_stats_open(struct inode *inode, struct file *file)
        p = ms->buf;
        p += sprintf(p, "\n");
        p += sprintf(p,
-                    "best   __________rate_________    ________statistics________    ________last_______    ______sum-of________\n");
+                    "best   __________rate_________    ________statistics________    ____last_____    ______sum-of________\n");
        p += sprintf(p,
-                    "rate  [name idx airtime max_tp]  [avg(tp) avg(prob) sd(prob)]  [prob.|retry|suc|att]  [#success | #attempts]\n");
+                    "rate  [name idx airtime max_tp]  [avg(tp) avg(prob) sd(prob)]  [retry|suc|att]  [#success | #attempts]\n");
 
        for (i = 0; i < mi->n_rates; i++) {
                struct minstrel_rate *mr = &mi->r[i];
                struct minstrel_rate_stats *mrs = &mi->r[i].stats;
+               unsigned int prob_ewmsd;
 
                *(p++) = (i == mi->max_tp_rate[0]) ? 'A' : ' ';
                *(p++) = (i == mi->max_tp_rate[1]) ? 'B' : ' ';
@@ -107,17 +108,16 @@ minstrel_stats_open(struct inode *inode, struct file *file)
 
                tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
                tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
-               prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
                eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+               prob_ewmsd = minstrel_get_ewmsd10(mrs);
 
                p += sprintf(p, "%4u.%1u    %4u.%1u     %3u.%1u    %3u.%1u"
-                               "     %3u.%1u %3u   %3u %-3u   "
+                               "     %3u   %3u %-3u   "
                                "%9llu   %-9llu\n",
                                tp_max / 10, tp_max % 10,
                                tp_avg / 10, tp_avg % 10,
                                eprob / 10, eprob % 10,
-                               mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10,
-                               prob / 10, prob % 10,
+                               prob_ewmsd / 10, prob_ewmsd % 10,
                                mrs->retry_count,
                                mrs->last_success,
                                mrs->last_attempts,
@@ -148,7 +148,7 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file)
 {
        struct minstrel_sta_info *mi = inode->i_private;
        struct minstrel_debugfs_info *ms;
-       unsigned int i, tp_max, tp_avg, prob, eprob;
+       unsigned int i, tp_max, tp_avg, eprob;
        char *p;
 
        ms = kmalloc(2048, GFP_KERNEL);
@@ -161,6 +161,7 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file)
        for (i = 0; i < mi->n_rates; i++) {
                struct minstrel_rate *mr = &mi->r[i];
                struct minstrel_rate_stats *mrs = &mi->r[i].stats;
+               unsigned int prob_ewmsd;
 
                p += sprintf(p, "%s" ,((i == mi->max_tp_rate[0]) ? "A" : ""));
                p += sprintf(p, "%s" ,((i == mi->max_tp_rate[1]) ? "B" : ""));
@@ -175,16 +176,15 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file)
 
                tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
                tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
-               prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
                eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+               prob_ewmsd = minstrel_get_ewmsd10(mrs);
 
-               p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,%u,"
+               p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,%u,"
                                "%llu,%llu,%d,%d\n",
                                tp_max / 10, tp_max % 10,
                                tp_avg / 10, tp_avg % 10,
                                eprob / 10, eprob % 10,
-                               mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10,
-                               prob / 10, prob % 10,
+                               prob_ewmsd / 10, prob_ewmsd % 10,
                                mrs->retry_count,
                                mrs->last_success,
                                mrs->last_attempts,
index 30fbabf4bcbc16aeb93e9c673d190cd3b615988b..8e783e197e935cd8bcd0b7b7d5e88b8d2c9210cc 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/ieee80211.h>
 #include <net/mac80211.h>
 #include "rate.h"
+#include "sta_info.h"
 #include "rc80211_minstrel.h"
 #include "rc80211_minstrel_ht.h"
 
@@ -154,67 +155,47 @@ MODULE_PARM_DESC(minstrel_vht_only,
 const struct mcs_group minstrel_mcs_groups[] = {
        MCS_GROUP(1, 0, BW_20),
        MCS_GROUP(2, 0, BW_20),
-#if MINSTREL_MAX_STREAMS >= 3
        MCS_GROUP(3, 0, BW_20),
-#endif
 
        MCS_GROUP(1, 1, BW_20),
        MCS_GROUP(2, 1, BW_20),
-#if MINSTREL_MAX_STREAMS >= 3
        MCS_GROUP(3, 1, BW_20),
-#endif
 
        MCS_GROUP(1, 0, BW_40),
        MCS_GROUP(2, 0, BW_40),
-#if MINSTREL_MAX_STREAMS >= 3
        MCS_GROUP(3, 0, BW_40),
-#endif
 
        MCS_GROUP(1, 1, BW_40),
        MCS_GROUP(2, 1, BW_40),
-#if MINSTREL_MAX_STREAMS >= 3
        MCS_GROUP(3, 1, BW_40),
-#endif
 
        CCK_GROUP,
 
 #ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
        VHT_GROUP(1, 0, BW_20),
        VHT_GROUP(2, 0, BW_20),
-#if MINSTREL_MAX_STREAMS >= 3
        VHT_GROUP(3, 0, BW_20),
-#endif
 
        VHT_GROUP(1, 1, BW_20),
        VHT_GROUP(2, 1, BW_20),
-#if MINSTREL_MAX_STREAMS >= 3
        VHT_GROUP(3, 1, BW_20),
-#endif
 
        VHT_GROUP(1, 0, BW_40),
        VHT_GROUP(2, 0, BW_40),
-#if MINSTREL_MAX_STREAMS >= 3
        VHT_GROUP(3, 0, BW_40),
-#endif
 
        VHT_GROUP(1, 1, BW_40),
        VHT_GROUP(2, 1, BW_40),
-#if MINSTREL_MAX_STREAMS >= 3
        VHT_GROUP(3, 1, BW_40),
-#endif
 
        VHT_GROUP(1, 0, BW_80),
        VHT_GROUP(2, 0, BW_80),
-#if MINSTREL_MAX_STREAMS >= 3
        VHT_GROUP(3, 0, BW_80),
-#endif
 
        VHT_GROUP(1, 1, BW_80),
        VHT_GROUP(2, 1, BW_80),
-#if MINSTREL_MAX_STREAMS >= 3
        VHT_GROUP(3, 1, BW_80),
 #endif
-#endif
 };
 
 static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;
@@ -301,7 +282,7 @@ minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                                break;
 
                /* short preamble */
-               if (!(mi->groups[group].supported & BIT(idx)))
+               if (!(mi->supported[group] & BIT(idx)))
                        idx += 4;
        }
        return &mi->groups[group].rates[idx];
@@ -486,7 +467,7 @@ minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
                          MCS_GROUP_RATES].streams;
        for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
                mg = &mi->groups[group];
-               if (!mg->supported || group == MINSTREL_CCK_GROUP)
+               if (!mi->supported[group] || group == MINSTREL_CCK_GROUP)
                        continue;
 
                tmp_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
@@ -540,7 +521,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
        for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
 
                mg = &mi->groups[group];
-               if (!mg->supported)
+               if (!mi->supported[group])
                        continue;
 
                mi->sample_count++;
@@ -550,7 +531,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
                        tmp_group_tp_rate[j] = group;
 
                for (i = 0; i < MCS_GROUP_RATES; i++) {
-                       if (!(mg->supported & BIT(i)))
+                       if (!(mi->supported[group] & BIT(i)))
                                continue;
 
                        index = MCS_GROUP_RATES * group + i;
@@ -636,7 +617,7 @@ minstrel_set_next_sample_idx(struct minstrel_ht_sta *mi)
                mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups);
                mg = &mi->groups[mi->sample_group];
 
-               if (!mg->supported)
+               if (!mi->supported[mi->sample_group])
                        continue;
 
                if (++mg->index >= MCS_GROUP_RATES) {
@@ -657,7 +638,7 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary)
        while (group > 0) {
                group--;
 
-               if (!mi->groups[group].supported)
+               if (!mi->supported[group])
                        continue;
 
                if (minstrel_mcs_groups[group].streams >
@@ -994,7 +975,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
        sample_idx = sample_table[mg->column][mg->index];
        minstrel_set_next_sample_idx(mi);
 
-       if (!(mg->supported & BIT(sample_idx)))
+       if (!(mi->supported[sample_group] & BIT(sample_idx)))
                return -1;
 
        mrs = &mg->rates[sample_idx];
@@ -1048,22 +1029,6 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
        return sample_idx;
 }
 
-static void
-minstrel_ht_check_cck_shortpreamble(struct minstrel_priv *mp,
-                                   struct minstrel_ht_sta *mi, bool val)
-{
-       u8 supported = mi->groups[MINSTREL_CCK_GROUP].supported;
-
-       if (!supported || !mi->cck_supported_short)
-               return;
-
-       if (supported & (mi->cck_supported_short << (val * 4)))
-               return;
-
-       supported ^= mi->cck_supported_short | (mi->cck_supported_short << 4);
-       mi->groups[MINSTREL_CCK_GROUP].supported = supported;
-}
-
 static void
 minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                      struct ieee80211_tx_rate_control *txrc)
@@ -1087,7 +1052,6 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
                minstrel_aggr_check(sta, txrc->skb);
 
        info->flags |= mi->tx_flags;
-       minstrel_ht_check_cck_shortpreamble(mp, mi, txrc->short_preamble);
 
 #ifdef CONFIG_MAC80211_DEBUGFS
        if (mp->fixed_rate_idx != -1)
@@ -1154,7 +1118,7 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
                        mi->cck_supported_short |= BIT(i);
        }
 
-       mi->groups[MINSTREL_CCK_GROUP].supported = mi->cck_supported;
+       mi->supported[MINSTREL_CCK_GROUP] = mi->cck_supported;
 }
 
 static void
@@ -1168,6 +1132,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
        struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
        u16 sta_cap = sta->ht_cap.cap;
        struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+       struct sta_info *sinfo = container_of(sta, struct sta_info, sta);
        int use_vht;
        int n_supported = 0;
        int ack_dur;
@@ -1224,7 +1189,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
                u32 gflags = minstrel_mcs_groups[i].flags;
                int bw, nss;
 
-               mi->groups[i].supported = 0;
+               mi->supported[i] = 0;
                if (i == MINSTREL_CCK_GROUP) {
                        minstrel_ht_update_cck(mp, mi, sband, sta);
                        continue;
@@ -1256,8 +1221,8 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
                        if (use_vht && minstrel_vht_only)
                                continue;
 #endif
-                       mi->groups[i].supported = mcs->rx_mask[nss - 1];
-                       if (mi->groups[i].supported)
+                       mi->supported[i] = mcs->rx_mask[nss - 1];
+                       if (mi->supported[i])
                                n_supported++;
                        continue;
                }
@@ -1283,16 +1248,19 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
                else
                        bw = BW_20;
 
-               mi->groups[i].supported = minstrel_get_valid_vht_rates(bw, nss,
+               mi->supported[i] = minstrel_get_valid_vht_rates(bw, nss,
                                vht_cap->vht_mcs.tx_mcs_map);
 
-               if (mi->groups[i].supported)
+               if (mi->supported[i])
                        n_supported++;
        }
 
        if (!n_supported)
                goto use_legacy;
 
+       if (test_sta_flag(sinfo, WLAN_STA_SHORT_PREAMBLE))
+               mi->cck_supported_short |= mi->cck_supported_short << 4;
+
        /* create an initial rate table with the lowest supported rates */
        minstrel_ht_update_stats(mp, mi);
        minstrel_ht_update_rates(mp, mi);
index e8b52a94d24b64fd3e78b4d3a390c3dbdad9e328..de1646c42e82df72f883f47daa8b2a696746e7f0 100644 (file)
@@ -52,9 +52,6 @@ struct minstrel_mcs_group_data {
        u8 index;
        u8 column;
 
-       /* bitfield of supported MCS rates of this group */
-       u16 supported;
-
        /* sorted rate set within a MCS group*/
        u16 max_group_tp_rate[MAX_THR_RATES];
        u16 max_group_prob_rate;
@@ -101,6 +98,9 @@ struct minstrel_ht_sta {
        u8 cck_supported;
        u8 cck_supported_short;
 
+       /* Bitfield of supported MCS rates of all groups */
+       u16 supported[MINSTREL_GROUPS_NB];
+
        /* MCS rate group info and statistics */
        struct minstrel_mcs_group_data groups[MINSTREL_GROUPS_NB];
 };
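Pulling the per-group supported-rate masks out of minstrel_mcs_group_data into one flat u16 array keeps them in a single contiguous block, which is presumably kinder to the cache in the common pattern of scanning every group; the loop shape used throughout the updated code is:

/* typical scan after this change: one small array drives the loop */
for (group = 0; group < MINSTREL_GROUPS_NB; group++) {
        if (!mi->supported[group])
                continue;
        /* ... per-group work on mi->groups[group] ... */
}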
index 5320e35ed3d0f2dc899176b9e569b59877de9e38..7d969e300fb3a48742e841eb0140aba03b5de071 100644 (file)
@@ -19,12 +19,12 @@ static char *
 minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
 {
        const struct mcs_group *mg;
-       unsigned int j, tp_max, tp_avg, prob, eprob, tx_time;
+       unsigned int j, tp_max, tp_avg, eprob, tx_time;
        char htmode = '2';
        char gimode = 'L';
        u32 gflags;
 
-       if (!mi->groups[i].supported)
+       if (!mi->supported[i])
                return p;
 
        mg = &minstrel_mcs_groups[i];
@@ -41,8 +41,9 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
                struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j];
                static const int bitrates[4] = { 10, 20, 55, 110 };
                int idx = i * MCS_GROUP_RATES + j;
+               unsigned int prob_ewmsd;
 
-               if (!(mi->groups[i].supported & BIT(j)))
+               if (!(mi->supported[i] & BIT(j)))
                        continue;
 
                if (gflags & IEEE80211_TX_RC_MCS) {
@@ -83,17 +84,16 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
 
                tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
                tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
-               prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
                eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+               prob_ewmsd = minstrel_get_ewmsd10(mrs);
 
                p += sprintf(p, "%4u.%1u    %4u.%1u     %3u.%1u    %3u.%1u"
-                               "     %3u.%1u %3u   %3u %-3u   "
+                               "     %3u   %3u %-3u   "
                                "%9llu   %-9llu\n",
                                tp_max / 10, tp_max % 10,
                                tp_avg / 10, tp_avg % 10,
                                eprob / 10, eprob % 10,
-                               mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10,
-                               prob / 10, prob % 10,
+                               prob_ewmsd / 10, prob_ewmsd % 10,
                                mrs->retry_count,
                                mrs->last_success,
                                mrs->last_attempts,
@@ -130,9 +130,9 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
 
        p += sprintf(p, "\n");
        p += sprintf(p,
-                    "              best   ____________rate__________    ________statistics________    ________last_______    ______sum-of________\n");
+                    "              best   ____________rate__________    ________statistics________    _____last____    ______sum-of________\n");
        p += sprintf(p,
-                    "mode guard #  rate  [name   idx airtime  max_tp]  [avg(tp) avg(prob) sd(prob)]  [prob.|retry|suc|att]  [#success | #attempts]\n");
+                    "mode guard #  rate  [name   idx airtime  max_tp]  [avg(tp) avg(prob) sd(prob)]  [retry|suc|att]  [#success | #attempts]\n");
 
        p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p);
        for (i = 0; i < MINSTREL_CCK_GROUP; i++)
@@ -165,12 +165,12 @@ static char *
 minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p)
 {
        const struct mcs_group *mg;
-       unsigned int j, tp_max, tp_avg, prob, eprob, tx_time;
+       unsigned int j, tp_max, tp_avg, eprob, tx_time;
        char htmode = '2';
        char gimode = 'L';
        u32 gflags;
 
-       if (!mi->groups[i].supported)
+       if (!mi->supported[i])
                return p;
 
        mg = &minstrel_mcs_groups[i];
@@ -187,8 +187,9 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p)
                struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j];
                static const int bitrates[4] = { 10, 20, 55, 110 };
                int idx = i * MCS_GROUP_RATES + j;
+               unsigned int prob_ewmsd;
 
-               if (!(mi->groups[i].supported & BIT(j)))
+               if (!(mi->supported[i] & BIT(j)))
                        continue;
 
                if (gflags & IEEE80211_TX_RC_MCS) {
@@ -226,16 +227,15 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p)
 
                tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
                tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
-               prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
                eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+               prob_ewmsd = minstrel_get_ewmsd10(mrs);
 
-               p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,"
+               p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,"
                                "%u,%llu,%llu,",
                                tp_max / 10, tp_max % 10,
                                tp_avg / 10, tp_avg % 10,
                                eprob / 10, eprob % 10,
-                               mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10,
-                               prob / 10, prob % 10,
+                               prob_ewmsd / 10, prob_ewmsd % 10,
                                mrs->retry_count,
                                mrs->last_success,
                                mrs->last_attempts,
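
The two hunks above drop the cached mrs->prob_ewmsd field and the redundant cur_prob column in favour of an on-demand minstrel_get_ewmsd10() helper, which by its name returns the moving standard deviation of the success probability scaled to tenths of a percent. A standalone sketch of the underlying statistic, using an illustrative weight rather than minstrel's actual EWMA constants:

#include <math.h>
#include <stdio.h>

struct ewma_state {
	double avg;	/* moving average of the success probability */
	double var;	/* moving variance of the same series */
};

/* standard incremental EWMA/EW-variance update, weight in (0, 1) */
static void ewma_add(struct ewma_state *s, double sample, double weight)
{
	double diff = sample - s->avg;

	s->avg += (1.0 - weight) * diff;
	s->var = weight * (s->var + (1.0 - weight) * diff * diff);
}

/* standard deviation as percent times ten, matching the "%3u.%1u" output */
static unsigned int get_ewmsd10(const struct ewma_state *s)
{
	return (unsigned int)(sqrt(s->var) * 1000.0);
}

int main(void)
{
	struct ewma_state s = { .avg = 0.5, .var = 0.0 };
	const double samples[] = { 0.9, 0.8, 1.0, 0.4, 0.7 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		ewma_add(&s, samples[i], 0.75);

	printf("avg(prob)=%.3f sd(prob)=%u.%u%%\n", s.avg,
	       get_ewmsd10(&s) / 10, get_ewmsd10(&s) % 10);
	return 0;
}
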
index 3090dd4342f6eee6f2d3bdba67493849622ac1a7..50ca3828b1242edb74f2835898cbe5f6d1dce975 100644 (file)
@@ -1391,7 +1391,7 @@ EXPORT_SYMBOL(ieee80211_sta_pspoll);
 void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
 {
        struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
-       u8 ac = ieee802_1d_to_ac[tid & 7];
+       int ac = ieee80211_ac_from_tid(tid);
 
        /*
         * If this AC is not trigger-enabled do nothing unless the
@@ -1908,7 +1908,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
        unsigned int frag, seq;
        struct ieee80211_fragment_entry *entry;
        struct sk_buff *skb;
-       struct ieee80211_rx_status *status;
 
        hdr = (struct ieee80211_hdr *)rx->skb->data;
        fc = hdr->frame_control;
@@ -2034,9 +2033,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
                dev_kfree_skb(skb);
        }
 
-       /* Complete frame has been reassembled - process it now */
-       status = IEEE80211_SKB_RXCB(rx->skb);
-
  out:
        ieee80211_led_rx(rx->local);
  out_no_led:
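
The uapsd_trigger hunk above, and matching hunks in status.c and tx.c below, replace the open-coded ieee802_1d_to_ac[tid & 7] lookup with an ieee80211_ac_from_tid() helper that presumably wraps the same table. A self-contained sketch of the 802.1D priority to access category mapping; the enum ordering here is illustrative, not mac80211's:

#include <stdio.h>

enum ac { AC_VO, AC_VI, AC_BE, AC_BK };	/* voice, video, best effort, background */

/* 802.1D priority (TID 0..7) to 802.11 access category */
static const enum ac tid_to_ac[8] = {
	AC_BE,	/* TID 0 */
	AC_BK,	/* TID 1 */
	AC_BK,	/* TID 2 */
	AC_BE,	/* TID 3 */
	AC_VI,	/* TID 4 */
	AC_VI,	/* TID 5 */
	AC_VO,	/* TID 6 */
	AC_VO,	/* TID 7 */
};

static inline enum ac ac_from_tid(int tid)
{
	return tid_to_ac[tid & 7];	/* mask keeps any TID in range */
}

int main(void)
{
	for (int tid = 0; tid < 8; tid++)
		printf("TID %d -> AC %d\n", tid, ac_from_tid(tid));
	return 0;
}
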
index 23d8ac8292796714df893e32072e44edb3617ca0..faab3c490d2b755f32ee09638eaf0d45067780cd 100644 (file)
@@ -1120,7 +1120,6 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
        u32 rate_masks[NUM_NL80211_BANDS] = {};
        u8 bands_used = 0;
        u8 *ie;
-       size_t len;
 
        iebufsz = local->scan_ies_len + req->ie_len;
 
@@ -1145,10 +1144,9 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
 
        ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
 
-       len = ieee80211_build_preq_ies(local, ie, num_bands * iebufsz,
-                                      &sched_scan_ies, req->ie,
-                                      req->ie_len, bands_used,
-                                      rate_masks, &chandef);
+       ieee80211_build_preq_ies(local, ie, num_bands * iebufsz,
+                                &sched_scan_ies, req->ie,
+                                req->ie_len, bands_used, rate_masks, &chandef);
 
        ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
        if (ret == 0) {
index 50c309094c37ba5e73160613f6e316c0a6a161f2..4774e663a4112f7793a327d3680510568e10ccb8 100644 (file)
@@ -513,23 +513,23 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 {
        struct ieee80211_local *local = sta->local;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       struct station_info *sinfo;
+       struct station_info *sinfo = NULL;
        int err = 0;
 
        lockdep_assert_held(&local->sta_mtx);
 
-       sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
-       if (!sinfo) {
-               err = -ENOMEM;
-               goto out_err;
-       }
-
        /* check if STA exists already */
        if (sta_info_get_bss(sdata, sta->sta.addr)) {
                err = -EEXIST;
                goto out_err;
        }
 
+       sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
+       if (!sinfo) {
+               err = -ENOMEM;
+               goto out_err;
+       }
+
        local->num_sta++;
        local->sta_generation++;
        smp_mb();
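
Reordering the -EEXIST check ahead of the allocation above means the duplicate-station case, which only needs a lookup, no longer allocates a station_info it would immediately have to free. The check-then-allocate shape in miniature, with stand-in types:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct station { unsigned char addr[6]; };

/* stand-in for sta_info_get_bss(); pretend nothing exists yet */
static int station_exists(const unsigned char *addr)
{
	(void)addr;
	return 0;
}

static int insert_station(const unsigned char *addr, struct station **out)
{
	struct station *s;

	if (station_exists(addr))	/* cheap failure first ... */
		return -EEXIST;

	s = calloc(1, sizeof(*s));	/* ... allocation second */
	if (!s)
		return -ENOMEM;

	memcpy(s->addr, addr, sizeof(s->addr));
	*out = s;
	return 0;
}

int main(void)
{
	struct station *s;
	unsigned char addr[6] = { 2, 0, 0, 0, 0, 1 };
	int err = insert_station(addr, &s);

	if (!err)
		free(s);
	return err;
}
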
@@ -2051,16 +2051,12 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        struct ieee80211_local *local = sdata->local;
-       struct rate_control_ref *ref = NULL;
        u32 thr = 0;
        int i, ac, cpu;
        struct ieee80211_sta_rx_stats *last_rxstats;
 
        last_rxstats = sta_get_last_rx_stats(sta);
 
-       if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
-               ref = local->rate_ctrl;
-
        sinfo->generation = sdata->local->sta_generation;
 
        /* do before driver, so beacon filtering drivers have a
index ddf71c648cab008baaff8ed430b9b222410f8ce1..a3af6e1bfd984d3548b3f48dad508028b7958bae 100644 (file)
@@ -95,7 +95,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
                 */
                if (*p & IEEE80211_QOS_CTL_EOSP)
                        *p &= ~IEEE80211_QOS_CTL_EOSP;
-               ac = ieee802_1d_to_ac[tid & 7];
+               ac = ieee80211_ac_from_tid(tid);
        } else {
                ac = IEEE80211_AC_BE;
        }
@@ -541,6 +541,11 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
        } else if (info->ack_frame_id) {
                ieee80211_report_ack_skb(local, info, acked, dropped);
        }
+
+       if (!dropped && skb->destructor) {
+               skb->wifi_acked_valid = 1;
+               skb->wifi_acked = acked;
+       }
 }
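
The new lines above latch the over-the-air ACK result into skb->wifi_acked_valid/wifi_acked for sockets that requested TX status. Userspace opts in with the SO_WIFI_STATUS socket option and then reads the status from the socket error queue (not shown); a minimal sketch of the opt-in, with the sockopt value taken from asm-generic as an assumption:

#include <stdio.h>
#include <sys/socket.h>

#ifndef SO_WIFI_STATUS
#define SO_WIFI_STATUS 41	/* asm-generic value; may differ per arch */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int one = 1;

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_WIFI_STATUS,
				 &one, sizeof(one)) < 0) {
		perror("SO_WIFI_STATUS");
		return 1;
	}
	puts("wifi TX status reporting enabled");
	return 0;
}
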
 
 /*
@@ -633,10 +638,9 @@ void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
        struct ieee80211_local *local = hw_to_local(hw);
        struct ieee80211_supported_band *sband;
        int retry_count;
-       int rates_idx;
        bool acked, noack_success;
 
-       rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
+       ieee80211_tx_get_rates(hw, info, &retry_count);
 
        sband = hw->wiphy->bands[info->band];
 
index 92a47afaa989e6b63b494eacc23c56409f767642..0d645bc148d0bda31edaf7c2c52a3fdad2a5e8aa 100644 (file)
@@ -1736,21 +1736,21 @@ TRACE_EVENT(drv_start_nan,
                LOCAL_ENTRY
                VIF_ENTRY
                __field(u8, master_pref)
-               __field(u8, dual)
+               __field(u8, bands)
        ),
 
        TP_fast_assign(
                LOCAL_ASSIGN;
                VIF_ASSIGN;
                __entry->master_pref = conf->master_pref;
-               __entry->dual = conf->dual;
+               __entry->bands = conf->bands;
        ),
 
        TP_printk(
                LOCAL_PR_FMT  VIF_PR_FMT
-               ", master preference: %u, dual: %d",
+               ", master preference: %u, bands: 0x%0x",
                LOCAL_PR_ARG, VIF_PR_ARG, __entry->master_pref,
-               __entry->dual
+               __entry->bands
        )
 );
 
@@ -1787,7 +1787,7 @@ TRACE_EVENT(drv_nan_change_conf,
                LOCAL_ENTRY
                VIF_ENTRY
                __field(u8, master_pref)
-               __field(u8, dual)
+               __field(u8, bands)
                __field(u32, changes)
        ),
 
@@ -1795,15 +1795,15 @@ TRACE_EVENT(drv_nan_change_conf,
                LOCAL_ASSIGN;
                VIF_ASSIGN;
                __entry->master_pref = conf->master_pref;
-               __entry->dual = conf->dual;
+               __entry->bands = conf->bands;
                __entry->changes = changes;
        ),
 
        TP_printk(
                LOCAL_PR_FMT  VIF_PR_FMT
-               ", master preference: %u, dual: %d, changes: 0x%x",
+               ", master preference: %u, bands: 0x%0x, changes: 0x%x",
                LOCAL_PR_ARG, VIF_PR_ARG, __entry->master_pref,
-               __entry->dual, __entry->changes
+               __entry->bands, __entry->changes
        )
 );
 
@@ -1996,23 +1996,26 @@ TRACE_EVENT(api_connection_loss,
 
 TRACE_EVENT(api_cqm_rssi_notify,
        TP_PROTO(struct ieee80211_sub_if_data *sdata,
-                enum nl80211_cqm_rssi_threshold_event rssi_event),
+                enum nl80211_cqm_rssi_threshold_event rssi_event,
+                s32 rssi_level),
 
-       TP_ARGS(sdata, rssi_event),
+       TP_ARGS(sdata, rssi_event, rssi_level),
 
        TP_STRUCT__entry(
                VIF_ENTRY
                __field(u32, rssi_event)
+               __field(s32, rssi_level)
        ),
 
        TP_fast_assign(
                VIF_ASSIGN;
                __entry->rssi_event = rssi_event;
+               __entry->rssi_level = rssi_level;
        ),
 
        TP_printk(
-               VIF_PR_FMT " event:%d",
-               VIF_PR_ARG, __entry->rssi_event
+               VIF_PR_FMT " event:%d rssi:%d",
+               VIF_PR_ARG, __entry->rssi_event, __entry->rssi_level
        )
 );
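
The two trace events above follow the NAN configuration replacing its boolean "dual" flag with a "bands" bitmap, so each band toggles independently and prints as hex. A standalone model of the bitmap; the band identifiers are illustrative stand-ins for nl80211's:

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

int main(void)
{
	unsigned char bands = 0;

	bands |= 1U << BAND_2GHZ;
	bands |= 1U << BAND_5GHZ;

	printf("bands: 0x%x (2.4 GHz: %s, 5 GHz: %s)\n", (unsigned int)bands,
	       bands & (1U << BAND_2GHZ) ? "on" : "off",
	       bands & (1U << BAND_5GHZ) ? "on" : "off");
	return 0;
}
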
 
index 797e847cbc49a1a5f1a515f01aa68c0212aed997..ba8d7db0a07165e7b33f71caa260f352771d434a 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/skbuff.h>
+#include <linux/if_vlan.h>
 #include <linux/etherdevice.h>
 #include <linux/bitmap.h>
 #include <linux/rcupdate.h>
@@ -63,6 +64,10 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
        struct ieee80211_chanctx_conf *chanctx_conf;
        u32 rate_flags = 0;
 
+       /* assume HW handles this */
+       if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))
+               return 0;
+
        rcu_read_lock();
        chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf);
        if (chanctx_conf) {
@@ -71,10 +76,6 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
        }
        rcu_read_unlock();
 
-       /* assume HW handles this */
-       if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))
-               return 0;
-
        /* uh huh? */
        if (WARN_ON_ONCE(tx->rate.idx < 0))
                return 0;
@@ -1413,7 +1414,7 @@ void ieee80211_txq_init(struct ieee80211_sub_if_data *sdata,
                txqi->txq.sta = &sta->sta;
                sta->sta.txq[tid] = &txqi->txq;
                txqi->txq.tid = tid;
-               txqi->txq.ac = ieee802_1d_to_ac[tid & 7];
+               txqi->txq.ac = ieee80211_ac_from_tid(tid);
        } else {
                sdata->vif.txq = &txqi->txq;
                txqi->txq.tid = 0;
@@ -3571,6 +3572,115 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
        rcu_read_unlock();
 }
 
+static int ieee80211_change_da(struct sk_buff *skb, struct sta_info *sta)
+{
+       struct ethhdr *eth;
+       int err;
+
+       err = skb_ensure_writable(skb, ETH_HLEN);
+       if (unlikely(err))
+               return err;
+
+       eth = (void *)skb->data;
+       ether_addr_copy(eth->h_dest, sta->sta.addr);
+
+       return 0;
+}
+
+static bool ieee80211_multicast_to_unicast(struct sk_buff *skb,
+                                          struct net_device *dev)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       const struct ethhdr *eth = (void *)skb->data;
+       const struct vlan_ethhdr *ethvlan = (void *)skb->data;
+       __be16 ethertype;
+
+       if (likely(!is_multicast_ether_addr(eth->h_dest)))
+               return false;
+
+       switch (sdata->vif.type) {
+       case NL80211_IFTYPE_AP_VLAN:
+               if (sdata->u.vlan.sta)
+                       return false;
+               if (sdata->wdev.use_4addr)
+                       return false;
+               /* fall through */
+       case NL80211_IFTYPE_AP:
+               /* check runtime toggle for this bss */
+               if (!sdata->bss->multicast_to_unicast)
+                       return false;
+               break;
+       default:
+               return false;
+       }
+
+       /* multicast to unicast conversion only for some payload */
+       ethertype = eth->h_proto;
+       if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
+               ethertype = ethvlan->h_vlan_encapsulated_proto;
+       switch (ethertype) {
+       case htons(ETH_P_ARP):
+       case htons(ETH_P_IP):
+       case htons(ETH_P_IPV6):
+               break;
+       default:
+               return false;
+       }
+
+       return true;
+}
+
+static void
+ieee80211_convert_to_unicast(struct sk_buff *skb, struct net_device *dev,
+                            struct sk_buff_head *queue)
+{
+       struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+       struct ieee80211_local *local = sdata->local;
+       const struct ethhdr *eth = (struct ethhdr *)skb->data;
+       struct sta_info *sta, *first = NULL;
+       struct sk_buff *cloned_skb;
+
+       rcu_read_lock();
+
+       list_for_each_entry_rcu(sta, &local->sta_list, list) {
+               if (sdata != sta->sdata)
+                       /* AP-VLAN mismatch */
+                       continue;
+               if (unlikely(ether_addr_equal(eth->h_source, sta->sta.addr)))
+                       /* do not send back to source */
+                       continue;
+               if (!first) {
+                       first = sta;
+                       continue;
+               }
+               cloned_skb = skb_clone(skb, GFP_ATOMIC);
+               if (!cloned_skb)
+                       goto multicast;
+               if (unlikely(ieee80211_change_da(cloned_skb, sta))) {
+                       dev_kfree_skb(cloned_skb);
+                       goto multicast;
+               }
+               __skb_queue_tail(queue, cloned_skb);
+       }
+
+       if (likely(first)) {
+               if (unlikely(ieee80211_change_da(skb, first)))
+                       goto multicast;
+               __skb_queue_tail(queue, skb);
+       } else {
+               /* no STA connected, drop */
+               kfree_skb(skb);
+               skb = NULL;
+       }
+
+       goto out;
+multicast:
+       __skb_queue_purge(queue);
+       __skb_queue_tail(queue, skb);
+out:
+       rcu_read_unlock();
+}
+
 /**
  * ieee80211_subif_start_xmit - netif start_xmit function for 802.3 vifs
  * @skb: packet to be sent
@@ -3581,7 +3691,17 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
 netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
                                       struct net_device *dev)
 {
-       __ieee80211_subif_start_xmit(skb, dev, 0);
+       if (unlikely(ieee80211_multicast_to_unicast(skb, dev))) {
+               struct sk_buff_head queue;
+
+               __skb_queue_head_init(&queue);
+               ieee80211_convert_to_unicast(skb, dev, &queue);
+               while ((skb = __skb_dequeue(&queue)))
+                       __ieee80211_subif_start_xmit(skb, dev, 0);
+       } else {
+               __ieee80211_subif_start_xmit(skb, dev, 0);
+       }
+
        return NETDEV_TX_OK;
 }
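
The transmit path above fans a multicast frame out as unicast: one clone per associated station after the first, each clone's destination rewritten, and the original queued for the first station; any clone or rewrite failure falls back to a single multicast copy. The same fan-out and rollback in a standalone model (the caller owns every frame written to out[], which needs room for npeers entries):

#include <stdlib.h>
#include <string.h>

struct frame { unsigned char dst[6]; /* payload omitted */ };
struct peer { unsigned char addr[6]; };

/* returns the number of frames written to out[] */
static size_t fan_out(struct frame *orig, const struct peer *peers,
		      size_t npeers, struct frame **out)
{
	size_t n = 0;

	if (npeers == 0) {	/* no receiver at all: drop */
		free(orig);
		return 0;
	}

	for (size_t i = 1; i < npeers; i++) {
		struct frame *copy = malloc(sizeof(*copy));

		if (!copy) {	/* roll back to one multicast copy */
			while (n)
				free(out[--n]);
			out[n++] = orig;	/* dst left as multicast */
			return n;
		}
		*copy = *orig;
		memcpy(copy->dst, peers[i].addr, 6);
		out[n++] = copy;
	}

	memcpy(orig->dst, peers[0].addr, 6);	/* original to first peer */
	out[n++] = orig;
	return n;
}

int main(void)
{
	struct peer peers[2] = { { { 2, 0, 0, 0, 0, 1 } },
				 { { 2, 0, 0, 0, 0, 2 } } };
	struct frame *out[2];
	struct frame *f = calloc(1, sizeof(*f));
	size_t n;

	if (!f)
		return 1;
	n = fan_out(f, peers, 2, out);
	while (n)
		free(out[--n]);
	return 0;
}
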
 
@@ -4074,7 +4194,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
                }
 
                if (ifmsh->sync_ops)
-                       ifmsh->sync_ops->adjust_tbtt(sdata, beacon);
+                       ifmsh->sync_ops->adjust_tsf(sdata, beacon);
 
                skb = dev_alloc_skb(local->tx_headroom +
                                    beacon->head_len +
@@ -4539,7 +4659,7 @@ void __ieee80211_tx_skb_tid_band(struct ieee80211_sub_if_data *sdata,
                                 struct sk_buff *skb, int tid,
                                 enum nl80211_band band)
 {
-       int ac = ieee802_1d_to_ac[tid & 7];
+       int ac = ieee80211_ac_from_tid(tid);
 
        skb_reset_mac_header(skb);
        skb_set_queue_mapping(skb, ac);
index 43e45bb660bcde02af964a31a71efd64f07ba448..19ec2189d3acbe2490bb3f2f6a14abee15a185c8 100644 (file)
@@ -436,14 +436,10 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
                                  struct sta_info *sta, u8 opmode,
                                  enum nl80211_band band)
 {
-       struct ieee80211_local *local = sdata->local;
-       struct ieee80211_supported_band *sband;
        enum ieee80211_sta_rx_bandwidth new_bw;
        u32 changed = 0;
        u8 nss;
 
-       sband = local->hw.wiphy->bands[band];
-
        /* ignore - no support for BF yet */
        if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)
                return 0;
index efa3f48f1ec5d51ea7191c8ae90dda77c0d330e1..73e8f347802ecadf5efb11fffa7b13d32a5705c1 100644 (file)
@@ -293,7 +293,8 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
                        return RX_DROP_UNUSABLE;
                ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
                /* remove ICV */
-               if (pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN))
+               if (!(status->flag & RX_FLAG_ICV_STRIPPED) &&
+                   pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN))
                        return RX_DROP_UNUSABLE;
        }
 
index 8af6dd388d1195540fdaf66f7ac55a452620ad5f..c1ef22df865fe77bf7cf0a42d560f54db98a3edd 100644 (file)
@@ -294,7 +294,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
                return RX_DROP_UNUSABLE;
 
        /* Trim ICV */
-       skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
+       if (!(status->flag & RX_FLAG_ICV_STRIPPED))
+               skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
 
        /* Remove IV */
        memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen);
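
Both the WEP and TKIP hunks guard the tail trim with the new RX_FLAG_ICV_STRIPPED so that hardware which already removed the integrity value does not lose payload bytes to a second trim. The conditional trim in isolation, with stand-ins for the flag and ICV length:

#include <assert.h>
#include <stddef.h>

#define ICV_LEN			4
#define FLAG_ICV_STRIPPED	0x1

/* returns the frame length after (conditionally) dropping the ICV */
static size_t strip_icv(size_t len, unsigned int rx_flags)
{
	if (rx_flags & FLAG_ICV_STRIPPED)
		return len;		/* hardware already removed it */

	assert(len >= ICV_LEN);		/* the real code drops short frames */
	return len - ICV_LEN;
}

int main(void)
{
	assert(strip_icv(16, 0) == 12);
	assert(strip_icv(16, FLAG_ICV_STRIPPED) == 16);
	return 0;
}
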
index 5b77377e5a15474e39037be5e6e873ebceb33555..64d3bf269a26896b55517517091c12bfd3a0411f 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/ipv6.h>
 #include <linux/mpls.h>
 #include <linux/vmalloc.h>
+#include <linux/percpu.h>
 #include <net/ip.h>
 #include <net/dst.h>
 #include <net/sock.h>
@@ -17,8 +18,8 @@
 #include <net/netns/generic.h>
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ipv6.h>
-#include <net/addrconf.h>
 #endif
+#include <net/addrconf.h>
 #include <net/nexthop.h>
 #include "internal.h"
 
@@ -48,11 +49,6 @@ static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
        return rt;
 }
 
-static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
-{
-       return rcu_dereference_rtnl(dev->mpls_ptr);
-}
-
 bool mpls_output_possible(const struct net_device *dev)
 {
        return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
@@ -98,6 +94,31 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 }
 EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
 
+void mpls_stats_inc_outucastpkts(struct net_device *dev,
+                                const struct sk_buff *skb)
+{
+       struct mpls_dev *mdev;
+
+       if (skb->protocol == htons(ETH_P_MPLS_UC)) {
+               mdev = mpls_dev_get(dev);
+               if (mdev)
+                       MPLS_INC_STATS_LEN(mdev, skb->len,
+                                          tx_packets,
+                                          tx_bytes);
+       } else if (skb->protocol == htons(ETH_P_IP)) {
+               IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               struct inet6_dev *in6dev = __in6_dev_get(dev);
+
+               if (in6dev)
+                       IP6_UPD_PO_STATS(dev_net(dev), in6dev,
+                                        IPSTATS_MIB_OUT, skb->len);
+#endif
+       }
+}
+EXPORT_SYMBOL_GPL(mpls_stats_inc_outucastpkts);
+
 static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
 {
        struct mpls_entry_decoded dec;
@@ -255,6 +276,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
        struct mpls_nh *nh;
        struct mpls_entry_decoded dec;
        struct net_device *out_dev;
+       struct mpls_dev *out_mdev;
        struct mpls_dev *mdev;
        unsigned int hh_len;
        unsigned int new_header_size;
@@ -264,34 +286,39 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
        /* Careful this entire function runs inside of an rcu critical section */
 
        mdev = mpls_dev_get(dev);
-       if (!mdev || !mdev->input_enabled)
+       if (!mdev)
                goto drop;
 
-       if (skb->pkt_type != PACKET_HOST)
+       MPLS_INC_STATS_LEN(mdev, skb->len, rx_packets,
+                          rx_bytes);
+
+       if (!mdev->input_enabled) {
+               MPLS_INC_STATS(mdev, rx_dropped);
                goto drop;
+       }
+
+       if (skb->pkt_type != PACKET_HOST)
+               goto err;
 
        if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
-               goto drop;
+               goto err;
 
        if (!pskb_may_pull(skb, sizeof(*hdr)))
-               goto drop;
+               goto err;
 
        /* Read and decode the label */
        hdr = mpls_hdr(skb);
        dec = mpls_entry_decode(hdr);
 
        rt = mpls_route_input_rcu(net, dec.label);
-       if (!rt)
+       if (!rt) {
+               MPLS_INC_STATS(mdev, rx_noroute);
                goto drop;
+       }
 
        nh = mpls_select_multipath(rt, skb);
        if (!nh)
-               goto drop;
-
-       /* Find the output device */
-       out_dev = rcu_dereference(nh->nh_dev);
-       if (!mpls_output_possible(out_dev))
-               goto drop;
+               goto err;
 
        /* Pop the label */
        skb_pull(skb, sizeof(*hdr));
@@ -300,20 +327,25 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
        skb_orphan(skb);
 
        if (skb_warn_if_lro(skb))
-               goto drop;
+               goto err;
 
        skb_forward_csum(skb);
 
        /* Verify ttl is valid */
        if (dec.ttl <= 1)
-               goto drop;
+               goto err;
        dec.ttl -= 1;
 
+       /* Find the output device */
+       out_dev = rcu_dereference(nh->nh_dev);
+       if (!mpls_output_possible(out_dev))
+               goto tx_err;
+
        /* Verify the destination can hold the packet */
        new_header_size = mpls_nh_header_size(nh);
        mtu = mpls_dev_mtu(out_dev);
        if (mpls_pkt_too_big(skb, mtu - new_header_size))
-               goto drop;
+               goto tx_err;
 
        hh_len = LL_RESERVED_SPACE(out_dev);
        if (!out_dev->header_ops)
@@ -321,7 +353,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
 
        /* Ensure there is enough space for the headers in the skb */
        if (skb_cow(skb, hh_len + new_header_size))
-               goto drop;
+               goto tx_err;
 
        skb->dev = out_dev;
        skb->protocol = htons(ETH_P_MPLS_UC);
@@ -329,7 +361,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
        if (unlikely(!new_header_size && dec.bos)) {
                /* Penultimate hop popping */
                if (!mpls_egress(rt, skb, dec))
-                       goto drop;
+                       goto err;
        } else {
                bool bos;
                int i;
@@ -345,6 +377,8 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
                }
        }
 
+       mpls_stats_inc_outucastpkts(out_dev, skb);
+
        /* If via wasn't specified then send out using device address */
        if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC)
                err = neigh_xmit(NEIGH_LINK_TABLE, out_dev,
@@ -357,6 +391,13 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
                                    __func__, err);
        return 0;
 
+tx_err:
+       out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL;
+       if (out_mdev)
+               MPLS_INC_STATS(out_mdev, tx_errors);
+       goto drop;
+err:
+       MPLS_INC_STATS(mdev, rx_errors);
 drop:
        kfree_skb(skb);
        return NET_RX_DROP;
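
The reworked mpls_forward() splits its single drop label into err, tx_err and drop so each early exit bumps exactly one counter (rx_errors, tx_errors or rx_noroute) before the shared kfree_skb(). The layered-label shape on its own:

#include <stdio.h>
#include <stdlib.h>

static unsigned int rx_errors, tx_errors;

static int forward(void *pkt, int rx_bad, int tx_bad)
{
	if (rx_bad)
		goto err;
	if (tx_bad)
		goto tx_err;

	free(pkt);		/* stand-in for a successful transmit */
	return 0;

tx_err:
	tx_errors++;
	goto drop;
err:
	rx_errors++;
drop:
	free(pkt);
	return -1;
}

int main(void)
{
	forward(malloc(1), 1, 0);
	forward(malloc(1), 0, 1);
	printf("rx_errors=%u tx_errors=%u\n", rx_errors, tx_errors);
	return 0;
}
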
@@ -855,6 +896,70 @@ errout:
        return err;
 }
 
+static void mpls_get_stats(struct mpls_dev *mdev,
+                          struct mpls_link_stats *stats)
+{
+       struct mpls_pcpu_stats *p;
+       int i;
+
+       memset(stats, 0, sizeof(*stats));
+
+       for_each_possible_cpu(i) {
+               struct mpls_link_stats local;
+               unsigned int start;
+
+               p = per_cpu_ptr(mdev->stats, i);
+               do {
+                       start = u64_stats_fetch_begin(&p->syncp);
+                       local = p->stats;
+               } while (u64_stats_fetch_retry(&p->syncp, start));
+
+               stats->rx_packets       += local.rx_packets;
+               stats->rx_bytes         += local.rx_bytes;
+               stats->tx_packets       += local.tx_packets;
+               stats->tx_bytes         += local.tx_bytes;
+               stats->rx_errors        += local.rx_errors;
+               stats->tx_errors        += local.tx_errors;
+               stats->rx_dropped       += local.rx_dropped;
+               stats->tx_dropped       += local.tx_dropped;
+               stats->rx_noroute       += local.rx_noroute;
+       }
+}
+
+static int mpls_fill_stats_af(struct sk_buff *skb,
+                             const struct net_device *dev)
+{
+       struct mpls_link_stats *stats;
+       struct mpls_dev *mdev;
+       struct nlattr *nla;
+
+       mdev = mpls_dev_get(dev);
+       if (!mdev)
+               return -ENODATA;
+
+       nla = nla_reserve_64bit(skb, MPLS_STATS_LINK,
+                               sizeof(struct mpls_link_stats),
+                               MPLS_STATS_UNSPEC);
+       if (!nla)
+               return -EMSGSIZE;
+
+       stats = nla_data(nla);
+       mpls_get_stats(mdev, stats);
+
+       return 0;
+}
+
+static size_t mpls_get_stats_af_size(const struct net_device *dev)
+{
+       struct mpls_dev *mdev;
+
+       mdev = mpls_dev_get(dev);
+       if (!mdev)
+               return 0;
+
+       return nla_total_size_64bit(sizeof(struct mpls_link_stats));
+}
+
 #define MPLS_PERDEV_SYSCTL_OFFSET(field)       \
        (&((struct mpls_dev *)0)->field)
 
@@ -913,6 +1018,7 @@ static struct mpls_dev *mpls_add_dev(struct net_device *dev)
 {
        struct mpls_dev *mdev;
        int err = -ENOMEM;
+       int i;
 
        ASSERT_RTNL();
 
@@ -920,6 +1026,17 @@ static struct mpls_dev *mpls_add_dev(struct net_device *dev)
        if (!mdev)
                return ERR_PTR(err);
 
+       mdev->stats = alloc_percpu(struct mpls_pcpu_stats);
+       if (!mdev->stats)
+               goto free;
+
+       for_each_possible_cpu(i) {
+               struct mpls_pcpu_stats *mpls_stats;
+
+               mpls_stats = per_cpu_ptr(mdev->stats, i);
+               u64_stats_init(&mpls_stats->syncp);
+       }
+
        err = mpls_dev_sysctl_register(dev, mdev);
        if (err)
                goto free;
@@ -929,10 +1046,19 @@ static struct mpls_dev *mpls_add_dev(struct net_device *dev)
        return mdev;
 
 free:
+       free_percpu(mdev->stats);
        kfree(mdev);
        return ERR_PTR(err);
 }
 
+static void mpls_dev_destroy_rcu(struct rcu_head *head)
+{
+       struct mpls_dev *mdev = container_of(head, struct mpls_dev, rcu);
+
+       free_percpu(mdev->stats);
+       kfree(mdev);
+}
+
 static void mpls_ifdown(struct net_device *dev, int event)
 {
        struct mpls_route __rcu **platform_label;
@@ -1047,7 +1173,7 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
                if (mdev) {
                        mpls_dev_sysctl_unregister(mdev);
                        RCU_INIT_POINTER(dev->mpls_ptr, NULL);
-                       kfree_rcu(mdev, rcu);
+                       call_rcu(&mdev->rcu, mpls_dev_destroy_rcu);
                }
                break;
        case NETDEV_CHANGENAME:
@@ -1708,6 +1834,12 @@ static struct pernet_operations mpls_net_ops = {
        .exit = mpls_net_exit,
 };
 
+static struct rtnl_af_ops mpls_af_ops __read_mostly = {
+       .family            = AF_MPLS,
+       .fill_stats_af     = mpls_fill_stats_af,
+       .get_stats_af_size = mpls_get_stats_af_size,
+};
+
 static int __init mpls_init(void)
 {
        int err;
@@ -1724,6 +1856,8 @@ static int __init mpls_init(void)
 
        dev_add_pack(&mpls_packet_type);
 
+       rtnl_af_register(&mpls_af_ops);
+
        rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, NULL);
        rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, NULL);
        rtnl_register(PF_MPLS, RTM_GETROUTE, NULL, mpls_dump_routes, NULL);
@@ -1740,6 +1874,7 @@ module_init(mpls_init);
 static void __exit mpls_exit(void)
 {
        rtnl_unregister_all(PF_MPLS);
+       rtnl_af_unregister(&mpls_af_ops);
        dev_remove_pack(&mpls_packet_type);
        unregister_netdevice_notifier(&mpls_dev_notifier);
        unregister_pernet_subsys(&mpls_net_ops);
index bdfef6c3271a5a0e39fbbd03b9464a9fe1bdb3f0..d972430346050774676ae690c09bc762fdb23239 100644 (file)
@@ -9,13 +9,58 @@ struct mpls_entry_decoded {
        u8 bos;
 };
 
+struct mpls_pcpu_stats {
+       struct mpls_link_stats  stats;
+       struct u64_stats_sync   syncp;
+};
+
 struct mpls_dev {
-       int                     input_enabled;
+       int                             input_enabled;
 
-       struct ctl_table_header *sysctl;
-       struct rcu_head         rcu;
+       struct mpls_pcpu_stats __percpu *stats;
+
+       struct ctl_table_header         *sysctl;
+       struct rcu_head                 rcu;
 };
 
+#if BITS_PER_LONG == 32
+
+#define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field)         \
+       do {                                                            \
+               __typeof__(*(mdev)->stats) *ptr =                       \
+                       raw_cpu_ptr((mdev)->stats);                     \
+               local_bh_disable();                                     \
+               u64_stats_update_begin(&ptr->syncp);                    \
+               ptr->stats.pkts_field++;                                \
+               ptr->stats.bytes_field += (len);                        \
+               u64_stats_update_end(&ptr->syncp);                      \
+               local_bh_enable();                                      \
+       } while (0)
+
+#define MPLS_INC_STATS(mdev, field)                                    \
+       do {                                                            \
+               __typeof__(*(mdev)->stats) *ptr =                       \
+                       raw_cpu_ptr((mdev)->stats);                     \
+               local_bh_disable();                                     \
+               u64_stats_update_begin(&ptr->syncp);                    \
+               ptr->stats.field++;                                     \
+               u64_stats_update_end(&ptr->syncp);                      \
+               local_bh_enable();                                      \
+       } while (0)
+
+#else
+
+#define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field)         \
+       do {                                                            \
+               this_cpu_inc((mdev)->stats->stats.pkts_field);          \
+               this_cpu_add((mdev)->stats->stats.bytes_field, (len));  \
+       } while (0)
+
+#define MPLS_INC_STATS(mdev, field)                    \
+       this_cpu_inc((mdev)->stats->stats.field)
+
+#endif
+
 struct sk_buff;
 
 #define LABEL_NOT_SPECIFIED (1 << 20)
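
The 32-bit variants of MPLS_INC_STATS*() above bracket each update with u64_stats_update_begin()/end() because a 64-bit counter cannot be read atomically on a 32-bit CPU; readers such as mpls_get_stats() retry until the sequence count is even and unchanged. A single-threaded userspace model of that writer/reader protocol (the kernel version adds memory barriers and true per-CPU placement, omitted here):

#include <stdint.h>
#include <stdio.h>

#define NSLOTS 4

struct slot { uint32_t seq; uint64_t packets, bytes; };

static struct slot slots[NSLOTS];

static void writer_add(struct slot *s, uint64_t len)
{
	s->seq++;		/* odd: update in progress */
	s->packets++;
	s->bytes += len;
	s->seq++;		/* even again: update complete */
}

static void slot_read(const struct slot *s, uint64_t *p, uint64_t *b)
{
	uint32_t start;

	do {
		start = s->seq;
		*p = s->packets;
		*b = s->bytes;
	} while ((start & 1) || start != s->seq);	/* retry torn reads */
}

int main(void)
{
	uint64_t tp = 0, tb = 0, p, b;

	writer_add(&slots[0], 100);
	writer_add(&slots[2], 60);

	for (int i = 0; i < NSLOTS; i++) {
		slot_read(&slots[i], &p, &b);
		tp += p;
		tb += b;
	}
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)tp, (unsigned long long)tb);
	return 0;
}
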
@@ -114,6 +159,11 @@ static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr *
        return result;
 }
 
+static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
+{
+       return rcu_dereference_rtnl(dev->mpls_ptr);
+}
+
 int nla_put_labels(struct sk_buff *skb, int attrtype,  u8 labels,
                   const u32 label[]);
 int nla_get_labels(const struct nlattr *nla, u32 max_labels, u8 *labels,
@@ -123,5 +173,7 @@ int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table,
 bool mpls_output_possible(const struct net_device *dev);
 unsigned int mpls_dev_mtu(const struct net_device *dev);
 bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
+void mpls_stats_inc_outucastpkts(struct net_device *dev,
+                                const struct sk_buff *skb);
 
 #endif /* MPLS_INTERNAL_H */
index 1d281c1ff7c10b3ae6e0245e2b95cc404dd791c4..e4e4424f9eb1f5531d22463687d74c2e2ca971a6 100644 (file)
@@ -48,11 +48,15 @@ static int mpls_xmit(struct sk_buff *skb)
        struct dst_entry *dst = skb_dst(skb);
        struct rtable *rt = NULL;
        struct rt6_info *rt6 = NULL;
+       struct mpls_dev *out_mdev;
        int err = 0;
        bool bos;
        int i;
        unsigned int ttl;
 
+       /* Find the output device */
+       out_dev = dst->dev;
+
        /* Obtain the ttl */
        if (dst->ops->family == AF_INET) {
                ttl = ip_hdr(skb)->ttl;
@@ -66,8 +70,6 @@ static int mpls_xmit(struct sk_buff *skb)
 
        skb_orphan(skb);
 
-       /* Find the output device */
-       out_dev = dst->dev;
        if (!mpls_output_possible(out_dev) ||
            !dst->lwtstate || skb_warn_if_lro(skb))
                goto drop;
@@ -109,6 +111,8 @@ static int mpls_xmit(struct sk_buff *skb)
                bos = false;
        }
 
+       mpls_stats_inc_outucastpkts(out_dev, skb);
+
        if (rt)
                err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway,
                                 skb);
@@ -122,11 +126,14 @@ static int mpls_xmit(struct sk_buff *skb)
        return LWTUNNEL_XMIT_DONE;
 
 drop:
+       out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL;
+       if (out_mdev)
+               MPLS_INC_STATS(out_mdev, tx_errors);
        kfree_skb(skb);
        return -EINVAL;
 }
 
-static int mpls_build_state(struct net_device *dev, struct nlattr *nla,
+static int mpls_build_state(struct nlattr *nla,
                            unsigned int family, const void *cfg,
                            struct lwtunnel_state **ts)
 {
index bbc45f8a7b2de6801eab367fa7f3611e23b92b9e..9b28864cc36a9e19406a015493ee3249bd2afd2c 100644 (file)
@@ -162,6 +162,7 @@ config NF_CT_PROTO_SCTP
        bool 'SCTP protocol connection tracking support'
        depends on NETFILTER_ADVANCED
        default y
+       select LIBCRC32C
        help
          With this option enabled, the layer 3 independent connection
          tracking code will be able to do state tracking on SCTP connections.
@@ -397,7 +398,6 @@ config NF_NAT_PROTO_SCTP
        bool
        default NF_NAT && NF_CT_PROTO_SCTP
        depends on NF_NAT && NF_CT_PROTO_SCTP
-       select LIBCRC32C
 
 config NF_NAT_AMANDA
        tristate
@@ -467,10 +467,10 @@ config NF_TABLES_NETDEV
          This option enables support for the "netdev" table.
 
 config NFT_EXTHDR
-       tristate "Netfilter nf_tables IPv6 exthdr module"
+       tristate "Netfilter nf_tables exthdr module"
        help
          This option adds the "exthdr" expression that you can use to match
-         IPv6 extension headers.
+         IPv6 extension headers and tcp options.
 
 config NFT_META
        tristate "Netfilter nf_tables meta module"
@@ -509,6 +509,12 @@ config NFT_SET_HASH
          This option adds the "hash" set type that is used to build one-way
          mappings between matchings and actions.
 
+config NFT_SET_BITMAP
+       tristate "Netfilter nf_tables bitmap set module"
+       help
+         This option adds the "bitmap" set type that is used to build sets
+         whose keys are smaller or equal to 16 bits.
+
 config NFT_COUNTER
        tristate "Netfilter nf_tables counter module"
        help
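
The new "bitmap" set trades memory for constant-time lookups: with keys of at most 16 bits the whole key space fits in an 8 KiB bit array and lookup is one bit test. A sketch of the core idea only; the real nft_set_bitmap reportedly also tracks per-element generation bits, ignored here:

#include <stdbool.h>
#include <stdint.h>

#define KEY_BITS 16
static uint8_t bitmap[1U << (KEY_BITS - 3)];	/* 2^16 bits == 8 KiB */

static void set_add(uint16_t key)
{
	bitmap[key >> 3] |= 1U << (key & 7);
}

static void set_del(uint16_t key)
{
	bitmap[key >> 3] &= (uint8_t)~(1U << (key & 7));
}

static bool set_lookup(uint16_t key)
{
	return bitmap[key >> 3] & (1U << (key & 7));
}

int main(void)
{
	set_add(80);
	set_add(443);
	set_del(80);
	return !(set_lookup(443) && !set_lookup(80));
}
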
index ca30d1960f1db95964f06c5d23a08a1b83e492af..c9b78e7b342f97328bdba267d8cedb2ab4c19834 100644 (file)
@@ -7,7 +7,6 @@ nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
 nf_conntrack-$(CONFIG_NF_CONNTRACK_LABELS) += nf_conntrack_labels.o
 nf_conntrack-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
 nf_conntrack-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
-nf_conntrack-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o
 
 obj-$(CONFIG_NETFILTER) = netfilter.o
 
@@ -47,7 +46,6 @@ nf_nat-y      := nf_nat_core.o nf_nat_proto_unknown.o nf_nat_proto_common.o \
 # NAT protocols (nf_nat)
 nf_nat-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o
 nf_nat-$(CONFIG_NF_NAT_PROTO_SCTP) += nf_nat_proto_sctp.o
-nf_nat-$(CONFIG_NF_NAT_PROTO_UDPLITE) += nf_nat_proto_udplite.o
 
 # generic transport layer logging
 obj-$(CONFIG_NF_LOG_COMMON) += nf_log_common.o
@@ -95,6 +93,7 @@ obj-$(CONFIG_NFT_REJECT)      += nft_reject.o
 obj-$(CONFIG_NFT_REJECT_INET)  += nft_reject_inet.o
 obj-$(CONFIG_NFT_SET_RBTREE)   += nft_set_rbtree.o
 obj-$(CONFIG_NFT_SET_HASH)     += nft_set_hash.o
+obj-$(CONFIG_NFT_SET_BITMAP)   += nft_set_bitmap.o
 obj-$(CONFIG_NFT_COUNTER)      += nft_counter.o
 obj-$(CONFIG_NFT_LOG)          += nft_log.o
 obj-$(CONFIG_NFT_MASQ)         += nft_masq.o
index ce6adfae521a1cdf5f479984ada3699dd7f3ddaa..a87a6f8a74d8c90b2e6801a9d391ced7084dd15f 100644 (file)
@@ -375,7 +375,7 @@ void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
 {
        void (*attach)(struct sk_buff *, const struct sk_buff *);
 
-       if (skb->nfct) {
+       if (skb->_nfct) {
                rcu_read_lock();
                attach = rcu_dereference(ip_ct_attach);
                if (attach)
index 55e0169caa4ce2fe81f0c9f1199c2be84c986b58..5aeb0dde6ccc5e525e740ca5fde3ba2c58c50070 100644 (file)
@@ -426,10 +426,9 @@ ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol
         */
        svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, vport);
 
-       if (svc == NULL
-           && protocol == IPPROTO_TCP
-           && atomic_read(&ipvs->ftpsvc_counter)
-           && (vport == FTPDATA || ntohs(vport) >= PROT_SOCK)) {
+       if (!svc && protocol == IPPROTO_TCP &&
+           atomic_read(&ipvs->ftpsvc_counter) &&
+           (vport == FTPDATA || ntohs(vport) >= inet_prot_sock(ipvs->net))) {
                /*
                 * Check if ftp service entry exists, the packet
                 * might belong to FTP data connections.
@@ -711,7 +710,6 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, int dest_af,
                      dest->vport == svc->port))) {
                        /* HIT */
                        list_del(&dest->t_list);
-                       ip_vs_dest_hold(dest);
                        goto out;
                }
        }
@@ -741,7 +739,7 @@ static void ip_vs_dest_free(struct ip_vs_dest *dest)
  *  When the ip_vs_control_clearup is activated by ipvs module exit,
  *  the service tables must have been flushed and all the connections
  *  are expired, and the refcnt of each destination in the trash must
- *  be 0, so we simply release them here.
+ *  be 1, so we simply release them here.
  */
 static void ip_vs_trash_cleanup(struct netns_ipvs *ipvs)
 {
@@ -1080,11 +1078,10 @@ static void __ip_vs_del_dest(struct netns_ipvs *ipvs, struct ip_vs_dest *dest,
        if (list_empty(&ipvs->dest_trash) && !cleanup)
                mod_timer(&ipvs->dest_trash_timer,
                          jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
-       /* dest lives in trash without reference */
+       /* dest lives in trash with reference */
        list_add(&dest->t_list, &ipvs->dest_trash);
        dest->idle_start = 0;
        spin_unlock_bh(&ipvs->dest_trash_lock);
-       ip_vs_dest_put(dest);
 }
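
The ipvs hunks above change the trash list to hold its own reference on each destination, so the old hold/put pair disappears and "still in use" becomes refcnt > 1 rather than > 0. The invariant in miniature:

#include <assert.h>

struct dest { int refcnt; int in_trash; };

static void move_to_trash(struct dest *d)
{
	/* the trash list itself owns one reference from now on */
	d->in_trash = 1;
}

static int trash_can_free(const struct dest *d)
{
	return d->refcnt == 1;	/* only the trash's reference remains */
}

int main(void)
{
	struct dest d = { .refcnt = 2, .in_trash = 0 };	/* trash + one user */

	move_to_trash(&d);
	assert(!trash_can_free(&d));
	d.refcnt--;				/* last user drops its ref */
	assert(trash_can_free(&d));
	return 0;
}
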
 
 
@@ -1160,7 +1157,7 @@ static void ip_vs_dest_trash_expire(unsigned long data)
 
        spin_lock(&ipvs->dest_trash_lock);
        list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) {
-               if (atomic_read(&dest->refcnt) > 0)
+               if (atomic_read(&dest->refcnt) > 1)
                        continue;
                if (dest->idle_start) {
                        if (time_before(now, dest->idle_start +
index 4e8083c5e01d1ec631258af169c18aceed101e3a..071b97fcbefb083ded417e06e739a4622b237fe8 100644 (file)
@@ -350,16 +350,31 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
        spin_unlock(&pcpu->lock);
 }
 
+#define NFCT_ALIGN(len)        (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
+
 /* Released via destroy_conntrack() */
 struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
                                 const struct nf_conntrack_zone *zone,
                                 gfp_t flags)
 {
-       struct nf_conn *tmpl;
+       struct nf_conn *tmpl, *p;
 
-       tmpl = kzalloc(sizeof(*tmpl), flags);
-       if (tmpl == NULL)
-               return NULL;
+       if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
+               tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
+               if (!tmpl)
+                       return NULL;
+
+               p = tmpl;
+               tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
+               if (tmpl != p) {
+                       tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
+                       tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
+               }
+       } else {
+               tmpl = kzalloc(sizeof(*tmpl), flags);
+               if (!tmpl)
+                       return NULL;
+       }
 
        tmpl->status = IPS_TEMPLATE;
        write_pnet(&tmpl->ct_net, net);
@@ -374,7 +389,11 @@ void nf_ct_tmpl_free(struct nf_conn *tmpl)
 {
        nf_ct_ext_destroy(tmpl);
        nf_ct_ext_free(tmpl);
-       kfree(tmpl);
+
+       if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
+               kfree((char *)tmpl - tmpl->proto.tmpl_padto);
+       else
+               kfree(tmpl);
 }
 EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
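
nf_ct_tmpl_alloc()/nf_ct_tmpl_free() above over-allocate by NFCT_INFOMASK bytes when the slab's minimum alignment is too small, round the pointer up to the next 8-byte boundary, and record the pad so the free path can recover the original allocation. The same trick in plain C:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define MASK		((uintptr_t)7)
#define ALIGN_UP(x)	(((x) + MASK) & ~MASK)

struct tmpl { unsigned char padto; /* bytes skipped in front, 0..7 */ };

static struct tmpl *tmpl_alloc(void)
{
	void *p = calloc(1, sizeof(struct tmpl) + (size_t)MASK);
	struct tmpl *t;

	if (!p)
		return NULL;
	t = (struct tmpl *)ALIGN_UP((uintptr_t)p);
	t->padto = (unsigned char)((char *)t - (char *)p);
	return t;
}

static void tmpl_free(struct tmpl *t)
{
	free((char *)t - t->padto);	/* undo the alignment offset */
}

int main(void)
{
	struct tmpl *t = tmpl_alloc();

	assert(t && ((uintptr_t)t & MASK) == 0);
	tmpl_free(t);
	return 0;
}
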
 
@@ -686,12 +705,12 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
            !nfct_nat(ct) &&
            !nf_ct_is_dying(ct) &&
            atomic_inc_not_zero(&ct->ct_general.use)) {
-               nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct);
-               nf_conntrack_put(skb->nfct);
-               /* Assign conntrack already in hashes to this skbuff. Don't
-                * modify skb->nfctinfo to ensure consistent stateful filtering.
-                */
-               skb->nfct = &ct->ct_general;
+               enum ip_conntrack_info oldinfo;
+               struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);
+
+               nf_ct_acct_merge(ct, ctinfo, loser_ct);
+               nf_conntrack_put(&loser_ct->ct_general);
+               nf_ct_set(skb, ct, oldinfo);
                return NF_ACCEPT;
        }
        NF_CT_STAT_INC(net, drop);
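
The skb->nfct / skb->nfctinfo rewrites through this file reflect the two fields merging into a single skb->_nfct word: with conntrack objects aligned to at least 8 bytes (see the kmem_cache_create() change below, which passes NFCT_INFOMASK + 1 as the alignment), the low three pointer bits are free to carry the ctinfo value. A standalone model of that packing; the mask width mirrors NFCT_INFOMASK, the rest is illustrative:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define INFOMASK ((uintptr_t)7)	/* low bits carry the ctinfo value */

struct conn { _Alignas(8) int use; };	/* objects aligned to 8 bytes */

static uintptr_t pack(struct conn *ct, unsigned int info)
{
	assert(((uintptr_t)ct & INFOMASK) == 0 && info <= INFOMASK);
	return (uintptr_t)ct | info;
}

static struct conn *unpack(uintptr_t nfct, unsigned int *info)
{
	*info = (unsigned int)(nfct & INFOMASK);
	return (struct conn *)(nfct & ~INFOMASK);
}

int main(void)
{
	struct conn *ct = aligned_alloc(8, sizeof(struct conn));
	unsigned int info;
	uintptr_t nfct;

	assert(ct);
	nfct = pack(ct, 3);
	assert(unpack(nfct, &info) == ct && info == 3);
	free(ct);
	return 0;
}
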
@@ -1218,7 +1237,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
        return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
 }
 
-/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
+/* On success, returns conntrack ptr, sets skb->_nfct | ctinfo */
 static inline struct nf_conn *
 resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
                  struct sk_buff *skb,
@@ -1277,8 +1296,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
                }
                *set_reply = 0;
        }
-       skb->nfct = &ct->ct_general;
-       skb->nfctinfo = *ctinfo;
+       nf_ct_set(skb, ct, *ctinfo);
        return ct;
 }
 
@@ -1286,7 +1304,7 @@ unsigned int
 nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                struct sk_buff *skb)
 {
-       struct nf_conn *ct, *tmpl = NULL;
+       struct nf_conn *ct, *tmpl;
        enum ip_conntrack_info ctinfo;
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
@@ -1296,14 +1314,14 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
        int set_reply = 0;
        int ret;
 
-       if (skb->nfct) {
+       tmpl = nf_ct_get(skb, &ctinfo);
+       if (tmpl) {
                /* Previously seen (loopback or untracked)?  Ignore. */
-               tmpl = (struct nf_conn *)skb->nfct;
                if (!nf_ct_is_template(tmpl)) {
                        NF_CT_STAT_INC_ATOMIC(net, ignore);
                        return NF_ACCEPT;
                }
-               skb->nfct = NULL;
+               skb->_nfct = 0;
        }
 
        /* rcu_read_lock()ed by nf_hook_thresh */
@@ -1324,8 +1342,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
         * inverse of the return code tells to the netfilter
         * core what to do with the packet. */
        if (l4proto->error != NULL) {
-               ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
-                                    pf, hooknum);
+               ret = l4proto->error(net, tmpl, skb, dataoff, pf, hooknum);
                if (ret <= 0) {
                        NF_CT_STAT_INC_ATOMIC(net, error);
                        NF_CT_STAT_INC_ATOMIC(net, invalid);
@@ -1333,7 +1350,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
                        goto out;
                }
                /* ICMP[v6] protocol trackers may assign one conntrack. */
-               if (skb->nfct)
+               if (skb->_nfct)
                        goto out;
        }
 repeat:
@@ -1353,7 +1370,7 @@ repeat:
                goto out;
        }
 
-       NF_CT_ASSERT(skb->nfct);
+       NF_CT_ASSERT(skb_nfct(skb));
 
        /* Decide what timeout policy we want to apply to this flow. */
        timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
@@ -1363,8 +1380,8 @@ repeat:
                /* Invalid: inverse of the return code tells
                 * the netfilter core what to do */
                pr_debug("nf_conntrack_in: Can't track with proto module\n");
-               nf_conntrack_put(skb->nfct);
-               skb->nfct = NULL;
+               nf_conntrack_put(&ct->ct_general);
+               skb->_nfct = 0;
                NF_CT_STAT_INC_ATOMIC(net, invalid);
                if (ret == -NF_DROP)
                        NF_CT_STAT_INC_ATOMIC(net, drop);
@@ -1522,9 +1539,8 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
                ctinfo = IP_CT_RELATED;
 
        /* Attach to new skbuff, and increment count */
-       nskb->nfct = &ct->ct_general;
-       nskb->nfctinfo = ctinfo;
-       nf_conntrack_get(nskb->nfct);
+       nf_ct_set(nskb, ct, ctinfo);
+       nf_conntrack_get(skb_nfct(nskb));
 }
 
 /* Bring out ya dead! */
@@ -1860,7 +1876,8 @@ int nf_conntrack_init_start(void)
        nf_conntrack_max = max_factor * nf_conntrack_htable_size;
 
        nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
-                                               sizeof(struct nf_conn), 0,
+                                               sizeof(struct nf_conn),
+                                               NFCT_INFOMASK + 1,
                                                SLAB_DESTROY_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
        if (!nf_conntrack_cachep)
                goto err_cachep;
index f8dbacf66795d929a220d1acf35ea40298ec3be4..e19a69787d994a506ed7e237598aa2cd6c4014ef 100644 (file)
@@ -353,7 +353,7 @@ void nf_ct_expect_put(struct nf_conntrack_expect *exp)
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_put);
 
-static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
+static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
 {
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct nf_conntrack_helper *helper;
@@ -380,7 +380,6 @@ static int nf_ct_expect_insert(struct nf_conntrack_expect *exp)
        add_timer(&exp->timeout);
 
        NF_CT_STAT_INC(net, expect_create);
-       return 0;
 }
 
 /* Race with expectations being used means we could have none to find; OK. */
@@ -464,9 +463,8 @@ int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
        if (ret <= 0)
                goto out;
 
-       ret = nf_ct_expect_insert(expect);
-       if (ret < 0)
-               goto out;
+       nf_ct_expect_insert(expect);
+
        spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
        return ret;
index b68ce6ac13b35af50f8ad0d9545ec03150c24974..93dd1c5b7bff9e5285530a446bba6811ec26ead4 100644 (file)
@@ -561,7 +561,6 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
 
 static int dccp_error(struct net *net, struct nf_conn *tmpl,
                      struct sk_buff *skb, unsigned int dataoff,
-                     enum ip_conntrack_info *ctinfo,
                      u_int8_t pf, unsigned int hooknum)
 {
        struct dccp_hdr _dh, *dh;
index a0efde38da44742ccab5578f5d726afbf5758f41..33279aab583d5eac3016b4a58f6bf2ea8b457395 100644 (file)
@@ -22,7 +22,9 @@
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <net/sctp/checksum.h>
 
+#include <net/netfilter/nf_log.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_ecache.h>
@@ -505,6 +507,34 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
        return true;
 }
 
+static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb,
+                     unsigned int dataoff,
+                     u8 pf, unsigned int hooknum)
+{
+       const struct sctphdr *sh;
+       struct sctphdr _sctph;
+       const char *logmsg;
+
+       sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
+       if (!sh) {
+               logmsg = "nf_ct_sctp: short packet ";
+               goto out_invalid;
+       }
+       if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
+           skb->ip_summed == CHECKSUM_NONE) {
+               if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
+                       logmsg = "nf_ct_sctp: bad CRC ";
+                       goto out_invalid;
+               }
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       }
+       return NF_ACCEPT;
+out_invalid:
+       if (LOG_INVALID(net, IPPROTO_SCTP))
+               nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", logmsg);
+       return -NF_ACCEPT;
+}
+
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
@@ -752,6 +782,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
        .packet                 = sctp_packet,
        .get_timeouts           = sctp_get_timeouts,
        .new                    = sctp_new,
+       .error                  = sctp_error,
        .me                     = THIS_MODULE,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .to_nlattr              = sctp_to_nlattr,
@@ -786,6 +817,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
        .packet                 = sctp_packet,
        .get_timeouts           = sctp_get_timeouts,
        .new                    = sctp_new,
+       .error                  = sctp_error,
        .me                     = THIS_MODULE,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .to_nlattr              = sctp_to_nlattr,
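
sctp_error() verifies the packet checksum via sctp_compute_cksum() before conntrack builds a tuple, which is why the Kconfig hunk earlier moves the LIBCRC32C select onto NF_CT_PROTO_SCTP itself. SCTP's checksum is CRC32c (Castagnoli); a self-contained bitwise implementation for reference, where the kernel instead uses table- or instruction-accelerated code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* bitwise CRC32c, reflected polynomial 0x82F63B78 */
static uint32_t crc32c(const uint8_t *data, size_t len)
{
	uint32_t crc = ~0U;

	while (len--) {
		crc ^= *data++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78U & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	const char *vec = "123456789";

	/* the well-known CRC32c check value for "123456789" is 0xe3069283 */
	printf("crc32c(\"%s\") = 0x%08x\n", vec,
	       crc32c((const uint8_t *)vec, strlen(vec)));
	return 0;
}
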
index 69f687740c76b755fa46f1e4d63c35314c130ddb..b122e9dacfed06e27aecab3fbc71203772d7b427 100644 (file)
@@ -750,7 +750,6 @@ static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
 static int tcp_error(struct net *net, struct nf_conn *tmpl,
                     struct sk_buff *skb,
                     unsigned int dataoff,
-                    enum ip_conntrack_info *ctinfo,
                     u_int8_t pf,
                     unsigned int hooknum)
 {
index 20f35ed680301335aa92d56eba9a00aeec9f589e..f6ebce6178ca65d040382af45bc962862b883d4a 100644 (file)
@@ -108,8 +108,60 @@ static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
        return true;
 }
 
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+static int udplite_error(struct net *net, struct nf_conn *tmpl,
+                        struct sk_buff *skb,
+                        unsigned int dataoff,
+                        u8 pf, unsigned int hooknum)
+{
+       unsigned int udplen = skb->len - dataoff;
+       const struct udphdr *hdr;
+       struct udphdr _hdr;
+       unsigned int cscov;
+
+       /* Header is too small? */
+       hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
+       if (!hdr) {
+               if (LOG_INVALID(net, IPPROTO_UDPLITE))
+                       nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
+                                     "nf_ct_udplite: short packet ");
+               return -NF_ACCEPT;
+       }
+
+       cscov = ntohs(hdr->len);
+       if (cscov == 0) {
+               cscov = udplen;
+       } else if (cscov < sizeof(*hdr) || cscov > udplen) {
+               if (LOG_INVALID(net, IPPROTO_UDPLITE))
+                       nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
+                                     "nf_ct_udplite: invalid checksum coverage ");
+               return -NF_ACCEPT;
+       }
+
+       /* UDPLITE mandates checksums */
+       if (!hdr->check) {
+               if (LOG_INVALID(net, IPPROTO_UDPLITE))
+                       nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
+                                     "nf_ct_udplite: checksum missing ");
+               return -NF_ACCEPT;
+       }
+
+       /* Checksum invalid? Ignore. */
+       if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
+           nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP,
+                               pf)) {
+               if (LOG_INVALID(net, IPPROTO_UDPLITE))
+                       nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
+                                     "nf_ct_udplite: bad UDPLite checksum ");
+               return -NF_ACCEPT;
+       }
+
+       return NF_ACCEPT;
+}
+#endif
+
 static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
-                    unsigned int dataoff, enum ip_conntrack_info *ctinfo,
+                    unsigned int dataoff,
                     u_int8_t pf,
                     unsigned int hooknum)
 {
@@ -290,6 +342,41 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4);
 
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
+{
+       .l3proto                = PF_INET,
+       .l4proto                = IPPROTO_UDPLITE,
+       .name                   = "udplite",
+       .allow_clash            = true,
+       .pkt_to_tuple           = udp_pkt_to_tuple,
+       .invert_tuple           = udp_invert_tuple,
+       .print_tuple            = udp_print_tuple,
+       .packet                 = udp_packet,
+       .get_timeouts           = udp_get_timeouts,
+       .new                    = udp_new,
+       .error                  = udplite_error,
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+       .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
+       .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
+       .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
+       .nla_policy             = nf_ct_port_nla_policy,
+#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+       .ctnl_timeout           = {
+               .nlattr_to_obj  = udp_timeout_nlattr_to_obj,
+               .obj_to_nlattr  = udp_timeout_obj_to_nlattr,
+               .nlattr_max     = CTA_TIMEOUT_UDP_MAX,
+               .obj_size       = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
+               .nla_policy     = udp_timeout_nla_policy,
+       },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+       .init_net               = udp_init_net,
+       .get_net_proto          = udp_get_net_proto,
+};
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite4);
+#endif
+
 struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
 {
        .l3proto                = PF_INET6,
@@ -322,3 +409,38 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
        .get_net_proto          = udp_get_net_proto,
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6);
+
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
+{
+       .l3proto                = PF_INET6,
+       .l4proto                = IPPROTO_UDPLITE,
+       .name                   = "udplite",
+       .allow_clash            = true,
+       .pkt_to_tuple           = udp_pkt_to_tuple,
+       .invert_tuple           = udp_invert_tuple,
+       .print_tuple            = udp_print_tuple,
+       .packet                 = udp_packet,
+       .get_timeouts           = udp_get_timeouts,
+       .new                    = udp_new,
+       .error                  = udplite_error,
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+       .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
+       .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
+       .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
+       .nla_policy             = nf_ct_port_nla_policy,
+#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+       .ctnl_timeout           = {
+               .nlattr_to_obj  = udp_timeout_nlattr_to_obj,
+               .obj_to_nlattr  = udp_timeout_obj_to_nlattr,
+               .nlattr_max     = CTA_TIMEOUT_UDP_MAX,
+               .obj_size       = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
+               .nla_policy     = udp_timeout_nla_policy,
+       },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+       .init_net               = udp_init_net,
+       .get_net_proto          = udp_get_net_proto,
+};
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6);
+#endif
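
With UDP-Lite folded into the UDP tracker above, only the udplite_error() check stays protocol-specific: RFC 3828 lets the checksum cover just a prefix of the datagram, but a nonzero coverage must span at least the 8-byte header and not exceed the datagram, and the checksum itself is mandatory. A minimal userspace sketch of that validation, assuming a header already parsed out of the packet (the function and main() here are illustrative, not kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the checks in udplite_error(), as a hypothetical standalone form. */
static bool udplite_coverage_ok(uint16_t cscov_field, uint16_t udplen,
                                uint16_t check_field)
{
    unsigned int cscov = cscov_field;

    if (cscov == 0)                 /* 0 = checksum covers the whole datagram */
        cscov = udplen;
    else if (cscov < 8 || cscov > udplen)
        return false;               /* coverage must span the 8-byte header */

    return check_field != 0;        /* UDP-Lite mandates a checksum */
}

int main(void)
{
    printf("%d\n", udplite_coverage_ok(0, 100, 0xbeef));  /* 1: full coverage */
    printf("%d\n", udplite_coverage_ok(4, 100, 0xbeef));  /* 0: shorter than header */
    printf("%d\n", udplite_coverage_ok(20, 100, 0));      /* 0: checksum missing */
    return 0;
}
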
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
deleted file mode 100644 (file)
index c35f7bf..0000000
+++ /dev/null
@@ -1,324 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- * (C) 2007 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/udp.h>
-#include <linux/seq_file.h>
-#include <linux/skbuff.h>
-#include <linux/ipv6.h>
-#include <net/ip6_checksum.h>
-#include <net/checksum.h>
-
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv6.h>
-#include <net/netfilter/nf_conntrack_l4proto.h>
-#include <net/netfilter/nf_conntrack_ecache.h>
-#include <net/netfilter/nf_log.h>
-
-static unsigned int udplite_timeouts[UDPLITE_CT_MAX] = {
-       [UDPLITE_CT_UNREPLIED]  = 30*HZ,
-       [UDPLITE_CT_REPLIED]    = 180*HZ,
-};
-
-static inline struct nf_udplite_net *udplite_pernet(struct net *net)
-{
-       return &net->ct.nf_ct_proto.udplite;
-}
-
-static bool udplite_pkt_to_tuple(const struct sk_buff *skb,
-                                unsigned int dataoff,
-                                struct net *net,
-                                struct nf_conntrack_tuple *tuple)
-{
-       const struct udphdr *hp;
-       struct udphdr _hdr;
-
-       /* Actually only need first 4 bytes to get ports. */
-       hp = skb_header_pointer(skb, dataoff, 4, &_hdr);
-       if (hp == NULL)
-               return false;
-
-       tuple->src.u.udp.port = hp->source;
-       tuple->dst.u.udp.port = hp->dest;
-       return true;
-}
-
-static bool udplite_invert_tuple(struct nf_conntrack_tuple *tuple,
-                                const struct nf_conntrack_tuple *orig)
-{
-       tuple->src.u.udp.port = orig->dst.u.udp.port;
-       tuple->dst.u.udp.port = orig->src.u.udp.port;
-       return true;
-}
-
-/* Print out the per-protocol part of the tuple. */
-static void udplite_print_tuple(struct seq_file *s,
-                               const struct nf_conntrack_tuple *tuple)
-{
-       seq_printf(s, "sport=%hu dport=%hu ",
-                  ntohs(tuple->src.u.udp.port),
-                  ntohs(tuple->dst.u.udp.port));
-}
-
-static unsigned int *udplite_get_timeouts(struct net *net)
-{
-       return udplite_pernet(net)->timeouts;
-}
-
-/* Returns verdict for packet, and may modify conntracktype */
-static int udplite_packet(struct nf_conn *ct,
-                         const struct sk_buff *skb,
-                         unsigned int dataoff,
-                         enum ip_conntrack_info ctinfo,
-                         u_int8_t pf,
-                         unsigned int hooknum,
-                         unsigned int *timeouts)
-{
-       /* If we've seen traffic both ways, this is some kind of UDP
-          stream.  Extend timeout. */
-       if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
-               nf_ct_refresh_acct(ct, ctinfo, skb,
-                                  timeouts[UDPLITE_CT_REPLIED]);
-               /* Also, more likely to be important, and not a probe */
-               if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
-                       nf_conntrack_event_cache(IPCT_ASSURED, ct);
-       } else {
-               nf_ct_refresh_acct(ct, ctinfo, skb,
-                                  timeouts[UDPLITE_CT_UNREPLIED]);
-       }
-       return NF_ACCEPT;
-}
-
-/* Called when a new connection for this protocol found. */
-static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
-                       unsigned int dataoff, unsigned int *timeouts)
-{
-       return true;
-}
-
-static int udplite_error(struct net *net, struct nf_conn *tmpl,
-                        struct sk_buff *skb,
-                        unsigned int dataoff,
-                        enum ip_conntrack_info *ctinfo,
-                        u_int8_t pf,
-                        unsigned int hooknum)
-{
-       unsigned int udplen = skb->len - dataoff;
-       const struct udphdr *hdr;
-       struct udphdr _hdr;
-       unsigned int cscov;
-
-       /* Header is too small? */
-       hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
-       if (hdr == NULL) {
-               if (LOG_INVALID(net, IPPROTO_UDPLITE))
-                       nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
-                                     "nf_ct_udplite: short packet ");
-               return -NF_ACCEPT;
-       }
-
-       cscov = ntohs(hdr->len);
-       if (cscov == 0)
-               cscov = udplen;
-       else if (cscov < sizeof(*hdr) || cscov > udplen) {
-               if (LOG_INVALID(net, IPPROTO_UDPLITE))
-                       nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
-                               "nf_ct_udplite: invalid checksum coverage ");
-               return -NF_ACCEPT;
-       }
-
-       /* UDPLITE mandates checksums */
-       if (!hdr->check) {
-               if (LOG_INVALID(net, IPPROTO_UDPLITE))
-                       nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
-                                     "nf_ct_udplite: checksum missing ");
-               return -NF_ACCEPT;
-       }
-
-       /* Checksum invalid? Ignore. */
-       if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
-           nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP,
-                               pf)) {
-               if (LOG_INVALID(net, IPPROTO_UDPLITE))
-                       nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
-                                     "nf_ct_udplite: bad UDPLite checksum ");
-               return -NF_ACCEPT;
-       }
-
-       return NF_ACCEPT;
-}
-
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
-
-#include <linux/netfilter/nfnetlink.h>
-#include <linux/netfilter/nfnetlink_cttimeout.h>
-
-static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[],
-                                        struct net *net, void *data)
-{
-       unsigned int *timeouts = data;
-       struct nf_udplite_net *un = udplite_pernet(net);
-
-       /* set default timeouts for UDPlite. */
-       timeouts[UDPLITE_CT_UNREPLIED] = un->timeouts[UDPLITE_CT_UNREPLIED];
-       timeouts[UDPLITE_CT_REPLIED] = un->timeouts[UDPLITE_CT_REPLIED];
-
-       if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) {
-               timeouts[UDPLITE_CT_UNREPLIED] =
-                 ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_UNREPLIED])) * HZ;
-       }
-       if (tb[CTA_TIMEOUT_UDPLITE_REPLIED]) {
-               timeouts[UDPLITE_CT_REPLIED] =
-                 ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_REPLIED])) * HZ;
-       }
-       return 0;
-}
-
-static int
-udplite_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
-{
-       const unsigned int *timeouts = data;
-
-       if (nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
-                        htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)) ||
-           nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
-                        htonl(timeouts[UDPLITE_CT_REPLIED] / HZ)))
-               goto nla_put_failure;
-       return 0;
-
-nla_put_failure:
-       return -ENOSPC;
-}
-
-static const struct nla_policy
-udplite_timeout_nla_policy[CTA_TIMEOUT_UDPLITE_MAX+1] = {
-       [CTA_TIMEOUT_UDPLITE_UNREPLIED] = { .type = NLA_U32 },
-       [CTA_TIMEOUT_UDPLITE_REPLIED]   = { .type = NLA_U32 },
-};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-
-#ifdef CONFIG_SYSCTL
-static struct ctl_table udplite_sysctl_table[] = {
-       {
-               .procname       = "nf_conntrack_udplite_timeout",
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "nf_conntrack_udplite_timeout_stream",
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       { }
-};
-#endif /* CONFIG_SYSCTL */
-
-static int udplite_kmemdup_sysctl_table(struct nf_proto_net *pn,
-                                       struct nf_udplite_net *un)
-{
-#ifdef CONFIG_SYSCTL
-       if (pn->ctl_table)
-               return 0;
-
-       pn->ctl_table = kmemdup(udplite_sysctl_table,
-                               sizeof(udplite_sysctl_table),
-                               GFP_KERNEL);
-       if (!pn->ctl_table)
-               return -ENOMEM;
-
-       pn->ctl_table[0].data = &un->timeouts[UDPLITE_CT_UNREPLIED];
-       pn->ctl_table[1].data = &un->timeouts[UDPLITE_CT_REPLIED];
-#endif
-       return 0;
-}
-
-static int udplite_init_net(struct net *net, u_int16_t proto)
-{
-       struct nf_udplite_net *un = udplite_pernet(net);
-       struct nf_proto_net *pn = &un->pn;
-
-       if (!pn->users) {
-               int i;
-
-               for (i = 0 ; i < UDPLITE_CT_MAX; i++)
-                       un->timeouts[i] = udplite_timeouts[i];
-       }
-
-       return udplite_kmemdup_sysctl_table(pn, un);
-}
-
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
-{
-       .l3proto                = PF_INET,
-       .l4proto                = IPPROTO_UDPLITE,
-       .name                   = "udplite",
-       .allow_clash            = true,
-       .pkt_to_tuple           = udplite_pkt_to_tuple,
-       .invert_tuple           = udplite_invert_tuple,
-       .print_tuple            = udplite_print_tuple,
-       .packet                 = udplite_packet,
-       .get_timeouts           = udplite_get_timeouts,
-       .new                    = udplite_new,
-       .error                  = udplite_error,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-       .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
-       .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
-       .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
-       .nla_policy             = nf_ct_port_nla_policy,
-#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
-       .ctnl_timeout           = {
-               .nlattr_to_obj  = udplite_timeout_nlattr_to_obj,
-               .obj_to_nlattr  = udplite_timeout_obj_to_nlattr,
-               .nlattr_max     = CTA_TIMEOUT_UDPLITE_MAX,
-               .obj_size       = sizeof(unsigned int) *
-                                       CTA_TIMEOUT_UDPLITE_MAX,
-               .nla_policy     = udplite_timeout_nla_policy,
-       },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-       .init_net               = udplite_init_net,
-};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite4);
-
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
-{
-       .l3proto                = PF_INET6,
-       .l4proto                = IPPROTO_UDPLITE,
-       .name                   = "udplite",
-       .allow_clash            = true,
-       .pkt_to_tuple           = udplite_pkt_to_tuple,
-       .invert_tuple           = udplite_invert_tuple,
-       .print_tuple            = udplite_print_tuple,
-       .packet                 = udplite_packet,
-       .get_timeouts           = udplite_get_timeouts,
-       .new                    = udplite_new,
-       .error                  = udplite_error,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-       .tuple_to_nlattr        = nf_ct_port_tuple_to_nlattr,
-       .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
-       .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
-       .nla_policy             = nf_ct_port_nla_policy,
-#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
-       .ctnl_timeout           = {
-               .nlattr_to_obj  = udplite_timeout_nlattr_to_obj,
-               .obj_to_nlattr  = udplite_timeout_obj_to_nlattr,
-               .nlattr_max     = CTA_TIMEOUT_UDPLITE_MAX,
-               .obj_size       = sizeof(unsigned int) *
-                                       CTA_TIMEOUT_UDPLITE_MAX,
-               .nla_policy     = udplite_timeout_nla_policy,
-       },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-       .init_net               = udplite_init_net,
-};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6);
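
Nothing else in the deleted tracker was UDP-Lite-specific: tuple handling, the 30s/180s timeouts and the netlink attribute plumbing all mirrored UDP, which is why the replacement definitions above can point both protocols at shared udp_* callbacks. The separate nf_conntrack_udplite_timeout* sysctls disappear with the file; judging by udp_get_timeouts/udp_init_net in the new structs, UDP-Lite now follows the UDP timeouts. A toy illustration of that shared-ops pattern (mini_l4proto and its handlers are invented):

#include <stdio.h>

/* Invented miniature of nf_conntrack_l4proto: two protocols, shared handlers. */
struct mini_l4proto {
    const char *name;
    int (*error)(int pktlen);        /* the one per-protocol check */
    void (*print)(const char *name); /* shared with the base protocol */
};

static int udp_error(int pktlen)     { (void)pktlen; return 0; }
static int udplite_error(int pktlen) { return pktlen < 8 ? -1 : 0; } /* stricter */
static void port_print(const char *name) { printf("proto=%s\n", name); }

static const struct mini_l4proto udp     = { "udp",     udp_error,     port_print };
static const struct mini_l4proto udplite = { "udplite", udplite_error, port_print };

int main(void)
{
    const struct mini_l4proto *tbl[] = { &udp, &udplite };

    for (unsigned int i = 0; i < 2; i++) {
        tbl[i]->print(tbl[i]->name);
        printf("  error(4) = %d\n", tbl[i]->error(4));
    }
    return 0;
}
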
index c3fc14e021ecf55ba9085cd7ee7a86bfc5352750..24174c5202398fa28287db69b570a12db15c1771 100644 (file)
@@ -809,13 +809,11 @@ static int refresh_signalling_expectation(struct nf_conn *ct,
                    exp->tuple.dst.protonum != proto ||
                    exp->tuple.dst.u.udp.port != port)
                        continue;
-               if (!del_timer(&exp->timeout))
-                       continue;
-               exp->flags &= ~NF_CT_EXPECT_INACTIVE;
-               exp->timeout.expires = jiffies + expires * HZ;
-               add_timer(&exp->timeout);
-               found = 1;
-               break;
+               if (mod_timer_pending(&exp->timeout, jiffies + expires * HZ)) {
+                       exp->flags &= ~NF_CT_EXPECT_INACTIVE;
+                       found = 1;
+                       break;
+               }
        }
        spin_unlock_bh(&nf_conntrack_expect_lock);
        return found;
index d009ae66345329c87eb6217ed8ca95708b0d0c64..2256147dcaad80ea982868607b2e66b233c27c6f 100644 (file)
@@ -642,6 +642,9 @@ static int __init nf_conntrack_standalone_init(void)
        if (ret < 0)
                goto out_start;
 
+       BUILD_BUG_ON(SKB_NFCT_PTRMASK != NFCT_PTRMASK);
+       BUILD_BUG_ON(NFCT_INFOMASK <= IP_CT_NUMBER);
+
 #ifdef CONFIG_SYSCTL
        nf_ct_netfilter_header =
                register_net_sysctl(&init_net, "net", nf_ct_netfilter_table);
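
The two BUILD_BUG_ON() checks break the build, rather than misbehave at runtime, if the conntrack-side pointer mask ever diverges from the skb-side definition, or if the ip_conntrack_info enum outgrows the bits reserved for it next to the pointer. Outside the kernel the same idea is C11 _Static_assert; a sketch with invented stand-in values:

#include <stdio.h>

/* Invented stand-ins for SKB_NFCT_PTRMASK / NFCT_INFOMASK / IP_CT_NUMBER. */
#define PTR_MASK   (~7UL)  /* bits that carry the pointer */
#define INFO_MASK  7UL     /* low bits that carry ctinfo */
#define CTINFO_MAX 6       /* highest ctinfo value that must fit */

_Static_assert((PTR_MASK & INFO_MASK) == 0, "masks must not overlap");
_Static_assert(INFO_MASK > CTINFO_MAX, "ctinfo must fit in the info bits");

int main(void)
{
    puts("compile-time checks passed");
    return 0;
}
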
index ffb9e8ada899b770293744ed0da5bebba4b2166e..8d85a0598b603c5f7d1f2f6f0c0ba68bb96b4d58 100644 (file)
@@ -15,6 +15,9 @@
 
 #define NFLOGGER_NAME_LEN              64
 
+int sysctl_nf_log_all_netns __read_mostly;
+EXPORT_SYMBOL(sysctl_nf_log_all_netns);
+
 static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
 static DEFINE_MUTEX(nf_log_mutex);
 
@@ -413,6 +416,18 @@ static const struct file_operations nflog_file_ops = {
 #ifdef CONFIG_SYSCTL
 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
 static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
+static struct ctl_table_header *nf_log_sysctl_fhdr;
+
+static struct ctl_table nf_log_sysctl_ftable[] = {
+       {
+               .procname       = "nf_log_all_netns",
+               .data           = &sysctl_nf_log_all_netns,
+               .maxlen         = sizeof(sysctl_nf_log_all_netns),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
+       { }
+};
 
 static int nf_log_proc_dostring(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -482,6 +497,10 @@ static int netfilter_log_sysctl_init(struct net *net)
                        nf_log_sysctl_table[i].extra1 =
                                (void *)(unsigned long) i;
                }
+               nf_log_sysctl_fhdr = register_net_sysctl(net, "net/netfilter",
+                                                        nf_log_sysctl_ftable);
+               if (!nf_log_sysctl_fhdr)
+                       goto err_freg;
        }
 
        for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
@@ -498,6 +517,9 @@ static int netfilter_log_sysctl_init(struct net *net)
 err_reg:
        if (!net_eq(net, &init_net))
                kfree(table);
+       else
+               unregister_net_sysctl_table(nf_log_sysctl_fhdr);
+err_freg:
 err_alloc:
        return -ENOMEM;
 }
@@ -510,6 +532,8 @@ static void netfilter_log_sysctl_exit(struct net *net)
        unregister_net_sysctl_table(net->nf.nf_log_dir_header);
        if (!net_eq(net, &init_net))
                kfree(table);
+       else
+               unregister_net_sysctl_table(nf_log_sysctl_fhdr);
 }
 #else
 static int netfilter_log_sysctl_init(struct net *net)
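
Because the new nf_log_all_netns knob is registered once, for the initial netns only, the init error path needs two unwind depths: failing the per-protocol table after the ftable succeeded must unregister the ftable header, while failing on the ftable itself jumps past that step via the new err_freg label. The same layered-goto unwind in a standalone sketch (the two resources are stand-ins):

#include <stdio.h>
#include <stdlib.h>

static int setup(int fail_at)
{
    char *a, *b = NULL;

    a = malloc(16);                 /* first resource */
    if (!a || fail_at == 1)
        goto err_a;

    b = malloc(16);                 /* second resource, added later */
    if (!b || fail_at == 2)
        goto err_b;

    free(b);
    free(a);
    return 0;

err_b:                              /* unwind in reverse acquisition order */
    free(b);
err_a:
    free(a);
    return -1;
}

int main(void)
{
    printf("%d\n", setup(0));       /* 0: both resources acquired */
    printf("%d\n", setup(1));       /* -1: only the first step unwinds */
    printf("%d\n", setup(2));       /* -1: both steps unwind */
    return 0;
}
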
index 2840abb5bb99a7590b228f13e5769a62902f3cf3..211661cb2c90a5d25a57d5ce41c9c5f9898c5302 100644 (file)
@@ -60,7 +60,7 @@ static void mangle_contents(struct sk_buff *skb,
                __skb_trim(skb, skb->len + rep_len - match_len);
        }
 
-       if (nf_ct_l3num((struct nf_conn *)skb->nfct) == NFPROTO_IPV4) {
+       if (nf_ct_l3num((struct nf_conn *)skb_nfct(skb)) == NFPROTO_IPV4) {
                /* fix IP hdr checksum information */
                ip_hdr(skb)->tot_len = htons(skb->len);
                ip_send_check(ip_hdr(skb));
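
The skb->nfct to skb_nfct(skb) conversion (here in what appears to be the NAT helper's mangle_contents(), judging by the checksum fixups) follows from this series packing the ctinfo value into the low bits of the conntrack pointer, which the BUILD_BUG_ON pair above pins down: the raw field no longer holds a clean pointer, so an accessor must mask it. A self-contained sketch of that pointer-tagging scheme, with invented names and masks:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define INFO_MASK 7UL   /* invented: low 3 bits of the word hold the state */

struct conn { int id; };

/* Pack state into the low bits of an 8-byte-aligned pointer. */
static uintptr_t ct_set(struct conn *ct, unsigned int info)
{
    assert(((uintptr_t)ct & INFO_MASK) == 0 && info <= INFO_MASK);
    return (uintptr_t)ct | info;
}

/* Recover both halves: mask off the tag to get the pointer back. */
static struct conn *ct_get(uintptr_t word, unsigned int *info)
{
    *info = (unsigned int)(word & INFO_MASK);
    return (struct conn *)(word & ~INFO_MASK);
}

int main(void)
{
    static _Alignas(8) struct conn c = { .id = 42 };
    unsigned int info;
    uintptr_t word = ct_set(&c, 5);
    struct conn *ct = ct_get(word, &info);

    printf("id=%d info=%u\n", ct->id, info);
    return 0;
}
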
index b1e627227b6e2670fb6ce9d151e8965a4c8731c3..edd4a77dc09a837e71e4322d328033ee0af90ee9 100644 (file)
@@ -30,20 +30,15 @@ udp_unique_tuple(const struct nf_nat_l3proto *l3proto,
                                    &udp_port_rover);
 }
 
-static bool
-udp_manip_pkt(struct sk_buff *skb,
-             const struct nf_nat_l3proto *l3proto,
-             unsigned int iphdroff, unsigned int hdroff,
-             const struct nf_conntrack_tuple *tuple,
-             enum nf_nat_manip_type maniptype)
+static void
+__udp_manip_pkt(struct sk_buff *skb,
+               const struct nf_nat_l3proto *l3proto,
+               unsigned int iphdroff, struct udphdr *hdr,
+               const struct nf_conntrack_tuple *tuple,
+               enum nf_nat_manip_type maniptype, bool do_csum)
 {
-       struct udphdr *hdr;
        __be16 *portptr, newport;
 
-       if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
-               return false;
-       hdr = (struct udphdr *)(skb->data + hdroff);
-
        if (maniptype == NF_NAT_MANIP_SRC) {
                /* Get rid of src port */
                newport = tuple->src.u.udp.port;
@@ -53,7 +48,7 @@ udp_manip_pkt(struct sk_buff *skb,
                newport = tuple->dst.u.udp.port;
                portptr = &hdr->dest;
        }
-       if (hdr->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+       if (do_csum) {
                l3proto->csum_update(skb, iphdroff, &hdr->check,
                                     tuple, maniptype);
                inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
@@ -62,9 +57,68 @@ udp_manip_pkt(struct sk_buff *skb,
                        hdr->check = CSUM_MANGLED_0;
        }
        *portptr = newport;
+}
+
+static bool udp_manip_pkt(struct sk_buff *skb,
+                         const struct nf_nat_l3proto *l3proto,
+                         unsigned int iphdroff, unsigned int hdroff,
+                         const struct nf_conntrack_tuple *tuple,
+                         enum nf_nat_manip_type maniptype)
+{
+       struct udphdr *hdr;
+       bool do_csum;
+
+       if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+               return false;
+
+       hdr = (struct udphdr *)(skb->data + hdroff);
+       do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL;
+
+       __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, do_csum);
+       return true;
+}
+
+#ifdef CONFIG_NF_NAT_PROTO_UDPLITE
+static u16 udplite_port_rover;
+
+static bool udplite_manip_pkt(struct sk_buff *skb,
+                             const struct nf_nat_l3proto *l3proto,
+                             unsigned int iphdroff, unsigned int hdroff,
+                             const struct nf_conntrack_tuple *tuple,
+                             enum nf_nat_manip_type maniptype)
+{
+       struct udphdr *hdr;
+
+       if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+               return false;
+
+       hdr = (struct udphdr *)(skb->data + hdroff);
+       __udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, true);
        return true;
 }
 
+static void
+udplite_unique_tuple(const struct nf_nat_l3proto *l3proto,
+                    struct nf_conntrack_tuple *tuple,
+                    const struct nf_nat_range *range,
+                    enum nf_nat_manip_type maniptype,
+                    const struct nf_conn *ct)
+{
+       nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
+                                   &udplite_port_rover);
+}
+
+const struct nf_nat_l4proto nf_nat_l4proto_udplite = {
+       .l4proto                = IPPROTO_UDPLITE,
+       .manip_pkt              = udplite_manip_pkt,
+       .in_range               = nf_nat_l4proto_in_range,
+       .unique_tuple           = udplite_unique_tuple,
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
+#endif
+};
+#endif /* CONFIG_NF_NAT_PROTO_UDPLITE */
+
 const struct nf_nat_l4proto nf_nat_l4proto_udp = {
        .l4proto                = IPPROTO_UDP,
        .manip_pkt              = udp_manip_pkt,
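
Splitting __udp_manip_pkt() out with an explicit do_csum flag lets plain UDP keep skipping checksum fixups for datagrams sent without one, while UDP-Lite (merged in above) always passes true since its checksum is mandatory. The fixup that inet_proto_csum_replace2() performs is the standard RFC 1624 incremental update: fold out the old 16-bit word, fold in the new one, no full recompute. A userspace sketch that checks the shortcut against a recomputation:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into a 16-bit one's-complement sum. */
static uint16_t fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

/* RFC 1624 eqn 3: HC' = ~(~HC + ~m + m'), replacing word m with m'. */
static uint16_t csum_replace2(uint16_t check, uint16_t old, uint16_t new_val)
{
    uint32_t sum = (uint16_t)~check;

    sum += (uint16_t)~old;
    sum += new_val;
    return (uint16_t)~fold(sum);
}

/* Full Internet checksum over 16-bit words, for comparison. */
static uint16_t csum(const uint16_t *w, int n)
{
    uint32_t sum = 0;

    while (n--)
        sum += *w++;
    return (uint16_t)~fold(sum);
}

int main(void)
{
    uint16_t data[4] = { 0x1234, 0x5678, 0x9abc, 0xdef0 };
    uint16_t before = csum(data, 4);
    uint16_t incr = csum_replace2(before, data[1], 0x1111);

    data[1] = 0x1111;
    printf("incremental=%04x recomputed=%04x\n", incr, csum(data, 4));
    return 0;
}
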
diff --git a/net/netfilter/nf_nat_proto_udplite.c b/net/netfilter/nf_nat_proto_udplite.c
deleted file mode 100644 (file)
index 366bfbf..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- * (C) 2008 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/udp.h>
-
-#include <linux/netfilter.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_l3proto.h>
-#include <net/netfilter/nf_nat_l4proto.h>
-
-static u16 udplite_port_rover;
-
-static void
-udplite_unique_tuple(const struct nf_nat_l3proto *l3proto,
-                    struct nf_conntrack_tuple *tuple,
-                    const struct nf_nat_range *range,
-                    enum nf_nat_manip_type maniptype,
-                    const struct nf_conn *ct)
-{
-       nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
-                                   &udplite_port_rover);
-}
-
-static bool
-udplite_manip_pkt(struct sk_buff *skb,
-                 const struct nf_nat_l3proto *l3proto,
-                 unsigned int iphdroff, unsigned int hdroff,
-                 const struct nf_conntrack_tuple *tuple,
-                 enum nf_nat_manip_type maniptype)
-{
-       struct udphdr *hdr;
-       __be16 *portptr, newport;
-
-       if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
-               return false;
-
-       hdr = (struct udphdr *)(skb->data + hdroff);
-
-       if (maniptype == NF_NAT_MANIP_SRC) {
-               /* Get rid of source port */
-               newport = tuple->src.u.udp.port;
-               portptr = &hdr->source;
-       } else {
-               /* Get rid of dst port */
-               newport = tuple->dst.u.udp.port;
-               portptr = &hdr->dest;
-       }
-
-       l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
-       inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, false);
-       if (!hdr->check)
-               hdr->check = CSUM_MANGLED_0;
-
-       *portptr = newport;
-       return true;
-}
-
-const struct nf_nat_l4proto nf_nat_l4proto_udplite = {
-       .l4proto                = IPPROTO_UDPLITE,
-       .manip_pkt              = udplite_manip_pkt,
-       .in_range               = nf_nat_l4proto_in_range,
-       .unique_tuple           = udplite_unique_tuple,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-       .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
-#endif
-};
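
As with conntrack, the deleted NAT file was a near copy of the UDP code; its manip_pkt was the unconditional-checksum variant now expressed as __udp_manip_pkt(..., true), plus a private port rover fed to nf_nat_l4proto_unique_tuple(). The rover is round-robin state, so each allocation starts probing where the previous one stopped instead of hammering the bottom of the range. A toy version over a small port range (in_use() is an invented predicate):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool in_use(uint16_t p) { return p % 5 == 0; } /* invented predicate */

/* Probe the range starting at the rover; return 0 if exhausted. */
static uint16_t alloc_port(uint16_t lo, uint16_t hi)
{
    static uint16_t off;                /* the "rover", persists across calls */
    unsigned int range = hi - lo + 1U;

    for (unsigned int i = 0; i < range; i++) {
        uint16_t port = lo + (uint16_t)(off++ % range);

        if (!in_use(port))
            return port;
    }
    return 0;                           /* every port in the range was taken */
}

int main(void)
{
    for (int i = 0; i < 4; i++)
        printf("%u\n", alloc_port(10000, 10007));
    return 0;
}
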
index 1b913760f205be79e1809c983cb3140c284a00cc..ff7304ae58ac4f99cf4841badcbe0afd8a1f6968 100644 (file)
@@ -240,6 +240,10 @@ static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type,
        if (trans == NULL)
                return NULL;
 
+       if (msg_type == NFT_MSG_NEWRULE && ctx->nla[NFTA_RULE_ID] != NULL) {
+               nft_trans_rule_id(trans) =
+                       ntohl(nla_get_be32(ctx->nla[NFTA_RULE_ID]));
+       }
        nft_trans_rule(trans) = rule;
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 
@@ -576,6 +580,28 @@ err:
        return err;
 }
 
+static void _nf_tables_table_disable(struct net *net,
+                                    const struct nft_af_info *afi,
+                                    struct nft_table *table,
+                                    u32 cnt)
+{
+       struct nft_chain *chain;
+       u32 i = 0;
+
+       list_for_each_entry(chain, &table->chains, list) {
+               if (!nft_is_active_next(net, chain))
+                       continue;
+               if (!(chain->flags & NFT_BASE_CHAIN))
+                       continue;
+
+               if (cnt && i++ == cnt)
+                       break;
+
+               nf_unregister_net_hooks(net, nft_base_chain(chain)->ops,
+                                       afi->nops);
+       }
+}
+
 static int nf_tables_table_enable(struct net *net,
                                  const struct nft_af_info *afi,
                                  struct nft_table *table)
@@ -598,18 +624,8 @@ static int nf_tables_table_enable(struct net *net,
        }
        return 0;
 err:
-       list_for_each_entry(chain, &table->chains, list) {
-               if (!nft_is_active_next(net, chain))
-                       continue;
-               if (!(chain->flags & NFT_BASE_CHAIN))
-                       continue;
-
-               if (i-- <= 0)
-                       break;
-
-               nf_unregister_net_hooks(net, nft_base_chain(chain)->ops,
-                                       afi->nops);
-       }
+       if (i)
+               _nf_tables_table_disable(net, afi, table, i);
        return err;
 }
 
@@ -617,17 +633,7 @@ static void nf_tables_table_disable(struct net *net,
                                    const struct nft_af_info *afi,
                                    struct nft_table *table)
 {
-       struct nft_chain *chain;
-
-       list_for_each_entry(chain, &table->chains, list) {
-               if (!nft_is_active_next(net, chain))
-                       continue;
-               if (!(chain->flags & NFT_BASE_CHAIN))
-                       continue;
-
-               nf_unregister_net_hooks(net, nft_base_chain(chain)->ops,
-                                       afi->nops);
-       }
+       _nf_tables_table_disable(net, afi, table, 0);
 }
 
 static int nf_tables_updtable(struct nft_ctx *ctx)
@@ -696,10 +702,7 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
        if (IS_ERR(table)) {
                if (PTR_ERR(table) != -ENOENT)
                        return PTR_ERR(table);
-               table = NULL;
-       }
-
-       if (table != NULL) {
+       } else {
                if (nlh->nlmsg_flags & NLM_F_EXCL)
                        return -EEXIST;
                if (nlh->nlmsg_flags & NLM_F_REPLACE)
@@ -2294,6 +2297,22 @@ err1:
        return err;
 }
 
+static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
+                                            const struct nlattr *nla)
+{
+       u32 id = ntohl(nla_get_be32(nla));
+       struct nft_trans *trans;
+
+       list_for_each_entry(trans, &net->nft.commit_list, list) {
+               struct nft_rule *rule = nft_trans_rule(trans);
+
+               if (trans->msg_type == NFT_MSG_NEWRULE &&
+                   id == nft_trans_rule_id(trans))
+                       return rule;
+       }
+       return ERR_PTR(-ENOENT);
+}
+
 static int nf_tables_delrule(struct net *net, struct sock *nlsk,
                             struct sk_buff *skb, const struct nlmsghdr *nlh,
                             const struct nlattr * const nla[])
@@ -2331,6 +2350,12 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk,
                        if (IS_ERR(rule))
                                return PTR_ERR(rule);
 
+                       err = nft_delrule(&ctx, rule);
+               } else if (nla[NFTA_RULE_ID]) {
+                       rule = nft_rule_lookup_byid(net, nla[NFTA_RULE_ID]);
+                       if (IS_ERR(rule))
+                               return PTR_ERR(rule);
+
                        err = nft_delrule(&ctx, rule);
                } else {
                        err = nft_delrule_by_chain(&ctx);
@@ -2399,12 +2424,14 @@ nft_select_set_ops(const struct nlattr * const nla[],
        features = 0;
        if (nla[NFTA_SET_FLAGS] != NULL) {
                features = ntohl(nla_get_be32(nla[NFTA_SET_FLAGS]));
-               features &= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_TIMEOUT;
+               features &= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_TIMEOUT |
+                           NFT_SET_OBJECT;
        }
 
-       bops       = NULL;
-       best.size  = ~0;
-       best.class = ~0;
+       bops        = NULL;
+       best.size   = ~0;
+       best.lookup = ~0;
+       best.space  = ~0;
 
        list_for_each_entry(ops, &nf_tables_set_ops, list) {
                if ((ops->features & features) != features)
@@ -2414,16 +2441,27 @@ nft_select_set_ops(const struct nlattr * const nla[],
 
                switch (policy) {
                case NFT_SET_POL_PERFORMANCE:
-                       if (est.class < best.class)
-                               break;
-                       if (est.class == best.class && est.size < best.size)
+                       if (est.lookup < best.lookup)
                                break;
+                       if (est.lookup == best.lookup) {
+                               if (!desc->size) {
+                                       if (est.space < best.space)
+                                               break;
+                               } else if (est.size < best.size) {
+                                       break;
+                               }
+                       }
                        continue;
                case NFT_SET_POL_MEMORY:
-                       if (est.size < best.size)
-                               break;
-                       if (est.size == best.size && est.class < best.class)
+                       if (!desc->size) {
+                               if (est.space < best.space)
+                                       break;
+                               if (est.space == best.space &&
+                                   est.lookup < best.lookup)
+                                       break;
+                       } else if (est.size < best.size) {
                                break;
+                       }
                        continue;
                default:
                        break;
@@ -2966,10 +3004,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
        if (IS_ERR(set)) {
                if (PTR_ERR(set) != -ENOENT)
                        return PTR_ERR(set);
-               set = NULL;
-       }
-
-       if (set != NULL) {
+       } else {
                if (nlh->nlmsg_flags & NLM_F_EXCL)
                        return -EEXIST;
                if (nlh->nlmsg_flags & NLM_F_REPLACE)
@@ -3125,6 +3160,7 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
                iter.count      = 0;
                iter.err        = 0;
                iter.fn         = nf_tables_bind_check_setelem;
+               iter.flush      = false;
 
                set->ops->walk(ctx, set, &iter);
                if (iter.err < 0)
@@ -3378,6 +3414,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
        args.iter.count         = 0;
        args.iter.err           = 0;
        args.iter.fn            = nf_tables_dump_setelem;
+       args.iter.flush         = false;
        set->ops->walk(&ctx, set, &args.iter);
 
        nla_nest_end(skb, nest);
@@ -3756,7 +3793,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
        return 0;
 
 err6:
-       set->ops->remove(set, &elem);
+       set->ops->remove(ctx->net, set, &elem);
 err5:
        kfree(trans);
 err4:
@@ -3902,7 +3939,7 @@ static int nft_flush_set(const struct nft_ctx *ctx,
        if (!trans)
                return -ENOMEM;
 
-       if (!set->ops->deactivate_one(ctx->net, set, elem->priv)) {
+       if (!set->ops->flush(ctx->net, set, elem->priv)) {
                err = -ENOENT;
                goto err1;
        }
@@ -3940,15 +3977,14 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk,
                return -EBUSY;
 
        if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL) {
-               struct nft_set_dump_args args = {
-                       .iter   = {
-                               .genmask        = genmask,
-                               .fn             = nft_flush_set,
-                       },
+               struct nft_set_iter iter = {
+                       .genmask        = genmask,
+                       .fn             = nft_flush_set,
+                       .flush          = true,
                };
-               set->ops->walk(&ctx, set, &args.iter);
+               set->ops->walk(&ctx, set, &iter);
 
-               return args.iter.err;
+               return iter.err;
        }
 
        nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
@@ -4163,10 +4199,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
                if (err != -ENOENT)
                        return err;
 
-               obj = NULL;
-       }
-
-       if (obj != NULL) {
+       } else {
                if (nlh->nlmsg_flags & NLM_F_EXCL)
                        return -EEXIST;
 
@@ -4811,7 +4844,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                        nf_tables_setelem_notify(&trans->ctx, te->set,
                                                 &te->elem,
                                                 NFT_MSG_DELSETELEM, 0);
-                       te->set->ops->remove(te->set, &te->elem);
+                       te->set->ops->remove(net, te->set, &te->elem);
                        atomic_dec(&te->set->nelems);
                        te->set->ndeact--;
                        break;
@@ -4932,7 +4965,7 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
                case NFT_MSG_NEWSETELEM:
                        te = (struct nft_trans_elem *)trans->data;
 
-                       te->set->ops->remove(te->set, &te->elem);
+                       te->set->ops->remove(net, te->set, &te->elem);
                        atomic_dec(&te->set->nelems);
                        break;
                case NFT_MSG_DELSETELEM:
@@ -4966,6 +4999,11 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
        return 0;
 }
 
+static bool nf_tables_valid_genid(struct net *net, u32 genid)
+{
+       return net->nft.base_seq == genid;
+}
+
 static const struct nfnetlink_subsystem nf_tables_subsys = {
        .name           = "nf_tables",
        .subsys_id      = NFNL_SUBSYS_NFTABLES,
@@ -4973,6 +5011,7 @@ static const struct nfnetlink_subsystem nf_tables_subsys = {
        .cb             = nf_tables_cb,
        .commit         = nf_tables_commit,
        .abort          = nf_tables_abort,
+       .valid_genid    = nf_tables_valid_genid,
 };
 
 int nft_chain_validate_dependency(const struct nft_chain *chain,
@@ -5098,6 +5137,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
                        iter.count      = 0;
                        iter.err        = 0;
                        iter.fn         = nf_tables_loop_check_setelem;
+                       iter.flush      = false;
 
                        set->ops->walk(ctx, set, &iter);
                        if (iter.err < 0)
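
A rule created earlier in the same batch has no kernel handle the client could name yet, so NFTA_RULE_ID lets a later message refer to it by a batch-local id: nft_trans_rule_add() records the id on the NEWRULE transaction, and nft_rule_lookup_byid() scans the pending commit list for it. A minimal model of that lookup (the types and names are invented):

#include <stdio.h>
#include <stdlib.h>

enum msg_type { MSG_NEWRULE, MSG_DELRULE };

struct trans {                      /* one pending, uncommitted transaction */
    enum msg_type type;
    unsigned int rule_id;           /* batch-local id, 0 = none */
    const char *rule;
    struct trans *next;
};

static struct trans *commit_list;

static void queue(enum msg_type type, unsigned int id, const char *rule)
{
    struct trans *tr = calloc(1, sizeof(*tr));

    if (!tr)
        exit(1);
    tr->type = type;
    tr->rule_id = id;
    tr->rule = rule;
    tr->next = commit_list;
    commit_list = tr;
}

/* Mirrors nft_rule_lookup_byid(): find a not-yet-committed NEWRULE. */
static const char *lookup_byid(unsigned int id)
{
    for (struct trans *tr = commit_list; tr; tr = tr->next)
        if (tr->type == MSG_NEWRULE && tr->rule_id == id)
            return tr->rule;
    return NULL;
}

int main(void)
{
    queue(MSG_NEWRULE, 1, "accept tcp dport 22");
    queue(MSG_NEWRULE, 2, "drop udp");
    printf("id 2 -> %s\n", lookup_byid(2));
    return 0;
}
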
index a09fa9fd8f3d98b2f3dd8479b861db4e71b37cf5..a2148d0bc50ec4af1ae324549f058710b5266ada 100644 (file)
@@ -3,7 +3,7 @@
  *
  * (C) 2001 by Jay Schulist <jschlst@samba.org>,
  * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org>
- * (C) 2005,2007 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2005-2017 by Pablo Neira Ayuso <pablo@netfilter.org>
  *
  * Initial netfilter messages via netlink development funded and
  * generally made possible by Network Robots, Inc. (www.networkrobots.com)
@@ -100,9 +100,9 @@ int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
 }
 EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);
 
-static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t type)
+static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u16 type)
 {
-       u_int8_t subsys_id = NFNL_SUBSYS_ID(type);
+       u8 subsys_id = NFNL_SUBSYS_ID(type);
 
        if (subsys_id >= NFNL_SUBSYS_COUNT)
                return NULL;
@@ -111,9 +111,9 @@ static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t t
 }
 
 static inline const struct nfnl_callback *
-nfnetlink_find_client(u_int16_t type, const struct nfnetlink_subsystem *ss)
+nfnetlink_find_client(u16 type, const struct nfnetlink_subsystem *ss)
 {
-       u_int8_t cb_id = NFNL_MSG_TYPE(type);
+       u8 cb_id = NFNL_MSG_TYPE(type);
 
        if (cb_id >= ss->cb_count)
                return NULL;
@@ -185,7 +185,7 @@ replay:
 
        {
                int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
-               u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
+               u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
                struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
                struct nlattr *attr = (void *)nlh + min_len;
                int attrlen = nlh->nlmsg_len - min_len;
@@ -273,7 +273,7 @@ enum {
 };
 
 static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
-                               u_int16_t subsys_id)
+                               u16 subsys_id, u32 genid)
 {
        struct sk_buff *oskb = skb;
        struct net *net = sock_net(skb->sk);
@@ -315,6 +315,12 @@ replay:
                return kfree_skb(skb);
        }
 
+       if (genid && ss->valid_genid && !ss->valid_genid(net, genid)) {
+               nfnl_unlock(subsys_id);
+               netlink_ack(oskb, nlh, -ERESTART);
+               return kfree_skb(skb);
+       }
+
        while (skb->len >= nlmsg_total_size(0)) {
                int msglen, type;
 
@@ -365,7 +371,7 @@ replay:
 
                {
                        int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
-                       u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
+                       u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
                        struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
                        struct nlattr *attr = (void *)nlh + min_len;
                        int attrlen = nlh->nlmsg_len - min_len;
@@ -436,11 +442,51 @@ done:
        kfree_skb(skb);
 }
 
+static const struct nla_policy nfnl_batch_policy[NFNL_BATCH_MAX + 1] = {
+       [NFNL_BATCH_GENID]      = { .type = NLA_U32 },
+};
+
+static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
+       struct nlattr *attr = (void *)nlh + min_len;
+       struct nlattr *cda[NFNL_BATCH_MAX + 1];
+       int attrlen = nlh->nlmsg_len - min_len;
+       struct nfgenmsg *nfgenmsg;
+       int msglen, err;
+       u32 gen_id = 0;
+       u16 res_id;
+
+       msglen = NLMSG_ALIGN(nlh->nlmsg_len);
+       if (msglen > skb->len)
+               msglen = skb->len;
+
+       if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+           skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
+               return;
+
+       err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy);
+       if (err < 0) {
+               netlink_ack(skb, nlh, err);
+               return;
+       }
+       if (cda[NFNL_BATCH_GENID])
+               gen_id = ntohl(nla_get_be32(cda[NFNL_BATCH_GENID]));
+
+       nfgenmsg = nlmsg_data(nlh);
+       skb_pull(skb, msglen);
+       /* Work around old nft using host byte order */
+       if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
+               res_id = NFNL_SUBSYS_NFTABLES;
+       else
+               res_id = ntohs(nfgenmsg->res_id);
+
+       nfnetlink_rcv_batch(skb, nlh, res_id, gen_id);
+}
+
 static void nfnetlink_rcv(struct sk_buff *skb)
 {
        struct nlmsghdr *nlh = nlmsg_hdr(skb);
-       u_int16_t res_id;
-       int msglen;
 
        if (nlh->nlmsg_len < NLMSG_HDRLEN ||
            skb->len < nlh->nlmsg_len)
@@ -451,28 +497,10 @@ static void nfnetlink_rcv(struct sk_buff *skb)
                return;
        }
 
-       if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN) {
-               struct nfgenmsg *nfgenmsg;
-
-               msglen = NLMSG_ALIGN(nlh->nlmsg_len);
-               if (msglen > skb->len)
-                       msglen = skb->len;
-
-               if (nlh->nlmsg_len < NLMSG_HDRLEN ||
-                   skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
-                       return;
-
-               nfgenmsg = nlmsg_data(nlh);
-               skb_pull(skb, msglen);
-               /* Work around old nft using host byte order */
-               if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
-                       res_id = NFNL_SUBSYS_NFTABLES;
-               else
-                       res_id = ntohs(nfgenmsg->res_id);
-               nfnetlink_rcv_batch(skb, nlh, res_id);
-       } else {
+       if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN)
+               nfnetlink_rcv_skb_batch(skb, nlh);
+       else
                netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
-       }
 }
 
 #ifdef CONFIG_MODULES
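
The NFNL_BATCH_GENID attribute gives batches compare-and-swap semantics against the ruleset generation: the client snapshots the generation counter, and the receive path now rejects the whole batch with -ERESTART if another writer bumped it in the meantime, rather than applying it against a ruleset that changed underneath. The shape of that optimistic check, reduced to a sketch (the error constant is a stand-in):

#include <stdio.h>

#define EGENSTALE 1                 /* stand-in for the kernel's -ERESTART */

static unsigned int base_seq = 7;   /* current ruleset generation */

/* Apply a batch only if the caller's snapshot is still current. */
static int apply_batch(unsigned int genid, const char *what)
{
    if (genid && genid != base_seq) /* genid 0 = client opted out */
        return -EGENSTALE;          /* caller should re-read and retry */
    printf("applied: %s\n", what);
    base_seq++;                     /* every committed batch bumps the gen */
    return 0;
}

int main(void)
{
    unsigned int snap = base_seq;   /* client snapshots the generation */

    apply_batch(snap, "batch A");                            /* ok */
    printf("stale batch -> %d\n", apply_batch(snap, "batch B")); /* rejected */
    return 0;
}
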
index e6baeaebe6537ea6337d001faa58631ebf6e41b2..c6b8022c0e47d43e11a7f2f351f3f0650c9ba604 100644 (file)
@@ -32,6 +32,11 @@ struct nft_ct {
        };
 };
 
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+static DEFINE_PER_CPU(struct nf_conn *, nft_ct_pcpu_template);
+static unsigned int nft_ct_pcpu_template_refcnt __read_mostly;
+#endif
+
 static u64 nft_ct_get_eval_counter(const struct nf_conn_counter *c,
                                   enum nft_ct_keys k,
                                   enum ip_conntrack_dir d)
@@ -129,12 +134,40 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                memcpy(dest, &count, sizeof(count));
                return;
        }
+       case NFT_CT_AVGPKT: {
+               const struct nf_conn_acct *acct = nf_conn_acct_find(ct);
+               u64 avgcnt = 0, bcnt = 0, pcnt = 0;
+
+               if (acct) {
+                       pcnt = nft_ct_get_eval_counter(acct->counter,
+                                                      NFT_CT_PKTS, priv->dir);
+                       bcnt = nft_ct_get_eval_counter(acct->counter,
+                                                      NFT_CT_BYTES, priv->dir);
+                       if (pcnt != 0)
+                               avgcnt = div64_u64(bcnt, pcnt);
+               }
+
+               memcpy(dest, &avgcnt, sizeof(avgcnt));
+               return;
+       }
        case NFT_CT_L3PROTOCOL:
                *dest = nf_ct_l3num(ct);
                return;
        case NFT_CT_PROTOCOL:
                *dest = nf_ct_protonum(ct);
                return;
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+       case NFT_CT_ZONE: {
+               const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
+
+               if (priv->dir < IP_CT_DIR_MAX)
+                       *dest = nf_ct_zone_id(zone, priv->dir);
+               else
+                       *dest = zone->id;
+
+               return;
+       }
+#endif
        default:
                break;
        }
@@ -163,6 +196,53 @@ err:
        regs->verdict.code = NFT_BREAK;
 }
 
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+static void nft_ct_set_zone_eval(const struct nft_expr *expr,
+                                struct nft_regs *regs,
+                                const struct nft_pktinfo *pkt)
+{
+       struct nf_conntrack_zone zone = { .dir = NF_CT_DEFAULT_ZONE_DIR };
+       const struct nft_ct *priv = nft_expr_priv(expr);
+       struct sk_buff *skb = pkt->skb;
+       enum ip_conntrack_info ctinfo;
+       u16 value = regs->data[priv->sreg];
+       struct nf_conn *ct;
+
+       ct = nf_ct_get(skb, &ctinfo);
+       if (ct) /* already tracked */
+               return;
+
+       zone.id = value;
+
+       switch (priv->dir) {
+       case IP_CT_DIR_ORIGINAL:
+               zone.dir = NF_CT_ZONE_DIR_ORIG;
+               break;
+       case IP_CT_DIR_REPLY:
+               zone.dir = NF_CT_ZONE_DIR_REPL;
+               break;
+       default:
+               break;
+       }
+
+       ct = this_cpu_read(nft_ct_pcpu_template);
+
+       if (likely(atomic_read(&ct->ct_general.use) == 1)) {
+               nf_ct_zone_add(ct, &zone);
+       } else {
+               /* previous skb got queued to userspace */
+               ct = nf_ct_tmpl_alloc(nft_net(pkt), &zone, GFP_ATOMIC);
+               if (!ct) {
+                       regs->verdict.code = NF_DROP;
+                       return;
+               }
+       }
+
+       atomic_inc(&ct->ct_general.use);
+       nf_ct_set(skb, ct, IP_CT_NEW);
+}
+#endif
+
 static void nft_ct_set_eval(const struct nft_expr *expr,
                            struct nft_regs *regs,
                            const struct nft_pktinfo *pkt)
@@ -241,6 +321,45 @@ static void nft_ct_netns_put(struct net *net, uint8_t family)
                nf_ct_netns_put(net, family);
 }
 
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+static void nft_ct_tmpl_put_pcpu(void)
+{
+       struct nf_conn *ct;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               ct = per_cpu(nft_ct_pcpu_template, cpu);
+               if (!ct)
+                       break;
+               nf_ct_put(ct);
+               per_cpu(nft_ct_pcpu_template, cpu) = NULL;
+       }
+}
+
+static bool nft_ct_tmpl_alloc_pcpu(void)
+{
+       struct nf_conntrack_zone zone = { .id = 0 };
+       struct nf_conn *tmp;
+       int cpu;
+
+       if (nft_ct_pcpu_template_refcnt)
+               return true;
+
+       for_each_possible_cpu(cpu) {
+               tmp = nf_ct_tmpl_alloc(&init_net, &zone, GFP_KERNEL);
+               if (!tmp) {
+                       nft_ct_tmpl_put_pcpu();
+                       return false;
+               }
+
+               atomic_set(&tmp->ct_general.use, 1);
+               per_cpu(nft_ct_pcpu_template, cpu) = tmp;
+       }
+
+       return true;
+}
+#endif
+
 static int nft_ct_get_init(const struct nft_ctx *ctx,
                           const struct nft_expr *expr,
                           const struct nlattr * const tb[])
@@ -250,6 +369,7 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
        int err;
 
        priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
+       priv->dir = IP_CT_DIR_MAX;
        switch (priv->key) {
        case NFT_CT_DIRECTION:
                if (tb[NFTA_CT_DIRECTION] != NULL)
@@ -316,11 +436,14 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
                break;
        case NFT_CT_BYTES:
        case NFT_CT_PKTS:
-               /* no direction? return sum of original + reply */
-               if (tb[NFTA_CT_DIRECTION] == NULL)
-                       priv->dir = IP_CT_DIR_MAX;
+       case NFT_CT_AVGPKT:
                len = sizeof(u64);
                break;
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+       case NFT_CT_ZONE:
+               len = sizeof(u16);
+               break;
+#endif
        default:
                return -EOPNOTSUPP;
        }
@@ -346,21 +469,41 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
        if (err < 0)
                return err;
 
-       if (priv->key == NFT_CT_BYTES || priv->key == NFT_CT_PKTS)
+       if (priv->key == NFT_CT_BYTES ||
+           priv->key == NFT_CT_PKTS  ||
+           priv->key == NFT_CT_AVGPKT)
                nf_ct_set_acct(ctx->net, true);
 
        return 0;
 }
 
+static void __nft_ct_set_destroy(const struct nft_ctx *ctx, struct nft_ct *priv)
+{
+       switch (priv->key) {
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+       case NFT_CT_LABELS:
+               nf_connlabels_put(ctx->net);
+               break;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+       case NFT_CT_ZONE:
+               if (--nft_ct_pcpu_template_refcnt == 0)
+                       nft_ct_tmpl_put_pcpu();
+#endif
+       default:
+               break;
+       }
+}
+
 static int nft_ct_set_init(const struct nft_ctx *ctx,
                           const struct nft_expr *expr,
                           const struct nlattr * const tb[])
 {
        struct nft_ct *priv = nft_expr_priv(expr);
-       bool label_got = false;
        unsigned int len;
        int err;
 
+       priv->dir = IP_CT_DIR_MAX;
        priv->key = ntohl(nla_get_be32(tb[NFTA_CT_KEY]));
        switch (priv->key) {
 #ifdef CONFIG_NF_CONNTRACK_MARK
@@ -378,13 +521,30 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
                err = nf_connlabels_get(ctx->net, (len * BITS_PER_BYTE) - 1);
                if (err)
                        return err;
-               label_got = true;
+               break;
+#endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+       case NFT_CT_ZONE:
+               if (!nft_ct_tmpl_alloc_pcpu())
+                       return -ENOMEM;
+               nft_ct_pcpu_template_refcnt++;
                break;
 #endif
        default:
                return -EOPNOTSUPP;
        }
 
+       if (tb[NFTA_CT_DIRECTION]) {
+               priv->dir = nla_get_u8(tb[NFTA_CT_DIRECTION]);
+               switch (priv->dir) {
+               case IP_CT_DIR_ORIGINAL:
+               case IP_CT_DIR_REPLY:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+       }
+
        priv->sreg = nft_parse_register(tb[NFTA_CT_SREG]);
        err = nft_validate_register_load(priv->sreg, len);
        if (err < 0)
@@ -397,8 +557,7 @@ static int nft_ct_set_init(const struct nft_ctx *ctx,
        return 0;
 
 err1:
-       if (label_got)
-               nf_connlabels_put(ctx->net);
+       __nft_ct_set_destroy(ctx, priv);
        return err;
 }
 
@@ -413,16 +572,7 @@ static void nft_ct_set_destroy(const struct nft_ctx *ctx,
 {
        struct nft_ct *priv = nft_expr_priv(expr);
 
-       switch (priv->key) {
-#ifdef CONFIG_NF_CONNTRACK_LABELS
-       case NFT_CT_LABELS:
-               nf_connlabels_put(ctx->net);
-               break;
-#endif
-       default:
-               break;
-       }
-
+       __nft_ct_set_destroy(ctx, priv);
        nft_ct_netns_put(ctx->net, ctx->afi->family);
 }
 
@@ -445,6 +595,8 @@ static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr)
                break;
        case NFT_CT_BYTES:
        case NFT_CT_PKTS:
+       case NFT_CT_AVGPKT:
+       case NFT_CT_ZONE:
                if (priv->dir < IP_CT_DIR_MAX &&
                    nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
                        goto nla_put_failure;
@@ -467,6 +619,17 @@ static int nft_ct_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
                goto nla_put_failure;
        if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
                goto nla_put_failure;
+
+       switch (priv->key) {
+       case NFT_CT_ZONE:
+               if (priv->dir < IP_CT_DIR_MAX &&
+                   nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
+                       goto nla_put_failure;
+               break;
+       default:
+               break;
+       }
+
        return 0;
 
 nla_put_failure:
@@ -492,6 +655,17 @@ static const struct nft_expr_ops nft_ct_set_ops = {
        .dump           = nft_ct_set_dump,
 };
 
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+static const struct nft_expr_ops nft_ct_set_zone_ops = {
+       .type           = &nft_ct_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_ct)),
+       .eval           = nft_ct_set_zone_eval,
+       .init           = nft_ct_set_init,
+       .destroy        = nft_ct_set_destroy,
+       .dump           = nft_ct_set_dump,
+};
+#endif
+
 static const struct nft_expr_ops *
 nft_ct_select_ops(const struct nft_ctx *ctx,
                    const struct nlattr * const tb[])
@@ -505,8 +679,13 @@ nft_ct_select_ops(const struct nft_ctx *ctx,
        if (tb[NFTA_CT_DREG])
                return &nft_ct_get_ops;
 
-       if (tb[NFTA_CT_SREG])
+       if (tb[NFTA_CT_SREG]) {
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+               if (nla_get_be32(tb[NFTA_CT_KEY]) == htonl(NFT_CT_ZONE))
+                       return &nft_ct_set_zone_ops;
+#endif
                return &nft_ct_set_ops;
+       }
 
        return ERR_PTR(-EINVAL);
 }
@@ -534,8 +713,7 @@ static void nft_notrack_eval(const struct nft_expr *expr,
 
        ct = nf_ct_untracked_get();
        atomic_inc(&ct->ct_general.use);
-       skb->nfct = &ct->ct_general;
-       skb->nfctinfo = IP_CT_NEW;
+       nf_ct_set(skb, ct, IP_CT_NEW);
 }
 
 static struct nft_expr_type nft_notrack_type;
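
The new NFT_CT_AVGPKT key reports mean bytes per packet for a flow; the eval path guards the division with pcnt != 0 and uses div64_u64() since 64-bit division is not native on every 32-bit target. The arithmetic itself is a one-liner:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Mean bytes per packet, 0 when no packets were accounted. */
static uint64_t avgpkt(uint64_t bytes, uint64_t pkts)
{
    return pkts ? bytes / pkts : 0;   /* div64_u64() in kernel context */
}

int main(void)
{
    printf("%" PRIu64 "\n", avgpkt(150000, 100));  /* 1500 */
    printf("%" PRIu64 "\n", avgpkt(0, 0));         /* guarded: 0 */
    return 0;
}
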
index 47beb3abcc9daf46e084c0f189eaf7091d11241e..c308920b194cdbe5e3a2e9a09cfb8aab7267f588 100644 (file)
 #include <linux/netfilter.h>
 #include <linux/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables.h>
-// FIXME:
-#include <net/ipv6.h>
+#include <net/tcp.h>
 
 struct nft_exthdr {
        u8                      type;
        u8                      offset;
        u8                      len;
+       u8                      op;
        enum nft_registers      dreg:8;
+       u8                      flags;
 };
 
-static void nft_exthdr_eval(const struct nft_expr *expr,
-                           struct nft_regs *regs,
-                           const struct nft_pktinfo *pkt)
+static unsigned int optlen(const u8 *opt, unsigned int offset)
+{
+       /* Beware zero-length options: make finite progress */
+       if (opt[offset] <= TCPOPT_NOP || opt[offset + 1] == 0)
+               return 1;
+       else
+               return opt[offset + 1];
+}
+
+static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
+                                struct nft_regs *regs,
+                                const struct nft_pktinfo *pkt)
 {
        struct nft_exthdr *priv = nft_expr_priv(expr);
        u32 *dest = &regs->data[priv->dreg];
@@ -35,8 +45,12 @@ static void nft_exthdr_eval(const struct nft_expr *expr,
        int err;
 
        err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
-       if (err < 0)
+       if (priv->flags & NFT_EXTHDR_F_PRESENT) {
+               *dest = (err >= 0);
+               return;
+       } else if (err < 0) {
                goto err;
+       }
        offset += priv->offset;
 
        dest[priv->len / NFT_REG32_SIZE] = 0;
@@ -47,11 +61,59 @@ err:
        regs->verdict.code = NFT_BREAK;
 }
 
+static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
+                               struct nft_regs *regs,
+                               const struct nft_pktinfo *pkt)
+{
+       u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE];
+       struct nft_exthdr *priv = nft_expr_priv(expr);
+       unsigned int i, optl, tcphdr_len, offset;
+       u32 *dest = &regs->data[priv->dreg];
+       struct tcphdr *tcph;
+       u8 *opt;
+
+       if (!pkt->tprot_set || pkt->tprot != IPPROTO_TCP)
+               goto err;
+
+       tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, sizeof(*tcph), buff);
+       if (!tcph)
+               goto err;
+
+       tcphdr_len = __tcp_hdrlen(tcph);
+       if (tcphdr_len < sizeof(*tcph))
+               goto err;
+
+       tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, tcphdr_len, buff);
+       if (!tcph)
+               goto err;
+
+       opt = (u8 *)tcph;
+       for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
+               optl = optlen(opt, i);
+
+               if (priv->type != opt[i])
+                       continue;
+
+               if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
+                       goto err;
+
+               offset = i + priv->offset;
+               dest[priv->len / NFT_REG32_SIZE] = 0;
+               memcpy(dest, opt + offset, priv->len);
+
+               return;
+       }
+
+err:
+       regs->verdict.code = NFT_BREAK;
+}
+
 static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
        [NFTA_EXTHDR_DREG]              = { .type = NLA_U32 },
        [NFTA_EXTHDR_TYPE]              = { .type = NLA_U8 },
        [NFTA_EXTHDR_OFFSET]            = { .type = NLA_U32 },
        [NFTA_EXTHDR_LEN]               = { .type = NLA_U32 },
+       [NFTA_EXTHDR_FLAGS]             = { .type = NLA_U32 },
 };
 
 static int nft_exthdr_init(const struct nft_ctx *ctx,
@@ -59,13 +121,13 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
                           const struct nlattr * const tb[])
 {
        struct nft_exthdr *priv = nft_expr_priv(expr);
-       u32 offset, len;
+       u32 offset, len, flags = 0, op = NFT_EXTHDR_OP_IPV6;
        int err;
 
-       if (tb[NFTA_EXTHDR_DREG] == NULL ||
-           tb[NFTA_EXTHDR_TYPE] == NULL ||
-           tb[NFTA_EXTHDR_OFFSET] == NULL ||
-           tb[NFTA_EXTHDR_LEN] == NULL)
+       if (!tb[NFTA_EXTHDR_DREG] ||
+           !tb[NFTA_EXTHDR_TYPE] ||
+           !tb[NFTA_EXTHDR_OFFSET] ||
+           !tb[NFTA_EXTHDR_LEN])
                return -EINVAL;
 
        err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset);
@@ -76,10 +138,27 @@ static int nft_exthdr_init(const struct nft_ctx *ctx,
        if (err < 0)
                return err;
 
+       if (tb[NFTA_EXTHDR_FLAGS]) {
+               err = nft_parse_u32_check(tb[NFTA_EXTHDR_FLAGS], U8_MAX, &flags);
+               if (err < 0)
+                       return err;
+
+               if (flags & ~NFT_EXTHDR_F_PRESENT)
+                       return -EINVAL;
+       }
+
+       if (tb[NFTA_EXTHDR_OP]) {
+               err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op);
+               if (err < 0)
+                       return err;
+       }
+
        priv->type   = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
        priv->offset = offset;
        priv->len    = len;
        priv->dreg   = nft_parse_register(tb[NFTA_EXTHDR_DREG]);
+       priv->flags  = flags;
+       priv->op     = op;
 
        return nft_validate_register_store(ctx, priv->dreg, NULL,
                                           NFT_DATA_VALUE, priv->len);
@@ -97,6 +176,10 @@ static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr)
                goto nla_put_failure;
        if (nla_put_be32(skb, NFTA_EXTHDR_LEN, htonl(priv->len)))
                goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_EXTHDR_FLAGS, htonl(priv->flags)))
+               goto nla_put_failure;
+       if (nla_put_be32(skb, NFTA_EXTHDR_OP, htonl(priv->op)))
+               goto nla_put_failure;
        return 0;
 
 nla_put_failure:
@@ -104,17 +187,45 @@ nla_put_failure:
 }
 
 static struct nft_expr_type nft_exthdr_type;
-static const struct nft_expr_ops nft_exthdr_ops = {
+static const struct nft_expr_ops nft_exthdr_ipv6_ops = {
        .type           = &nft_exthdr_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
-       .eval           = nft_exthdr_eval,
+       .eval           = nft_exthdr_ipv6_eval,
        .init           = nft_exthdr_init,
        .dump           = nft_exthdr_dump,
 };
 
+static const struct nft_expr_ops nft_exthdr_tcp_ops = {
+       .type           = &nft_exthdr_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
+       .eval           = nft_exthdr_tcp_eval,
+       .init           = nft_exthdr_init,
+       .dump           = nft_exthdr_dump,
+};
+
+static const struct nft_expr_ops *
+nft_exthdr_select_ops(const struct nft_ctx *ctx,
+                     const struct nlattr * const tb[])
+{
+       u32 op;
+
+       if (!tb[NFTA_EXTHDR_OP])
+               return &nft_exthdr_ipv6_ops;
+
+       op = ntohl(nla_get_u32(tb[NFTA_EXTHDR_OP]));
+       switch (op) {
+       case NFT_EXTHDR_OP_TCPOPT:
+               return &nft_exthdr_tcp_ops;
+       case NFT_EXTHDR_OP_IPV6:
+               return &nft_exthdr_ipv6_ops;
+       }
+
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
 static struct nft_expr_type nft_exthdr_type __read_mostly = {
        .name           = "exthdr",
-       .ops            = &nft_exthdr_ops,
+       .select_ops     = &nft_exthdr_select_ops,
        .policy         = nft_exthdr_policy,
        .maxattr        = NFTA_EXTHDR_MAX,
        .owner          = THIS_MODULE,
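
For userspace, backward compatibility falls out of select_ops(): an nft binary that never emits NFTA_EXTHDR_OP keeps getting the IPv6 exthdr behaviour, while one that sends NFT_EXTHDR_OP_TCPOPT selects the TCP option matcher. nft releases built on this interface expose it with syntax along the lines of "tcp option maxseg size 1460" (the exact syntax depends on the nft version).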
index 66c7f4b4c49bda596d9151d162bc68d95d957560..e1f5ca9b423b5ffda43ec5519d4c8832ce695899 100644 (file)
@@ -154,13 +154,36 @@ void nft_meta_get_eval(const struct nft_expr *expr,
                                *dest = PACKET_BROADCAST;
                        break;
                case NFPROTO_IPV6:
-                       if (ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF)
+                       *dest = PACKET_MULTICAST;
+                       break;
+               case NFPROTO_NETDEV:
+                       switch (skb->protocol) {
+                       case htons(ETH_P_IP): {
+                               int noff = skb_network_offset(skb);
+                               struct iphdr *iph, _iph;
+
+                               iph = skb_header_pointer(skb, noff,
+                                                        sizeof(_iph), &_iph);
+                               if (!iph)
+                                       goto err;
+
+                               if (ipv4_is_multicast(iph->daddr))
+                                       *dest = PACKET_MULTICAST;
+                               else
+                                       *dest = PACKET_BROADCAST;
+
+                               break;
+                       }
+                       case htons(ETH_P_IPV6):
                                *dest = PACKET_MULTICAST;
-                       else
-                               *dest = PACKET_BROADCAST;
+                               break;
+                       default:
+                               WARN_ON_ONCE(1);
+                               goto err;
+                       }
                        break;
                default:
-                       WARN_ON(1);
+                       WARN_ON_ONCE(1);
                        goto err;
                }
                break;
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
new file mode 100644 (file)
index 0000000..97f9649
--- /dev/null
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2017 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+
+/* This bitmap uses two bits to represent one element. These two bits determine
+ * the element state in the current and the future generation.
+ *
+ * An element can be in three states. The generation cursor is represented using
+ * the ^ character; note that this cursor shifts on every successful transaction.
+ * If no transaction is going on, we observe all elements are in the following
+ * state:
+ *
+ * 11 = this element is active in the current generation. In case of no updates,
+ * ^    it stays active in the next generation.
+ * 00 = this element is inactive in the current generation. In case of no
+ * ^    updates, it stays inactive in the next generation.
+ *
+ * On transaction handling, we observe these two temporary states:
+ *
+ * 01 = this element is inactive in the current generation and it becomes active
+ * ^    in the next one. This happens when the element is inserted but commit
+ *      path has not been executed yet, so activation is still pending. On
+ *      transaction abortion, the element is removed.
+ * 10 = this element is active in the current generation and it becomes inactive
+ * ^    in the next one. This happens when the element is deactivated but commit
+ *      path has not been executed yet, so removal is still pending. On
+ *      transaction abortion, the next generation bit is reset to restore
+ *      its previous state.
+ */
+struct nft_bitmap {
+       u16     bitmap_size;
+       u8      bitmap[];
+};
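
To make the state diagram above concrete: key k occupies bit pair 2k, so key 5 lives in byte 1 at bit offset 2 (see nft_bitmap_location() below). The lifecycle can be modelled in a few lines of stand-alone C, with an explicit cursor flip standing in for the transaction commit (a sketch, not the kernel's generation machinery):

    #include <stdint.h>
    #include <stdio.h>

    static void show(const char *tag, uint8_t bits, int cur)
    {
        printf("%-16s cur=%d next=%d\n", tag,
               (bits >> cur) & 1, (bits >> !cur) & 1);
    }

    int main(void)
    {
        uint8_t bits = 0;           /* 00: inactive in both generations */
        int cur = 0;                /* generation cursor */

        bits |= 1 << !cur;          /* insert: 00 -> 01 */
        show("pending insert", bits, cur);

        cur = !cur;                 /* commit flips the cursor */
        bits |= 1 << !cur;          /* activate: stay active -> 11 */
        show("active", bits, cur);

        bits &= ~(1 << !cur);       /* deactivate: 11 -> 10 */
        cur = !cur;                 /* commit flips the cursor... */
        bits &= ~(1 << !cur);       /* ...and remove() clears the rest */
        show("after removal", bits, cur);
        return 0;
    }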
+
+static inline void nft_bitmap_location(u32 key, u32 *idx, u32 *off)
+{
+       u32 k = (key << 1);
+
+       *idx = k / BITS_PER_BYTE;
+       *off = k % BITS_PER_BYTE;
+}
+
+/* Fetch the two bits that represent the element and check if it is active based
+ * on the generation mask.
+ */
+static inline bool
+nft_bitmap_active(const u8 *bitmap, u32 idx, u32 off, u8 genmask)
+{
+       return (bitmap[idx] & (0x3 << off)) & (genmask << off);
+}
+
+static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set,
+                             const u32 *key, const struct nft_set_ext **ext)
+{
+       const struct nft_bitmap *priv = nft_set_priv(set);
+       u8 genmask = nft_genmask_cur(net);
+       u32 idx, off;
+
+       nft_bitmap_location(*key, &idx, &off);
+
+       return nft_bitmap_active(priv->bitmap, idx, off, genmask);
+}
+
+static int nft_bitmap_insert(const struct net *net, const struct nft_set *set,
+                            const struct nft_set_elem *elem,
+                            struct nft_set_ext **_ext)
+{
+       struct nft_bitmap *priv = nft_set_priv(set);
+       struct nft_set_ext *ext = elem->priv;
+       u8 genmask = nft_genmask_next(net);
+       u32 idx, off;
+
+       nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
+       if (nft_bitmap_active(priv->bitmap, idx, off, genmask))
+               return -EEXIST;
+
+       /* Enter 01 state. */
+       priv->bitmap[idx] |= (genmask << off);
+
+       return 0;
+}
+
+static void nft_bitmap_remove(const struct net *net,
+                             const struct nft_set *set,
+                             const struct nft_set_elem *elem)
+{
+       struct nft_bitmap *priv = nft_set_priv(set);
+       struct nft_set_ext *ext = elem->priv;
+       u8 genmask = nft_genmask_next(net);
+       u32 idx, off;
+
+       nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
+       /* Enter 00 state. */
+       priv->bitmap[idx] &= ~(genmask << off);
+}
+
+static void nft_bitmap_activate(const struct net *net,
+                               const struct nft_set *set,
+                               const struct nft_set_elem *elem)
+{
+       struct nft_bitmap *priv = nft_set_priv(set);
+       struct nft_set_ext *ext = elem->priv;
+       u8 genmask = nft_genmask_next(net);
+       u32 idx, off;
+
+       nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
+       /* Enter 11 state. */
+       priv->bitmap[idx] |= (genmask << off);
+}
+
+static bool nft_bitmap_flush(const struct net *net,
+                            const struct nft_set *set, void *ext)
+{
+       struct nft_bitmap *priv = nft_set_priv(set);
+       u8 genmask = nft_genmask_next(net);
+       u32 idx, off;
+
+       nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off);
+       /* Enter 10 state, similar to deactivation. */
+       priv->bitmap[idx] &= ~(genmask << off);
+
+       return true;
+}
+
+static struct nft_set_ext *nft_bitmap_ext_alloc(const struct nft_set *set,
+                                               const struct nft_set_elem *elem)
+{
+       struct nft_set_ext_tmpl tmpl;
+       struct nft_set_ext *ext;
+
+       nft_set_ext_prepare(&tmpl);
+       nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
+
+       ext = kzalloc(tmpl.len, GFP_KERNEL);
+       if (!ext)
+               return NULL;
+
+       nft_set_ext_init(ext, &tmpl);
+       memcpy(nft_set_ext_key(ext), elem->key.val.data, set->klen);
+
+       return ext;
+}
+
+static void *nft_bitmap_deactivate(const struct net *net,
+                                  const struct nft_set *set,
+                                  const struct nft_set_elem *elem)
+{
+       struct nft_bitmap *priv = nft_set_priv(set);
+       u8 genmask = nft_genmask_next(net);
+       struct nft_set_ext *ext;
+       u32 idx, off, key = 0;
+
+       memcpy(&key, elem->key.val.data, set->klen);
+       nft_bitmap_location(key, &idx, &off);
+
+       if (!nft_bitmap_active(priv->bitmap, idx, off, genmask))
+               return NULL;
+
+       /* We have no real set extension since this is a bitmap, so allocate a
+        * dummy object that is released from the commit/abort path.
+        */
+       ext = nft_bitmap_ext_alloc(set, elem);
+       if (!ext)
+               return NULL;
+
+       /* Enter 10 state. */
+       priv->bitmap[idx] &= ~(genmask << off);
+
+       return ext;
+}
+
+static void nft_bitmap_walk(const struct nft_ctx *ctx,
+                           struct nft_set *set,
+                           struct nft_set_iter *iter)
+{
+       const struct nft_bitmap *priv = nft_set_priv(set);
+       struct nft_set_ext_tmpl tmpl;
+       struct nft_set_elem elem;
+       struct nft_set_ext *ext;
+       int idx, off;
+       u16 key;
+
+       nft_set_ext_prepare(&tmpl);
+       nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
+
+       for (idx = 0; idx < priv->bitmap_size; idx++) {
+               for (off = 0; off < BITS_PER_BYTE; off += 2) {
+                       if (iter->count < iter->skip)
+                               goto cont;
+
+                       if (!nft_bitmap_active(priv->bitmap, idx, off,
+                                              iter->genmask))
+                               goto cont;
+
+                       ext = kzalloc(tmpl.len, GFP_KERNEL);
+                       if (!ext) {
+                               iter->err = -ENOMEM;
+                               return;
+                       }
+                       nft_set_ext_init(ext, &tmpl);
+                       key = ((idx * BITS_PER_BYTE) + off) >> 1;
+                       memcpy(nft_set_ext_key(ext), &key, set->klen);
+
+                       elem.priv = ext;
+                       iter->err = iter->fn(ctx, set, iter, &elem);
+
+                       /* On set flush, this dummy extension object is released
+                        * from the commit/abort path.
+                        */
+                       if (!iter->flush)
+                               kfree(ext);
+
+                       if (iter->err < 0)
+                               return;
+cont:
+                       iter->count++;
+               }
+       }
+}
+
+/* The bitmap size is pow(2, key length in bits) / bits per byte. This is
+ * multiplied by two since each element takes two bits. For 8 bit keys, the
+ * bitmap consumes 66 bytes. For 16 bit keys, 16386 bytes.
+ */
+static inline u32 nft_bitmap_size(u32 klen)
+{
+       return ((2 << ((klen * BITS_PER_BYTE) - 1)) / BITS_PER_BYTE) << 1;
+}
+
+static inline u32 nft_bitmap_total_size(u32 klen)
+{
+       return sizeof(struct nft_bitmap) + nft_bitmap_size(klen);
+}
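
Worked through: for klen = 1 the formula gives (2 << 7) / 8 = 32, doubled to 64 bytes of bitmap (2^8 keys x 2 bits each), plus the 2-byte struct header; for klen = 2 it gives 16384 bytes plus the header. A quick user-space check of the same arithmetic:

    #include <stdio.h>

    /* Same arithmetic as nft_bitmap_size() above. */
    static unsigned int bitmap_size(unsigned int klen)
    {
        return ((2u << (klen * 8 - 1)) / 8) << 1;
    }

    int main(void)
    {
        printf("%u %u\n", bitmap_size(1), bitmap_size(2)); /* 64 16384 */
        return 0;
    }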
+
+static unsigned int nft_bitmap_privsize(const struct nlattr * const nla[])
+{
+       u32 klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN]));
+
+       return nft_bitmap_total_size(klen);
+}
+
+static int nft_bitmap_init(const struct nft_set *set,
+                        const struct nft_set_desc *desc,
+                        const struct nlattr * const nla[])
+{
+       struct nft_bitmap *priv = nft_set_priv(set);
+
+       priv->bitmap_size = nft_bitmap_total_size(set->klen);
+
+       return 0;
+}
+
+static void nft_bitmap_destroy(const struct nft_set *set)
+{
+}
+
+static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
+                               struct nft_set_estimate *est)
+{
+       /* Make sure we don't get bitmaps larger than 16 KBytes. */
+       if (desc->klen > 2)
+               return false;
+
+       est->size   = nft_bitmap_total_size(desc->klen);
+       est->lookup = NFT_SET_CLASS_O_1;
+       est->space  = NFT_SET_CLASS_O_1;
+
+       return true;
+}
+
+static struct nft_set_ops nft_bitmap_ops __read_mostly = {
+       .privsize       = nft_bitmap_privsize,
+       .estimate       = nft_bitmap_estimate,
+       .init           = nft_bitmap_init,
+       .destroy        = nft_bitmap_destroy,
+       .insert         = nft_bitmap_insert,
+       .remove         = nft_bitmap_remove,
+       .deactivate     = nft_bitmap_deactivate,
+       .flush          = nft_bitmap_flush,
+       .activate       = nft_bitmap_activate,
+       .lookup         = nft_bitmap_lookup,
+       .walk           = nft_bitmap_walk,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_bitmap_module_init(void)
+{
+       return nft_register_set(&nft_bitmap_ops);
+}
+
+static void __exit nft_bitmap_module_exit(void)
+{
+       nft_unregister_set(&nft_bitmap_ops);
+}
+
+module_init(nft_bitmap_module_init);
+module_exit(nft_bitmap_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_SET();
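
Nothing changes on the wire for userspace: backend selection stays inside the kernel via the estimate callbacks, so a set whose key fits in two bytes, say one keyed on inet_proto (one byte), can now be backed by this bitmap and served with O(1) lookups, while larger keys continue to land on the hash or rbtree backends.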
index e36069fb76aebd4140098f38a3758135e78b8d43..5f652720fc78e6de437c100d31524588e729001c 100644 (file)
@@ -167,8 +167,8 @@ static void nft_hash_activate(const struct net *net, const struct nft_set *set,
        nft_set_elem_clear_busy(&he->ext);
 }
 
-static bool nft_hash_deactivate_one(const struct net *net,
-                                   const struct nft_set *set, void *priv)
+static bool nft_hash_flush(const struct net *net,
+                          const struct nft_set *set, void *priv)
 {
        struct nft_hash_elem *he = priv;
 
@@ -195,7 +195,7 @@ static void *nft_hash_deactivate(const struct net *net,
        rcu_read_lock();
        he = rhashtable_lookup_fast(&priv->ht, &arg, nft_hash_params);
        if (he != NULL &&
-           !nft_hash_deactivate_one(net, set, he))
+           !nft_hash_flush(net, set, he))
                he = NULL;
 
        rcu_read_unlock();
@@ -203,7 +203,8 @@ static void *nft_hash_deactivate(const struct net *net,
        return he;
 }
 
-static void nft_hash_remove(const struct nft_set *set,
+static void nft_hash_remove(const struct net *net,
+                           const struct nft_set *set,
                            const struct nft_set_elem *elem)
 {
        struct nft_hash *priv = nft_set_priv(set);
@@ -383,7 +384,8 @@ static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
                est->size = esize + 2 * sizeof(struct nft_hash_elem *);
        }
 
-       est->class = NFT_SET_CLASS_O_1;
+       est->lookup = NFT_SET_CLASS_O_1;
+       est->space  = NFT_SET_CLASS_O_N;
 
        return true;
 }
@@ -397,12 +399,12 @@ static struct nft_set_ops nft_hash_ops __read_mostly = {
        .insert         = nft_hash_insert,
        .activate       = nft_hash_activate,
        .deactivate     = nft_hash_deactivate,
-       .deactivate_one = nft_hash_deactivate_one,
+       .flush          = nft_hash_flush,
        .remove         = nft_hash_remove,
        .lookup         = nft_hash_lookup,
        .update         = nft_hash_update,
        .walk           = nft_hash_walk,
-       .features       = NFT_SET_MAP | NFT_SET_TIMEOUT,
+       .features       = NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
        .owner          = THIS_MODULE,
 };
 
index f06f55ee516de44d3b0367cc52afb32173a2d0e8..71e8fb886a73b70489e635c63957cb9f8642ec5d 100644 (file)
@@ -151,7 +151,8 @@ static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
        return err;
 }
 
-static void nft_rbtree_remove(const struct nft_set *set,
+static void nft_rbtree_remove(const struct net *net,
+                             const struct nft_set *set,
                              const struct nft_set_elem *elem)
 {
        struct nft_rbtree *priv = nft_set_priv(set);
@@ -171,8 +172,8 @@ static void nft_rbtree_activate(const struct net *net,
        nft_set_elem_change_active(net, set, &rbe->ext);
 }
 
-static bool nft_rbtree_deactivate_one(const struct net *net,
-                                     const struct nft_set *set, void *priv)
+static bool nft_rbtree_flush(const struct net *net,
+                            const struct nft_set *set, void *priv)
 {
        struct nft_rbtree_elem *rbe = priv;
 
@@ -213,7 +214,7 @@ static void *nft_rbtree_deactivate(const struct net *net,
                                parent = parent->rb_right;
                                continue;
                        }
-                       nft_rbtree_deactivate_one(net, set, rbe);
+                       nft_rbtree_flush(net, set, rbe);
                        return rbe;
                }
        }
@@ -290,7 +291,8 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
        else
                est->size = nsize;
 
-       est->class = NFT_SET_CLASS_O_LOG_N;
+       est->lookup = NFT_SET_CLASS_O_LOG_N;
+       est->space  = NFT_SET_CLASS_O_N;
 
        return true;
 }
@@ -304,11 +306,11 @@ static struct nft_set_ops nft_rbtree_ops __read_mostly = {
        .insert         = nft_rbtree_insert,
        .remove         = nft_rbtree_remove,
        .deactivate     = nft_rbtree_deactivate,
-       .deactivate_one = nft_rbtree_deactivate_one,
+       .flush          = nft_rbtree_flush,
        .activate       = nft_rbtree_activate,
        .lookup         = nft_rbtree_lookup,
        .walk           = nft_rbtree_walk,
-       .features       = NFT_SET_INTERVAL | NFT_SET_MAP,
+       .features       = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT,
        .owner          = THIS_MODULE,
 };
 
index 2ff499680cc60b6ea7e22a8d02e2b82e32005a23..016db6be94b996c797c9c4de0fd324c737706393 100644 (file)
@@ -262,6 +262,60 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
 }
 EXPORT_SYMBOL_GPL(xt_request_find_target);
 
+
+static int xt_obj_to_user(u16 __user *psize, u16 size,
+                         void __user *pname, const char *name,
+                         u8 __user *prev, u8 rev)
+{
+       if (put_user(size, psize))
+               return -EFAULT;
+       if (copy_to_user(pname, name, strlen(name) + 1))
+               return -EFAULT;
+       if (put_user(rev, prev))
+               return -EFAULT;
+
+       return 0;
+}
+
+#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE)                             \
+       xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size,  \
+                      U->u.user.name, K->u.kernel.TYPE->name,          \
+                      &U->u.user.revision, K->u.kernel.TYPE->revision)
+
+int xt_data_to_user(void __user *dst, const void *src,
+                   int usersize, int size)
+{
+       usersize = usersize ? : size;
+       if (copy_to_user(dst, src, usersize))
+               return -EFAULT;
+       if (usersize != size && clear_user(dst + usersize, size - usersize))
+               return -EFAULT;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(xt_data_to_user);
+
+#define XT_DATA_TO_USER(U, K, TYPE, C_SIZE)                            \
+       xt_data_to_user(U->data, K->data,                               \
+                       K->u.kernel.TYPE->usersize,                     \
+                       C_SIZE ? : K->u.kernel.TYPE->TYPE##size)
+
+int xt_match_to_user(const struct xt_entry_match *m,
+                    struct xt_entry_match __user *u)
+{
+       return XT_OBJ_TO_USER(u, m, match, 0) ||
+              XT_DATA_TO_USER(u, m, match, 0);
+}
+EXPORT_SYMBOL_GPL(xt_match_to_user);
+
+int xt_target_to_user(const struct xt_entry_target *t,
+                     struct xt_entry_target __user *u)
+{
+       return XT_OBJ_TO_USER(u, t, target, 0) ||
+              XT_DATA_TO_USER(u, t, target, 0);
+}
+EXPORT_SYMBOL_GPL(xt_target_to_user);
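
The rationale for usersize: a match/target payload often ends with kernel-private state (pointers, timers) that begins at a fixed offset. xt_data_to_user() copies only the bytes up to that offset and zeroes the remainder, so kernel addresses never leak to userspace when rules are dumped. A user-space model of the copy-and-clear, with memcpy()/memset() standing in for copy_to_user()/clear_user():

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct info {
        unsigned int limit;     /* user-visible configuration */
        void *priv;             /* kernel-private, must not leak */
    };

    static void data_to_user(void *dst, const void *src,
                             size_t usersize, size_t size)
    {
        memcpy(dst, src, usersize);                         /* visible head */
        memset((char *)dst + usersize, 0, size - usersize); /* clear tail */
    }

    int main(void)
    {
        struct info k = { .limit = 5, .priv = &k };
        struct info u;

        data_to_user(&u, &k, offsetof(struct info, priv), sizeof(k));
        printf("limit=%u priv=%p\n", u.limit, u.priv);      /* priv is nil */
        return 0;
    }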
+
 static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
 {
        const struct xt_match *m;
@@ -565,17 +619,14 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
        int off = xt_compat_match_offset(match);
        u_int16_t msize = m->u.user.match_size - off;
 
-       if (copy_to_user(cm, m, sizeof(*cm)) ||
-           put_user(msize, &cm->u.user.match_size) ||
-           copy_to_user(cm->u.user.name, m->u.kernel.match->name,
-                        strlen(m->u.kernel.match->name) + 1))
+       if (XT_OBJ_TO_USER(cm, m, match, msize))
                return -EFAULT;
 
        if (match->compat_to_user) {
                if (match->compat_to_user((void __user *)cm->data, m->data))
                        return -EFAULT;
        } else {
-               if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
+               if (XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
                        return -EFAULT;
        }
 
@@ -923,17 +974,14 @@ int xt_compat_target_to_user(const struct xt_entry_target *t,
        int off = xt_compat_target_offset(target);
        u_int16_t tsize = t->u.user.target_size - off;
 
-       if (copy_to_user(ct, t, sizeof(*ct)) ||
-           put_user(tsize, &ct->u.user.target_size) ||
-           copy_to_user(ct->u.user.name, t->u.kernel.target->name,
-                        strlen(t->u.kernel.target->name) + 1))
+       if (XT_OBJ_TO_USER(ct, t, target, tsize))
                return -EFAULT;
 
        if (target->compat_to_user) {
                if (target->compat_to_user((void __user *)ct->data, t->data))
                        return -EFAULT;
        } else {
-               if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
+               if (XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
                        return -EFAULT;
        }
 
index 95c75035874758ce56c8656b859204bf8f10e6cf..b008db0184b8a2a1679d5d4737427a672c36b560 100644 (file)
 static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct)
 {
        /* Previously seen (loopback)? Ignore. */
-       if (skb->nfct != NULL)
+       if (skb->_nfct != 0)
                return XT_CONTINUE;
 
        /* special case the untracked ct : we want the percpu object */
        if (!ct)
                ct = nf_ct_untracked_get();
        atomic_inc(&ct->ct_general.use);
-       skb->nfct = &ct->ct_general;
-       skb->nfctinfo = IP_CT_NEW;
+       nf_ct_set(skb, ct, IP_CT_NEW);
 
        return XT_CONTINUE;
 }
@@ -373,6 +372,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
                .name           = "CT",
                .family         = NFPROTO_UNSPEC,
                .targetsize     = sizeof(struct xt_ct_target_info),
+               .usersize       = offsetof(struct xt_ct_target_info, ct),
                .checkentry     = xt_ct_tg_check_v0,
                .destroy        = xt_ct_tg_destroy_v0,
                .target         = xt_ct_target_v0,
@@ -384,6 +384,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
                .family         = NFPROTO_UNSPEC,
                .revision       = 1,
                .targetsize     = sizeof(struct xt_ct_target_info_v1),
+               .usersize       = offsetof(struct xt_ct_target_info, ct),
                .checkentry     = xt_ct_tg_check_v1,
                .destroy        = xt_ct_tg_destroy_v1,
                .target         = xt_ct_target_v1,
@@ -395,6 +396,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
                .family         = NFPROTO_UNSPEC,
                .revision       = 2,
                .targetsize     = sizeof(struct xt_ct_target_info_v1),
+               .usersize       = offsetof(struct xt_ct_target_info, ct),
                .checkentry     = xt_ct_tg_check_v2,
                .destroy        = xt_ct_tg_destroy_v1,
                .target         = xt_ct_target_v1,
@@ -407,12 +409,11 @@ static unsigned int
 notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
        /* Previously seen (loopback)? Ignore. */
-       if (skb->nfct != NULL)
+       if (skb->_nfct != 0)
                return XT_CONTINUE;
 
-       skb->nfct = &nf_ct_untracked_get()->ct_general;
-       skb->nfctinfo = IP_CT_NEW;
-       nf_conntrack_get(skb->nfct);
+       nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW);
+       nf_conntrack_get(skb_nfct(skb));
 
        return XT_CONTINUE;
 }
index 91a373a3f534de8d8641341c33c08d8fc49cbd29..498b54fd04d7fae9b37909260bfb08ca256fd6ba 100644 (file)
@@ -162,6 +162,7 @@ static struct xt_target xt_rateest_tg_reg __read_mostly = {
        .checkentry = xt_rateest_tg_checkentry,
        .destroy    = xt_rateest_tg_destroy,
        .targetsize = sizeof(struct xt_rateest_target_info),
+       .usersize   = offsetof(struct xt_rateest_target_info, est),
        .me         = THIS_MODULE,
 };
 
index 1c57ace75ae62be26e4a5b26ded9d84fb5b377a0..86b0580b2216bb475717f4d64332320cff42cc3e 100644 (file)
@@ -133,6 +133,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
                .family     = NFPROTO_IPV4,
                .target     = tee_tg4,
                .targetsize = sizeof(struct xt_tee_tginfo),
+               .usersize   = offsetof(struct xt_tee_tginfo, priv),
                .checkentry = tee_tg_check,
                .destroy    = tee_tg_destroy,
                .me         = THIS_MODULE,
@@ -144,6 +145,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
                .family     = NFPROTO_IPV6,
                .target     = tee_tg6,
                .targetsize = sizeof(struct xt_tee_tginfo),
+               .usersize   = offsetof(struct xt_tee_tginfo, priv),
                .checkentry = tee_tg_check,
                .destroy    = tee_tg_destroy,
                .me         = THIS_MODULE,
index 2dedaa23ab0aa035982b6fcb33666cba5e8a54f6..38986a95216cd2c3f7a0f83deedcb42153f5a937 100644 (file)
@@ -110,6 +110,7 @@ static struct xt_match bpf_mt_reg[] __read_mostly = {
                .match          = bpf_mt,
                .destroy        = bpf_mt_destroy,
                .matchsize      = sizeof(struct xt_bpf_info),
+               .usersize       = offsetof(struct xt_bpf_info, filter),
                .me             = THIS_MODULE,
        },
        {
@@ -120,6 +121,7 @@ static struct xt_match bpf_mt_reg[] __read_mostly = {
                .match          = bpf_mt_v1,
                .destroy        = bpf_mt_destroy_v1,
                .matchsize      = sizeof(struct xt_bpf_info_v1),
+               .usersize       = offsetof(struct xt_bpf_info_v1, filter),
                .me             = THIS_MODULE,
        },
 };
index a086a914865f7ed305c1f9cff78210613eebb855..1db1ce59079fba704ed7f5333dd4ebf014383d71 100644 (file)
@@ -122,6 +122,7 @@ static struct xt_match cgroup_mt_reg[] __read_mostly = {
                .checkentry     = cgroup_mt_check_v1,
                .match          = cgroup_mt_v1,
                .matchsize      = sizeof(struct xt_cgroup_info_v1),
+               .usersize       = offsetof(struct xt_cgroup_info_v1, priv),
                .destroy        = cgroup_mt_destroy_v1,
                .me             = THIS_MODULE,
                .hooks          = (1 << NF_INET_LOCAL_OUT) |
index 2aff2b7c4689c897be7c0250d5c514e5ee8014d5..b8fd4ab762edba5061e36e305c8048cf7a6fe846 100644 (file)
@@ -218,7 +218,7 @@ count_tree(struct net *net, struct rb_root *root,
                int diff;
                bool addit;
 
-               rbconn = container_of(*rbnode, struct xt_connlimit_rb, node);
+               rbconn = rb_entry(*rbnode, struct xt_connlimit_rb, node);
 
                parent = *rbnode;
                diff = same_source_net(addr, mask, &rbconn->addr, family);
@@ -398,7 +398,7 @@ static void destroy_tree(struct rb_root *r)
        struct rb_node *node;
 
        while ((node = rb_first(r)) != NULL) {
-               rbconn = container_of(node, struct xt_connlimit_rb, node);
+               rbconn = rb_entry(node, struct xt_connlimit_rb, node);
 
                rb_erase(node, r);
 
@@ -431,6 +431,7 @@ static struct xt_match connlimit_mt_reg __read_mostly = {
        .checkentry = connlimit_mt_check,
        .match      = connlimit_mt,
        .matchsize  = sizeof(struct xt_connlimit_info),
+       .usersize   = offsetof(struct xt_connlimit_info, data),
        .destroy    = connlimit_mt_destroy,
        .me         = THIS_MODULE,
 };
index 10063408141d25bdd0f1a1241ffe6395bae753f6..26ef70c50e3b3e6b1b2fa71f76a3ecb25768f140 100644 (file)
@@ -838,6 +838,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
                .family         = NFPROTO_IPV4,
                .match          = hashlimit_mt_v1,
                .matchsize      = sizeof(struct xt_hashlimit_mtinfo1),
+               .usersize       = offsetof(struct xt_hashlimit_mtinfo1, hinfo),
                .checkentry     = hashlimit_mt_check_v1,
                .destroy        = hashlimit_mt_destroy_v1,
                .me             = THIS_MODULE,
@@ -848,6 +849,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
                .family         = NFPROTO_IPV4,
                .match          = hashlimit_mt,
                .matchsize      = sizeof(struct xt_hashlimit_mtinfo2),
+               .usersize       = offsetof(struct xt_hashlimit_mtinfo2, hinfo),
                .checkentry     = hashlimit_mt_check,
                .destroy        = hashlimit_mt_destroy,
                .me             = THIS_MODULE,
@@ -859,6 +861,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
                .family         = NFPROTO_IPV6,
                .match          = hashlimit_mt_v1,
                .matchsize      = sizeof(struct xt_hashlimit_mtinfo1),
+               .usersize       = offsetof(struct xt_hashlimit_mtinfo1, hinfo),
                .checkentry     = hashlimit_mt_check_v1,
                .destroy        = hashlimit_mt_destroy_v1,
                .me             = THIS_MODULE,
@@ -869,6 +872,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
                .family         = NFPROTO_IPV6,
                .match          = hashlimit_mt,
                .matchsize      = sizeof(struct xt_hashlimit_mtinfo2),
+               .usersize       = offsetof(struct xt_hashlimit_mtinfo2, hinfo),
                .checkentry     = hashlimit_mt_check,
                .destroy        = hashlimit_mt_destroy,
                .me             = THIS_MODULE,
index bef85059655892982da73cc694183afc7399782c..dab962df178795612580a1c8e22257213bdab07d 100644 (file)
@@ -192,6 +192,8 @@ static struct xt_match limit_mt_reg __read_mostly = {
        .compatsize       = sizeof(struct compat_xt_rateinfo),
        .compat_from_user = limit_mt_compat_from_user,
        .compat_to_user   = limit_mt_compat_to_user,
+#else
+       .usersize         = offsetof(struct xt_rateinfo, prev),
 #endif
        .me               = THIS_MODULE,
 };
index 57efb703ff18019ec0f341c56062f9a6b264cd79..1ef99151b3ba98da41e8af2bfbb5d85948608701 100644 (file)
@@ -33,8 +33,7 @@ pkttype_mt(const struct sk_buff *skb, struct xt_action_param *par)
        else if (xt_family(par) == NFPROTO_IPV4 &&
            ipv4_is_multicast(ip_hdr(skb)->daddr))
                type = PACKET_MULTICAST;
-       else if (xt_family(par) == NFPROTO_IPV6 &&
-           ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF)
+       else if (xt_family(par) == NFPROTO_IPV6)
                type = PACKET_MULTICAST;
        else
                type = PACKET_BROADCAST;
index 44c8eb4c9d6680aef8f3473ca0b677f3888a68ca..10d61a6eed712442c14cc1011341c2dff5890c2d 100644 (file)
@@ -73,6 +73,7 @@ static struct xt_match quota_mt_reg __read_mostly = {
        .checkentry = quota_mt_check,
        .destroy    = quota_mt_destroy,
        .matchsize  = sizeof(struct xt_quota_info),
+       .usersize   = offsetof(struct xt_quota_info, master),
        .me         = THIS_MODULE,
 };
 
index 1db02f6fca54d7eb5d60c2d8c5cb8ab11656fbcb..755d2f6693a2ff39b975996b8d0a2fb65073a812 100644 (file)
@@ -133,6 +133,7 @@ static struct xt_match xt_rateest_mt_reg __read_mostly = {
        .checkentry = xt_rateest_mt_checkentry,
        .destroy    = xt_rateest_mt_destroy,
        .matchsize  = sizeof(struct xt_rateest_match_info),
+       .usersize   = offsetof(struct xt_rateest_match_info, est1),
        .me         = THIS_MODULE,
 };
 
index 0bc3460319c8bd565cc11564180d566f3f89488e..423293ee57c22f1bc4ed445e525c11eadc2c2272 100644 (file)
@@ -77,6 +77,7 @@ static struct xt_match xt_string_mt_reg __read_mostly = {
        .match      = string_mt,
        .destroy    = string_mt_destroy,
        .matchsize  = sizeof(struct xt_string_info),
+       .usersize   = offsetof(struct xt_string_info, config),
        .me         = THIS_MODULE,
 };
 
index 161b628ab2b08bf4321dbe617022c4c50486534d..7b73c7c161a9680b8691a712c31073b7789620f7 100644 (file)
@@ -1210,9 +1210,9 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
                skb = nskb;
        }
 
-       if (!pskb_expand_head(skb, 0, -delta, allocation))
-               skb->truesize -= delta;
-
+       pskb_expand_head(skb, 0, -delta,
+                        (allocation & ~__GFP_DIRECT_RECLAIM) |
+                        __GFP_NOWARN | __GFP_NORETRY);
        return skb;
 }
 
index 514f7bcf7c63ce0f3148e42726df74ff77c0c8af..b1beb2b94ec76c6a8e415019439d09e47d11a7df 100644 (file)
@@ -1074,6 +1074,8 @@ static int execute_masked_set_action(struct sk_buff *skb,
        case OVS_KEY_ATTR_CT_ZONE:
        case OVS_KEY_ATTR_CT_MARK:
        case OVS_KEY_ATTR_CT_LABELS:
+       case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
+       case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
                err = -EINVAL;
                break;
        }
@@ -1141,12 +1143,6 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_key *key,
                              const struct nlattr *attr, int len)
 {
-       /* Every output action needs a separate clone of 'skb', but the common
-        * case is just a single output action, so that doing a clone and
-        * then freeing the original skbuff is wasteful.  So the following code
-        * is slightly obscure just to avoid that.
-        */
-       int prev_port = -1;
        const struct nlattr *a;
        int rem;
 
@@ -1154,20 +1150,28 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
             a = nla_next(a, &rem)) {
                int err = 0;
 
-               if (unlikely(prev_port != -1)) {
-                       struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);
-
-                       if (out_skb)
-                               do_output(dp, out_skb, prev_port, key);
+               switch (nla_type(a)) {
+               case OVS_ACTION_ATTR_OUTPUT: {
+                       int port = nla_get_u32(a);
+                       struct sk_buff *clone;
+
+                       /* Every output action needs a separate clone
+                        * of 'skb', but when the output action is the
+                        * last one, cloning can be avoided.
+                        */
+                       if (nla_is_last(a, rem)) {
+                               do_output(dp, skb, port, key);
+                               /* 'skb' has been used for output.
+                                */
+                               return 0;
+                       }
 
+                       clone = skb_clone(skb, GFP_ATOMIC);
+                       if (clone)
+                               do_output(dp, clone, port, key);
                        OVS_CB(skb)->cutlen = 0;
-                       prev_port = -1;
-               }
-
-               switch (nla_type(a)) {
-               case OVS_ACTION_ATTR_OUTPUT:
-                       prev_port = nla_get_u32(a);
                        break;
+               }
 
                case OVS_ACTION_ATTR_TRUNC: {
                        struct ovs_action_trunc *trunc = nla_data(a);
@@ -1257,11 +1261,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                }
        }
 
-       if (prev_port != -1)
-               do_output(dp, skb, prev_port, key);
-       else
-               consume_skb(skb);
-
+       consume_skb(skb);
        return 0;
 }
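
The rewrite above folds the old prev_port bookkeeping into the switch: every OVS_ACTION_ATTR_OUTPUT before the last action operates on a clone, while the final output consumes the original skb and returns early. The ownership pattern in miniature (a sketch; strdup()/free() stand in for skb_clone()/consume_skb()):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void output(char *pkt) { printf("tx: %s\n", pkt); free(pkt); }

    /* Each output needs its own buffer, but the last one can take the
     * original instead of a copy. */
    static void execute(char *pkt, int nout)
    {
        for (int i = 0; i < nout; i++) {
            if (i == nout - 1) {
                output(pkt);            /* last action consumes 'pkt' */
                return;
            }
            output(strdup(pkt));        /* earlier actions get a clone */
        }
        free(pkt);                      /* no output action at all */
    }

    int main(void)
    {
        execute(strdup("payload"), 3);
        return 0;
    }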
 
index 54253ea5976e694ba93a9145e575197fce9df69a..c2d452eab0c5d7a240795d53c9c694ea635d8487 100644 (file)
@@ -65,6 +65,7 @@ struct ovs_conntrack_info {
        struct nf_conn *ct;
        u8 commit : 1;
        u8 nat : 3;                 /* enum ovs_ct_nat */
+       u8 force : 1;
        u16 family;
        struct md_mark mark;
        struct md_labels labels;
@@ -73,6 +74,8 @@ struct ovs_conntrack_info {
 #endif
 };
 
+static bool labels_nonzero(const struct ovs_key_ct_labels *labels);
+
 static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info);
 
 static u16 key_to_nfproto(const struct sw_flow_key *key)
@@ -129,21 +132,33 @@ static u32 ovs_ct_get_mark(const struct nf_conn *ct)
 #endif
 }
 
+/* Guard against conntrack labels max size shrinking below 128 bits. */
+#if NF_CT_LABELS_MAX_SIZE < 16
+#error NF_CT_LABELS_MAX_SIZE must be at least 16 bytes
+#endif
+
 static void ovs_ct_get_labels(const struct nf_conn *ct,
                              struct ovs_key_ct_labels *labels)
 {
        struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
 
-       if (cl) {
-               size_t len = sizeof(cl->bits);
+       if (cl)
+               memcpy(labels, cl->bits, OVS_CT_LABELS_LEN);
+       else
+               memset(labels, 0, OVS_CT_LABELS_LEN);
+}
 
-               if (len > OVS_CT_LABELS_LEN)
-                       len = OVS_CT_LABELS_LEN;
-               else if (len < OVS_CT_LABELS_LEN)
-                       memset(labels, 0, OVS_CT_LABELS_LEN);
-               memcpy(labels, cl->bits, len);
+static void __ovs_ct_update_key_orig_tp(struct sw_flow_key *key,
+                                       const struct nf_conntrack_tuple *orig,
+                                       u8 icmp_proto)
+{
+       key->ct_orig_proto = orig->dst.protonum;
+       if (orig->dst.protonum == icmp_proto) {
+               key->ct.orig_tp.src = htons(orig->dst.u.icmp.type);
+               key->ct.orig_tp.dst = htons(orig->dst.u.icmp.code);
        } else {
-               memset(labels, 0, OVS_CT_LABELS_LEN);
+               key->ct.orig_tp.src = orig->src.u.all;
+               key->ct.orig_tp.dst = orig->dst.u.all;
        }
 }
 
@@ -151,13 +166,42 @@ static void __ovs_ct_update_key(struct sw_flow_key *key, u8 state,
                                const struct nf_conntrack_zone *zone,
                                const struct nf_conn *ct)
 {
-       key->ct.state = state;
-       key->ct.zone = zone->id;
+       key->ct_state = state;
+       key->ct_zone = zone->id;
        key->ct.mark = ovs_ct_get_mark(ct);
        ovs_ct_get_labels(ct, &key->ct.labels);
+
+       if (ct) {
+               const struct nf_conntrack_tuple *orig;
+
+               /* Use the master if we have one. */
+               if (ct->master)
+                       ct = ct->master;
+               orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+
+               /* IP version must match with the master connection. */
+               if (key->eth.type == htons(ETH_P_IP) &&
+                   nf_ct_l3num(ct) == NFPROTO_IPV4) {
+                       key->ipv4.ct_orig.src = orig->src.u3.ip;
+                       key->ipv4.ct_orig.dst = orig->dst.u3.ip;
+                       __ovs_ct_update_key_orig_tp(key, orig, IPPROTO_ICMP);
+                       return;
+               } else if (key->eth.type == htons(ETH_P_IPV6) &&
+                          !sw_flow_key_is_nd(key) &&
+                          nf_ct_l3num(ct) == NFPROTO_IPV6) {
+                       key->ipv6.ct_orig.src = orig->src.u3.in6;
+                       key->ipv6.ct_orig.dst = orig->dst.u3.in6;
+                       __ovs_ct_update_key_orig_tp(key, orig, NEXTHDR_ICMP);
+                       return;
+               }
+       }
+       /* Clear 'ct_orig_proto' to mark the non-existence of conntrack
+        * original direction key fields.
+        */
+       key->ct_orig_proto = 0;
 }
 
-/* Update 'key' based on skb->nfct.  If 'post_ct' is true, then OVS has
+/* Update 'key' based on skb->_nfct.  If 'post_ct' is true, then OVS has
  * previously sent the packet to conntrack via the ct action.  If
  * 'keep_nat_flags' is true, the existing NAT flags retained, else they are
  * initialized from the connection status.
@@ -184,7 +228,7 @@ static void ovs_ct_update_key(const struct sk_buff *skb,
                if (ct->master)
                        state |= OVS_CS_F_RELATED;
                if (keep_nat_flags) {
-                       state |= key->ct.state & OVS_CS_F_NAT_MASK;
+                       state |= key->ct_state & OVS_CS_F_NAT_MASK;
                } else {
                        if (ct->status & IPS_SRC_NAT)
                                state |= OVS_CS_F_SRC_NAT;
@@ -208,44 +252,69 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
        ovs_ct_update_key(skb, NULL, key, false, false);
 }
 
-int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb)
+#define IN6_ADDR_INITIALIZER(ADDR) \
+       { (ADDR).s6_addr32[0], (ADDR).s6_addr32[1], \
+         (ADDR).s6_addr32[2], (ADDR).s6_addr32[3] }
+
+int ovs_ct_put_key(const struct sw_flow_key *swkey,
+                  const struct sw_flow_key *output, struct sk_buff *skb)
 {
-       if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state))
+       if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, output->ct_state))
                return -EMSGSIZE;
 
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
-           nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, key->ct.zone))
+           nla_put_u16(skb, OVS_KEY_ATTR_CT_ZONE, output->ct_zone))
                return -EMSGSIZE;
 
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
-           nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, key->ct.mark))
+           nla_put_u32(skb, OVS_KEY_ATTR_CT_MARK, output->ct.mark))
                return -EMSGSIZE;
 
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
-           nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(key->ct.labels),
-                   &key->ct.labels))
+           nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(output->ct.labels),
+                   &output->ct.labels))
                return -EMSGSIZE;
 
+       if (swkey->ct_orig_proto) {
+               if (swkey->eth.type == htons(ETH_P_IP)) {
+                       struct ovs_key_ct_tuple_ipv4 orig = {
+                               output->ipv4.ct_orig.src,
+                               output->ipv4.ct_orig.dst,
+                               output->ct.orig_tp.src,
+                               output->ct.orig_tp.dst,
+                               output->ct_orig_proto,
+                       };
+                       if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
+                                   sizeof(orig), &orig))
+                               return -EMSGSIZE;
+               } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+                       struct ovs_key_ct_tuple_ipv6 orig = {
+                               IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.src),
+                               IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.dst),
+                               output->ct.orig_tp.src,
+                               output->ct.orig_tp.dst,
+                               output->ct_orig_proto,
+                       };
+                       if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
+                                   sizeof(orig), &orig))
+                               return -EMSGSIZE;
+               }
+       }
+
        return 0;
 }
 
-static int ovs_ct_set_mark(struct sk_buff *skb, struct sw_flow_key *key,
+static int ovs_ct_set_mark(struct nf_conn *ct, struct sw_flow_key *key,
                           u32 ct_mark, u32 mask)
 {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
-       enum ip_conntrack_info ctinfo;
-       struct nf_conn *ct;
        u32 new_mark;
 
-       /* The connection could be invalid, in which case set_mark is no-op. */
-       ct = nf_ct_get(skb, &ctinfo);
-       if (!ct)
-               return 0;
-
        new_mark = ct_mark | (ct->mark & ~(mask));
        if (ct->mark != new_mark) {
                ct->mark = new_mark;
-               nf_conntrack_event_cache(IPCT_MARK, ct);
+               if (nf_ct_is_confirmed(ct))
+                       nf_conntrack_event_cache(IPCT_MARK, ct);
                key->ct.mark = new_mark;
        }
 
@@ -255,34 +324,80 @@ static int ovs_ct_set_mark(struct sk_buff *skb, struct sw_flow_key *key,
 #endif
 }
 
-static int ovs_ct_set_labels(struct sk_buff *skb, struct sw_flow_key *key,
-                            const struct ovs_key_ct_labels *labels,
-                            const struct ovs_key_ct_labels *mask)
+static struct nf_conn_labels *ovs_ct_get_conn_labels(struct nf_conn *ct)
 {
-       enum ip_conntrack_info ctinfo;
        struct nf_conn_labels *cl;
-       struct nf_conn *ct;
-       int err;
-
-       /* The connection could be invalid, in which case set_label is no-op.*/
-       ct = nf_ct_get(skb, &ctinfo);
-       if (!ct)
-               return 0;
 
        cl = nf_ct_labels_find(ct);
        if (!cl) {
                nf_ct_labels_ext_add(ct);
                cl = nf_ct_labels_find(ct);
        }
-       if (!cl || sizeof(cl->bits) < OVS_CT_LABELS_LEN)
+
+       return cl;
+}
+
+/* Initialize labels for a new, yet to be committed conntrack entry.  Note that
+ * since the new connection is not yet confirmed, and thus no-one else has
+ * access to its labels, we simply write them over.  Also, we refrain from
+ * triggering events, as receiving change events before the create event would
+ * be confusing.
+ */
+static int ovs_ct_init_labels(struct nf_conn *ct, struct sw_flow_key *key,
+                             const struct ovs_key_ct_labels *labels,
+                             const struct ovs_key_ct_labels *mask)
+{
+       struct nf_conn_labels *cl, *master_cl;
+       bool have_mask = labels_nonzero(mask);
+
+       /* Inherit master's labels to the related connection? */
+       master_cl = ct->master ? nf_ct_labels_find(ct->master) : NULL;
+
+       if (!master_cl && !have_mask)
+               return 0;   /* Nothing to do. */
+
+       cl = ovs_ct_get_conn_labels(ct);
+       if (!cl)
+               return -ENOSPC;
+
+       /* Inherit the master's labels, if any. */
+       if (master_cl)
+               *cl = *master_cl;
+
+       if (have_mask) {
+               u32 *dst = (u32 *)cl->bits;
+               int i;
+
+               for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
+                       dst[i] = (dst[i] & ~mask->ct_labels_32[i]) |
+                               (labels->ct_labels_32[i]
+                                & mask->ct_labels_32[i]);
+       }
+
+       memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);
+
+       return 0;
+}
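
The word-wise merge above is the standard masked update, dst = (dst & ~mask) | (val & mask). A one-word worked example:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t dst = 0xdeadbeef, val = 0x00005a00, mask = 0x0000ff00;

        /* Only the bits selected by mask are replaced; the rest survive. */
        dst = (dst & ~mask) | (val & mask);
        printf("0x%08x\n", dst);        /* 0xdead5aef */
        return 0;
    }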
+
+static int ovs_ct_set_labels(struct nf_conn *ct, struct sw_flow_key *key,
+                            const struct ovs_key_ct_labels *labels,
+                            const struct ovs_key_ct_labels *mask)
+{
+       struct nf_conn_labels *cl;
+       int err;
+
+       cl = ovs_ct_get_conn_labels(ct);
+       if (!cl)
                return -ENOSPC;
 
-       err = nf_connlabels_replace(ct, (u32 *)labels, (u32 *)mask,
-                                   OVS_CT_LABELS_LEN / sizeof(u32));
+       err = nf_connlabels_replace(ct, labels->ct_labels_32,
+                                   mask->ct_labels_32,
+                                   OVS_CT_LABELS_LEN_32);
        if (err)
                return err;
 
-       ovs_ct_get_labels(ct, &key->ct.labels);
+       memcpy(&key->ct.labels, cl->bits, OVS_CT_LABELS_LEN);
+
        return 0;
 }
 
@@ -421,16 +536,16 @@ ovs_ct_get_info(const struct nf_conntrack_tuple_hash *h)
 
 /* Find an existing connection which this packet belongs to without
  * re-attributing statistics or modifying the connection state.  This allows an
- * skb->nfct lost due to an upcall to be recovered during actions execution.
+ * skb->_nfct lost due to an upcall to be recovered during actions execution.
  *
  * Must be called with rcu_read_lock.
  *
- * On success, populates skb->nfct and skb->nfctinfo, and returns the
- * connection.  Returns NULL if there is no existing entry.
+ * On success, populates skb->_nfct and returns the connection.  Returns NULL
+ * if there is no existing entry.
  */
 static struct nf_conn *
 ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
-                    u8 l3num, struct sk_buff *skb)
+                    u8 l3num, struct sk_buff *skb, bool natted)
 {
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
@@ -453,6 +568,17 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
                return NULL;
        }
 
+       /* Must invert the tuple if skb has been transformed by NAT. */
+       if (natted) {
+               struct nf_conntrack_tuple inverse;
+
+               if (!nf_ct_invert_tuple(&inverse, &tuple, l3proto, l4proto)) {
+                       pr_debug("ovs_ct_find_existing: Inversion failed!\n");
+                       return NULL;
+               }
+               tuple = inverse;
+       }
+
        /* look for tuple match */
        h = nf_conntrack_find_get(net, zone, &tuple);
        if (!h)
@@ -460,12 +586,18 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
 
        ct = nf_ct_tuplehash_to_ctrack(h);
 
-       skb->nfct = &ct->ct_general;
-       skb->nfctinfo = ovs_ct_get_info(h);
+       /* Inverted packet tuple matches the reverse direction conntrack tuple,
+        * select the other tuplehash to get the right 'ctinfo' bits for this
+        * packet.
+        */
+       if (natted)
+               h = &ct->tuplehash[!h->tuple.dst.dir];
+
+       nf_ct_set(skb, ct, ovs_ct_get_info(h));
        return ct;
 }
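
Worked example for the inversion (addresses illustrative): suppose 10.0.0.1 -> 192.168.0.2 was DNATted to 10.0.0.1 -> 10.0.0.9. A re-lookup after the rewrite extracts the tuple (10.0.0.1, 10.0.0.9), which matches neither stored direction directly; its inverse (10.0.0.9, 10.0.0.1) is exactly the REPLY tuple, so the inverted lookup finds the entry, and flipping the tuplehash index afterwards makes ctinfo report the packet's true (original) direction.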
 
-/* Determine whether skb->nfct is equal to the result of conntrack lookup. */
+/* Determine whether skb->_nfct is equal to the result of conntrack lookup. */
 static bool skb_nfct_cached(struct net *net,
                            const struct sw_flow_key *key,
                            const struct ovs_conntrack_info *info,
@@ -476,14 +608,19 @@ static bool skb_nfct_cached(struct net *net,
 
        ct = nf_ct_get(skb, &ctinfo);
        /* If no ct, check if we have evidence that an existing conntrack entry
-        * might be found for this skb.  This happens when we lose a skb->nfct
+        * might be found for this skb.  This happens when we lose a skb->_nfct
         * due to an upcall.  If the connection was not confirmed, it is not
         * cached and needs to be run through conntrack again.
         */
-       if (!ct && key->ct.state & OVS_CS_F_TRACKED &&
-           !(key->ct.state & OVS_CS_F_INVALID) &&
-           key->ct.zone == info->zone.id)
-               ct = ovs_ct_find_existing(net, &info->zone, info->family, skb);
+       if (!ct && key->ct_state & OVS_CS_F_TRACKED &&
+           !(key->ct_state & OVS_CS_F_INVALID) &&
+           key->ct_zone == info->zone.id) {
+               ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
+                                         !!(key->ct_state
+                                            & OVS_CS_F_NAT_MASK));
+               if (ct)
+                       nf_ct_get(skb, &ctinfo);
+       }
        if (!ct)
                return false;
        if (!net_eq(net, read_pnet(&ct->ct_net)))
@@ -497,6 +634,18 @@ static bool skb_nfct_cached(struct net *net,
                if (help && rcu_access_pointer(help->helper) != info->helper)
                        return false;
        }
+       /* Force conntrack entry direction to the current packet? */
+       if (info->force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
+               /* Delete the conntrack entry if confirmed, else just release
+                * the reference.
+                */
+               if (nf_ct_is_confirmed(ct))
+                       nf_ct_delete(ct, 0, 0);
+               else
+                       nf_conntrack_put(&ct->ct_general);
+               nf_ct_set(skb, NULL, 0);
+               return false;
+       }
 
        return true;
 }
@@ -591,7 +740,7 @@ static void ovs_nat_update_key(struct sw_flow_key *key,
        if (maniptype == NF_NAT_MANIP_SRC) {
                __be16 src;
 
-               key->ct.state |= OVS_CS_F_SRC_NAT;
+               key->ct_state |= OVS_CS_F_SRC_NAT;
                if (key->eth.type == htons(ETH_P_IP))
                        key->ipv4.addr.src = ip_hdr(skb)->saddr;
                else if (key->eth.type == htons(ETH_P_IPV6))
@@ -613,7 +762,7 @@ static void ovs_nat_update_key(struct sw_flow_key *key,
        } else {
                __be16 dst;
 
-               key->ct.state |= OVS_CS_F_DST_NAT;
+               key->ct_state |= OVS_CS_F_DST_NAT;
                if (key->eth.type == htons(ETH_P_IP))
                        key->ipv4.addr.dst = ip_hdr(skb)->daddr;
                else if (key->eth.type == htons(ETH_P_IPV6))
@@ -700,7 +849,7 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key,
 /* Pass 'skb' through conntrack in 'net', using zone configured in 'info', if
  * not done already.  Update key with new CT state after passing the packet
  * through conntrack.
- * Note that if the packet is deemed invalid by conntrack, skb->nfct will be
+ * Note that if the packet is deemed invalid by conntrack, skb->_nfct will be
  * set to NULL and 0 will be returned.
  */
 static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
@@ -722,11 +871,10 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
 
                /* Associate skb with specified zone. */
                if (tmpl) {
-                       if (skb->nfct)
-                               nf_conntrack_put(skb->nfct);
+                       if (skb_nfct(skb))
+                               nf_conntrack_put(skb_nfct(skb));
                        nf_conntrack_get(&tmpl->ct_general);
-                       skb->nfct = &tmpl->ct_general;
-                       skb->nfctinfo = IP_CT_NEW;
+                       nf_ct_set(skb, tmpl, IP_CT_NEW);
                }
 
                err = nf_conntrack_in(net, info->family,
@@ -738,7 +886,7 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
                 * NAT after the nf_conntrack_in() call.  We can actually clear
                 * the whole state, as it will be re-initialized below.
                 */
-               key->ct.state = 0;
+               key->ct_state = 0;
 
                /* Update the key, but keep the NAT flags. */
                ovs_ct_update_key(skb, info, key, true, true);
@@ -754,9 +902,9 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
                 *
                 * NAT will be done only if the CT action has NAT, and only
                 * once per packet (per zone), as guarded by the NAT bits in
-                * the key->ct.state.
+                * the key->ct_state.
                 */
-               if (info->nat && !(key->ct.state & OVS_CS_F_NAT_MASK) &&
+               if (info->nat && !(key->ct_state & OVS_CS_F_NAT_MASK) &&
                    (nf_ct_is_confirmed(ct) || info->commit) &&
                    ovs_ct_nat(net, key, info, skb, ct, ctinfo) != NF_ACCEPT) {
                        return -EINVAL;
@@ -820,7 +968,7 @@ static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
                if (err)
                        return err;
 
-               ct = (struct nf_conn *)skb->nfct;
+               ct = (struct nf_conn *)skb_nfct(skb);
                if (ct)
                        nf_ct_deliver_cached_events(ct);
        }
@@ -832,8 +980,8 @@ static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
 {
        size_t i;
 
-       for (i = 0; i < sizeof(*labels); i++)
-               if (labels->ct_labels[i])
+       for (i = 0; i < OVS_CT_LABELS_LEN_32; i++)
+               if (labels->ct_labels_32[i])
                        return true;
 
        return false;
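
The loop above walks the labels as four 32-bit words rather than sixteen
bytes.  For reference, a sketch of the uapi layout this relies on, with a
union overlaying the two views (illustrative only; the authoritative
definition is in include/uapi/linux/openvswitch.h):

        #define OVS_CT_LABELS_LEN_32    4
        #define OVS_CT_LABELS_LEN       (OVS_CT_LABELS_LEN_32 * sizeof(__u32))

        struct ovs_key_ct_labels {
                union {
                        __u8    ct_labels[OVS_CT_LABELS_LEN];
                        __u32   ct_labels_32[OVS_CT_LABELS_LEN_32];
                };
        };
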
@@ -844,24 +992,36 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
                         const struct ovs_conntrack_info *info,
                         struct sk_buff *skb)
 {
+       enum ip_conntrack_info ctinfo;
+       struct nf_conn *ct;
        int err;
 
        err = __ovs_ct_lookup(net, key, info, skb);
        if (err)
                return err;
 
+       /* The connection could be invalid, in which case this is a no-op. */
+       ct = nf_ct_get(skb, &ctinfo);
+       if (!ct)
+               return 0;
+
        /* Apply changes before confirming the connection so that the initial
         * conntrack NEW netlink event carries the values given in the CT
         * action.
         */
        if (info->mark.mask) {
-               err = ovs_ct_set_mark(skb, key, info->mark.value,
+               err = ovs_ct_set_mark(ct, key, info->mark.value,
                                      info->mark.mask);
                if (err)
                        return err;
        }
-       if (labels_nonzero(&info->labels.mask)) {
-               err = ovs_ct_set_labels(skb, key, &info->labels.value,
+       if (!nf_ct_is_confirmed(ct)) {
+               err = ovs_ct_init_labels(ct, key, &info->labels.value,
+                                        &info->labels.mask);
+               if (err)
+                       return err;
+       } else if (labels_nonzero(&info->labels.mask)) {
+               err = ovs_ct_set_labels(ct, key, &info->labels.value,
                                        &info->labels.mask);
                if (err)
                        return err;
@@ -1063,6 +1223,7 @@ static int parse_nat(const struct nlattr *attr,
 
 static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
        [OVS_CT_ATTR_COMMIT]    = { .minlen = 0, .maxlen = 0 },
+       [OVS_CT_ATTR_FORCE_COMMIT]      = { .minlen = 0, .maxlen = 0 },
        [OVS_CT_ATTR_ZONE]      = { .minlen = sizeof(u16),
                                    .maxlen = sizeof(u16) },
        [OVS_CT_ATTR_MARK]      = { .minlen = sizeof(struct md_mark),
@@ -1102,6 +1263,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
                }
 
                switch (type) {
+               case OVS_CT_ATTR_FORCE_COMMIT:
+                       info->force = true;
+                       /* fall through. */
                case OVS_CT_ATTR_COMMIT:
                        info->commit = true;
                        break;
@@ -1328,7 +1492,9 @@ int ovs_ct_action_to_attr(const struct ovs_conntrack_info *ct_info,
        if (!start)
                return -EMSGSIZE;
 
-       if (ct_info->commit && nla_put_flag(skb, OVS_CT_ATTR_COMMIT))
+       if (ct_info->commit && nla_put_flag(skb, ct_info->force
+                                           ? OVS_CT_ATTR_FORCE_COMMIT
+                                           : OVS_CT_ATTR_COMMIT))
                return -EMSGSIZE;
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
            nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
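
OVS_CT_ATTR_FORCE_COMMIT round-trips through parse_ct(), whose fall-through
also sets info->commit, and ovs_ct_action_to_attr(), which emits exactly one
of the two flags.  A minimal sketch of that invariant as a hypothetical
helper (not part of the patch), assuming <net/netlink.h> and the uapi
OVS_CT_ATTR_* values are in scope:

        /* Serialize the commit flags: force implies commit, and at most
         * one attribute is emitted, mirroring ovs_ct_action_to_attr().
         */
        static int put_commit_flag(struct sk_buff *skb, bool commit, bool force)
        {
                if (!commit)
                        return 0;
                return nla_put_flag(skb, force ? OVS_CT_ATTR_FORCE_COMMIT
                                               : OVS_CT_ATTR_COMMIT);
        }
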
index 8f6230bd618333561b2c4636a3f8f52356a6bd6c..bc7efd1867ab4cf5b81680ae60061775b09fdb34 100644 (file)
@@ -32,7 +32,8 @@ int ovs_ct_execute(struct net *, struct sk_buff *, struct sw_flow_key *,
                   const struct ovs_conntrack_info *);
 
 void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key);
-int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb);
+int ovs_ct_put_key(const struct sw_flow_key *swkey,
+                  const struct sw_flow_key *output, struct sk_buff *skb);
 void ovs_ct_free_action(const struct nlattr *a);
 
 #define CT_SUPPORTED_MASK (OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED | \
@@ -75,13 +76,18 @@ static inline int ovs_ct_execute(struct net *net, struct sk_buff *skb,
 static inline void ovs_ct_fill_key(const struct sk_buff *skb,
                                   struct sw_flow_key *key)
 {
-       key->ct.state = 0;
-       key->ct.zone = 0;
+       key->ct_state = 0;
+       key->ct_zone = 0;
        key->ct.mark = 0;
        memset(&key->ct.labels, 0, sizeof(key->ct.labels));
+       /* Clear 'ct_orig_proto' to mark the non-existence of original
+        * direction key fields.
+        */
+       key->ct_orig_proto = 0;
 }
 
-static inline int ovs_ct_put_key(const struct sw_flow_key *key,
+static inline int ovs_ct_put_key(const struct sw_flow_key *swkey,
+                                const struct sw_flow_key *output,
                                 struct sk_buff *skb)
 {
        return 0;
index 2c0a00f7f1b7d195b98f70e6ec0235b650a63311..9d4bb8eb63f25c2e9e9e5f4190e6c943a32be547 100644 (file)
@@ -765,7 +765,7 @@ static int key_extract_mac_proto(struct sk_buff *skb)
 int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
                         struct sk_buff *skb, struct sw_flow_key *key)
 {
-       int res;
+       int res, err;
 
        /* Extract metadata from packet. */
        if (tun_info) {
@@ -792,7 +792,6 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
        key->phy.priority = skb->priority;
        key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
        key->phy.skb_mark = skb->mark;
-       ovs_ct_fill_key(skb, key);
        key->ovs_flow_hash = 0;
        res = key_extract_mac_proto(skb);
        if (res < 0)
@@ -800,17 +799,26 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
        key->mac_proto = res;
        key->recirc_id = 0;
 
-       return key_extract(skb, key);
+       err = key_extract(skb, key);
+       if (!err)
+               ovs_ct_fill_key(skb, key);   /* Must be after key_extract(). */
+       return err;
 }
 
 int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
                                   struct sk_buff *skb,
                                   struct sw_flow_key *key, bool log)
 {
+       const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
+       u64 attrs = 0;
        int err;
 
+       err = parse_flow_nlattrs(attr, a, &attrs, log);
+       if (err)
+               return -EINVAL;
+
        /* Extract metadata from netlink attributes. */
-       err = ovs_nla_get_flow_metadata(net, attr, key, log);
+       err = ovs_nla_get_flow_metadata(net, a, attrs, key, log);
        if (err)
                return err;
 
@@ -824,5 +832,21 @@ int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
         */
 
        skb->protocol = key->eth.type;
-       return key_extract(skb, key);
+       err = key_extract(skb, key);
+       if (err)
+               return err;
+
+       /* Check that we have conntrack original direction tuple metadata only
+        * for packets for which it makes sense.  Otherwise the key may be
+        * corrupted due to overlapping key fields.
+        */
+       if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4) &&
+           key->eth.type != htons(ETH_P_IP))
+               return -EINVAL;
+       if (attrs & (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6) &&
+           (key->eth.type != htons(ETH_P_IPV6) ||
+            sw_flow_key_is_nd(key)))
+               return -EINVAL;
+
+       return 0;
 }
index f61cae7f9030df68c3001e5e63dc2c903b3d1d3f..a9bc1c875965c1814cd816446598e9602e8cc13a 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2014 Nicira, Inc.
+ * Copyright (c) 2007-2017 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -85,6 +85,11 @@ struct sw_flow_key {
                struct vlan_head cvlan;
                __be16 type;            /* Ethernet frame type. */
        } eth;
+       /* Filling a hole of two bytes. */
+       u8 ct_state;
+       u8 ct_orig_proto;               /* CT original direction tuple IP
+                                        * protocol.
+                                        */
        union {
                struct {
                        __be32 top_lse; /* top label stack entry */
@@ -96,6 +101,7 @@ struct sw_flow_key {
                        u8     frag;    /* One of OVS_FRAG_TYPE_*. */
                } ip;
        };
+       u16 ct_zone;                    /* Conntrack zone. */
        struct {
                __be16 src;             /* TCP/UDP/SCTP source port. */
                __be16 dst;             /* TCP/UDP/SCTP destination port. */
@@ -107,10 +113,16 @@ struct sw_flow_key {
                                __be32 src;     /* IP source address. */
                                __be32 dst;     /* IP destination address. */
                        } addr;
-                       struct {
-                               u8 sha[ETH_ALEN];       /* ARP source hardware address. */
-                               u8 tha[ETH_ALEN];       /* ARP target hardware address. */
-                       } arp;
+                       union {
+                               struct {
+                                       __be32 src;
+                                       __be32 dst;
+                               } ct_orig;      /* Conntrack original direction fields. */
+                               struct {
+                                       u8 sha[ETH_ALEN];       /* ARP source hardware address. */
+                                       u8 tha[ETH_ALEN];       /* ARP target hardware address. */
+                               } arp;
+                       };
                } ipv4;
                struct {
                        struct {
@@ -118,23 +130,40 @@ struct sw_flow_key {
                                struct in6_addr dst;    /* IPv6 destination address. */
                        } addr;
                        __be32 label;                   /* IPv6 flow label. */
-                       struct {
-                               struct in6_addr target; /* ND target address. */
-                               u8 sll[ETH_ALEN];       /* ND source link layer address. */
-                               u8 tll[ETH_ALEN];       /* ND target link layer address. */
-                       } nd;
+                       union {
+                               struct {
+                                       struct in6_addr src;
+                                       struct in6_addr dst;
+                               } ct_orig;      /* Conntrack original direction fields. */
+                               struct {
+                                       struct in6_addr target; /* ND target address. */
+                                       u8 sll[ETH_ALEN];       /* ND source link layer address. */
+                                       u8 tll[ETH_ALEN];       /* ND target link layer address. */
+                               } nd;
+                       };
                } ipv6;
        };
        struct {
-               /* Connection tracking fields. */
-               u16 zone;
+               /* Connection tracking fields not packed above. */
+               struct {
+                       __be16 src;     /* CT orig tuple tp src port. */
+                       __be16 dst;     /* CT orig tuple tp dst port. */
+               } orig_tp;
                u32 mark;
-               u8 state;
                struct ovs_key_ct_labels labels;
        } ct;
 
 } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
 
+static inline bool sw_flow_key_is_nd(const struct sw_flow_key *key)
+{
+       return key->eth.type == htons(ETH_P_IPV6) &&
+               key->ip.proto == NEXTHDR_ICMP &&
+               key->tp.dst == 0 &&
+               (key->tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
+                key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT));
+}
+
 struct sw_flow_key_range {
        unsigned short int start;
        unsigned short int end;
index c87d359b9b37a07711c65e61a1cb882148f76ee0..6f5fa50f716d066333b30edde43e5165b9fe94be 100644 (file)
@@ -129,7 +129,9 @@ static bool match_validate(const struct sw_flow_match *match,
        /* The following mask attributes allowed only if they
         * pass the validation tests. */
        mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
+                       | (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)
                        | (1 << OVS_KEY_ATTR_IPV6)
+                       | (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)
                        | (1 << OVS_KEY_ATTR_TCP)
                        | (1 << OVS_KEY_ATTR_TCP_FLAGS)
                        | (1 << OVS_KEY_ATTR_UDP)
@@ -161,8 +163,10 @@ static bool match_validate(const struct sw_flow_match *match,
 
        if (match->key->eth.type == htons(ETH_P_IP)) {
                key_expected |= 1 << OVS_KEY_ATTR_IPV4;
-               if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+               if (match->mask && match->mask->key.eth.type == htons(0xffff)) {
                        mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;
+                       mask_allowed |= 1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4;
+               }
 
                if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
                        if (match->key->ip.proto == IPPROTO_UDP) {
@@ -196,8 +200,10 @@ static bool match_validate(const struct sw_flow_match *match,
 
        if (match->key->eth.type == htons(ETH_P_IPV6)) {
                key_expected |= 1 << OVS_KEY_ATTR_IPV6;
-               if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+               if (match->mask && match->mask->key.eth.type == htons(0xffff)) {
                        mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;
+                       mask_allowed |= 1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6;
+               }
 
                if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
                        if (match->key->ip.proto == IPPROTO_UDP) {
@@ -230,6 +236,12 @@ static bool match_validate(const struct sw_flow_match *match,
                                                htons(NDISC_NEIGHBOUR_SOLICITATION) ||
                                    match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
                                        key_expected |= 1 << OVS_KEY_ATTR_ND;
+                                       /* Original direction conntrack tuple
+                                        * uses the same space as the ND fields
+                                        * in the key, so both are not allowed
+                                        * at the same time.
+                                        */
+                                       mask_allowed &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
                                        if (match->mask && (match->mask->key.tp.src == htons(0xff)))
                                                mask_allowed |= 1 << OVS_KEY_ATTR_ND;
                                }
@@ -282,7 +294,7 @@ size_t ovs_key_attr_size(void)
        /* Whenever adding new OVS_KEY_ FIELDS, we should consider
         * updating this function.
         */
-       BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 26);
+       BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 28);
 
        return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
                + nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
@@ -295,6 +307,7 @@ size_t ovs_key_attr_size(void)
                + nla_total_size(2)   /* OVS_KEY_ATTR_CT_ZONE */
                + nla_total_size(4)   /* OVS_KEY_ATTR_CT_MARK */
                + nla_total_size(16)  /* OVS_KEY_ATTR_CT_LABELS */
+               + nla_total_size(40)  /* OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6 */
                + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
                + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
                + nla_total_size(4)   /* OVS_KEY_ATTR_VLAN */
@@ -355,6 +368,10 @@ static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
        [OVS_KEY_ATTR_CT_ZONE]   = { .len = sizeof(u16) },
        [OVS_KEY_ATTR_CT_MARK]   = { .len = sizeof(u32) },
        [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
+       [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4] = {
+               .len = sizeof(struct ovs_key_ct_tuple_ipv4) },
+       [OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6] = {
+               .len = sizeof(struct ovs_key_ct_tuple_ipv6) },
 };
 
 static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
@@ -430,9 +447,8 @@ static int parse_flow_mask_nlattrs(const struct nlattr *attr,
        return __parse_flow_nlattrs(attr, a, attrsp, log, true);
 }
 
-static int parse_flow_nlattrs(const struct nlattr *attr,
-                             const struct nlattr *a[], u64 *attrsp,
-                             bool log)
+int parse_flow_nlattrs(const struct nlattr *attr, const struct nlattr *a[],
+                      u64 *attrsp, bool log)
 {
        return __parse_flow_nlattrs(attr, a, attrsp, log, false);
 }
@@ -1056,14 +1072,14 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
                        return -EINVAL;
                }
 
-               SW_FLOW_KEY_PUT(match, ct.state, ct_state, is_mask);
+               SW_FLOW_KEY_PUT(match, ct_state, ct_state, is_mask);
                *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
        }
        if (*attrs & (1 << OVS_KEY_ATTR_CT_ZONE) &&
            ovs_ct_verify(net, OVS_KEY_ATTR_CT_ZONE)) {
                u16 ct_zone = nla_get_u16(a[OVS_KEY_ATTR_CT_ZONE]);
 
-               SW_FLOW_KEY_PUT(match, ct.zone, ct_zone, is_mask);
+               SW_FLOW_KEY_PUT(match, ct_zone, ct_zone, is_mask);
                *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ZONE);
        }
        if (*attrs & (1 << OVS_KEY_ATTR_CT_MARK) &&
@@ -1082,6 +1098,34 @@ static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
                                   sizeof(*cl), is_mask);
                *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS);
        }
+       if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)) {
+               const struct ovs_key_ct_tuple_ipv4 *ct;
+
+               ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4]);
+
+               SW_FLOW_KEY_PUT(match, ipv4.ct_orig.src, ct->ipv4_src, is_mask);
+               SW_FLOW_KEY_PUT(match, ipv4.ct_orig.dst, ct->ipv4_dst, is_mask);
+               SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask);
+               SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask);
+               SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv4_proto, is_mask);
+               *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4);
+       }
+       if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)) {
+               const struct ovs_key_ct_tuple_ipv6 *ct;
+
+               ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6]);
+
+               SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.src, &ct->ipv6_src,
+                                  sizeof(match->key->ipv6.ct_orig.src),
+                                  is_mask);
+               SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.dst, &ct->ipv6_dst,
+                                  sizeof(match->key->ipv6.ct_orig.dst),
+                                  is_mask);
+               SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask);
+               SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask);
+               SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv6_proto, is_mask);
+               *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
+       }
 
        /* For layer 3 packets the Ethernet type is provided
         * and treated as metadata but no MAC addresses are provided.
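
The accessors above imply the following uapi tuple layouts; treat this as an
illustrative sketch (the authoritative definitions live in
include/uapi/linux/openvswitch.h):

        struct ovs_key_ct_tuple_ipv4 {
                __be32 ipv4_src;
                __be32 ipv4_dst;
                __be16 src_port;
                __be16 dst_port;
                __u8   ipv4_proto;
        };

        struct ovs_key_ct_tuple_ipv6 {
                __be32 ipv6_src[4];
                __be32 ipv6_dst[4];
                __be16 src_port;
                __be16 dst_port;
                __u8   ipv6_proto;
        };

With padding, sizeof(struct ovs_key_ct_tuple_ipv6) comes to 40 bytes,
matching the nla_total_size(40) reserved for OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6
in ovs_key_attr_size() above.
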
@@ -1493,9 +1537,12 @@ u32 ovs_nla_get_ufid_flags(const struct nlattr *attr)
 
 /**
  * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
- * @key: Receives extracted in_port, priority, tun_key and skb_mark.
- * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
- * sequence.
+ * @net: Network namespace.
+ * @key: Receives extracted in_port, priority, tun_key, skb_mark and conntrack
+ * metadata.
+ * @a: Array of netlink attributes holding parsed %OVS_KEY_ATTR_* Netlink
+ * attributes.
+ * @attrs: Bit mask for the netlink attributes included in @a.
  * @log: Boolean to allow kernel error logging.  Normally true, but when
  * probing for feature compatibility this should be passed in as false to
  * suppress unnecessary error logging.
@@ -1504,25 +1551,26 @@ u32 ovs_nla_get_ufid_flags(const struct nlattr *attr)
  * take the same form accepted by flow_from_nlattrs(), but only enough of it to
  * get the metadata, that is, the parts of the flow key that cannot be
  * extracted from the packet itself.
+ *
+ * This must be called before the packet key fields are filled in 'key'.
  */
 
-int ovs_nla_get_flow_metadata(struct net *net, const struct nlattr *attr,
-                             struct sw_flow_key *key,
-                             bool log)
+int ovs_nla_get_flow_metadata(struct net *net,
+                             const struct nlattr *a[OVS_KEY_ATTR_MAX + 1],
+                             u64 attrs, struct sw_flow_key *key, bool log)
 {
-       const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
        struct sw_flow_match match;
-       u64 attrs = 0;
-       int err;
-
-       err = parse_flow_nlattrs(attr, a, &attrs, log);
-       if (err)
-               return -EINVAL;
 
        memset(&match, 0, sizeof(match));
        match.key = key;
 
+       key->ct_state = 0;
+       key->ct_zone = 0;
+       key->ct_orig_proto = 0;
        memset(&key->ct, 0, sizeof(key->ct));
+       memset(&key->ipv4.ct_orig, 0, sizeof(key->ipv4.ct_orig));
+       memset(&key->ipv6.ct_orig, 0, sizeof(key->ipv6.ct_orig));
+
        key->phy.in_port = DP_MAX_PORTS;
 
        return metadata_from_nlattrs(net, &match, &attrs, a, false, log);
@@ -1584,7 +1632,7 @@ static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
        if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
                goto nla_put_failure;
 
-       if (ovs_ct_put_key(output, skb))
+       if (ovs_ct_put_key(swkey, output, skb))
                goto nla_put_failure;
 
        if (ovs_key_mac_proto(swkey) == MAC_PROTO_ETHERNET) {
index 45f9769e5aaccdc46f60eaff5672034b48622ebd..929c665ac3aa99c8eb9635ea72a45c50743d60af 100644 (file)
@@ -46,8 +46,11 @@ void ovs_match_init(struct sw_flow_match *match,
 
 int ovs_nla_put_key(const struct sw_flow_key *, const struct sw_flow_key *,
                    int attr, bool is_mask, struct sk_buff *);
-int ovs_nla_get_flow_metadata(struct net *, const struct nlattr *,
-                             struct sw_flow_key *, bool log);
+int parse_flow_nlattrs(const struct nlattr *attr, const struct nlattr *a[],
+                      u64 *attrsp, bool log);
+int ovs_nla_get_flow_metadata(struct net *net,
+                             const struct nlattr *a[OVS_KEY_ATTR_MAX + 1],
+                             u64 attrs, struct sw_flow_key *key, bool log);
 
 int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb);
 int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb);
index d5d6caecd0726c1223f78031bbd7f8a00b9ce0f9..89193a634da45bb78498ecd28ca39c455e32f2e4 100644 (file)
@@ -97,7 +97,7 @@ static void internal_dev_destructor(struct net_device *dev)
        free_netdev(dev);
 }
 
-static struct rtnl_link_stats64 *
+static void
 internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        int i;
@@ -125,8 +125,6 @@ internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
                stats->tx_bytes         += local_stats.tx_bytes;
                stats->tx_packets       += local_stats.tx_packets;
        }
-
-       return stats;
 }
 
 static void internal_set_rx_headroom(struct net_device *dev, int new_hr)
@@ -151,6 +149,8 @@ static void do_setup(struct net_device *netdev)
 {
        ether_setup(netdev);
 
+       netdev->max_mtu = ETH_MAX_MTU;
+
        netdev->netdev_ops = &internal_dev_netdev_ops;
 
        netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
index 70f5b6a4683c29351284e3385f56caf1bce37ac7..2bd0d1949312c3d71c4b33529316dcfe76fa28f1 100644 (file)
@@ -409,6 +409,9 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
                flush_dcache_page(pgv_to_page(&h.h2->tp_status));
                break;
        case TPACKET_V3:
+               h.h3->tp_status = status;
+               flush_dcache_page(pgv_to_page(&h.h3->tp_status));
+               break;
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
@@ -432,6 +435,8 @@ static int __packet_get_status(struct packet_sock *po, void *frame)
                flush_dcache_page(pgv_to_page(&h.h2->tp_status));
                return h.h2->tp_status;
        case TPACKET_V3:
+               flush_dcache_page(pgv_to_page(&h.h3->tp_status));
+               return h.h3->tp_status;
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
@@ -476,6 +481,9 @@ static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
                h.h2->tp_nsec = ts.tv_nsec;
                break;
        case TPACKET_V3:
+               h.h3->tp_sec = ts.tv_sec;
+               h.h3->tp_nsec = ts.tv_nsec;
+               break;
        default:
                WARN(1, "TPACKET version not supported.\n");
                BUG();
@@ -2510,6 +2518,13 @@ static int tpacket_parse_header(struct packet_sock *po, void *frame,
        ph.raw = frame;
 
        switch (po->tp_version) {
+       case TPACKET_V3:
+               if (ph.h3->tp_next_offset != 0) {
+                       pr_warn_once("variable sized slot not supported");
+                       return -EINVAL;
+               }
+               tp_len = ph.h3->tp_len;
+               break;
        case TPACKET_V2:
                tp_len = ph.h2->tp_len;
                break;
@@ -2529,6 +2544,9 @@ static int tpacket_parse_header(struct packet_sock *po, void *frame,
                off_max = po->tx_ring.frame_size - tp_len;
                if (po->sk.sk_type == SOCK_DGRAM) {
                        switch (po->tp_version) {
+                       case TPACKET_V3:
+                               off = ph.h3->tp_net;
+                               break;
                        case TPACKET_V2:
                                off = ph.h2->tp_net;
                                break;
@@ -2538,6 +2556,9 @@ static int tpacket_parse_header(struct packet_sock *po, void *frame,
                        }
                } else {
                        switch (po->tp_version) {
+                       case TPACKET_V3:
+                               off = ph.h3->tp_mac;
+                               break;
                        case TPACKET_V2:
                                off = ph.h2->tp_mac;
                                break;
@@ -4132,11 +4153,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        struct tpacket_req *req = &req_u->req;
 
        lock_sock(sk);
-       /* Opening a Tx-ring is NOT supported in TPACKET_V3 */
-       if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
-               net_warn_ratelimited("Tx-ring is not supported.\n");
-               goto out;
-       }
 
        rb = tx_ring ? &po->tx_ring : &po->rx_ring;
        rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
@@ -4196,11 +4212,19 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                        goto out;
                switch (po->tp_version) {
                case TPACKET_V3:
-               /* Transmit path is not supported. We checked
-                * it above but just being paranoid
-                */
-                       if (!tx_ring)
+                       /* Block transmit is not supported yet */
+                       if (!tx_ring) {
                                init_prb_bdqc(po, rb, pg_vec, req_u);
+                       } else {
+                               struct tpacket_req3 *req3 = &req_u->req3;
+
+                               if (req3->tp_retire_blk_tov ||
+                                   req3->tp_sizeof_priv ||
+                                   req3->tp_feature_req_word) {
+                                       err = -EINVAL;
+                                       goto out;
+                               }
+                       }
                        break;
                default:
                        break;
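
With the TPACKET_V3 guard removed and the tpacket_req3 checks in place, a
V3 transmit ring can now be opened from userspace.  A minimal sketch with
error handling elided and ring sizes chosen arbitrarily; per the checks
above, the block-timeout and private-area fields must stay zero on the TX
path:

        #include <arpa/inet.h>
        #include <linux/if_ether.h>
        #include <linux/if_packet.h>
        #include <sys/mman.h>
        #include <sys/socket.h>

        static int open_v3_tx_ring(void)
        {
                int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
                int ver = TPACKET_V3;
                struct tpacket_req3 req = {
                        .tp_block_size = 1 << 22,
                        .tp_frame_size = 1 << 11,
                        .tp_block_nr   = 16,
                };

                req.tp_frame_nr = (req.tp_block_size / req.tp_frame_size) *
                                  req.tp_block_nr;
                /* tp_retire_blk_tov, tp_sizeof_priv and tp_feature_req_word
                 * stay zero: packet_set_ring() rejects them for TX rings.
                 */
                setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
                setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));
                mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
                     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
                return fd;
        }

Frames written into the ring must also keep tp_next_offset at zero, as
enforced by tpacket_parse_header() above.
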
index 0ed68f0238bf9416f1cc92928b61a385a1055886..7ef1c881ae7417e500630df398a900dddd598092 100644 (file)
@@ -73,8 +73,7 @@ static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
 {
        struct packet_diag_ring pdr;
 
-       if (!ring->pg_vec || ((ver > TPACKET_V2) &&
-                               (nl_type == PACKET_DIAG_TX_RING)))
+       if (!ring->pg_vec)
                return 0;
 
        pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
diff --git a/net/psample/Kconfig b/net/psample/Kconfig
new file mode 100644 (file)
index 0000000..d850246
--- /dev/null
@@ -0,0 +1,15 @@
+#
+# psample packet sampling configuration
+#
+
+menuconfig PSAMPLE
+       depends on NET
+       tristate "Packet-sampling netlink channel"
+       default n
+       help
+         Say Y here to add support for packet-sampling netlink channel.
+         This netlink channel allows transferring packets alongside some
+         metadata to userspace.
+
+         To compile this support as a module, choose M here: the module will
+         be called psample.
diff --git a/net/psample/Makefile b/net/psample/Makefile
new file mode 100644 (file)
index 0000000..609b0a7
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the psample netlink channel
+#
+
+obj-$(CONFIG_PSAMPLE) += psample.o
diff --git a/net/psample/psample.c b/net/psample/psample.c
new file mode 100644 (file)
index 0000000..8aa58a9
--- /dev/null
@@ -0,0 +1,301 @@
+/*
+ * net/psample/psample.c - Netlink channel for packet sampling
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#include <net/psample.h>
+#include <linux/spinlock.h>
+
+#define PSAMPLE_MAX_PACKET_SIZE 0xffff
+
+static LIST_HEAD(psample_groups_list);
+static DEFINE_SPINLOCK(psample_groups_lock);
+
+/* multicast groups */
+enum psample_nl_multicast_groups {
+       PSAMPLE_NL_MCGRP_CONFIG,
+       PSAMPLE_NL_MCGRP_SAMPLE,
+};
+
+static const struct genl_multicast_group psample_nl_mcgrps[] = {
+       [PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
+       [PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
+};
+
+static struct genl_family psample_nl_family __ro_after_init;
+
+static int psample_group_nl_fill(struct sk_buff *msg,
+                                struct psample_group *group,
+                                enum psample_command cmd, u32 portid, u32 seq,
+                                int flags)
+{
+       void *hdr;
+       int ret;
+
+       hdr = genlmsg_put(msg, portid, seq, &psample_nl_family, flags, cmd);
+       if (!hdr)
+               return -EMSGSIZE;
+
+       ret = nla_put_u32(msg, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
+       if (ret < 0)
+               goto error;
+
+       ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_REFCOUNT, group->refcount);
+       if (ret < 0)
+               goto error;
+
+       ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_SEQ, group->seq);
+       if (ret < 0)
+               goto error;
+
+       genlmsg_end(msg, hdr);
+       return 0;
+
+error:
+       genlmsg_cancel(msg, hdr);
+       return -EMSGSIZE;
+}
+
+static int psample_nl_cmd_get_group_dumpit(struct sk_buff *msg,
+                                          struct netlink_callback *cb)
+{
+       struct psample_group *group;
+       int start = cb->args[0];
+       int idx = 0;
+       int err;
+
+       spin_lock(&psample_groups_lock);
+       list_for_each_entry(group, &psample_groups_list, list) {
+               if (!net_eq(group->net, sock_net(msg->sk)))
+                       continue;
+               if (idx < start) {
+                       idx++;
+                       continue;
+               }
+               err = psample_group_nl_fill(msg, group, PSAMPLE_CMD_NEW_GROUP,
+                                           NETLINK_CB(cb->skb).portid,
+                                           cb->nlh->nlmsg_seq, NLM_F_MULTI);
+               if (err)
+                       break;
+               idx++;
+       }
+
+       spin_unlock(&psample_groups_lock);
+       cb->args[0] = idx;
+       return msg->len;
+}
+
+static const struct genl_ops psample_nl_ops[] = {
+       {
+               .cmd = PSAMPLE_CMD_GET_GROUP,
+               .dumpit = psample_nl_cmd_get_group_dumpit,
+               /* can be retrieved by unprivileged users */
+       }
+};
+
+static struct genl_family psample_nl_family __ro_after_init = {
+       .name           = PSAMPLE_GENL_NAME,
+       .version        = PSAMPLE_GENL_VERSION,
+       .maxattr        = PSAMPLE_ATTR_MAX,
+       .netnsok        = true,
+       .module         = THIS_MODULE,
+       .mcgrps         = psample_nl_mcgrps,
+       .ops            = psample_nl_ops,
+       .n_ops          = ARRAY_SIZE(psample_nl_ops),
+       .n_mcgrps       = ARRAY_SIZE(psample_nl_mcgrps),
+};
+
+static void psample_group_notify(struct psample_group *group,
+                                enum psample_command cmd)
+{
+       struct sk_buff *msg;
+       int err;
+
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+       if (!msg)
+               return;
+
+       err = psample_group_nl_fill(msg, group, cmd, 0, 0, NLM_F_MULTI);
+       if (!err)
+               genlmsg_multicast_netns(&psample_nl_family, group->net, msg, 0,
+                                       PSAMPLE_NL_MCGRP_CONFIG, GFP_ATOMIC);
+       else
+               nlmsg_free(msg);
+}
+
+static struct psample_group *psample_group_create(struct net *net,
+                                                 u32 group_num)
+{
+       struct psample_group *group;
+
+       group = kzalloc(sizeof(*group), GFP_ATOMIC);
+       if (!group)
+               return NULL;
+
+       group->net = net;
+       group->group_num = group_num;
+       list_add_tail(&group->list, &psample_groups_list);
+
+       psample_group_notify(group, PSAMPLE_CMD_NEW_GROUP);
+       return group;
+}
+
+static void psample_group_destroy(struct psample_group *group)
+{
+       psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
+       list_del(&group->list);
+       kfree(group);
+}
+
+static struct psample_group *
+psample_group_lookup(struct net *net, u32 group_num)
+{
+       struct psample_group *group;
+
+       list_for_each_entry(group, &psample_groups_list, list)
+               if ((group->group_num == group_num) && (group->net == net))
+                       return group;
+       return NULL;
+}
+
+struct psample_group *psample_group_get(struct net *net, u32 group_num)
+{
+       struct psample_group *group;
+
+       spin_lock(&psample_groups_lock);
+
+       group = psample_group_lookup(net, group_num);
+       if (!group) {
+               group = psample_group_create(net, group_num);
+               if (!group)
+                       goto out;
+       }
+       group->refcount++;
+
+out:
+       spin_unlock(&psample_groups_lock);
+       return group;
+}
+EXPORT_SYMBOL_GPL(psample_group_get);
+
+void psample_group_put(struct psample_group *group)
+{
+       spin_lock(&psample_groups_lock);
+
+       if (--group->refcount == 0)
+               psample_group_destroy(group);
+
+       spin_unlock(&psample_groups_lock);
+}
+EXPORT_SYMBOL_GPL(psample_group_put);
+
+void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
+                          u32 trunc_size, int in_ifindex, int out_ifindex,
+                          u32 sample_rate)
+{
+       struct sk_buff *nl_skb;
+       int data_len;
+       int meta_len;
+       void *data;
+       int ret;
+
+       meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) +
+                  (out_ifindex ? nla_total_size(sizeof(u16)) : 0) +
+                  nla_total_size(sizeof(u32)) +        /* sample_rate */
+                  nla_total_size(sizeof(u32)) +        /* orig_size */
+                  nla_total_size(sizeof(u32)) +        /* group_num */
+                  nla_total_size(sizeof(u32));         /* seq */
+
+       data_len = min(skb->len, trunc_size);
+       if (meta_len + nla_total_size(data_len) > PSAMPLE_MAX_PACKET_SIZE)
+               data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
+                           - NLA_ALIGNTO;
+
+       nl_skb = genlmsg_new(meta_len + data_len, GFP_ATOMIC);
+       if (unlikely(!nl_skb))
+               return;
+
+       data = genlmsg_put(nl_skb, 0, 0, &psample_nl_family, 0,
+                          PSAMPLE_CMD_SAMPLE);
+       if (unlikely(!data))
+               goto error;
+
+       if (in_ifindex) {
+               ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_IIFINDEX, in_ifindex);
+               if (unlikely(ret < 0))
+                       goto error;
+       }
+
+       if (out_ifindex) {
+               ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OIFINDEX, out_ifindex);
+               if (unlikely(ret < 0))
+                       goto error;
+       }
+
+       ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_RATE, sample_rate);
+       if (unlikely(ret < 0))
+               goto error;
+
+       ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_ORIGSIZE, skb->len);
+       if (unlikely(ret < 0))
+               goto error;
+
+       ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
+       if (unlikely(ret < 0))
+               goto error;
+
+       ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_GROUP_SEQ, group->seq++);
+       if (unlikely(ret < 0))
+               goto error;
+
+       if (data_len) {
+               int nla_len = nla_total_size(data_len);
+               struct nlattr *nla;
+
+               nla = (struct nlattr *)skb_put(nl_skb, nla_len);
+               nla->nla_type = PSAMPLE_ATTR_DATA;
+               nla->nla_len = nla_attr_size(data_len);
+
+               if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
+                       goto error;
+       }
+
+       genlmsg_end(nl_skb, data);
+       genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
+                               PSAMPLE_NL_MCGRP_SAMPLE, GFP_ATOMIC);
+
+       return;
+error:
+       pr_err_ratelimited("Could not create psample log message\n");
+       nlmsg_free(nl_skb);
+}
+EXPORT_SYMBOL_GPL(psample_sample_packet);
+
+static int __init psample_module_init(void)
+{
+       return genl_register_family(&psample_nl_family);
+}
+
+static void __exit psample_module_exit(void)
+{
+       genl_unregister_family(&psample_nl_family);
+}
+
+module_init(psample_module_init);
+module_exit(psample_module_exit);
+
+MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>");
+MODULE_DESCRIPTION("netlink channel for packet sampling");
+MODULE_LICENSE("GPL v2");
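
A minimal sketch of a kernel-side caller, using only the symbols exported
above (the group number and truncation length are arbitrary choices for
illustration):

        #include <net/psample.h>

        static void sample_one(struct net *net, struct sk_buff *skb,
                               int in_ifindex, u32 rate)
        {
                struct psample_group *grp;

                grp = psample_group_get(net, 1); /* group 1, arbitrary */
                if (!grp)
                        return;
                /* Copy at most 128 bytes of payload; no egress ifindex. */
                psample_sample_packet(grp, skb, 128, in_ifindex, 0, rate);
                psample_group_put(grp);
        }

Userspace then subscribes to the PSAMPLE_NL_MCGRP_SAMPLE_NAME multicast
group of the psample generic netlink family to receive the samples.
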
index 2ac1e6194be35fced1706ebf5351bedb7cf01411..b405f77d664ca0a10472f78c66294c21fcc03680 100644 (file)
@@ -298,6 +298,33 @@ static int rds_enable_recvtstamp(struct sock *sk, char __user *optval,
        return 0;
 }
 
+static int rds_recv_track_latency(struct rds_sock *rs, char __user *optval,
+                                 int optlen)
+{
+       struct rds_rx_trace_so trace;
+       int i;
+
+       if (optlen != sizeof(struct rds_rx_trace_so))
+               return -EFAULT;
+
+       if (copy_from_user(&trace, optval, sizeof(trace)))
+               return -EFAULT;
+
+       if (trace.rx_traces > RDS_MSG_RX_DGRAM_TRACE_MAX)
+               return -EFAULT;
+
+       rs->rs_rx_traces = trace.rx_traces;
+       for (i = 0; i < rs->rs_rx_traces; i++) {
+               if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) {
+                       rs->rs_rx_traces = 0;
+                       return -EFAULT;
+               }
+               rs->rs_rx_trace[i] = trace.rx_trace_pos[i];
+       }
+
+       return 0;
+}
+
 static int rds_setsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, unsigned int optlen)
 {
@@ -338,6 +365,9 @@ static int rds_setsockopt(struct socket *sock, int level, int optname,
                ret = rds_enable_recvtstamp(sock->sk, optval, optlen);
                release_sock(sock->sk);
                break;
+       case SO_RDS_MSG_RXPATH_LATENCY:
+               ret = rds_recv_track_latency(rs, optval, optlen);
+               break;
        default:
                ret = -ENOPROTOOPT;
        }
@@ -484,6 +514,7 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
        INIT_LIST_HEAD(&rs->rs_cong_list);
        spin_lock_init(&rs->rs_rdma_lock);
        rs->rs_rdma_keys = RB_ROOT;
+       rs->rs_rx_traces = 0;
 
        spin_lock_bh(&rds_sock_lock);
        list_add_tail(&rs->rs_item, &rds_sock_list);
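
From userspace the new option is a plain setsockopt() on an RDS socket.  A
hedged sketch, assuming struct rds_rx_trace_so and the related constants are
exported via <linux/rds.h>; the trace-point index 0 is a placeholder and
must be below RDS_MSG_RX_DGRAM_TRACE_MAX:

        #include <sys/socket.h>
        #include <linux/rds.h>

        static int enable_rx_latency_trace(int rds_fd)
        {
                struct rds_rx_trace_so trace = {
                        .rx_traces = 1,
                };

                trace.rx_trace_pos[0] = 0;      /* placeholder trace point */
                return setsockopt(rds_fd, SOL_RDS, SO_RDS_MSG_RXPATH_LATENCY,
                                  &trace, sizeof(trace));
        }
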
index 095f6ce583fee33eee431dc88e04a26c01db2d4d..3a915bedb76c52995fdf5c09fa747f3a236a169b 100644 (file)
@@ -176,8 +176,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        if (!trans) {
                ret = -EADDRNOTAVAIL;
                rds_remove_bound(rs);
-               printk_ratelimited(KERN_INFO "RDS: rds_bind() could not find a transport, "
-                               "load rds_tcp or rds_rdma?\n");
+               pr_info_ratelimited("RDS: %s could not find a transport for %pI4, load rds_tcp or rds_rdma?\n",
+                                   __func__, &sin->sin_addr.s_addr);
                goto out;
        }
 
index fe9d31c0b22d40ef8b45fd0f17cccbcfd893de16..0e04dcceb1d416438be8bb40fc68253f336f631d 100644 (file)
@@ -545,11 +545,11 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
 }
 EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
 
-void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
-                            struct rds_info_iterator *iter,
-                            struct rds_info_lengths *lens,
-                            int (*visitor)(struct rds_conn_path *, void *),
-                            size_t item_len)
+static void rds_walk_conn_path_info(struct socket *sock, unsigned int len,
+                                   struct rds_info_iterator *iter,
+                                   struct rds_info_lengths *lens,
+                                   int (*visitor)(struct rds_conn_path *, void *),
+                                   size_t item_len)
 {
        u64  buffer[(item_len + 7) / 8];
        struct hlist_head *head;
index 5680d90b0b779ec41d019f1d0797dca7b5072ece..8d70884d7bb60294c1402892bff3ebe4c81d3663 100644 (file)
@@ -111,6 +111,9 @@ static void rds_ib_dev_free(struct work_struct *work)
                kfree(i_ipaddr);
        }
 
+       if (rds_ibdev->vector_load)
+               kfree(rds_ibdev->vector_load);
+
        kfree(rds_ibdev);
 }
 
@@ -159,6 +162,14 @@ static void rds_ib_add_one(struct ib_device *device)
        rds_ibdev->max_initiator_depth = device->attrs.max_qp_init_rd_atom;
        rds_ibdev->max_responder_resources = device->attrs.max_qp_rd_atom;
 
+       rds_ibdev->vector_load = kzalloc(sizeof(int) * device->num_comp_vectors,
+                                        GFP_KERNEL);
+       if (!rds_ibdev->vector_load) {
+               pr_err("RDS/IB: %s failed to allocate vector memory\n",
+                       __func__);
+               goto put_dev;
+       }
+
        rds_ibdev->dev = device;
        rds_ibdev->pd = ib_alloc_pd(device, 0);
        if (IS_ERR(rds_ibdev->pd)) {
index 45ac8e8e58f412267e134bb5ae1fab2b59809c09..540458928f3c8eab4529b4072826b882563c941e 100644 (file)
 
 #define RDS_IB_DEFAULT_RECV_WR         1024
 #define RDS_IB_DEFAULT_SEND_WR         256
-#define RDS_IB_DEFAULT_FR_WR           512
+#define RDS_IB_DEFAULT_FR_WR           256
+#define RDS_IB_DEFAULT_FR_INV_WR       256
 
-#define RDS_IB_DEFAULT_RETRY_COUNT     2
+#define RDS_IB_DEFAULT_RETRY_COUNT     1
 
 #define RDS_IB_SUPPORTED_PROTOCOLS     0x00000003      /* minor versions supported */
 
@@ -125,6 +126,7 @@ struct rds_ib_connection {
 
        /* To control the number of wrs from fastreg */
        atomic_t                i_fastreg_wrs;
+       atomic_t                i_fastunreg_wrs;
 
        /* interrupt handling */
        struct tasklet_struct   i_send_tasklet;
@@ -149,6 +151,7 @@ struct rds_ib_connection {
        u64                     i_ack_recv;     /* last ACK received */
        struct rds_ib_refill_cache i_cache_incs;
        struct rds_ib_refill_cache i_cache_frags;
+       atomic_t                i_cache_allocs;
 
        /* sending acks */
        unsigned long           i_ack_flags;
@@ -179,6 +182,14 @@ struct rds_ib_connection {
 
        /* Batched completions */
        unsigned int            i_unsignaled_wrs;
+
+       /* Endpoint role in connection */
+       bool                    i_active_side;
+       atomic_t                i_cq_quiesce;
+
+       /* Send/Recv vectors */
+       int                     i_scq_vector;
+       int                     i_rcq_vector;
 };
 
 /* This assumes that atomic_t is at least 32 bits */
@@ -221,6 +232,7 @@ struct rds_ib_device {
        spinlock_t              spinlock;       /* protect the above */
        atomic_t                refcount;
        struct work_struct      free_work;
+       int                     *vector_load;
 };
 
 #define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
@@ -249,6 +261,8 @@ struct rds_ib_statistics {
        uint64_t        s_ib_rx_refill_from_cq;
        uint64_t        s_ib_rx_refill_from_thread;
        uint64_t        s_ib_rx_alloc_limit;
+       uint64_t        s_ib_rx_total_frags;
+       uint64_t        s_ib_rx_total_incs;
        uint64_t        s_ib_rx_credit_updates;
        uint64_t        s_ib_ack_sent;
        uint64_t        s_ib_ack_send_failure;
@@ -271,6 +285,8 @@ struct rds_ib_statistics {
        uint64_t        s_ib_rdma_mr_1m_reused;
        uint64_t        s_ib_atomic_cswp;
        uint64_t        s_ib_atomic_fadd;
+       uint64_t        s_ib_recv_added_to_cache;
+       uint64_t        s_ib_recv_removed_from_cache;
 };
 
 extern struct workqueue_struct *rds_ib_wq;
@@ -401,6 +417,8 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);
 /* ib_stats.c */
 DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
 #define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
+#define rds_ib_stats_add(member, count) \
+               rds_stats_add_which(rds_ib_stats, member, count)
 unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
                                    unsigned int avail);
 
index 5b2ab95afa072f4970e25bbc856e916fe28187aa..ce3775abc6e7a1d30e335aaea749a9840e949786 100644 (file)
@@ -113,24 +113,26 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
        }
 
        if (conn->c_version < RDS_PROTOCOL(3, 1)) {
-               printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
-                      " no longer supported\n",
-                      &conn->c_faddr,
-                      RDS_PROTOCOL_MAJOR(conn->c_version),
-                      RDS_PROTOCOL_MINOR(conn->c_version));
+               pr_notice("RDS/IB: Connection <%pI4,%pI4> version %u.%u no longer supported\n",
+                         &conn->c_laddr, &conn->c_faddr,
+                         RDS_PROTOCOL_MAJOR(conn->c_version),
+                         RDS_PROTOCOL_MINOR(conn->c_version));
                rds_conn_destroy(conn);
                return;
        } else {
-               printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
-                      &conn->c_faddr,
-                      RDS_PROTOCOL_MAJOR(conn->c_version),
-                      RDS_PROTOCOL_MINOR(conn->c_version),
-                      ic->i_flowctl ? ", flow control" : "");
+               pr_notice("RDS/IB: %s conn connected <%pI4,%pI4> version %u.%u%s\n",
+                         ic->i_active_side ? "Active" : "Passive",
+                         &conn->c_laddr, &conn->c_faddr,
+                         RDS_PROTOCOL_MAJOR(conn->c_version),
+                         RDS_PROTOCOL_MINOR(conn->c_version),
+                         ic->i_flowctl ? ", flow control" : "");
        }
 
-       /*
-        * Init rings and fill recv. this needs to wait until protocol negotiation
-        * is complete, since ring layout is different from 3.0 to 3.1.
+       atomic_set(&ic->i_cq_quiesce, 0);
+
+       /* Init rings and fill recv. this needs to wait until protocol
+        * negotiation is complete, since ring layout is different
+        * from 3.1 to 4.1.
         */
        rds_ib_send_init_ring(ic);
        rds_ib_recv_init_ring(ic);
@@ -267,6 +269,10 @@ static void rds_ib_tasklet_fn_send(unsigned long data)
 
        rds_ib_stats_inc(s_ib_tasklet_call);
 
+       /* if cq has been already reaped, ignore incoming cq event */
+       if (atomic_read(&ic->i_cq_quiesce))
+               return;
+
        poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
        ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
        poll_scq(ic, ic->i_send_cq, ic->i_send_wc);
@@ -308,6 +314,10 @@ static void rds_ib_tasklet_fn_recv(unsigned long data)
 
        rds_ib_stats_inc(s_ib_tasklet_call);
 
+       /* if cq has been already reaped, ignore incoming cq event */
+       if (atomic_read(&ic->i_cq_quiesce))
+               return;
+
        memset(&state, 0, sizeof(state));
        poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state);
        ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
@@ -358,6 +368,28 @@ static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context)
        tasklet_schedule(&ic->i_send_tasklet);
 }
 
+static inline int ibdev_get_unused_vector(struct rds_ib_device *rds_ibdev)
+{
+       int min = rds_ibdev->vector_load[rds_ibdev->dev->num_comp_vectors - 1];
+       int index = rds_ibdev->dev->num_comp_vectors - 1;
+       int i;
+
+       for (i = rds_ibdev->dev->num_comp_vectors - 1; i >= 0; i--) {
+               if (rds_ibdev->vector_load[i] < min) {
+                       index = i;
+                       min = rds_ibdev->vector_load[i];
+               }
+       }
+
+       rds_ibdev->vector_load[index]++;
+       return index;
+}
+
+static inline void ibdev_put_vector(struct rds_ib_device *rds_ibdev, int index)
+{
+       rds_ibdev->vector_load[index]--;
+}
+
 /*
  * This needs to be very careful to not leave IS_ERR pointers around for
  * cleanup to trip over.
@@ -383,7 +415,10 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
         * completion queue and send queue. This extra space is used for FRMR
         * registration and invalidation work requests
         */
-       fr_queue_space = (rds_ibdev->use_fastreg ? RDS_IB_DEFAULT_FR_WR : 0);
+       fr_queue_space = rds_ibdev->use_fastreg ?
+                        (RDS_IB_DEFAULT_FR_WR + 1) +
+                        (RDS_IB_DEFAULT_FR_INV_WR + 1)
+                        : 0;
 
        /* add the conn now so that connection establishment has the dev */
        rds_ib_add_conn(rds_ibdev, conn);
@@ -396,25 +431,30 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
        /* Protection domain and memory range */
        ic->i_pd = rds_ibdev->pd;
 
+       ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev);
        cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1;
-
+       cq_attr.comp_vector = ic->i_scq_vector;
        ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send,
                                     rds_ib_cq_event_handler, conn,
                                     &cq_attr);
        if (IS_ERR(ic->i_send_cq)) {
                ret = PTR_ERR(ic->i_send_cq);
                ic->i_send_cq = NULL;
+               ibdev_put_vector(rds_ibdev, ic->i_scq_vector);
                rdsdebug("ib_create_cq send failed: %d\n", ret);
                goto out;
        }
 
+       ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev);
        cq_attr.cqe = ic->i_recv_ring.w_nr;
+       cq_attr.comp_vector = ic->i_rcq_vector;
        ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv,
                                     rds_ib_cq_event_handler, conn,
                                     &cq_attr);
        if (IS_ERR(ic->i_recv_cq)) {
                ret = PTR_ERR(ic->i_recv_cq);
                ic->i_recv_cq = NULL;
+               ibdev_put_vector(rds_ibdev, ic->i_rcq_vector);
                rdsdebug("ib_create_cq recv failed: %d\n", ret);
                goto out;
        }
@@ -445,6 +485,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
        attr.send_cq = ic->i_send_cq;
        attr.recv_cq = ic->i_recv_cq;
        atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR);
+       atomic_set(&ic->i_fastunreg_wrs, RDS_IB_DEFAULT_FR_INV_WR);
 
        /*
         * XXX this can fail if max_*_wr is too large?  Are we supposed
@@ -682,6 +723,7 @@ out:
                if (ic->i_cm_id == cm_id)
                        ret = 0;
        }
+       ic->i_active_side = true;
        return ret;
 }
 
@@ -767,17 +809,27 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
                wait_event(rds_ib_ring_empty_wait,
                           rds_ib_ring_empty(&ic->i_recv_ring) &&
                           (atomic_read(&ic->i_signaled_sends) == 0) &&
-                          (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR));
+                          (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR) &&
+                          (atomic_read(&ic->i_fastunreg_wrs) == RDS_IB_DEFAULT_FR_INV_WR));
                tasklet_kill(&ic->i_send_tasklet);
                tasklet_kill(&ic->i_recv_tasklet);
 
+               atomic_set(&ic->i_cq_quiesce, 1);
+
                /* first destroy the ib state that generates callbacks */
                if (ic->i_cm_id->qp)
                        rdma_destroy_qp(ic->i_cm_id);
-               if (ic->i_send_cq)
+               if (ic->i_send_cq) {
+                       if (ic->rds_ibdev)
+                               ibdev_put_vector(ic->rds_ibdev, ic->i_scq_vector);
                        ib_destroy_cq(ic->i_send_cq);
-               if (ic->i_recv_cq)
+               }
+
+               if (ic->i_recv_cq) {
+                       if (ic->rds_ibdev)
+                               ibdev_put_vector(ic->rds_ibdev, ic->i_rcq_vector);
                        ib_destroy_cq(ic->i_recv_cq);
+               }
 
                /* then free the resources that ib callbacks use */
                if (ic->i_send_hdrs)
@@ -855,6 +907,7 @@ void rds_ib_conn_path_shutdown(struct rds_conn_path *cp)
        ic->i_sends = NULL;
        vfree(ic->i_recvs);
        ic->i_recvs = NULL;
+       ic->i_active_side = false;
 }
 
 int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
index d921adc62765dba0bbaec9a7489227348405082e..48332a6ed7383c51def7402dcfef1e581fa677f7 100644 (file)
@@ -104,14 +104,15 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
        struct rds_ib_frmr *frmr = &ibmr->u.frmr;
        struct ib_send_wr *failed_wr;
        struct ib_reg_wr reg_wr;
-       int ret;
+       int ret, off = 0;
 
        while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
                atomic_inc(&ibmr->ic->i_fastreg_wrs);
                cpu_relax();
        }
 
-       ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len, 0, PAGE_SIZE);
+       ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
+                               &off, PAGE_SIZE);
        if (unlikely(ret != ibmr->sg_len))
                return ret < 0 ? ret : -EINVAL;
 
@@ -240,8 +241,8 @@ static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
        if (frmr->fr_state != FRMR_IS_INUSE)
                goto out;
 
-       while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
-               atomic_inc(&ibmr->ic->i_fastreg_wrs);
+       while (atomic_dec_return(&ibmr->ic->i_fastunreg_wrs) <= 0) {
+               atomic_inc(&ibmr->ic->i_fastunreg_wrs);
                cpu_relax();
        }
 
@@ -260,7 +261,7 @@ static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
        if (unlikely(ret)) {
                frmr->fr_state = FRMR_IS_STALE;
                frmr->fr_inv = false;
-               atomic_inc(&ibmr->ic->i_fastreg_wrs);
+               atomic_inc(&ibmr->ic->i_fastunreg_wrs);
                pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret);
                goto out;
        }
@@ -288,9 +289,10 @@ void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
        if (frmr->fr_inv) {
                frmr->fr_state = FRMR_IS_FREE;
                frmr->fr_inv = false;
+               atomic_inc(&ic->i_fastreg_wrs);
+       } else {
+               atomic_inc(&ic->i_fastunreg_wrs);
        }
-
-       atomic_inc(&ic->i_fastreg_wrs);
 }
 
 void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
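
Both the registration and invalidation paths above throttle themselves with
the same atomic busy-wait idiom, and rds_ib_mr_cqe_handler() hands slots back
by incrementing the matching counter. The idiom in isolation, as a sketch
with a hypothetical helper name:

/* Claim one slot from a work-request budget, spinning until a completion
 * returns a slot via atomic_inc() on the same counter.
 */
static void rds_ib_wr_budget_claim(atomic_t *budget)
{
	while (atomic_dec_return(budget) <= 0) {
		atomic_inc(budget);	/* undo the failed claim */
		cpu_relax();
	}
}
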
index 606a11f681d28b18879629e758b89f8a9ae8b33f..e10624aa6959b596a2629a9f18bb25504428545f 100644 (file)
@@ -194,6 +194,8 @@ static void rds_ib_frag_free(struct rds_ib_connection *ic,
        rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));
 
        rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
+       atomic_add(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
+       rds_ib_stats_add(s_ib_recv_added_to_cache, RDS_FRAG_SIZE);
 }
 
 /* Recycle inc after freeing attached frags */
@@ -261,6 +263,7 @@ static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *i
                        atomic_dec(&rds_ib_allocation);
                        return NULL;
                }
+               rds_ib_stats_inc(s_ib_rx_total_incs);
        }
        INIT_LIST_HEAD(&ibinc->ii_frags);
        rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);
@@ -278,6 +281,8 @@ static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic
        cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
        if (cache_item) {
                frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
+               atomic_sub(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
+               rds_ib_stats_add(s_ib_recv_removed_from_cache, RDS_FRAG_SIZE);
        } else {
                frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
                if (!frag)
@@ -290,6 +295,7 @@ static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic
                        kmem_cache_free(rds_ib_frag_slab, frag);
                        return NULL;
                }
+               rds_ib_stats_inc(s_ib_rx_total_frags);
        }
 
        INIT_LIST_HEAD(&frag->f_item);
@@ -905,8 +911,12 @@ static void rds_ib_process_recv(struct rds_connection *conn,
                ic->i_ibinc = ibinc;
 
                hdr = &ibinc->ii_inc.i_hdr;
+               ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
+                               local_clock();
                memcpy(hdr, ihdr, sizeof(*hdr));
                ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
+               ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_START] =
+                               local_clock();
 
                rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
                         ic->i_recv_data_rem, hdr->h_flags);
@@ -980,8 +990,8 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
        } else {
                /* We expect errors as the qp is drained during shutdown */
                if (rds_conn_up(conn) || rds_conn_connecting(conn))
-                       rds_ib_conn_error(conn, "recv completion on %pI4 had status %u (%s), disconnecting and reconnecting\n",
-                                         &conn->c_faddr,
+                       rds_ib_conn_error(conn, "recv completion on <%pI4,%pI4> had status %u (%s), disconnecting and reconnecting\n",
+                                         &conn->c_laddr, &conn->c_faddr,
                                          wc->status,
                                          ib_wc_status_msg(wc->status));
        }
index 84d90c97332f9178552f34f2e13973a351f6213a..6ab39dbcca0197339cb8c2e9ec5676a778c4d23e 100644 (file)
@@ -69,16 +69,6 @@ static void rds_ib_send_complete(struct rds_message *rm,
        complete(rm, notify_status);
 }
 
-static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
-                                  struct rm_data_op *op,
-                                  int wc_status)
-{
-       if (op->op_nents)
-               ib_dma_unmap_sg(ic->i_cm_id->device,
-                               op->op_sg, op->op_nents,
-                               DMA_TO_DEVICE);
-}
-
 static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
                                   struct rm_rdma_op *op,
                                   int wc_status)
@@ -139,6 +129,21 @@ static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
                rds_ib_stats_inc(s_ib_atomic_fadd);
 }
 
+static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
+                                  struct rm_data_op *op,
+                                  int wc_status)
+{
+       struct rds_message *rm = container_of(op, struct rds_message, data);
+
+       if (op->op_nents)
+               ib_dma_unmap_sg(ic->i_cm_id->device,
+                               op->op_sg, op->op_nents,
+                               DMA_TO_DEVICE);
+
+       if (rm->rdma.op_active && rm->data.op_notify)
+               rds_ib_send_unmap_rdma(ic, &rm->rdma, wc_status);
+}
+
 /*
  * Unmap the resources associated with a struct send_work.
  *
@@ -300,8 +305,8 @@ void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
 
        /* We expect errors as the qp is drained during shutdown */
        if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
-               rds_ib_conn_error(conn, "send completion on %pI4 had status %u (%s), disconnecting and reconnecting\n",
-                                 &conn->c_faddr, wc->status,
+               rds_ib_conn_error(conn, "send completion on <%pI4,%pI4> had status %u (%s), disconnecting and reconnecting\n",
+                                 &conn->c_laddr, &conn->c_faddr, wc->status,
                                  ib_wc_status_msg(wc->status));
        }
 }
@@ -765,7 +770,6 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
 
        work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
        if (work_alloc != 1) {
-               rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
                rds_ib_stats_inc(s_ib_tx_ring_full);
                ret = -ENOMEM;
                goto out;
index 7e78dca1f252c671741443cdf660f38da6c575a0..9252ad126335971fa202e5aee6e2babb55d969e0 100644 (file)
@@ -55,6 +55,8 @@ static const char *const rds_ib_stat_names[] = {
        "ib_rx_refill_from_cq",
        "ib_rx_refill_from_thread",
        "ib_rx_alloc_limit",
+       "ib_rx_total_frags",
+       "ib_rx_total_incs",
        "ib_rx_credit_updates",
        "ib_ack_sent",
        "ib_ack_send_failure",
index ea961144084fadb3ee98abd708ce5cd31eba42a5..f06fac4886b090f346090ac26dad8e1eaa8da471 100644 (file)
@@ -40,7 +40,6 @@
 /*
  * XXX
  *  - build with sparse
- *  - should we limit the size of a mr region?  let transport return failure?
  *  - should we detect duplicate keys on a socket?  hmm.
  *  - an rdma is an mlock, apply rlimit?
  */
@@ -200,6 +199,14 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
                goto out;
        }
 
+       /* Restrict the size of an MR irrespective of the underlying
+        * transport. To account for unaligned MR regions, subtract one
+        * from nr_pages.
+        */
+       if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
+               ret = -EMSGSIZE;
+               goto out;
+       }
+
        rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
                args->vec.addr, args->vec.bytes, nr_pages);
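
The size cap referenced here, RDS_MAX_MSG_SIZE, is defined further down in
this series as 1 MB. A worked example of the arithmetic, assuming 4 KiB pages:

/* RDS_MAX_MSG_SIZE >> PAGE_SHIFT = (1 << 20) >> 12 = 256 pages.
 * An unaligned 1 MB region can straddle 257 pages, hence the check on
 * nr_pages - 1: up to 257 pages are accepted, anything larger fails
 * with -EMSGSIZE.
 */
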
 
@@ -415,7 +422,8 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
        mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
        if (!mr) {
-               printk(KERN_ERR "rds: trying to unuse MR with unknown r_key %u!\n", r_key);
+               pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
+                        r_key);
                spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
                return;
        }
@@ -626,6 +634,16 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
                }
                op->op_notifier->n_user_token = args->user_token;
                op->op_notifier->n_status = RDS_RDMA_SUCCESS;
+
+               /* Enable rdma notification on the data operation for composite
+                * rds messages, and make sure notification is enabled only on
+                * the data operation that follows it, so that the application
+                * gets notified only after the full message gets delivered.
+                */
+               if (rm->data.op_sg) {
+                       rm->rdma.op_notify = 0;
+                       rm->data.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
+               }
        }
 
        /* The cookie contains the R_Key of the remote memory region, and
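
This pairs with the rds_ib_send_unmap_data() change earlier in the diff: for
a composite message the notifier now fires from the data op's unmap path. A
hypothetical userspace request opting into that notification (struct
rds_rdma_args and its fields come from the uapi header, not this excerpt):

struct rds_rdma_args args = {
	/* notify only once the whole composite message is delivered */
	.flags		= RDS_RDMA_NOTIFY_ME,
	.user_token	= 42,	/* echoed back with the notification */
	/* .local_vec / .remote_vec filled in elsewhere */
};
/* passed to sendmsg() as a SOL_RDS / RDS_CMSG_RDMA_ARGS control message */
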
index d5f3117671575f84550c4bee7b4aef7753bd9f59..fc59821f0a27bd2a529c17a36c5b06a1d5d91a8e 100644 (file)
@@ -206,18 +206,13 @@ static int rds_rdma_init(void)
 {
        int ret;
 
-       ret = rds_rdma_listen_init();
+       ret = rds_ib_init();
        if (ret)
                goto out;
 
-       ret = rds_ib_init();
+       ret = rds_rdma_listen_init();
        if (ret)
-               goto err_ib_init;
-
-       goto out;
-
-err_ib_init:
-       rds_rdma_listen_stop();
+               rds_ib_exit();
 out:
        return ret;
 }
index ebbf909b87ec3f62abec2573dcd55f4054138848..07fff73dd4f3f956c2cab393a9e834bb1215fc24 100644 (file)
@@ -50,6 +50,9 @@ void rdsdebug(char *fmt, ...)
 #define RDS_FRAG_SHIFT 12
 #define RDS_FRAG_SIZE  ((unsigned int)(1 << RDS_FRAG_SHIFT))
 
+/* Used to limit both RDMA and non-RDMA RDS messages to 1 MB */
+#define RDS_MAX_MSG_SIZE       ((unsigned int)(1 << 20))
+
 #define RDS_CONG_MAP_BYTES     (65536 / 8)
 #define RDS_CONG_MAP_PAGES     (PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
 #define RDS_CONG_MAP_PAGE_BITS (PAGE_SIZE * 8)
@@ -250,6 +253,11 @@ struct rds_ext_header_rdma_dest {
 #define RDS_EXTHDR_GEN_NUM     6
 
 #define __RDS_EXTHDR_MAX       16 /* for now */
+#define RDS_RX_MAX_TRACES      (RDS_MSG_RX_DGRAM_TRACE_MAX + 1)
+#define        RDS_MSG_RX_HDR          0
+#define        RDS_MSG_RX_START        1
+#define        RDS_MSG_RX_END          2
+#define        RDS_MSG_RX_CMSG         3
 
 struct rds_incoming {
        atomic_t                i_refcount;
@@ -262,6 +270,7 @@ struct rds_incoming {
 
        rds_rdma_cookie_t       i_rdma_cookie;
        struct timeval          i_rx_tstamp;
+       u64                     i_rx_lat_trace[RDS_RX_MAX_TRACES];
 };
 
 struct rds_mr {
@@ -419,6 +428,7 @@ struct rds_message {
                } rdma;
                struct rm_data_op {
                        unsigned int            op_active:1;
+                       unsigned int            op_notify:1;
                        unsigned int            op_nents;
                        unsigned int            op_count;
                        unsigned int            op_dmasg;
@@ -571,6 +581,10 @@ struct rds_sock {
        unsigned char           rs_recverr,
                                rs_cong_monitor;
        u32                     rs_hash_initval;
+
+       /* Socket receive path trace points */
+       u8                      rs_rx_traces;
+       u8                      rs_rx_trace[RDS_MSG_RX_DGRAM_TRACE_MAX];
 };
 
 static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
@@ -630,6 +644,9 @@ struct rds_statistics {
        uint64_t        s_cong_update_received;
        uint64_t        s_cong_send_error;
        uint64_t        s_cong_send_blocked;
+       uint64_t        s_recv_bytes_added_to_socket;
+       uint64_t        s_recv_bytes_removed_from_socket;
+
 };
 
 /* af_rds.c */
index 9d0666e5fe35db4215ef9c7aae9903d73f1fc5b2..8b7e7b7f2c2dbebee31dd7626540b797f1fa353f 100644 (file)
@@ -43,6 +43,8 @@
 void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
                  __be32 saddr)
 {
+       int i;
+
        atomic_set(&inc->i_refcount, 1);
        INIT_LIST_HEAD(&inc->i_item);
        inc->i_conn = conn;
@@ -50,6 +52,9 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
        inc->i_rdma_cookie = 0;
        inc->i_rx_tstamp.tv_sec = 0;
        inc->i_rx_tstamp.tv_usec = 0;
+
+       for (i = 0; i < RDS_RX_MAX_TRACES; i++)
+               inc->i_rx_lat_trace[i] = 0;
 }
 EXPORT_SYMBOL_GPL(rds_inc_init);
 
@@ -94,6 +99,10 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
                return;
 
        rs->rs_rcv_bytes += delta;
+       if (delta > 0)
+               rds_stats_add(s_recv_bytes_added_to_socket, delta);
+       else
+               rds_stats_add(s_recv_bytes_removed_from_socket, -delta);
        now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
 
        rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
@@ -369,6 +378,7 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
                if (sock_flag(sk, SOCK_RCVTSTAMP))
                        do_gettimeofday(&inc->i_rx_tstamp);
                rds_inc_addref(inc);
+               inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock();
                list_add_tail(&inc->i_item, &rs->rs_recv_queue);
                __rds_wake_sk_sleep(sk);
        } else {
@@ -530,7 +540,7 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
                ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST,
                                sizeof(inc->i_rdma_cookie), &inc->i_rdma_cookie);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
        if ((inc->i_rx_tstamp.tv_sec != 0) &&
@@ -539,10 +549,30 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
                               sizeof(struct timeval),
                               &inc->i_rx_tstamp);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
-       return 0;
+       if (rs->rs_rx_traces) {
+               struct rds_cmsg_rx_trace t;
+               int i, j;
+
+               inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
+               t.rx_traces = rs->rs_rx_traces;
+               for (i = 0; i < rs->rs_rx_traces; i++) {
+                       j = rs->rs_rx_trace[i];
+                       t.rx_trace_pos[i] = j;
+                       t.rx_trace[i] = inc->i_rx_lat_trace[j + 1] -
+                                         inc->i_rx_lat_trace[j];
+               }
+
+               ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RXPATH_LATENCY,
+                              sizeof(t), &t);
+               if (ret)
+                       goto out;
+       }
+
+out:
+       return ret;
 }
 
 int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
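
The values stored in t.rx_trace[] are local_clock() deltas, i.e. nanoseconds
spent in each receive-path segment. A hypothetical userspace reader, assuming
the uapi struct rds_cmsg_rx_trace with the rx_traces, rx_trace_pos[] and
rx_trace[] members used above:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/rds.h>	/* assumed to provide SOL_RDS and the cmsg layout */

static void dump_rx_latency(struct msghdr *msg)
{
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		struct rds_cmsg_rx_trace *t;
		unsigned int i;

		if (cmsg->cmsg_level != SOL_RDS ||
		    cmsg->cmsg_type != RDS_CMSG_RXPATH_LATENCY)
			continue;

		t = (struct rds_cmsg_rx_trace *)CMSG_DATA(cmsg);
		for (i = 0; i < t->rx_traces; i++)
			printf("segment %u: %llu ns\n",
			       (unsigned int)t->rx_trace_pos[i],
			       (unsigned long long)t->rx_trace[i]);
	}
}
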
index 77c8c6e613adf65057d3696806024fdf2cb15247..5cc64039caf71d2378d8f7609ab77331e733c19d 100644 (file)
@@ -476,12 +476,14 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
        struct rm_rdma_op *ro;
        struct rds_notifier *notifier;
        unsigned long flags;
+       unsigned int notify = 0;
 
        spin_lock_irqsave(&rm->m_rs_lock, flags);
 
+       notify = rm->rdma.op_notify | rm->data.op_notify;
        ro = &rm->rdma;
        if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
-           ro->op_active && ro->op_notify && ro->op_notifier) {
+           ro->op_active && notify && ro->op_notifier) {
                notifier = ro->op_notifier;
                rs = rm->m_rs;
                sock_hold(rds_rs_to_sk(rs));
@@ -945,6 +947,11 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
                        ret = rds_cmsg_rdma_map(rs, rm, cmsg);
                        if (!ret)
                                *allocated_mr = 1;
+                       else if (ret == -ENODEV)
+                               /* Accommodate the get_mr() case which can fail
+                                * if the connection isn't established yet.
+                                */
+                               ret = -EAGAIN;
                        break;
                case RDS_CMSG_ATOMIC_CSWP:
                case RDS_CMSG_ATOMIC_FADD:
@@ -987,6 +994,26 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
        return hash;
 }
 
+static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
+{
+       struct rds_rdma_args *args;
+       struct cmsghdr *cmsg;
+
+       for_each_cmsghdr(cmsg, msg) {
+               if (!CMSG_OK(msg, cmsg))
+                       return -EINVAL;
+
+               if (cmsg->cmsg_level != SOL_RDS)
+                       continue;
+
+               if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
+                       args = CMSG_DATA(cmsg);
+                       *rdma_bytes += args->remote_vec.bytes;
+               }
+       }
+       return 0;
+}
+
 int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 {
        struct sock *sk = sock->sk;
@@ -1001,6 +1028,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
        int nonblock = msg->msg_flags & MSG_DONTWAIT;
        long timeo = sock_sndtimeo(sk, nonblock);
        struct rds_conn_path *cpath;
+       size_t total_payload_len = payload_len, rdma_payload_len = 0;
 
        /* Mirror Linux UDP's mirroring of BSD error message compatibility */
        /* XXX: Perhaps MSG_MORE someday */
@@ -1033,6 +1061,16 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
        }
        release_sock(sk);
 
+       ret = rds_rdma_bytes(msg, &rdma_payload_len);
+       if (ret)
+               goto out;
+
+       total_payload_len += rdma_payload_len;
+       if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) {
+               ret = -EMSGSIZE;
+               goto out;
+       }
+
        if (payload_len > rds_sk_sndbuf(rs)) {
                ret = -EMSGSIZE;
                goto out;
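
Note that the limit is applied per component rather than to the sum: max_t()
compares the larger of the immediate payload and the RDMA payload against
RDS_MAX_MSG_SIZE. A worked example:

/* payload_len      = 4096     (immediate data)
 * rdma_payload_len = 1048576  (a full 1 MB RDMA descriptor)
 * max_t(size_t, 4096, 1048576) = 1048576 -> exactly at the limit, accepted;
 * one byte more of RDMA payload and sendmsg() fails with -EMSGSIZE.
 */
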
@@ -1082,8 +1120,12 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
 
        /* Parse any control messages the user may have included. */
        ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
-       if (ret)
+       if (ret) {
+               /* Trigger connection so that it's ready for the next retry */
+               if (ret == -EAGAIN)
+                       rds_conn_connect_if_down(conn);
                goto out;
+       }
 
        if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
                printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
@@ -1169,7 +1211,7 @@ out:
  * or
  *   RDS_FLAG_HB_PONG|RDS_FLAG_ACK_REQUIRED
  */
-int
+static int
 rds_send_probe(struct rds_conn_path *cp, __be16 sport,
               __be16 dport, u8 h_flags)
 {
@@ -1238,7 +1280,7 @@ rds_send_pong(struct rds_conn_path *cp, __be16 dport)
        return rds_send_probe(cp, 0, dport, 0);
 }
 
-void
+static void
 rds_send_ping(struct rds_connection *conn)
 {
        unsigned long flags;
index f74bab3ecdca69b0b59e18341a15ab8fe095b16c..67d0929c7d3d0c97ed209af9a67b4d83343c3de1 100644 (file)
@@ -79,6 +79,7 @@ bail:
  * smaller ip address, we recycle conns in RDS_CONN_ERROR on the passive side
  * by moving them to CONNECTING in this function.
  */
+static
 struct rds_tcp_connection *rds_tcp_accept_one_path(struct rds_connection *conn)
 {
        int i;
index ad4892e97f91b3fdd4928072ea6f9a9aeaf11352..e006ef8e6d404195f19e5d8b9bbf6683b504a7cc 100644 (file)
@@ -180,6 +180,9 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
                        rdsdebug("alloced tinc %p\n", tinc);
                        rds_inc_path_init(&tinc->ti_inc, cp,
                                          cp->cp_conn->c_faddr);
+                       tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] =
+                                       local_clock();
+
                        /*
                         * XXX * we might be able to use the __ variants when
                         * we've already serialized at a higher level.
@@ -204,6 +207,8 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
                                /* could be 0 for a 0 len message */
                                tc->t_tinc_data_rem =
                                        be32_to_cpu(tinc->ti_inc.i_hdr.h_len);
+                               tinc->ti_inc.i_rx_lat_trace[RDS_MSG_RX_START] =
+                                       local_clock();
                        }
                }
 
index 868f1ad0415a4ea728989908ed49858b6dfccc01..060600b03fad14bad59b83a25e9b28aee4bc7da8 100644 (file)
@@ -23,17 +23,6 @@ config RFKILL_INPUT
        depends on INPUT = y || RFKILL = INPUT
        default y if !EXPERT
 
-config RFKILL_REGULATOR
-       tristate "Generic rfkill regulator driver"
-       depends on RFKILL || !RFKILL
-       depends on REGULATOR
-       help
-          This options enable controlling radio transmitters connected to
-          voltage regulator using the regulator framework.
-
-          To compile this driver as a module, choose M here: the module will
-          be called rfkill-regulator.
-
 config RFKILL_GPIO
        tristate "GPIO RFKILL driver"
        depends on RFKILL
index 311768783f4a116843349687c99a3cf65f6c6314..87a80aded0b33b37a522be66b138697dc6dd7a61 100644 (file)
@@ -5,5 +5,4 @@
 rfkill-y                       += core.o
 rfkill-$(CONFIG_RFKILL_INPUT)  += input.o
 obj-$(CONFIG_RFKILL)           += rfkill.o
-obj-$(CONFIG_RFKILL_REGULATOR) += rfkill-regulator.o
 obj-$(CONFIG_RFKILL_GPIO)      += rfkill-gpio.o
index 884027f62783a6a975b42c0bdd5f39010df9834a..2064c3a35ef84d4c54c065e8b54d524cdb407736 100644 (file)
@@ -176,6 +176,50 @@ static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
 {
        led_trigger_unregister(&rfkill->led_trigger);
 }
+
+static struct led_trigger rfkill_any_led_trigger;
+static struct work_struct rfkill_any_work;
+
+static void rfkill_any_led_trigger_worker(struct work_struct *work)
+{
+       enum led_brightness brightness = LED_OFF;
+       struct rfkill *rfkill;
+
+       mutex_lock(&rfkill_global_mutex);
+       list_for_each_entry(rfkill, &rfkill_list, node) {
+               if (!(rfkill->state & RFKILL_BLOCK_ANY)) {
+                       brightness = LED_FULL;
+                       break;
+               }
+       }
+       mutex_unlock(&rfkill_global_mutex);
+
+       led_trigger_event(&rfkill_any_led_trigger, brightness);
+}
+
+static void rfkill_any_led_trigger_event(void)
+{
+       schedule_work(&rfkill_any_work);
+}
+
+static void rfkill_any_led_trigger_activate(struct led_classdev *led_cdev)
+{
+       rfkill_any_led_trigger_event();
+}
+
+static int rfkill_any_led_trigger_register(void)
+{
+       INIT_WORK(&rfkill_any_work, rfkill_any_led_trigger_worker);
+       rfkill_any_led_trigger.name = "rfkill-any";
+       rfkill_any_led_trigger.activate = rfkill_any_led_trigger_activate;
+       return led_trigger_register(&rfkill_any_led_trigger);
+}
+
+static void rfkill_any_led_trigger_unregister(void)
+{
+       led_trigger_unregister(&rfkill_any_led_trigger);
+       cancel_work_sync(&rfkill_any_work);
+}
 #else
 static void rfkill_led_trigger_event(struct rfkill *rfkill)
 {
@@ -189,6 +233,19 @@ static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
 static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
 {
 }
+
+static void rfkill_any_led_trigger_event(void)
+{
+}
+
+static int rfkill_any_led_trigger_register(void)
+{
+       return 0;
+}
+
+static void rfkill_any_led_trigger_unregister(void)
+{
+}
 #endif /* CONFIG_RFKILL_LEDS */
 
 static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
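
Any LED class device can follow the new aggregate trigger by name. A
hypothetical consumer (the device name is made up for illustration; requires
linux/leds.h):

static struct led_classdev radio_led = {
	.name			= "platform:blue:radio",
	.default_trigger	= "rfkill-any",	/* lit while any radio is unblocked */
};
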
@@ -297,6 +354,7 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
        spin_unlock_irqrestore(&rfkill->lock, flags);
 
        rfkill_led_trigger_event(rfkill);
+       rfkill_any_led_trigger_event();
 
        if (prev != curr)
                rfkill_event(rfkill);
@@ -477,11 +535,9 @@ bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
        spin_unlock_irqrestore(&rfkill->lock, flags);
 
        rfkill_led_trigger_event(rfkill);
+       rfkill_any_led_trigger_event();
 
-       if (!rfkill->registered)
-               return ret;
-
-       if (prev != blocked)
+       if (rfkill->registered && prev != blocked)
                schedule_work(&rfkill->uevent_work);
 
        return ret;
@@ -523,6 +579,7 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
                schedule_work(&rfkill->uevent_work);
 
        rfkill_led_trigger_event(rfkill);
+       rfkill_any_led_trigger_event();
 
        return blocked;
 }
@@ -572,6 +629,7 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
                        schedule_work(&rfkill->uevent_work);
 
                rfkill_led_trigger_event(rfkill);
+               rfkill_any_led_trigger_event();
        }
 }
 EXPORT_SYMBOL(rfkill_set_states);
@@ -988,6 +1046,7 @@ int __must_check rfkill_register(struct rfkill *rfkill)
 #endif
        }
 
+       rfkill_any_led_trigger_event();
        rfkill_send_events(rfkill, RFKILL_OP_ADD);
 
        mutex_unlock(&rfkill_global_mutex);
@@ -1020,6 +1079,7 @@ void rfkill_unregister(struct rfkill *rfkill)
        mutex_lock(&rfkill_global_mutex);
        rfkill_send_events(rfkill, RFKILL_OP_DEL);
        list_del_init(&rfkill->node);
+       rfkill_any_led_trigger_event();
        mutex_unlock(&rfkill_global_mutex);
 
        rfkill_led_trigger_unregister(rfkill);
@@ -1266,24 +1326,33 @@ static int __init rfkill_init(void)
 
        error = class_register(&rfkill_class);
        if (error)
-               goto out;
+               goto error_class;
 
        error = misc_register(&rfkill_miscdev);
-       if (error) {
-               class_unregister(&rfkill_class);
-               goto out;
-       }
+       if (error)
+               goto error_misc;
+
+       error = rfkill_any_led_trigger_register();
+       if (error)
+               goto error_led_trigger;
 
 #ifdef CONFIG_RFKILL_INPUT
        error = rfkill_handler_init();
-       if (error) {
-               misc_deregister(&rfkill_miscdev);
-               class_unregister(&rfkill_class);
-               goto out;
-       }
+       if (error)
+               goto error_input;
 #endif
 
- out:
+       return 0;
+
+#ifdef CONFIG_RFKILL_INPUT
+error_input:
+       rfkill_any_led_trigger_unregister();
+#endif
+error_led_trigger:
+       misc_deregister(&rfkill_miscdev);
+error_misc:
+       class_unregister(&rfkill_class);
+error_class:
        return error;
 }
 subsys_initcall(rfkill_init);
@@ -1293,6 +1362,7 @@ static void __exit rfkill_exit(void)
 #ifdef CONFIG_RFKILL_INPUT
        rfkill_handler_exit();
 #endif
+       rfkill_any_led_trigger_unregister();
        misc_deregister(&rfkill_miscdev);
        class_unregister(&rfkill_class);
 }
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
deleted file mode 100644 (file)
index 50cd26a..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * rfkill-regulator.c - Regulator consumer driver for rfkill
- *
- * Copyright (C) 2009  Guiming Zhuo <gmzhuo@gmail.com>
- * Copyright (C) 2011  Antonio Ospite <ospite@studenti.unina.it>
- *
- * Implementation inspired by leds-regulator driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/consumer.h>
-#include <linux/rfkill.h>
-#include <linux/rfkill-regulator.h>
-
-struct rfkill_regulator_data {
-       struct rfkill *rf_kill;
-       bool reg_enabled;
-
-       struct regulator *vcc;
-};
-
-static int rfkill_regulator_set_block(void *data, bool blocked)
-{
-       struct rfkill_regulator_data *rfkill_data = data;
-       int ret = 0;
-
-       pr_debug("%s: blocked: %d\n", __func__, blocked);
-
-       if (blocked) {
-               if (rfkill_data->reg_enabled) {
-                       regulator_disable(rfkill_data->vcc);
-                       rfkill_data->reg_enabled = false;
-               }
-       } else {
-               if (!rfkill_data->reg_enabled) {
-                       ret = regulator_enable(rfkill_data->vcc);
-                       if (!ret)
-                               rfkill_data->reg_enabled = true;
-               }
-       }
-
-       pr_debug("%s: regulator_is_enabled after set_block: %d\n", __func__,
-               regulator_is_enabled(rfkill_data->vcc));
-
-       return ret;
-}
-
-static struct rfkill_ops rfkill_regulator_ops = {
-       .set_block = rfkill_regulator_set_block,
-};
-
-static int rfkill_regulator_probe(struct platform_device *pdev)
-{
-       struct rfkill_regulator_platform_data *pdata = pdev->dev.platform_data;
-       struct rfkill_regulator_data *rfkill_data;
-       struct regulator *vcc;
-       struct rfkill *rf_kill;
-       int ret = 0;
-
-       if (pdata == NULL) {
-               dev_err(&pdev->dev, "no platform data\n");
-               return -ENODEV;
-       }
-
-       if (pdata->name == NULL || pdata->type == 0) {
-               dev_err(&pdev->dev, "invalid name or type in platform data\n");
-               return -EINVAL;
-       }
-
-       vcc = regulator_get_exclusive(&pdev->dev, "vrfkill");
-       if (IS_ERR(vcc)) {
-               dev_err(&pdev->dev, "Cannot get vcc for %s\n", pdata->name);
-               ret = PTR_ERR(vcc);
-               goto out;
-       }
-
-       rfkill_data = kzalloc(sizeof(*rfkill_data), GFP_KERNEL);
-       if (rfkill_data == NULL) {
-               ret = -ENOMEM;
-               goto err_data_alloc;
-       }
-
-       rf_kill = rfkill_alloc(pdata->name, &pdev->dev,
-                               pdata->type,
-                               &rfkill_regulator_ops, rfkill_data);
-       if (rf_kill == NULL) {
-               ret = -ENOMEM;
-               goto err_rfkill_alloc;
-       }
-
-       if (regulator_is_enabled(vcc)) {
-               dev_dbg(&pdev->dev, "Regulator already enabled\n");
-               rfkill_data->reg_enabled = true;
-       }
-       rfkill_data->vcc = vcc;
-       rfkill_data->rf_kill = rf_kill;
-
-       ret = rfkill_register(rf_kill);
-       if (ret) {
-               dev_err(&pdev->dev, "Cannot register rfkill device\n");
-               goto err_rfkill_register;
-       }
-
-       platform_set_drvdata(pdev, rfkill_data);
-       dev_info(&pdev->dev, "%s initialized\n", pdata->name);
-
-       return 0;
-
-err_rfkill_register:
-       rfkill_destroy(rf_kill);
-err_rfkill_alloc:
-       kfree(rfkill_data);
-err_data_alloc:
-       regulator_put(vcc);
-out:
-       return ret;
-}
-
-static int rfkill_regulator_remove(struct platform_device *pdev)
-{
-       struct rfkill_regulator_data *rfkill_data = platform_get_drvdata(pdev);
-       struct rfkill *rf_kill = rfkill_data->rf_kill;
-
-       rfkill_unregister(rf_kill);
-       rfkill_destroy(rf_kill);
-       regulator_put(rfkill_data->vcc);
-       kfree(rfkill_data);
-
-       return 0;
-}
-
-static struct platform_driver rfkill_regulator_driver = {
-       .probe = rfkill_regulator_probe,
-       .remove = rfkill_regulator_remove,
-       .driver = {
-               .name = "rfkill-regulator",
-       },
-};
-
-module_platform_driver(rfkill_regulator_driver);
-
-MODULE_AUTHOR("Guiming Zhuo <gmzhuo@gmail.com>");
-MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
-MODULE_DESCRIPTION("Regulator consumer driver for rfkill");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:rfkill-regulator");
index 8fc6ea347182cd7423291f89b5b8cacaaf69cc95..b9da4d6b914f9ddb7b7c9b52167fcbce57c9e884 100644 (file)
@@ -2,7 +2,9 @@
 # Makefile for Linux kernel RxRPC
 #
 
-af-rxrpc-y := \
+obj-$(CONFIG_AF_RXRPC) += rxrpc.o
+
+rxrpc-y := \
        af_rxrpc.o \
        call_accept.o \
        call_event.o \
@@ -26,8 +28,6 @@ af-rxrpc-y := \
        skbuff.o \
        utils.o
 
-af-rxrpc-$(CONFIG_PROC_FS) += proc.o
-af-rxrpc-$(CONFIG_RXKAD) += rxkad.o
-af-rxrpc-$(CONFIG_SYSCTL) += sysctl.o
-
-obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o
+rxrpc-$(CONFIG_PROC_FS) += proc.o
+rxrpc-$(CONFIG_RXKAD) += rxkad.o
+rxrpc-$(CONFIG_SYSCTL) += sysctl.o
index 5f63f6dcaabb6422306895f01ff7dc3f0de8ee60..199b46e93e64ee7786e8a8d441ba5eb5b02bf31f 100644 (file)
@@ -224,6 +224,14 @@ static int rxrpc_listen(struct socket *sock, int backlog)
                else
                        sk->sk_max_ack_backlog = old;
                break;
+       case RXRPC_SERVER_LISTENING:
+               if (backlog == 0) {
+                       rx->sk.sk_state = RXRPC_SERVER_LISTEN_DISABLED;
+                       sk->sk_max_ack_backlog = 0;
+                       rxrpc_discard_prealloc(rx);
+                       ret = 0;
+                       break;
+               }
        default:
                ret = -EBUSY;
                break;
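
From userspace the new state is reached with an ordinary listen() call and a
zero backlog; per the default case above, any later listen() on the socket
then fails with -EBUSY. A minimal sketch:

#include <stdio.h>
#include <sys/socket.h>

static void rxrpc_stop_listening(int fd)
{
	/* moves the socket to RXRPC_SERVER_LISTEN_DISABLED and discards
	 * the preallocated calls; new incoming calls are aborted */
	if (listen(fd, 0) == -1)
		perror("listen");
}
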
index f60e355765269e1192f6b464b7c735a26576d70c..12be432be9b2feb5dc9a85716c676888599b5624 100644 (file)
@@ -60,6 +60,7 @@ enum {
        RXRPC_CLIENT_BOUND,             /* client local address bound */
        RXRPC_SERVER_BOUND,             /* server local address bound */
        RXRPC_SERVER_LISTENING,         /* server listening for connections */
+       RXRPC_SERVER_LISTEN_DISABLED,   /* server listening disabled */
        RXRPC_CLOSE,                    /* socket is being closed */
 };
 
@@ -593,200 +594,6 @@ struct rxrpc_ack_summary {
        u8                      cumulative_acks;
 };
 
-enum rxrpc_skb_trace {
-       rxrpc_skb_rx_cleaned,
-       rxrpc_skb_rx_freed,
-       rxrpc_skb_rx_got,
-       rxrpc_skb_rx_lost,
-       rxrpc_skb_rx_received,
-       rxrpc_skb_rx_rotated,
-       rxrpc_skb_rx_purged,
-       rxrpc_skb_rx_seen,
-       rxrpc_skb_tx_cleaned,
-       rxrpc_skb_tx_freed,
-       rxrpc_skb_tx_got,
-       rxrpc_skb_tx_new,
-       rxrpc_skb_tx_rotated,
-       rxrpc_skb_tx_seen,
-       rxrpc_skb__nr_trace
-};
-
-extern const char rxrpc_skb_traces[rxrpc_skb__nr_trace][7];
-
-enum rxrpc_conn_trace {
-       rxrpc_conn_new_client,
-       rxrpc_conn_new_service,
-       rxrpc_conn_queued,
-       rxrpc_conn_seen,
-       rxrpc_conn_got,
-       rxrpc_conn_put_client,
-       rxrpc_conn_put_service,
-       rxrpc_conn__nr_trace
-};
-
-extern const char rxrpc_conn_traces[rxrpc_conn__nr_trace][4];
-
-enum rxrpc_client_trace {
-       rxrpc_client_activate_chans,
-       rxrpc_client_alloc,
-       rxrpc_client_chan_activate,
-       rxrpc_client_chan_disconnect,
-       rxrpc_client_chan_pass,
-       rxrpc_client_chan_unstarted,
-       rxrpc_client_cleanup,
-       rxrpc_client_count,
-       rxrpc_client_discard,
-       rxrpc_client_duplicate,
-       rxrpc_client_exposed,
-       rxrpc_client_replace,
-       rxrpc_client_to_active,
-       rxrpc_client_to_culled,
-       rxrpc_client_to_idle,
-       rxrpc_client_to_inactive,
-       rxrpc_client_to_waiting,
-       rxrpc_client_uncount,
-       rxrpc_client__nr_trace
-};
-
-extern const char rxrpc_client_traces[rxrpc_client__nr_trace][7];
-extern const char rxrpc_conn_cache_states[RXRPC_CONN__NR_CACHE_STATES][5];
-
-enum rxrpc_call_trace {
-       rxrpc_call_new_client,
-       rxrpc_call_new_service,
-       rxrpc_call_queued,
-       rxrpc_call_queued_ref,
-       rxrpc_call_seen,
-       rxrpc_call_connected,
-       rxrpc_call_release,
-       rxrpc_call_got,
-       rxrpc_call_got_userid,
-       rxrpc_call_got_kernel,
-       rxrpc_call_put,
-       rxrpc_call_put_userid,
-       rxrpc_call_put_kernel,
-       rxrpc_call_put_noqueue,
-       rxrpc_call_error,
-       rxrpc_call__nr_trace
-};
-
-extern const char rxrpc_call_traces[rxrpc_call__nr_trace][4];
-
-enum rxrpc_transmit_trace {
-       rxrpc_transmit_wait,
-       rxrpc_transmit_queue,
-       rxrpc_transmit_queue_last,
-       rxrpc_transmit_rotate,
-       rxrpc_transmit_rotate_last,
-       rxrpc_transmit_await_reply,
-       rxrpc_transmit_end,
-       rxrpc_transmit__nr_trace
-};
-
-extern const char rxrpc_transmit_traces[rxrpc_transmit__nr_trace][4];
-
-enum rxrpc_receive_trace {
-       rxrpc_receive_incoming,
-       rxrpc_receive_queue,
-       rxrpc_receive_queue_last,
-       rxrpc_receive_front,
-       rxrpc_receive_rotate,
-       rxrpc_receive_end,
-       rxrpc_receive__nr_trace
-};
-
-extern const char rxrpc_receive_traces[rxrpc_receive__nr_trace][4];
-
-enum rxrpc_recvmsg_trace {
-       rxrpc_recvmsg_enter,
-       rxrpc_recvmsg_wait,
-       rxrpc_recvmsg_dequeue,
-       rxrpc_recvmsg_hole,
-       rxrpc_recvmsg_next,
-       rxrpc_recvmsg_cont,
-       rxrpc_recvmsg_full,
-       rxrpc_recvmsg_data_return,
-       rxrpc_recvmsg_terminal,
-       rxrpc_recvmsg_to_be_accepted,
-       rxrpc_recvmsg_return,
-       rxrpc_recvmsg__nr_trace
-};
-
-extern const char rxrpc_recvmsg_traces[rxrpc_recvmsg__nr_trace][5];
-
-enum rxrpc_rtt_tx_trace {
-       rxrpc_rtt_tx_ping,
-       rxrpc_rtt_tx_data,
-       rxrpc_rtt_tx__nr_trace
-};
-
-extern const char rxrpc_rtt_tx_traces[rxrpc_rtt_tx__nr_trace][5];
-
-enum rxrpc_rtt_rx_trace {
-       rxrpc_rtt_rx_ping_response,
-       rxrpc_rtt_rx_requested_ack,
-       rxrpc_rtt_rx__nr_trace
-};
-
-extern const char rxrpc_rtt_rx_traces[rxrpc_rtt_rx__nr_trace][5];
-
-enum rxrpc_timer_trace {
-       rxrpc_timer_begin,
-       rxrpc_timer_init_for_reply,
-       rxrpc_timer_init_for_send_reply,
-       rxrpc_timer_expired,
-       rxrpc_timer_set_for_ack,
-       rxrpc_timer_set_for_ping,
-       rxrpc_timer_set_for_resend,
-       rxrpc_timer_set_for_send,
-       rxrpc_timer__nr_trace
-};
-
-extern const char rxrpc_timer_traces[rxrpc_timer__nr_trace][8];
-
-enum rxrpc_propose_ack_trace {
-       rxrpc_propose_ack_client_tx_end,
-       rxrpc_propose_ack_input_data,
-       rxrpc_propose_ack_ping_for_lost_ack,
-       rxrpc_propose_ack_ping_for_lost_reply,
-       rxrpc_propose_ack_ping_for_params,
-       rxrpc_propose_ack_processing_op,
-       rxrpc_propose_ack_respond_to_ack,
-       rxrpc_propose_ack_respond_to_ping,
-       rxrpc_propose_ack_retry_tx,
-       rxrpc_propose_ack_rotate_rx,
-       rxrpc_propose_ack_terminal_ack,
-       rxrpc_propose_ack__nr_trace
-};
-
-enum rxrpc_propose_ack_outcome {
-       rxrpc_propose_ack_use,
-       rxrpc_propose_ack_update,
-       rxrpc_propose_ack_subsume,
-       rxrpc_propose_ack__nr_outcomes
-};
-
-extern const char rxrpc_propose_ack_traces[rxrpc_propose_ack__nr_trace][8];
-extern const char *const rxrpc_propose_ack_outcomes[rxrpc_propose_ack__nr_outcomes];
-
-enum rxrpc_congest_change {
-       rxrpc_cong_begin_retransmission,
-       rxrpc_cong_cleared_nacks,
-       rxrpc_cong_new_low_nack,
-       rxrpc_cong_no_change,
-       rxrpc_cong_progress,
-       rxrpc_cong_retransmit_again,
-       rxrpc_cong_rtt_window_end,
-       rxrpc_cong_saw_nack,
-       rxrpc_congest__nr_change
-};
-
-extern const char rxrpc_congest_modes[NR__RXRPC_CONGEST_MODES][10];
-extern const char rxrpc_congest_changes[rxrpc_congest__nr_change][9];
-
-extern const char *const rxrpc_pkts[];
-extern const char rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4];
-
 #include <trace/events/rxrpc.h>
 
 /*
index 832d854c2d5c409faa0e487ea9b3bac709d4031b..7c4c64ab8da2e241d63ee16a0ae3521c98dc2e5c 100644 (file)
@@ -349,7 +349,8 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
 
 found_service:
        spin_lock(&rx->incoming_lock);
-       if (rx->sk.sk_state == RXRPC_CLOSE) {
+       if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
+           rx->sk.sk_state == RXRPC_CLOSE) {
                trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
                                  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
                skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
index 1ed18d8c9c9fa31ac46028089184519624625a51..8b94db3c9b2ecb5f093798eeae0e8630ac0114ab 100644 (file)
@@ -43,24 +43,6 @@ const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
        [RXRPC_CALL_NETWORK_ERROR]              = "NetError",
 };
 
-const char rxrpc_call_traces[rxrpc_call__nr_trace][4] = {
-       [rxrpc_call_new_client]         = "NWc",
-       [rxrpc_call_new_service]        = "NWs",
-       [rxrpc_call_queued]             = "QUE",
-       [rxrpc_call_queued_ref]         = "QUR",
-       [rxrpc_call_connected]          = "CON",
-       [rxrpc_call_release]            = "RLS",
-       [rxrpc_call_seen]               = "SEE",
-       [rxrpc_call_got]                = "GOT",
-       [rxrpc_call_got_userid]         = "Gus",
-       [rxrpc_call_got_kernel]         = "Gke",
-       [rxrpc_call_put]                = "PUT",
-       [rxrpc_call_put_userid]         = "Pus",
-       [rxrpc_call_put_kernel]         = "Pke",
-       [rxrpc_call_put_noqueue]        = "PNQ",
-       [rxrpc_call_error]              = "*E*",
-};
-
 struct kmem_cache *rxrpc_call_jar;
 LIST_HEAD(rxrpc_calls);
 DEFINE_RWLOCK(rxrpc_call_lock);
index 6cbcdcc298534a4704f6383c3486ce6b82f432c9..40a1ef2adeb45c18e55eabaf066264061ba7133b 100644 (file)
@@ -105,14 +105,6 @@ static void rxrpc_discard_expired_client_conns(struct work_struct *);
 static DECLARE_DELAYED_WORK(rxrpc_client_conn_reap,
                            rxrpc_discard_expired_client_conns);
 
-const char rxrpc_conn_cache_states[RXRPC_CONN__NR_CACHE_STATES][5] = {
-       [RXRPC_CONN_CLIENT_INACTIVE]    = "Inac",
-       [RXRPC_CONN_CLIENT_WAITING]     = "Wait",
-       [RXRPC_CONN_CLIENT_ACTIVE]      = "Actv",
-       [RXRPC_CONN_CLIENT_CULLED]      = "Cull",
-       [RXRPC_CONN_CLIENT_IDLE]        = "Idle",
-};
-
 /*
  * Get a connection ID and epoch for a client connection from the global pool.
  * The connection struct pointer is then recorded in the idr radix tree.  The
index e1e83af478666be9e5f9373b64f0dcfab748e24d..b0ecb770fdcebaaa1dd5258e5d9e2d7c937e6edf 100644 (file)
@@ -173,6 +173,7 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
                /* Save the result of the call so that we can repeat it if necessary
                 * through the channel, whilst disposing of the actual call record.
                 */
+               trace_rxrpc_disconnect_call(call);
                chan->last_service_id = call->service_id;
                if (call->abort_code) {
                        chan->last_abort = call->abort_code;
index 1d87b5453ef7802a7f5ca6e7c2cbcff3be31159c..78ec33477adf6c516fc26fd3c4991280164a6666 100644 (file)
@@ -481,6 +481,7 @@ next_subpacket:
                        return rxrpc_proto_abort("LSA", call, seq);
        }
 
+       trace_rxrpc_rx_data(call, seq, serial, flags, annotation);
        if (before_eq(seq, hard_ack)) {
                ack = RXRPC_ACK_DUPLICATE;
                ack_serial = serial;
@@ -765,16 +766,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
        summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
                              buf.ack.reason : RXRPC_ACK__INVALID);
 
-       trace_rxrpc_rx_ack(call, first_soft_ack, summary.ack_reason, nr_acks);
-
-       _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
-              sp->hdr.serial,
-              ntohs(buf.ack.maxSkew),
-              first_soft_ack,
-              ntohl(buf.ack.previousPacket),
-              acked_serial,
-              rxrpc_ack_names[summary.ack_reason],
-              buf.ack.nAcks);
+       trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
+                          first_soft_ack, ntohl(buf.ack.previousPacket),
+                          summary.ack_reason, nr_acks);
 
        if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
                rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
@@ -931,7 +925,6 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
                break;
 
        default:
-               _proto("Rx %s %%%u", rxrpc_pkts[sp->hdr.type], sp->hdr.serial);
                break;
        }
 
@@ -961,6 +954,7 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
                break;
        }
 
+       trace_rxrpc_improper_term(call);
        __rxrpc_disconnect_call(conn, call);
        rxrpc_notify_socket(call);
 }
index 6dee55fad2d33a2df1a1cbf9eaf035b2c8e861b0..1a2d4b1120649ad4055b138ec556dad854bf37a6 100644 (file)
@@ -77,12 +77,6 @@ unsigned int rxrpc_rx_jumbo_max = 4;
  */
 unsigned int rxrpc_resend_timeout = 4 * 1000;
 
-const char *const rxrpc_pkts[] = {
-       "?00",
-       "DATA", "ACK", "BUSY", "ABORT", "ACKALL", "CHALL", "RESP", "DEBUG",
-       "?09", "?10", "?11", "?12", "VERSION", "?14", "?15"
-};
-
 const s8 rxrpc_ack_priority[] = {
        [0]                             = 0,
        [RXRPC_ACK_DELAY]               = 1,
@@ -94,148 +88,3 @@ const s8 rxrpc_ack_priority[] = {
        [RXRPC_ACK_NOSPACE]             = 7,
        [RXRPC_ACK_PING_RESPONSE]       = 8,
 };
-
-const char rxrpc_ack_names[RXRPC_ACK__INVALID + 1][4] = {
-       "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY",
-       "IDL", "-?-"
-};
-
-const char rxrpc_skb_traces[rxrpc_skb__nr_trace][7] = {
-       [rxrpc_skb_rx_cleaned]          = "Rx CLN",
-       [rxrpc_skb_rx_freed]            = "Rx FRE",
-       [rxrpc_skb_rx_got]              = "Rx GOT",
-       [rxrpc_skb_rx_lost]             = "Rx *L*",
-       [rxrpc_skb_rx_received]         = "Rx RCV",
-       [rxrpc_skb_rx_purged]           = "Rx PUR",
-       [rxrpc_skb_rx_rotated]          = "Rx ROT",
-       [rxrpc_skb_rx_seen]             = "Rx SEE",
-       [rxrpc_skb_tx_cleaned]          = "Tx CLN",
-       [rxrpc_skb_tx_freed]            = "Tx FRE",
-       [rxrpc_skb_tx_got]              = "Tx GOT",
-       [rxrpc_skb_tx_new]              = "Tx NEW",
-       [rxrpc_skb_tx_rotated]          = "Tx ROT",
-       [rxrpc_skb_tx_seen]             = "Tx SEE",
-};
-
-const char rxrpc_conn_traces[rxrpc_conn__nr_trace][4] = {
-       [rxrpc_conn_new_client]         = "NWc",
-       [rxrpc_conn_new_service]        = "NWs",
-       [rxrpc_conn_queued]             = "QUE",
-       [rxrpc_conn_seen]               = "SEE",
-       [rxrpc_conn_got]                = "GOT",
-       [rxrpc_conn_put_client]         = "PTc",
-       [rxrpc_conn_put_service]        = "PTs",
-};
-
-const char rxrpc_client_traces[rxrpc_client__nr_trace][7] = {
-       [rxrpc_client_activate_chans]   = "Activa",
-       [rxrpc_client_alloc]            = "Alloc ",
-       [rxrpc_client_chan_activate]    = "ChActv",
-       [rxrpc_client_chan_disconnect]  = "ChDisc",
-       [rxrpc_client_chan_pass]        = "ChPass",
-       [rxrpc_client_chan_unstarted]   = "ChUnst",
-       [rxrpc_client_cleanup]          = "Clean ",
-       [rxrpc_client_count]            = "Count ",
-       [rxrpc_client_discard]          = "Discar",
-       [rxrpc_client_duplicate]        = "Duplic",
-       [rxrpc_client_exposed]          = "Expose",
-       [rxrpc_client_replace]          = "Replac",
-       [rxrpc_client_to_active]        = "->Actv",
-       [rxrpc_client_to_culled]        = "->Cull",
-       [rxrpc_client_to_idle]          = "->Idle",
-       [rxrpc_client_to_inactive]      = "->Inac",
-       [rxrpc_client_to_waiting]       = "->Wait",
-       [rxrpc_client_uncount]          = "Uncoun",
-};
-
-const char rxrpc_transmit_traces[rxrpc_transmit__nr_trace][4] = {
-       [rxrpc_transmit_wait]           = "WAI",
-       [rxrpc_transmit_queue]          = "QUE",
-       [rxrpc_transmit_queue_last]     = "QLS",
-       [rxrpc_transmit_rotate]         = "ROT",
-       [rxrpc_transmit_rotate_last]    = "RLS",
-       [rxrpc_transmit_await_reply]    = "AWR",
-       [rxrpc_transmit_end]            = "END",
-};
-
-const char rxrpc_receive_traces[rxrpc_receive__nr_trace][4] = {
-       [rxrpc_receive_incoming]        = "INC",
-       [rxrpc_receive_queue]           = "QUE",
-       [rxrpc_receive_queue_last]      = "QLS",
-       [rxrpc_receive_front]           = "FRN",
-       [rxrpc_receive_rotate]          = "ROT",
-       [rxrpc_receive_end]             = "END",
-};
-
-const char rxrpc_recvmsg_traces[rxrpc_recvmsg__nr_trace][5] = {
-       [rxrpc_recvmsg_enter]           = "ENTR",
-       [rxrpc_recvmsg_wait]            = "WAIT",
-       [rxrpc_recvmsg_dequeue]         = "DEQU",
-       [rxrpc_recvmsg_hole]            = "HOLE",
-       [rxrpc_recvmsg_next]            = "NEXT",
-       [rxrpc_recvmsg_cont]            = "CONT",
-       [rxrpc_recvmsg_full]            = "FULL",
-       [rxrpc_recvmsg_data_return]     = "DATA",
-       [rxrpc_recvmsg_terminal]        = "TERM",
-       [rxrpc_recvmsg_to_be_accepted]  = "TBAC",
-       [rxrpc_recvmsg_return]          = "RETN",
-};
-
-const char rxrpc_rtt_tx_traces[rxrpc_rtt_tx__nr_trace][5] = {
-       [rxrpc_rtt_tx_ping]             = "PING",
-       [rxrpc_rtt_tx_data]             = "DATA",
-};
-
-const char rxrpc_rtt_rx_traces[rxrpc_rtt_rx__nr_trace][5] = {
-       [rxrpc_rtt_rx_ping_response]    = "PONG",
-       [rxrpc_rtt_rx_requested_ack]    = "RACK",
-};
-
-const char rxrpc_timer_traces[rxrpc_timer__nr_trace][8] = {
-       [rxrpc_timer_begin]                     = "Begin ",
-       [rxrpc_timer_expired]                   = "*EXPR*",
-       [rxrpc_timer_init_for_reply]            = "IniRpl",
-       [rxrpc_timer_init_for_send_reply]       = "SndRpl",
-       [rxrpc_timer_set_for_ack]               = "SetAck",
-       [rxrpc_timer_set_for_ping]              = "SetPng",
-       [rxrpc_timer_set_for_send]              = "SetTx ",
-       [rxrpc_timer_set_for_resend]            = "SetRTx",
-};
-
-const char rxrpc_propose_ack_traces[rxrpc_propose_ack__nr_trace][8] = {
-       [rxrpc_propose_ack_client_tx_end]       = "ClTxEnd",
-       [rxrpc_propose_ack_input_data]          = "DataIn ",
-       [rxrpc_propose_ack_ping_for_lost_ack]   = "LostAck",
-       [rxrpc_propose_ack_ping_for_lost_reply] = "LostRpl",
-       [rxrpc_propose_ack_ping_for_params]     = "Params ",
-       [rxrpc_propose_ack_processing_op]       = "ProcOp ",
-       [rxrpc_propose_ack_respond_to_ack]      = "Rsp2Ack",
-       [rxrpc_propose_ack_respond_to_ping]     = "Rsp2Png",
-       [rxrpc_propose_ack_retry_tx]            = "RetryTx",
-       [rxrpc_propose_ack_rotate_rx]           = "RxAck  ",
-       [rxrpc_propose_ack_terminal_ack]        = "ClTerm ",
-};
-
-const char *const rxrpc_propose_ack_outcomes[rxrpc_propose_ack__nr_outcomes] = {
-       [rxrpc_propose_ack_use]                 = "",
-       [rxrpc_propose_ack_update]              = " Update",
-       [rxrpc_propose_ack_subsume]             = " Subsume",
-};
-
-const char rxrpc_congest_modes[NR__RXRPC_CONGEST_MODES][10] = {
-       [RXRPC_CALL_SLOW_START]         = "SlowStart",
-       [RXRPC_CALL_CONGEST_AVOIDANCE]  = "CongAvoid",
-       [RXRPC_CALL_PACKET_LOSS]        = "PktLoss  ",
-       [RXRPC_CALL_FAST_RETRANSMIT]    = "FastReTx ",
-};
-
-const char rxrpc_congest_changes[rxrpc_congest__nr_change][9] = {
-       [rxrpc_cong_begin_retransmission]       = " Retrans",
-       [rxrpc_cong_cleared_nacks]              = " Cleared",
-       [rxrpc_cong_new_low_nack]               = " NewLowN",
-       [rxrpc_cong_no_change]                  = "",
-       [rxrpc_cong_progress]                   = " Progres",
-       [rxrpc_cong_retransmit_again]           = " ReTxAgn",
-       [rxrpc_cong_rtt_window_end]             = " RttWinE",
-       [rxrpc_cong_saw_nack]                   = " SawNack",
-};
index 65cd980767fa9b8a9f1475d0a5433d44c960f30d..b9bcfbfb095c5f0fdb4e939c9873ff8a2f7fa667 100644 (file)
@@ -52,6 +52,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
        struct rxrpc_sock *rx;
        struct rxrpc_peer *peer;
        struct rxrpc_call *call;
+       rxrpc_seq_t tx_hard_ack, rx_hard_ack;
        char lbuff[50], rbuff[50];
 
        if (v == &rxrpc_calls) {
@@ -82,9 +83,11 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
        else
                strcpy(rbuff, "no_connection");
 
+       tx_hard_ack = READ_ONCE(call->tx_hard_ack);
+       rx_hard_ack = READ_ONCE(call->rx_hard_ack);
        seq_printf(seq,
                   "UDP   %-47.47s %-47.47s %4x %08x %08x %s %3u"
-                  " %-8.8s %08x %lx\n",
+                  " %-8.8s %08x %lx %08x %02x %08x %02x\n",
                   lbuff,
                   rbuff,
                   call->service_id,
@@ -94,7 +97,9 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
                   atomic_read(&call->usage),
                   rxrpc_call_states[call->state],
                   call->abort_code,
-                  call->user_call_ID);
+                  call->user_call_ID,
+                  tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack,
+                  rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack);
 
        return 0;
 }
index b214a4d4a64137923d6736aec9b0f7ce45e92142..0a6ef217aa8ada693f570ae03e9bede1e261e687 100644 (file)
@@ -376,7 +376,7 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg,
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;
 
-               len = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
+               len = cmsg->cmsg_len - sizeof(struct cmsghdr);
                _debug("CMSG %d, %d, %d",
                       cmsg->cmsg_level, cmsg->cmsg_type, len);
 
index 87956a768d1b7f829876ea64614d3a2ab8efe6ea..403790cce7d2324054aa48b49c70f7d4deff8a2a 100644 (file)
@@ -650,6 +650,18 @@ config NET_ACT_MIRRED
          To compile this code as a module, choose M here: the
          module will be called act_mirred.
 
+config NET_ACT_SAMPLE
+        tristate "Traffic Sampling"
+        depends on NET_CLS_ACT
+        select PSAMPLE
+        ---help---
+         Say Y here to allow packet sampling tc action. The packet sample
+         action consists of statistically choosing packets and sampling
+         them using the psample module.
+
+         To compile this code as a module, choose M here: the
+         module will be called act_sample.
+
 config NET_ACT_IPT
         tristate "IPtables targets"
         depends on NET_CLS_ACT && NETFILTER && IP_NF_IPTABLES
@@ -707,6 +719,7 @@ config NET_ACT_SKBEDIT
 config NET_ACT_CSUM
         tristate "Checksum Updating"
         depends on NET_CLS_ACT && INET
+        select LIBCRC32C
         ---help---
          Say Y here to update some common checksum after some direct
          packet alterations.
@@ -763,6 +776,7 @@ config NET_ACT_SKBMOD
 config NET_ACT_IFE
         tristate "Inter-FE action based on IETF ForCES InterFE LFB"
         depends on NET_CLS_ACT
+        select NET_IFE
         ---help---
          Say Y here to allow for sourcing and terminating metadata
          For details refer to netdev01 paper:
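
The new NET_ACT_SAMPLE entry wires the sample action into Kconfig and pulls in the psample module it reports through; the action itself arrives as net/sched/act_sample.c later in this diff. It is configured with a sample rate, a psample group number and an optional truncation size (TCA_SAMPLE_RATE, TCA_SAMPLE_PSAMPLE_GROUP, TCA_SAMPLE_TRUNC_SIZE), where the rate means one sampled packet per 'rate' packets on average. A sketch of that 1-in-N test, with rand() standing in for the kernel's prandom_u32():

    #include <stdbool.h>
    #include <stdlib.h>

    /* Sample when a pseudo-random draw modulo the rate hits zero:
     * on average one packet in every 'rate' packets is picked. */
    static bool should_sample(unsigned int rate)
    {
            return rate && ((unsigned int)rand() % rate == 0);
    }
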
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 4bdda3634e0b9eb463ff403de440fcd6d881d249..7b915d226de77ab9991997a214bc1716cccf99c9 100644 (file)
@@ -10,6 +10,7 @@ obj-$(CONFIG_NET_CLS_ACT)     += act_api.o
 obj-$(CONFIG_NET_ACT_POLICE)   += act_police.o
 obj-$(CONFIG_NET_ACT_GACT)     += act_gact.o
 obj-$(CONFIG_NET_ACT_MIRRED)   += act_mirred.o
+obj-$(CONFIG_NET_ACT_SAMPLE)   += act_sample.o
 obj-$(CONFIG_NET_ACT_IPT)      += act_ipt.o
 obj-$(CONFIG_NET_ACT_NAT)      += act_nat.o
 obj-$(CONFIG_NET_ACT_PEDIT)    += act_pedit.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index e10456ef6f7a43c1b1a3c153012805015c51e9f5..f219ff325ed4b816866e46a66c97c6f7724ec81f 100644 (file)
@@ -24,6 +24,7 @@
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <net/sch_generic.h>
+#include <net/pkt_cls.h>
 #include <net/act_api.h>
 #include <net/netlink.h>
 
@@ -33,6 +34,12 @@ static void free_tcf(struct rcu_head *head)
 
        free_percpu(p->cpu_bstats);
        free_percpu(p->cpu_qstats);
+
+       if (p->act_cookie) {
+               kfree(p->act_cookie->data);
+               kfree(p->act_cookie);
+       }
+
        kfree(p);
 }
 
@@ -426,11 +433,9 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
 {
        int ret = -1, i;
 
-       if (skb->tc_verd & TC_NCLS) {
-               skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
-               ret = TC_ACT_OK;
-               goto exec_done;
-       }
+       if (skb_skip_tc_classify(skb))
+               return TC_ACT_OK;
+
        for (i = 0; i < nr_actions; i++) {
                const struct tc_action *a = actions[i];
 
@@ -439,9 +444,8 @@ repeat:
                if (ret == TC_ACT_REPEAT)
                        goto repeat;    /* we need a ttl - JHS */
                if (ret != TC_ACT_PIPE)
-                       goto exec_done;
+                       break;
        }
-exec_done:
        return ret;
 }
 EXPORT_SYMBOL(tcf_action_exec);
@@ -478,6 +482,12 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
                goto nla_put_failure;
        if (tcf_action_copy_stats(skb, a, 0))
                goto nla_put_failure;
+       if (a->act_cookie) {
+               if (nla_put(skb, TCA_ACT_COOKIE, a->act_cookie->len,
+                           a->act_cookie->data))
+                       goto nla_put_failure;
+       }
+
        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
@@ -519,6 +529,22 @@ errout:
        return err;
 }
 
+static int nla_memdup_cookie(struct tc_action *a, struct nlattr **tb)
+{
+       a->act_cookie = kzalloc(sizeof(*a->act_cookie), GFP_KERNEL);
+       if (!a->act_cookie)
+               return -ENOMEM;
+
+       a->act_cookie->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
+       if (!a->act_cookie->data) {
+               kfree(a->act_cookie);
+               return -ENOMEM;
+       }
+       a->act_cookie->len = nla_len(tb[TCA_ACT_COOKIE]);
+
+       return 0;
+}
+
 struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
                                    struct nlattr *est, char *name, int ovr,
                                    int bind)
@@ -578,6 +604,22 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
        if (err < 0)
                goto err_mod;
 
+       if (tb[TCA_ACT_COOKIE]) {
+               int cklen = nla_len(tb[TCA_ACT_COOKIE]);
+
+               if (cklen > TC_COOKIE_MAX_SIZE) {
+                       err = -EINVAL;
+                       tcf_hash_release(a, bind);
+                       goto err_mod;
+               }
+
+               err = nla_memdup_cookie(a, tb);
+               if (err < 0) {
+                       tcf_hash_release(a, bind);
+                       goto err_mod;
+               }
+       }
+
        /* module count goes up only when brand new policy is created
         * if it exists and is only bound to in a_o->init() then
         * ACT_P_CREATED is not returned (a zero is).
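
The act_api.c hunks above introduce an opaque per-action cookie: userspace may attach up to TC_COOKIE_MAX_SIZE bytes via TCA_ACT_COOKIE, the kernel copies the blob on install (nla_memdup_cookie), echoes it back verbatim in every dump and frees it together with the action in free_tcf(). The kernel assigns no meaning to the contents. A userspace analogue of the copy-on-install step, for illustration only:

    #include <stdlib.h>
    #include <string.h>

    struct tc_cookie { void *data; size_t len; };

    /* Duplicate caller-supplied bytes, as nla_memdup_cookie() does
     * with the TCA_ACT_COOKIE attribute. */
    static struct tc_cookie *cookie_dup(const void *data, size_t len)
    {
            struct tc_cookie *c = calloc(1, sizeof(*c));

            if (!c)
                    return NULL;
            c->data = malloc(len);
            if (!c->data) {
                    free(c);
                    return NULL;
            }
            memcpy(c->data, data, len);
            c->len = len;
            return c;
    }
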
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index a0edd80a44db4ad09862ad8335340180839e83d3..e978ccd4402cbc68ba1c46e20909a047978df1c2 100644 (file)
@@ -30,6 +30,7 @@
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <net/ip6_checksum.h>
+#include <net/sctp/checksum.h>
 
 #include <net/act_api.h>
 
@@ -322,6 +323,25 @@ ignore_obscure_skb:
        return 1;
 }
 
+static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
+                        unsigned int ipl)
+{
+       struct sctphdr *sctph;
+
+       if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_SCTP)
+               return 1;
+
+       sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
+       if (!sctph)
+               return 0;
+
+       sctph->checksum = sctp_compute_cksum(skb,
+                                            skb_network_offset(skb) + ihl);
+       skb->ip_summed = CHECKSUM_NONE;
+
+       return 1;
+}
+
 static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
 {
        const struct iphdr *iph;
@@ -365,6 +385,11 @@ static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
                                               ntohs(iph->tot_len), 1))
                                goto fail;
                break;
+       case IPPROTO_SCTP:
+               if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
+                   !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
+                       goto fail;
+               break;
        }
 
        if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
@@ -481,6 +506,11 @@ static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
                                                       pl + sizeof(*ip6h), 1))
                                        goto fail;
                        goto done;
+               case IPPROTO_SCTP:
+                       if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
+                           !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
+                               goto fail;
+                       goto done;
                default:
                        goto ignore_skb;
                }
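
act_csum learns to refresh SCTP checksums here, which is what the earlier `select LIBCRC32C` in Kconfig is for: unlike the 16-bit ones'-complement sums of TCP and UDP, SCTP uses a CRC32c over the whole SCTP packet (RFC 4960), and sctp_compute_cksum() zeroes the checksum field, runs the CRC over the packet and restores the field. GSO SCTP packets are skipped up front and left to the segmentation path. A kernel-style sketch of the underlying formula for a linear buffer with the checksum field zeroed (not independently buildable outside the kernel tree):

    #include <linux/crc32c.h>
    #include <asm/byteorder.h>

    static __le32 sctp_crc32c_sketch(const void *pkt, unsigned int len)
    {
            /* CRC32c seeded with ~0, complemented, stored little-endian */
            return cpu_to_le32(~crc32c(~0U, pkt, len));
    }
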
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 80b848d3f0964b4a9ff8c726df5731dc39631d19..71e7ff22f7c92a86cacad9a1b8d18d3d726f52fb 100644 (file)
@@ -32,6 +32,7 @@
 #include <uapi/linux/tc_act/tc_ife.h>
 #include <net/tc_act/tc_ife.h>
 #include <linux/etherdevice.h>
+#include <net/ife.h>
 
 #define IFE_TAB_MASK 15
 
@@ -46,23 +47,6 @@ static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
        [TCA_IFE_TYPE] = { .type = NLA_U16},
 };
 
-/* Caller takes care of presenting data in network order
-*/
-int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval)
-{
-       u32 *tlv = (u32 *)(skbdata);
-       u16 totlen = nla_total_size(dlen);      /*alignment + hdr */
-       char *dptr = (char *)tlv + NLA_HDRLEN;
-       u32 htlv = attrtype << 16 | (dlen + NLA_HDRLEN);
-
-       *tlv = htonl(htlv);
-       memset(dptr, 0, totlen - NLA_HDRLEN);
-       memcpy(dptr, dval, dlen);
-
-       return totlen;
-}
-EXPORT_SYMBOL_GPL(ife_tlv_meta_encode);
-
 int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
 {
        u16 edata = 0;
@@ -637,69 +621,59 @@ int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
        return 0;
 }
 
-struct ifeheadr {
-       __be16 metalen;
-       u8 tlv_data[];
-};
-
-struct meta_tlvhdr {
-       __be16 type;
-       __be16 len;
-};
-
 static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
                          struct tcf_result *res)
 {
        struct tcf_ife_info *ife = to_ife(a);
        int action = ife->tcf_action;
-       struct ifeheadr *ifehdr = (struct ifeheadr *)skb->data;
-       int ifehdrln = (int)ifehdr->metalen;
-       struct meta_tlvhdr *tlv = (struct meta_tlvhdr *)(ifehdr->tlv_data);
+       u8 *ifehdr_end;
+       u8 *tlv_data;
+       u16 metalen;
 
        spin_lock(&ife->tcf_lock);
        bstats_update(&ife->tcf_bstats, skb);
        tcf_lastuse_update(&ife->tcf_tm);
        spin_unlock(&ife->tcf_lock);
 
-       ifehdrln = ntohs(ifehdrln);
-       if (unlikely(!pskb_may_pull(skb, ifehdrln))) {
+       if (skb_at_tc_ingress(skb))
+               skb_push(skb, skb->dev->hard_header_len);
+
+       tlv_data = ife_decode(skb, &metalen);
+       if (unlikely(!tlv_data)) {
                spin_lock(&ife->tcf_lock);
                ife->tcf_qstats.drops++;
                spin_unlock(&ife->tcf_lock);
                return TC_ACT_SHOT;
        }
 
-       skb_set_mac_header(skb, ifehdrln);
-       __skb_pull(skb, ifehdrln);
-       skb->protocol = eth_type_trans(skb, skb->dev);
-       ifehdrln -= IFE_METAHDRLEN;
-
-       while (ifehdrln > 0) {
-               u8 *tlvdata = (u8 *)tlv;
-               u16 mtype = tlv->type;
-               u16 mlen = tlv->len;
-               u16 alen;
+       ifehdr_end = tlv_data + metalen;
+       for (; tlv_data < ifehdr_end; tlv_data = ife_tlv_meta_next(tlv_data)) {
+               u8 *curr_data;
+               u16 mtype;
+               u16 dlen;
 
-               mtype = ntohs(mtype);
-               mlen = ntohs(mlen);
-               alen = NLA_ALIGN(mlen);
+               curr_data = ife_tlv_meta_decode(tlv_data, &mtype, &dlen, NULL);
 
-               if (find_decode_metaid(skb, ife, mtype, (mlen - NLA_HDRLEN),
-                                      (void *)(tlvdata + NLA_HDRLEN))) {
+               if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
                        /* abuse overlimits to count when we receive metadata
                         * but dont have an ops for it
                         */
-                       pr_info_ratelimited("Unknown metaid %d alnlen %d\n",
-                                           mtype, mlen);
+                       pr_info_ratelimited("Unknown metaid %d dlen %d\n",
+                                           mtype, dlen);
                        ife->tcf_qstats.overlimits++;
                }
+       }
 
-               tlvdata += alen;
-               ifehdrln -= alen;
-               tlv = (struct meta_tlvhdr *)tlvdata;
+       if (WARN_ON(tlv_data != ifehdr_end)) {
+               spin_lock(&ife->tcf_lock);
+               ife->tcf_qstats.drops++;
+               spin_unlock(&ife->tcf_lock);
+               return TC_ACT_SHOT;
        }
 
+       skb->protocol = eth_type_trans(skb, skb->dev);
        skb_reset_network_header(skb);
+
        return action;
 }
 
@@ -727,7 +701,6 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
        struct tcf_ife_info *ife = to_ife(a);
        int action = ife->tcf_action;
        struct ethhdr *oethh;   /* outer ether header */
-       struct ethhdr *iethh;   /* inner eth header */
        struct tcf_meta_info *e;
        /*
           OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
@@ -735,13 +708,13 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
         */
        u16 metalen = ife_get_sz(skb, ife);
        int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
-       unsigned int skboff = skb->dev->hard_header_len;
-       u32 at = G_TC_AT(skb->tc_verd);
+       unsigned int skboff = 0;
        int new_len = skb->len + hdrm;
        bool exceed_mtu = false;
-       int err;
+       void *ife_meta;
+       int err = 0;
 
-       if (at & AT_EGRESS) {
+       if (!skb_at_tc_ingress(skb)) {
                if (new_len > skb->dev->mtu)
                        exceed_mtu = true;
        }
@@ -766,27 +739,10 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
                return TC_ACT_SHOT;
        }
 
-       err = skb_cow_head(skb, hdrm);
-       if (unlikely(err)) {
-               ife->tcf_qstats.drops++;
-               spin_unlock(&ife->tcf_lock);
-               return TC_ACT_SHOT;
-       }
-
-       if (!(at & AT_EGRESS))
+       if (skb_at_tc_ingress(skb))
                skb_push(skb, skb->dev->hard_header_len);
 
-       iethh = (struct ethhdr *)skb->data;
-       __skb_push(skb, hdrm);
-       memcpy(skb->data, iethh, skb->mac_len);
-       skb_reset_mac_header(skb);
-       oethh = eth_hdr(skb);
-
-       /*total metadata length */
-       metalen += IFE_METAHDRLEN;
-       metalen = htons(metalen);
-       memcpy((skb->data + skboff), &metalen, IFE_METAHDRLEN);
-       skboff += IFE_METAHDRLEN;
+       ife_meta = ife_encode(skb, metalen);
 
        /* XXX: we dont have a clever way of telling encode to
         * not repeat some of the computations that are done by
@@ -794,7 +750,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
         */
        list_for_each_entry(e, &ife->metalist, metalist) {
                if (e->ops->encode) {
-                       err = e->ops->encode(skb, (void *)(skb->data + skboff),
+                       err = e->ops->encode(skb, (void *)(ife_meta + skboff),
                                             e);
                }
                if (err < 0) {
@@ -805,18 +761,15 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
                }
                skboff += err;
        }
+       oethh = (struct ethhdr *)skb->data;
 
        if (!is_zero_ether_addr(ife->eth_src))
                ether_addr_copy(oethh->h_source, ife->eth_src);
-       else
-               ether_addr_copy(oethh->h_source, iethh->h_source);
        if (!is_zero_ether_addr(ife->eth_dst))
                ether_addr_copy(oethh->h_dest, ife->eth_dst);
-       else
-               ether_addr_copy(oethh->h_dest, iethh->h_dest);
        oethh->h_proto = htons(ife->eth_type);
 
-       if (!(at & AT_EGRESS))
+       if (skb_at_tc_ingress(skb))
                skb_pull(skb, skb->dev->hard_header_len);
 
        spin_unlock(&ife->tcf_lock);
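
The act_ife.c rework deletes the local TLV encoder and header structs and calls into the shared net/ife module instead (hence `select NET_IFE` in the Kconfig hunk), using ife_encode()/ife_decode() for the outer header and ife_tlv_meta_decode()/ife_tlv_meta_next() for the TLV walk. The wire format is unchanged: a 16-bit total metadata length followed by netlink-style TLVs. A userspace re-statement of the encoder being moved, with the same logic as the removed ife_tlv_meta_encode() above (assumes 4-byte-aligned output):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <string.h>

    #define TLV_HDRLEN   4                          /* like NLA_HDRLEN */
    #define TLV_ALIGN(x) (((x) + 3u) & ~3u)

    /* 4-byte header: type in the top 16 bits, header+data length in
     * the bottom 16; data follows, zero-padded to a 4-byte boundary. */
    static int tlv_encode(void *out, uint16_t type, uint16_t dlen,
                          const void *dval)
    {
            uint32_t *tlv = out;
            uint16_t totlen = TLV_HDRLEN + TLV_ALIGN(dlen);
            char *dptr = (char *)tlv + TLV_HDRLEN;

            *tlv = htonl((uint32_t)type << 16 | (dlen + TLV_HDRLEN));
            memset(dptr, 0, totlen - TLV_HDRLEN);
            memcpy(dptr, dval, dlen);
            return totlen;
    }
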
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 2d9fa6e0a1b4a7320a44799a525013273f5cf9eb..af49c7dca8608cebbaa0224976ae7df104e5526f 100644 (file)
@@ -28,8 +28,6 @@
 #include <linux/tc_act/tc_mirred.h>
 #include <net/tc_act/tc_mirred.h>
 
-#include <linux/if_arp.h>
-
 #define MIRRED_TAB_MASK     7
 static LIST_HEAD(mirred_list);
 static DEFINE_SPINLOCK(mirred_list_lock);
@@ -39,15 +37,15 @@ static bool tcf_mirred_is_act_redirect(int action)
        return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
 }
 
-static u32 tcf_mirred_act_direction(int action)
+static bool tcf_mirred_act_wants_ingress(int action)
 {
        switch (action) {
        case TCA_EGRESS_REDIR:
        case TCA_EGRESS_MIRROR:
-               return AT_EGRESS;
+               return false;
        case TCA_INGRESS_REDIR:
        case TCA_INGRESS_MIRROR:
-               return AT_INGRESS;
+               return true;
        default:
                BUG();
        }
@@ -170,7 +168,6 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
        int retval, err = 0;
        int m_eaction;
        int mac_len;
-       u32 at;
 
        tcf_lastuse_update(&m->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
@@ -191,7 +188,6 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
                goto out;
        }
 
-       at = G_TC_AT(skb->tc_verd);
        skb2 = skb_clone(skb, GFP_ATOMIC);
        if (!skb2)
                goto out;
@@ -200,8 +196,9 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
         * and devices expect a mac header on xmit, then mac push/pull is
         * needed.
         */
-       if (at != tcf_mirred_act_direction(m_eaction) && m_mac_header_xmit) {
-               if (at & AT_EGRESS) {
+       if (skb_at_tc_ingress(skb) != tcf_mirred_act_wants_ingress(m_eaction) &&
+           m_mac_header_xmit) {
+               if (!skb_at_tc_ingress(skb)) {
                        /* caught at egress, act ingress: pull mac */
                        mac_len = skb_network_header(skb) - skb_mac_header(skb);
                        skb_pull_rcsum(skb2, mac_len);
@@ -212,12 +209,14 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
        }
 
        /* mirror is always swallowed */
-       if (tcf_mirred_is_act_redirect(m_eaction))
-               skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at);
+       if (tcf_mirred_is_act_redirect(m_eaction)) {
+               skb2->tc_redirected = 1;
+               skb2->tc_from_ingress = skb2->tc_at_ingress;
+       }
 
        skb2->skb_iif = skb->dev->ifindex;
        skb2->dev = dev;
-       if (tcf_mirred_act_direction(m_eaction) & AT_EGRESS)
+       if (!tcf_mirred_act_wants_ingress(m_eaction))
                err = dev_queue_xmit(skb2);
        else
                err = netif_receive_skb(skb2);
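
act_mirred drops the tc_verd direction bits in favour of the dedicated skb bits: skb_at_tc_ingress() says where the packet was caught, and tc_redirected/tc_from_ingress are recorded on the clone. The MAC header is touched only when the packet crosses directions on a device that transmits with one, pulled when going from egress to ingress and (in the branch elided from the hunk above) pushed back for the opposite crossing. The decision, schematically (a sketch, not kernel code):

    #include <stdbool.h>

    enum fixup { FIXUP_NONE, FIXUP_PULL_MAC, FIXUP_PUSH_MAC };

    static enum fixup mirred_fixup(bool caught_at_ingress, bool wants_ingress)
    {
            if (caught_at_ingress == wants_ingress)
                    return FIXUP_NONE;                 /* same direction */
            return caught_at_ingress ? FIXUP_PUSH_MAC  /* ingress -> egress */
                                     : FIXUP_PULL_MAC; /* egress -> ingress */
    }
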
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index b27c4daec88fbebaf28a2805dd5f0e1445bbdf7a..c1310472f620fd44b9ae8a6cfeefef5419cad54e 100644 (file)
@@ -22,6 +22,7 @@
 #include <net/pkt_sched.h>
 #include <linux/tc_act/tc_pedit.h>
 #include <net/tc_act/tc_pedit.h>
+#include <uapi/linux/tc_act/tc_pedit.h>
 
 #define PEDIT_TAB_MASK 15
 
@@ -30,18 +31,117 @@ static struct tc_action_ops act_pedit_ops;
 
 static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
        [TCA_PEDIT_PARMS]       = { .len = sizeof(struct tc_pedit) },
+       [TCA_PEDIT_KEYS_EX]   = { .type = NLA_NESTED },
 };
 
+static const struct nla_policy pedit_key_ex_policy[TCA_PEDIT_KEY_EX_MAX + 1] = {
+       [TCA_PEDIT_KEY_EX_HTYPE]  = { .type = NLA_U16 },
+       [TCA_PEDIT_KEY_EX_CMD]    = { .type = NLA_U16 },
+};
+
+static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla,
+                                                       u8 n)
+{
+       struct tcf_pedit_key_ex *keys_ex;
+       struct tcf_pedit_key_ex *k;
+       const struct nlattr *ka;
+       int err = -EINVAL;
+       int rem;
+
+       if (!nla || !n)
+               return NULL;
+
+       keys_ex = kcalloc(n, sizeof(*k), GFP_KERNEL);
+       if (!keys_ex)
+               return ERR_PTR(-ENOMEM);
+
+       k = keys_ex;
+
+       nla_for_each_nested(ka, nla, rem) {
+               struct nlattr *tb[TCA_PEDIT_KEY_EX_MAX + 1];
+
+               if (!n) {
+                       err = -EINVAL;
+                       goto err_out;
+               }
+               n--;
+
+               if (nla_type(ka) != TCA_PEDIT_KEY_EX) {
+                       err = -EINVAL;
+                       goto err_out;
+               }
+
+               err = nla_parse_nested(tb, TCA_PEDIT_KEY_EX_MAX, ka,
+                                      pedit_key_ex_policy);
+               if (err)
+                       goto err_out;
+
+               if (!tb[TCA_PEDIT_KEY_EX_HTYPE] ||
+                   !tb[TCA_PEDIT_KEY_EX_CMD]) {
+                       err = -EINVAL;
+                       goto err_out;
+               }
+
+               k->htype = nla_get_u16(tb[TCA_PEDIT_KEY_EX_HTYPE]);
+               k->cmd = nla_get_u16(tb[TCA_PEDIT_KEY_EX_CMD]);
+
+               if (k->htype > TCA_PEDIT_HDR_TYPE_MAX ||
+                   k->cmd > TCA_PEDIT_CMD_MAX) {
+                       err = -EINVAL;
+                       goto err_out;
+               }
+
+               k++;
+       }
+
+       if (n)
+               goto err_out;
+
+       return keys_ex;
+
+err_out:
+       kfree(keys_ex);
+       return ERR_PTR(err);
+}
+
+static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
+                                struct tcf_pedit_key_ex *keys_ex, int n)
+{
+       struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX);
+
+       for (; n > 0; n--) {
+               struct nlattr *key_start;
+
+               key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX);
+
+               if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) ||
+                   nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) {
+                       nlmsg_trim(skb, keys_start);
+                       return -EINVAL;
+               }
+
+               nla_nest_end(skb, key_start);
+
+               keys_ex++;
+       }
+
+       nla_nest_end(skb, keys_start);
+
+       return 0;
+}
+
 static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                          struct nlattr *est, struct tc_action **a,
                          int ovr, int bind)
 {
        struct tc_action_net *tn = net_generic(net, pedit_net_id);
        struct nlattr *tb[TCA_PEDIT_MAX + 1];
+       struct nlattr *pattr;
        struct tc_pedit *parm;
        int ret = 0, err;
        struct tcf_pedit *p;
        struct tc_pedit_key *keys = NULL;
+       struct tcf_pedit_key_ex *keys_ex;
        int ksize;
 
        if (nla == NULL)
@@ -51,13 +151,21 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
        if (err < 0)
                return err;
 
-       if (tb[TCA_PEDIT_PARMS] == NULL)
+       pattr = tb[TCA_PEDIT_PARMS];
+       if (!pattr)
+               pattr = tb[TCA_PEDIT_PARMS_EX];
+       if (!pattr)
                return -EINVAL;
-       parm = nla_data(tb[TCA_PEDIT_PARMS]);
+
+       parm = nla_data(pattr);
        ksize = parm->nkeys * sizeof(struct tc_pedit_key);
-       if (nla_len(tb[TCA_PEDIT_PARMS]) < sizeof(*parm) + ksize)
+       if (nla_len(pattr) < sizeof(*parm) + ksize)
                return -EINVAL;
 
+       keys_ex = tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys);
+       if (IS_ERR(keys_ex))
+               return PTR_ERR(keys_ex);
+
        if (!tcf_hash_check(tn, parm->index, a, bind)) {
                if (!parm->nkeys)
                        return -EINVAL;
@@ -69,6 +177,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                keys = kmalloc(ksize, GFP_KERNEL);
                if (keys == NULL) {
                        tcf_hash_cleanup(*a, est);
+                       kfree(keys_ex);
                        return -ENOMEM;
                }
                ret = ACT_P_CREATED;
@@ -81,8 +190,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                p = to_pedit(*a);
                if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
                        keys = kmalloc(ksize, GFP_KERNEL);
-                       if (keys == NULL)
+                       if (!keys) {
+                               kfree(keys_ex);
                                return -ENOMEM;
+                       }
                }
        }
 
@@ -95,6 +206,10 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                p->tcfp_nkeys = parm->nkeys;
        }
        memcpy(p->tcfp_keys, parm->keys, ksize);
+
+       kfree(p->tcfp_keys_ex);
+       p->tcfp_keys_ex = keys_ex;
+
        spin_unlock_bh(&p->tcf_lock);
        if (ret == ACT_P_CREATED)
                tcf_hash_insert(tn, *a);
@@ -106,6 +221,7 @@ static void tcf_pedit_cleanup(struct tc_action *a, int bind)
        struct tcf_pedit *p = to_pedit(a);
        struct tc_pedit_key *keys = p->tcfp_keys;
        kfree(keys);
+       kfree(p->tcfp_keys_ex);
 }
 
 static bool offset_valid(struct sk_buff *skb, int offset)
@@ -119,38 +235,88 @@ static bool offset_valid(struct sk_buff *skb, int offset)
        return true;
 }
 
+static int pedit_skb_hdr_offset(struct sk_buff *skb,
+                               enum pedit_header_type htype, int *hoffset)
+{
+       int ret = -EINVAL;
+
+       switch (htype) {
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+               if (skb_mac_header_was_set(skb)) {
+                       *hoffset = skb_mac_offset(skb);
+                       ret = 0;
+               }
+               break;
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK:
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+               *hoffset = skb_network_offset(skb);
+               ret = 0;
+               break;
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+       case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+               if (skb_transport_header_was_set(skb)) {
+                       *hoffset = skb_transport_offset(skb);
+                       ret = 0;
+               }
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       };
+
+       return ret;
+}
+
 static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
                     struct tcf_result *res)
 {
        struct tcf_pedit *p = to_pedit(a);
        int i;
-       unsigned int off;
 
        if (skb_unclone(skb, GFP_ATOMIC))
                return p->tcf_action;
 
-       off = skb_network_offset(skb);
-
        spin_lock(&p->tcf_lock);
 
        tcf_lastuse_update(&p->tcf_tm);
 
        if (p->tcfp_nkeys > 0) {
                struct tc_pedit_key *tkey = p->tcfp_keys;
+               struct tcf_pedit_key_ex *tkey_ex = p->tcfp_keys_ex;
+               enum pedit_header_type htype = TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+               enum pedit_cmd cmd = TCA_PEDIT_KEY_EX_CMD_SET;
 
                for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
                        u32 *ptr, _data;
                        int offset = tkey->off;
+                       int hoffset;
+                       u32 val;
+                       int rc;
+
+                       if (tkey_ex) {
+                               htype = tkey_ex->htype;
+                               cmd = tkey_ex->cmd;
+
+                               tkey_ex++;
+                       }
+
+                       rc = pedit_skb_hdr_offset(skb, htype, &hoffset);
+                       if (rc) {
+                               pr_info("tc filter pedit bad header type specified (0x%x)\n",
+                                       htype);
+                               goto bad;
+                       }
 
                        if (tkey->offmask) {
                                char *d, _d;
 
-                               if (!offset_valid(skb, off + tkey->at)) {
+                               if (!offset_valid(skb, hoffset + tkey->at)) {
                                        pr_info("tc filter pedit 'at' offset %d out of bounds\n",
-                                               off + tkey->at);
+                                               hoffset + tkey->at);
                                        goto bad;
                                }
-                               d = skb_header_pointer(skb, off + tkey->at, 1,
+                               d = skb_header_pointer(skb, hoffset + tkey->at, 1,
                                                       &_d);
                                if (!d)
                                        goto bad;
@@ -163,19 +329,32 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
                                goto bad;
                        }
 
-                       if (!offset_valid(skb, off + offset)) {
+                       if (!offset_valid(skb, hoffset + offset)) {
                                pr_info("tc filter pedit offset %d out of bounds\n",
-                                       offset);
+                                       hoffset + offset);
                                goto bad;
                        }
 
-                       ptr = skb_header_pointer(skb, off + offset, 4, &_data);
+                       ptr = skb_header_pointer(skb, hoffset + offset, 4, &_data);
                        if (!ptr)
                                goto bad;
                        /* just do it, baby */
-                       *ptr = ((*ptr & tkey->mask) ^ tkey->val);
+                       switch (cmd) {
+                       case TCA_PEDIT_KEY_EX_CMD_SET:
+                               val = tkey->val;
+                               break;
+                       case TCA_PEDIT_KEY_EX_CMD_ADD:
+                               val = (*ptr + tkey->val) & ~tkey->mask;
+                               break;
+                       default:
+                               pr_info("tc filter pedit bad command (%d)\n",
+                                       cmd);
+                               goto bad;
+                       }
+
+                       *ptr = ((*ptr & tkey->mask) ^ val);
                        if (ptr == &_data)
-                               skb_store_bits(skb, off + offset, ptr, 4);
+                               skb_store_bits(skb, hoffset + offset, ptr, 4);
                }
 
                goto done;
@@ -215,8 +394,15 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
        opt->refcnt = p->tcf_refcnt - ref;
        opt->bindcnt = p->tcf_bindcnt - bind;
 
-       if (nla_put(skb, TCA_PEDIT_PARMS, s, opt))
-               goto nla_put_failure;
+       if (p->tcfp_keys_ex) {
+               tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys);
+
+               if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt))
+                       goto nla_put_failure;
+       } else {
+               if (nla_put(skb, TCA_PEDIT_PARMS, s, opt))
+                       goto nla_put_failure;
+       }
 
        tcf_tm_dump(&t, &p->tcf_tm);
        if (nla_put_64bit(skb, TCA_PEDIT_TM, sizeof(t), &t, TCA_PEDIT_PAD))
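
The pedit extension gives each key an optional extended descriptor (TCA_PEDIT_KEY_EX) carrying a header type and a command. Offsets are now resolved relative to the selected header via pedit_skb_hdr_offset(), and the new ADD command first computes val = (*ptr + tkey->val) & ~tkey->mask before the classic write rule *ptr = (*ptr & mask) ^ val is applied, so bits kept by the mask survive the addition. A self-contained worked example of that arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t word = 0x11223344;
            uint32_t mask = 0xffff0000;   /* keep the top 16 bits */
            uint32_t add  = 0x00000002;   /* ADD operand */

            /* TCA_PEDIT_KEY_EX_CMD_ADD: add, restrict to ~mask ... */
            uint32_t val = (word + add) & ~mask;
            /* ... then apply with the common set rule. */
            word = (word & mask) ^ val;

            printf("0x%08x\n", word);     /* prints 0x11223346 */
            return 0;
    }
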
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
new file mode 100644 (file)
index 0000000..0b8217b
--- /dev/null
+++ b/net/sched/act_sample.c
@@ -0,0 +1,276 @@
+/*
+ * net/sched/act_sample.c - Packet sampling tc action
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <net/net_namespace.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <linux/tc_act/tc_sample.h>
+#include <net/tc_act/tc_sample.h>
+#include <net/psample.h>
+
+#include <linux/if_arp.h>
+
+#define SAMPLE_TAB_MASK     7
+static unsigned int sample_net_id;
+static struct tc_action_ops act_sample_ops;
+
+static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = {
+       [TCA_SAMPLE_PARMS]              = { .len = sizeof(struct tc_sample) },
+       [TCA_SAMPLE_RATE]               = { .type = NLA_U32 },
+       [TCA_SAMPLE_TRUNC_SIZE]         = { .type = NLA_U32 },
+       [TCA_SAMPLE_PSAMPLE_GROUP]      = { .type = NLA_U32 },
+};
+
+static int tcf_sample_init(struct net *net, struct nlattr *nla,
+                          struct nlattr *est, struct tc_action **a, int ovr,
+                          int bind)
+{
+       struct tc_action_net *tn = net_generic(net, sample_net_id);
+       struct nlattr *tb[TCA_SAMPLE_MAX + 1];
+       struct psample_group *psample_group;
+       struct tc_sample *parm;
+       struct tcf_sample *s;
+       bool exists = false;
+       int ret;
+
+       if (!nla)
+               return -EINVAL;
+       ret = nla_parse_nested(tb, TCA_SAMPLE_MAX, nla, sample_policy);
+       if (ret < 0)
+               return ret;
+       if (!tb[TCA_SAMPLE_PARMS] || !tb[TCA_SAMPLE_RATE] ||
+           !tb[TCA_SAMPLE_PSAMPLE_GROUP])
+               return -EINVAL;
+
+       parm = nla_data(tb[TCA_SAMPLE_PARMS]);
+
+       exists = tcf_hash_check(tn, parm->index, a, bind);
+       if (exists && bind)
+               return 0;
+
+       if (!exists) {
+               ret = tcf_hash_create(tn, parm->index, est, a,
+                                     &act_sample_ops, bind, false);
+               if (ret)
+                       return ret;
+               ret = ACT_P_CREATED;
+       } else {
+               tcf_hash_release(*a, bind);
+               if (!ovr)
+                       return -EEXIST;
+       }
+       s = to_sample(*a);
+
+       s->tcf_action = parm->action;
+       s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+       s->psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
+       psample_group = psample_group_get(net, s->psample_group_num);
+       if (!psample_group) {
+               if (ret == ACT_P_CREATED)
+                       tcf_hash_release(*a, bind);
+               return -ENOMEM;
+       }
+       RCU_INIT_POINTER(s->psample_group, psample_group);
+
+       if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
+               s->truncate = true;
+               s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
+       }
+
+       if (ret == ACT_P_CREATED)
+               tcf_hash_insert(tn, *a);
+       return ret;
+}
+
+static void tcf_sample_cleanup_rcu(struct rcu_head *rcu)
+{
+       struct tcf_sample *s = container_of(rcu, struct tcf_sample, rcu);
+       struct psample_group *psample_group;
+
+       psample_group = rcu_dereference_protected(s->psample_group, 1);
+       RCU_INIT_POINTER(s->psample_group, NULL);
+       psample_group_put(psample_group);
+}
+
+static void tcf_sample_cleanup(struct tc_action *a, int bind)
+{
+       struct tcf_sample *s = to_sample(a);
+
+       call_rcu(&s->rcu, tcf_sample_cleanup_rcu);
+}
+
+static bool tcf_sample_dev_ok_push(struct net_device *dev)
+{
+       switch (dev->type) {
+       case ARPHRD_TUNNEL:
+       case ARPHRD_TUNNEL6:
+       case ARPHRD_SIT:
+       case ARPHRD_IPGRE:
+       case ARPHRD_VOID:
+       case ARPHRD_NONE:
+               return false;
+       default:
+               return true;
+       }
+}
+
+static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
+                         struct tcf_result *res)
+{
+       struct tcf_sample *s = to_sample(a);
+       struct psample_group *psample_group;
+       int retval;
+       int size;
+       int iif;
+       int oif;
+
+       tcf_lastuse_update(&s->tcf_tm);
+       bstats_cpu_update(this_cpu_ptr(s->common.cpu_bstats), skb);
+       retval = READ_ONCE(s->tcf_action);
+
+       rcu_read_lock();
+       psample_group = rcu_dereference(s->psample_group);
+
+       /* randomly sample packets according to rate */
+       if (psample_group && (prandom_u32() % s->rate == 0)) {
+               if (!skb_at_tc_ingress(skb)) {
+                       iif = skb->skb_iif;
+                       oif = skb->dev->ifindex;
+               } else {
+                       iif = skb->dev->ifindex;
+                       oif = 0;
+               }
+
+               /* on ingress, the mac header gets popped, so push it back */
+               if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev))
+                       skb_push(skb, skb->mac_len);
+
+               size = s->truncate ? s->trunc_size : skb->len;
+               psample_sample_packet(psample_group, skb, size, iif, oif,
+                                     s->rate);
+
+               if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev))
+                       skb_pull(skb, skb->mac_len);
+       }
+
+       rcu_read_unlock();
+       return retval;
+}
+
+static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a,
+                          int bind, int ref)
+{
+       unsigned char *b = skb_tail_pointer(skb);
+       struct tcf_sample *s = to_sample(a);
+       struct tc_sample opt = {
+               .index      = s->tcf_index,
+               .action     = s->tcf_action,
+               .refcnt     = s->tcf_refcnt - ref,
+               .bindcnt    = s->tcf_bindcnt - bind,
+       };
+       struct tcf_t t;
+
+       if (nla_put(skb, TCA_SAMPLE_PARMS, sizeof(opt), &opt))
+               goto nla_put_failure;
+
+       tcf_tm_dump(&t, &s->tcf_tm);
+       if (nla_put_64bit(skb, TCA_SAMPLE_TM, sizeof(t), &t, TCA_SAMPLE_PAD))
+               goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_SAMPLE_RATE, s->rate))
+               goto nla_put_failure;
+
+       if (s->truncate)
+               if (nla_put_u32(skb, TCA_SAMPLE_TRUNC_SIZE, s->trunc_size))
+                       goto nla_put_failure;
+
+       if (nla_put_u32(skb, TCA_SAMPLE_PSAMPLE_GROUP, s->psample_group_num))
+               goto nla_put_failure;
+       return skb->len;
+
+nla_put_failure:
+       nlmsg_trim(skb, b);
+       return -1;
+}
+
+static int tcf_sample_walker(struct net *net, struct sk_buff *skb,
+                            struct netlink_callback *cb, int type,
+                            const struct tc_action_ops *ops)
+{
+       struct tc_action_net *tn = net_generic(net, sample_net_id);
+
+       return tcf_generic_walker(tn, skb, cb, type, ops);
+}
+
+static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index)
+{
+       struct tc_action_net *tn = net_generic(net, sample_net_id);
+
+       return tcf_hash_search(tn, a, index);
+}
+
+static struct tc_action_ops act_sample_ops = {
+       .kind     = "sample",
+       .type     = TCA_ACT_SAMPLE,
+       .owner    = THIS_MODULE,
+       .act      = tcf_sample_act,
+       .dump     = tcf_sample_dump,
+       .init     = tcf_sample_init,
+       .cleanup  = tcf_sample_cleanup,
+       .walk     = tcf_sample_walker,
+       .lookup   = tcf_sample_search,
+       .size     = sizeof(struct tcf_sample),
+};
+
+static __net_init int sample_init_net(struct net *net)
+{
+       struct tc_action_net *tn = net_generic(net, sample_net_id);
+
+       return tc_action_net_init(tn, &act_sample_ops, SAMPLE_TAB_MASK);
+}
+
+static void __net_exit sample_exit_net(struct net *net)
+{
+       struct tc_action_net *tn = net_generic(net, sample_net_id);
+
+       tc_action_net_exit(tn);
+}
+
+static struct pernet_operations sample_net_ops = {
+       .init = sample_init_net,
+       .exit = sample_exit_net,
+       .id   = &sample_net_id,
+       .size = sizeof(struct tc_action_net),
+};
+
+static int __init sample_init_module(void)
+{
+       return tcf_register_action(&act_sample_ops, &sample_net_ops);
+}
+
+static void __exit sample_cleanup_module(void)
+{
+       tcf_unregister_action(&act_sample_ops, &sample_net_ops);
+}
+
+module_init(sample_init_module);
+module_exit(sample_cleanup_module);
+
+MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>");
+MODULE_DESCRIPTION("Packet sampling action");
+MODULE_LICENSE("GPL v2");
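
Beyond the 1-in-N decision itself, the notable mechanics in the new action are the MAC-header push/pull around psample_sample_packet() for ingress packets on devices that carry one, and the RCU handling of the psample group pointer. Condensed shape of that RCU handoff, with the calls exactly as used in the file above (not independently buildable):

    /*
     * install:   RCU_INIT_POINTER(s->psample_group, group);
     * fast path: rcu_read_lock();
     *            group = rcu_dereference(s->psample_group);
     *            if (group)
     *                    psample_sample_packet(group, skb, size,
     *                                          iif, oif, s->rate);
     *            rcu_read_unlock();
     * teardown:  call_rcu(&s->rcu, tcf_sample_cleanup_rcu);
     *            -> psample_group_put() once in-flight readers are done.
     */
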
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 1ecdf809b5fa8913d56bb4194ac193274d4d625a..732f7cae459d4656aa8d69020f44a66abb501f34 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/errno.h>
+#include <linux/err.h>
 #include <linux/skbuff.h>
 #include <linux/init.h>
 #include <linux/kmod.h>
@@ -38,14 +39,14 @@ static DEFINE_RWLOCK(cls_mod_lock);
 
 /* Find classifier type by string name */
 
-static const struct tcf_proto_ops *tcf_proto_lookup_ops(struct nlattr *kind)
+static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
 {
        const struct tcf_proto_ops *t, *res = NULL;
 
        if (kind) {
                read_lock(&cls_mod_lock);
                list_for_each_entry(t, &tcf_proto_base, head) {
-                       if (nla_strcmp(kind, t->kind) == 0) {
+                       if (strcmp(kind, t->kind) == 0) {
                                if (try_module_get(t->owner))
                                        res = t;
                                break;
@@ -127,6 +128,77 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
        return first;
 }
 
+static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
+                                         u32 prio, u32 parent, struct Qdisc *q)
+{
+       struct tcf_proto *tp;
+       int err;
+
+       tp = kzalloc(sizeof(*tp), GFP_KERNEL);
+       if (!tp)
+               return ERR_PTR(-ENOBUFS);
+
+       err = -ENOENT;
+       tp->ops = tcf_proto_lookup_ops(kind);
+       if (!tp->ops) {
+#ifdef CONFIG_MODULES
+               rtnl_unlock();
+               request_module("cls_%s", kind);
+               rtnl_lock();
+               tp->ops = tcf_proto_lookup_ops(kind);
+               /* We dropped the RTNL semaphore in order to perform
+                * the module load. So, even if we succeeded in loading
+                * the module we have to replay the request. We indicate
+                * this using -EAGAIN.
+                */
+               if (tp->ops) {
+                       module_put(tp->ops->owner);
+                       err = -EAGAIN;
+               } else {
+                       err = -ENOENT;
+               }
+               goto errout;
+#endif
+       }
+       tp->classify = tp->ops->classify;
+       tp->protocol = protocol;
+       tp->prio = prio;
+       tp->classid = parent;
+       tp->q = q;
+
+       err = tp->ops->init(tp);
+       if (err) {
+               module_put(tp->ops->owner);
+               goto errout;
+       }
+       return tp;
+
+errout:
+       kfree(tp);
+       return ERR_PTR(err);
+}
+
+static bool tcf_proto_destroy(struct tcf_proto *tp, bool force)
+{
+       if (tp->ops->destroy(tp, force)) {
+               module_put(tp->ops->owner);
+               kfree_rcu(tp, rcu);
+               return true;
+       }
+       return false;
+}
+
+void tcf_destroy_chain(struct tcf_proto __rcu **fl)
+{
+       struct tcf_proto *tp;
+
+       while ((tp = rtnl_dereference(*fl)) != NULL) {
+               RCU_INIT_POINTER(*fl, tp->next);
+               tcf_proto_destroy(tp, true);
+       }
+}
+EXPORT_SYMBOL(tcf_destroy_chain);
+
 /* Add/change/delete/get a filter node */
 
 static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
@@ -142,8 +214,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
        struct Qdisc  *q;
        struct tcf_proto __rcu **back;
        struct tcf_proto __rcu **chain;
+       struct tcf_proto *next;
        struct tcf_proto *tp;
-       const struct tcf_proto_ops *tp_ops;
        const struct Qdisc_class_ops *cops;
        unsigned long cl;
        unsigned long fh;
@@ -222,9 +294,10 @@ replay:
 
        /* And the last stroke */
        chain = cops->tcf_chain(q, cl);
-       err = -EINVAL;
-       if (chain == NULL)
+       if (chain == NULL) {
+               err = -EINVAL;
                goto errout;
+       }
        if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
                tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER);
                tcf_destroy_chain(chain);
@@ -239,10 +312,13 @@ replay:
                if (tp->prio >= prio) {
                        if (tp->prio == prio) {
                                if (!nprio ||
-                                   (tp->protocol != protocol && protocol))
+                                   (tp->protocol != protocol && protocol)) {
+                                       err = -EINVAL;
                                        goto errout;
-                       } else
+                               }
+                       } else {
                                tp = NULL;
+                       }
                        break;
                }
        }
@@ -250,109 +326,69 @@ replay:
        if (tp == NULL) {
                /* Proto-tcf does not exist, create new one */
 
-               if (tca[TCA_KIND] == NULL || !protocol)
+               if (tca[TCA_KIND] == NULL || !protocol) {
+                       err = -EINVAL;
                        goto errout;
+               }
 
-               err = -ENOENT;
                if (n->nlmsg_type != RTM_NEWTFILTER ||
-                   !(n->nlmsg_flags & NLM_F_CREATE))
+                   !(n->nlmsg_flags & NLM_F_CREATE)) {
+                       err = -ENOENT;
                        goto errout;
+               }
 
+               if (!nprio)
+                       nprio = TC_H_MAJ(tcf_auto_prio(rtnl_dereference(*back)));
 
-               /* Create new proto tcf */
-
-               err = -ENOBUFS;
-               tp = kzalloc(sizeof(*tp), GFP_KERNEL);
-               if (tp == NULL)
-                       goto errout;
-               err = -ENOENT;
-               tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND]);
-               if (tp_ops == NULL) {
-#ifdef CONFIG_MODULES
-                       struct nlattr *kind = tca[TCA_KIND];
-                       char name[IFNAMSIZ];
-
-                       if (kind != NULL &&
-                           nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
-                               rtnl_unlock();
-                               request_module("cls_%s", name);
-                               rtnl_lock();
-                               tp_ops = tcf_proto_lookup_ops(kind);
-                               /* We dropped the RTNL semaphore in order to
-                                * perform the module load.  So, even if we
-                                * succeeded in loading the module we have to
-                                * replay the request.  We indicate this using
-                                * -EAGAIN.
-                                */
-                               if (tp_ops != NULL) {
-                                       module_put(tp_ops->owner);
-                                       err = -EAGAIN;
-                               }
-                       }
-#endif
-                       kfree(tp);
-                       goto errout;
-               }
-               tp->ops = tp_ops;
-               tp->protocol = protocol;
-               tp->prio = nprio ? :
-                              TC_H_MAJ(tcf_auto_prio(rtnl_dereference(*back)));
-               tp->q = q;
-               tp->classify = tp_ops->classify;
-               tp->classid = parent;
-
-               err = tp_ops->init(tp);
-               if (err != 0) {
-                       module_put(tp_ops->owner);
-                       kfree(tp);
+               tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
+                                     protocol, nprio, parent, q);
+               if (IS_ERR(tp)) {
+                       err = PTR_ERR(tp);
                        goto errout;
                }
-
                tp_created = 1;
-
-       } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind))
+       } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
+               err = -EINVAL;
                goto errout;
+       }
 
        fh = tp->ops->get(tp, t->tcm_handle);
 
        if (fh == 0) {
                if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
-                       struct tcf_proto *next = rtnl_dereference(tp->next);
-
+                       next = rtnl_dereference(tp->next);
                        RCU_INIT_POINTER(*back, next);
-
                        tfilter_notify(net, skb, n, tp, fh,
                                       RTM_DELTFILTER, false);
-                       tcf_destroy(tp, true);
+                       tcf_proto_destroy(tp, true);
                        err = 0;
                        goto errout;
                }
 
-               err = -ENOENT;
                if (n->nlmsg_type != RTM_NEWTFILTER ||
-                   !(n->nlmsg_flags & NLM_F_CREATE))
+                   !(n->nlmsg_flags & NLM_F_CREATE)) {
+                       err = -ENOENT;
                        goto errout;
+               }
        } else {
                switch (n->nlmsg_type) {
                case RTM_NEWTFILTER:
-                       err = -EEXIST;
                        if (n->nlmsg_flags & NLM_F_EXCL) {
                                if (tp_created)
-                                       tcf_destroy(tp, true);
+                                       tcf_proto_destroy(tp, true);
+                               err = -EEXIST;
                                goto errout;
                        }
                        break;
                case RTM_DELTFILTER:
                        err = tp->ops->delete(tp, fh);
-                       if (err == 0) {
-                               struct tcf_proto *next = rtnl_dereference(tp->next);
-
-                               tfilter_notify(net, skb, n, tp,
-                                              t->tcm_handle,
-                                              RTM_DELTFILTER, false);
-                               if (tcf_destroy(tp, false))
-                                       RCU_INIT_POINTER(*back, next);
-                       }
+                       if (err)
+                               goto errout;
+                       next = rtnl_dereference(tp->next);
+                       tfilter_notify(net, skb, n, tp, t->tcm_handle,
+                                      RTM_DELTFILTER, false);
+                       if (tcf_proto_destroy(tp, false))
+                               RCU_INIT_POINTER(*back, next);
                        goto errout;
                case RTM_GETTFILTER:
                        err = tfilter_notify(net, skb, n, tp, fh,
@@ -374,7 +410,7 @@ replay:
                tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false);
        } else {
                if (tp_created)
-                       tcf_destroy(tp, true);
+                       tcf_proto_destroy(tp, true);
        }
 
 errout:
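
The cls_api.c changes factor classifier creation and teardown into tcf_proto_create()/tcf_proto_destroy() and make every error path set err explicitly instead of relying on a value assigned far above. Note the preserved module-autoload convention: tcf_proto_create() drops RTNL to run request_module() and then returns -EAGAIN even when the load succeeded, because state may have changed while the lock was released; the caller's replay: label redoes the whole request. Schematic caller, with a stub standing in for the real handler:

    #include <errno.h>

    static int handle_request(void) { return 0; /* stub for the real work */ }

    static int dispatch(void)
    {
            int err;

    replay:
            err = handle_request();
            if (err == -EAGAIN)
                    goto replay;   /* state may have changed: redo it all */
            return err;
    }
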
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index d9c97018317dd7e26b6c99dfa83b8d6daeca6d19..80f688436dd70bae84f0c2fffd3d16c4b1d2c4da 100644 (file)
@@ -148,6 +148,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
        struct net_device *dev = tp->q->dev_queue->dev;
        struct tc_cls_bpf_offload bpf_offload = {};
        struct tc_to_netdev offload;
+       int err;
 
        offload.type = TC_SETUP_CLSBPF;
        offload.cls_bpf = &bpf_offload;
@@ -159,8 +160,13 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
        bpf_offload.exts_integrated = prog->exts_integrated;
        bpf_offload.gen_flags = prog->gen_flags;
 
-       return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
-                                            tp->protocol, &offload);
+       err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
+                                           tp->protocol, &offload);
+
+       if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE))
+               prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
+
+       return err;
 }
 
 static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
@@ -511,6 +517,9 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                return ret;
        }
 
+       if (!tc_in_hw(prog->gen_flags))
+               prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+
        if (oldprog) {
                list_replace_rcu(&oldprog->link, &prog->link);
                tcf_unbind_filter(tp, &oldprog->res);
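
cls_bpf (and cls_flower below) now record where a filter actually ended up: a successful ndo_setup_tc() add/replace sets TCA_CLS_FLAGS_IN_HW, and a filter that never made it into hardware gets the dump-visible TCA_CLS_FLAGS_NOT_IN_HW instead. The bookkeeping, schematically (the flag values are stand-ins, not the uapi constants):

    #include <stdbool.h>
    #include <stdint.h>

    #define FLAG_IN_HW     (1u << 0)
    #define FLAG_NOT_IN_HW (1u << 1)

    static uint32_t mark_offload_result(uint32_t flags, bool hw_accepted)
    {
            if (hw_accepted)
                    flags |= FLAG_IN_HW;
            else if (!(flags & FLAG_IN_HW))
                    flags |= FLAG_NOT_IN_HW;
            return flags;
    }
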
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 6575aba87630a24052a6374ddfceef1ce597144d..3d6b9286c203f298b14b5254e5c12cb4781eb4b1 100644 (file)
@@ -129,7 +129,7 @@ static u32 flow_get_mark(const struct sk_buff *skb)
 static u32 flow_get_nfct(const struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-       return addr_fold(skb->nfct);
+       return addr_fold(skb_nfct(skb));
 #else
        return 0;
 #endif
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 5752789acc135250c312199c2d6e5e15d05fdea0..9d0c99d2e9fbcc35aee5a51274cf9fe7f542a14f 100644 (file)
@@ -40,6 +40,7 @@ struct fl_flow_key {
        };
        struct flow_dissector_key_ports tp;
        struct flow_dissector_key_icmp icmp;
+       struct flow_dissector_key_arp arp;
        struct flow_dissector_key_keyid enc_key_id;
        union {
                struct flow_dissector_key_ipv4_addrs enc_ipv4;
@@ -133,6 +134,14 @@ static void fl_clear_masked_range(struct fl_flow_key *key,
        memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
 }
 
+static struct cls_fl_filter *fl_lookup(struct cls_fl_head *head,
+                                      struct fl_flow_key *mkey)
+{
+       return rhashtable_lookup_fast(&head->ht,
+                                     fl_key_get_start(mkey, &head->mask),
+                                     head->ht_params);
+}
+
 static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                       struct tcf_result *res)
 {
@@ -180,9 +189,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 
        fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);
 
-       f = rhashtable_lookup_fast(&head->ht,
-                                  fl_key_get_start(&skb_mkey, &head->mask),
-                                  head->ht_params);
+       f = fl_lookup(head, &skb_mkey);
        if (f && !tc_skip_sw(f->flags)) {
                *res = f->res;
                return tcf_exts_exec(skb, &f->exts, res);
@@ -222,6 +229,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
                return;
 
        offload.command = TC_CLSFLOWER_DESTROY;
+       offload.prio = tp->prio;
        offload.cookie = (unsigned long)f;
 
        tc->type = TC_SETUP_CLSFLOWER;
@@ -253,6 +261,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
        }
 
        offload.command = TC_CLSFLOWER_REPLACE;
+       offload.prio = tp->prio;
        offload.cookie = (unsigned long)f;
        offload.dissector = dissector;
        offload.mask = mask;
@@ -264,6 +273,8 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
 
        err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
                                            tc);
+       if (!err)
+               f->flags |= TCA_CLS_FLAGS_IN_HW;
 
        if (tc_skip_sw(f->flags))
                return err;
@@ -280,6 +291,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
                return;
 
        offload.command = TC_CLSFLOWER_STATS;
+       offload.prio = tp->prio;
        offload.cookie = (unsigned long)f;
        offload.exts = &f->exts;
 
@@ -401,6 +413,16 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
        [TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ICMPV6_CODE]    = { .type = NLA_U8 },
        [TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
+       [TCA_FLOWER_KEY_ARP_SIP]        = { .type = NLA_U32 },
+       [TCA_FLOWER_KEY_ARP_SIP_MASK]   = { .type = NLA_U32 },
+       [TCA_FLOWER_KEY_ARP_TIP]        = { .type = NLA_U32 },
+       [TCA_FLOWER_KEY_ARP_TIP_MASK]   = { .type = NLA_U32 },
+       [TCA_FLOWER_KEY_ARP_OP]         = { .type = NLA_U8 },
+       [TCA_FLOWER_KEY_ARP_OP_MASK]    = { .type = NLA_U8 },
+       [TCA_FLOWER_KEY_ARP_SHA]        = { .len = ETH_ALEN },
+       [TCA_FLOWER_KEY_ARP_SHA_MASK]   = { .len = ETH_ALEN },
+       [TCA_FLOWER_KEY_ARP_THA]        = { .len = ETH_ALEN },
+       [TCA_FLOWER_KEY_ARP_THA_MASK]   = { .len = ETH_ALEN },
 };
 
 static void fl_set_key_val(struct nlattr **tb,
@@ -572,6 +594,23 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
                               &mask->icmp.code,
                               TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
                               sizeof(key->icmp.code));
+       } else if (key->basic.n_proto == htons(ETH_P_ARP) ||
+                  key->basic.n_proto == htons(ETH_P_RARP)) {
+               fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
+                              &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
+                              sizeof(key->arp.sip));
+               fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
+                              &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
+                              sizeof(key->arp.tip));
+               fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
+                              &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
+                              sizeof(key->arp.op));
+               fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
+                              mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
+                              sizeof(key->arp.sha));
+               fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
+                              mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
+                              sizeof(key->arp.tha));
        }
 
        if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
@@ -688,6 +727,8 @@ static void fl_init_dissector(struct cls_fl_head *head,
                             FLOW_DISSECTOR_KEY_PORTS, tp);
        FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
                             FLOW_DISSECTOR_KEY_ICMP, icmp);
+       FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+                            FLOW_DISSECTOR_KEY_ARP, arp);
        FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
                             FLOW_DISSECTOR_KEY_VLAN, vlan);
        FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
@@ -796,23 +837,31 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
        struct cls_fl_head *head = rtnl_dereference(tp->root);
        struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
        struct cls_fl_filter *fnew;
-       struct nlattr *tb[TCA_FLOWER_MAX + 1];
+       struct nlattr **tb;
        struct fl_flow_mask mask = {};
        int err;
 
        if (!tca[TCA_OPTIONS])
                return -EINVAL;
 
+       tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
+       if (!tb)
+               return -ENOBUFS;
+
        err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
        if (err < 0)
-               return err;
+               goto errout_tb;
 
-       if (fold && handle && fold->handle != handle)
-               return -EINVAL;
+       if (fold && handle && fold->handle != handle) {
+               err = -EINVAL;
+               goto errout_tb;
+       }
 
        fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
-       if (!fnew)
-               return -ENOBUFS;
+       if (!fnew) {
+               err = -ENOBUFS;
+               goto errout_tb;
+       }
 
        err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
        if (err < 0)
@@ -845,6 +894,11 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                goto errout;
 
        if (!tc_skip_sw(fnew->flags)) {
+               if (!fold && fl_lookup(head, &fnew->mkey)) {
+                       err = -EEXIST;
+                       goto errout;
+               }
+
                err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
                                             head->ht_params);
                if (err)
@@ -860,6 +914,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                        goto errout;
        }
 
+       if (!tc_in_hw(fnew->flags))
+               fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+
        if (fold) {
                if (!tc_skip_sw(fold->flags))
                        rhashtable_remove_fast(&head->ht, &fold->ht_node,
@@ -878,11 +935,14 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
                list_add_tail_rcu(&fnew->list, &head->filters);
        }
 
+       kfree(tb);
        return 0;
 
 errout:
        tcf_exts_destroy(&fnew->exts);
        kfree(fnew);
+errout_tb:
+       kfree(tb);
        return err;
 }
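
Two points in the fl_change() rework above are easy to miss. First, the attribute table moves off the stack: TCA_FLOWER_MAX has grown large enough that an on-stack array of nlattr pointers is a real kernel-stack concern, so the function adopts the allocate/parse/free pattern below (condensed sketch, error handling trimmed):

    struct nlattr **tb;

    tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
    if (!tb)
            return -ENOBUFS;
    err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
    /* ... use tb ... */
    kfree(tb);      /* freed on success and on every error path */

Second, for software filters a duplicate key/mask is now rejected up front with -EEXIST via fl_lookup(), rather than surfacing later as an rhashtable_insert_fast() failure.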
 
@@ -1112,6 +1172,27 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                                  TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
                                  sizeof(key->icmp.code))))
                goto nla_put_failure;
+       else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
+                 key->basic.n_proto == htons(ETH_P_RARP)) &&
+                (fl_dump_key_val(skb, &key->arp.sip,
+                                 TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
+                                 TCA_FLOWER_KEY_ARP_SIP_MASK,
+                                 sizeof(key->arp.sip)) ||
+                 fl_dump_key_val(skb, &key->arp.tip,
+                                 TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
+                                 TCA_FLOWER_KEY_ARP_TIP_MASK,
+                                 sizeof(key->arp.tip)) ||
+                 fl_dump_key_val(skb, &key->arp.op,
+                                 TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
+                                 TCA_FLOWER_KEY_ARP_OP_MASK,
+                                 sizeof(key->arp.op)) ||
+                 fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
+                                 mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
+                                 sizeof(key->arp.sha)) ||
+                 fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
+                                 mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
+                                 sizeof(key->arp.tha))))
+               goto nla_put_failure;
 
        if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
            (fl_dump_key_val(skb, &key->enc_ipv4.src,
@@ -1153,7 +1234,8 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
        if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
                goto nla_put_failure;
 
-       nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags);
+       if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
+               goto nla_put_failure;
 
        if (tcf_exts_dump(skb, &f->exts))
                goto nla_put_failure;
index b12bc2abea931a7defd1e23eb86a20fe09e76388..224eb2c143462a39f3a05e78203344e44e6eb578 100644
@@ -56,6 +56,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
        struct net_device *dev = tp->q->dev_queue->dev;
        struct tc_to_netdev offload;
        struct tc_cls_matchall_offload mall_offload = {0};
+       int err;
 
        offload.type = TC_SETUP_MATCHALL;
        offload.cls_mall = &mall_offload;
@@ -63,8 +64,12 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
        offload.cls_mall->exts = &head->exts;
        offload.cls_mall->cookie = cookie;
 
-       return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
-                                            &offload);
+       err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
+                                           &offload);
+       if (!err)
+               head->flags |= TCA_CLS_FLAGS_IN_HW;
+
+       return err;
 }
 
 static void mall_destroy_hw_filter(struct tcf_proto *tp,
@@ -118,10 +123,12 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
        struct tcf_exts e;
        int err;
 
-       tcf_exts_init(&e, TCA_MATCHALL_ACT, 0);
+       err = tcf_exts_init(&e, TCA_MATCHALL_ACT, 0);
+       if (err)
+               return err;
        err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
        if (err < 0)
-               return err;
+               goto errout;
 
        if (tb[TCA_MATCHALL_CLASSID]) {
                head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
@@ -131,6 +138,9 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp,
        tcf_exts_change(tp, &head->exts, &e);
 
        return 0;
+errout:
+       tcf_exts_destroy(&e);
+       return err;
 }
 
 static int mall_change(struct net *net, struct sk_buff *in_skb,
@@ -166,7 +176,9 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
        if (!new)
                return -ENOBUFS;
 
-       tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
+       err = tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
+       if (err)
+               goto err_exts_init;
 
        if (!handle)
                handle = 1;
@@ -175,25 +187,31 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
 
        err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr);
        if (err)
-               goto errout;
+               goto err_set_parms;
 
        if (tc_should_offload(dev, tp, flags)) {
                err = mall_replace_hw_filter(tp, new, (unsigned long) new);
                if (err) {
                        if (tc_skip_sw(flags))
-                               goto errout;
+                               goto err_replace_hw_filter;
                        else
                                err = 0;
                }
        }
 
+       if (!tc_in_hw(new->flags))
+               new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+
        *arg = (unsigned long) head;
        rcu_assign_pointer(tp->root, new);
        if (head)
                call_rcu(&head->rcu, mall_destroy_rcu);
        return 0;
 
-errout:
+err_replace_hw_filter:
+err_set_parms:
+       tcf_exts_destroy(&new->exts);
+err_exts_init:
        kfree(new);
        return err;
 }
@@ -234,6 +252,9 @@ static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
            nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
                goto nla_put_failure;
 
+       if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
+               goto nla_put_failure;
+
        if (tcf_exts_dump(skb, &head->exts))
                goto nla_put_failure;
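
The matchall changes mirror flower's: tcf_exts_init() can now fail (it allocates the action array), so each call site gains an unwind path, offload success sets TCA_CLS_FLAGS_IN_HW, and the dump emits TCA_MATCHALL_FLAGS only when non-zero. The unwind shape this imposes, sketched with the names used above:

    err = tcf_exts_init(&e, TCA_MATCHALL_ACT, 0);
    if (err)
            return err;             /* nothing allocated yet */
    err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
    if (err < 0)
            goto errout;            /* must free what init allocated */
    /* ... */
    errout:
            tcf_exts_destroy(&e);
            return err;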
 
index ae83c3aec3082d3c4a1714c909f817835925a546..4dbe0c680fe6363a88ca47cb167dcae9327920d1 100644
@@ -334,7 +334,6 @@ static int u32_init(struct tcf_proto *tp)
        if (root_ht == NULL)
                return -ENOBUFS;
 
-       root_ht->divisor = 0;
        root_ht->refcnt++;
        root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
        root_ht->prio = tp->prio;
@@ -524,6 +523,10 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
 
        err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
                                            tp->protocol, &offload);
+
+       if (!err)
+               n->flags |= TCA_CLS_FLAGS_IN_HW;
+
        if (tc_skip_sw(flags))
                return err;
 
@@ -896,6 +899,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
                        return err;
                }
 
+               if (!tc_in_hw(new->flags))
+                       new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+
                u32_replace_knode(tp, tp_c, new);
                tcf_unbind_filter(tp, &n->res);
                call_rcu(&n->rcu, u32_delete_key_rcu);
@@ -1015,6 +1021,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
                if (err)
                        goto errhw;
 
+               if (!tc_in_hw(n->flags))
+                       n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+
                ins = &ht->ht[TC_U32_HASH(handle)];
                for (pins = rtnl_dereference(*ins); pins;
                     ins = &pins->next, pins = rtnl_dereference(*ins))
index d7b93429f0cca61ff489536b4247f31f3690fdd8..bcf49cd2278670197f2a7e9d4e9a62ae8d117468 100644
@@ -440,7 +440,6 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab)
 EXPORT_SYMBOL(qdisc_put_rtab);
 
 static LIST_HEAD(qdisc_stab_list);
-static DEFINE_SPINLOCK(qdisc_stab_lock);
 
 static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
        [TCA_STAB_BASE] = { .len = sizeof(struct tc_sizespec) },
@@ -474,20 +473,15 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
        if (tsize != s->tsize || (!tab && tsize > 0))
                return ERR_PTR(-EINVAL);
 
-       spin_lock(&qdisc_stab_lock);
-
        list_for_each_entry(stab, &qdisc_stab_list, list) {
                if (memcmp(&stab->szopts, s, sizeof(*s)))
                        continue;
                if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
                        continue;
                stab->refcnt++;
-               spin_unlock(&qdisc_stab_lock);
                return stab;
        }
 
-       spin_unlock(&qdisc_stab_lock);
-
        stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
        if (!stab)
                return ERR_PTR(-ENOMEM);
@@ -497,9 +491,7 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
        if (tsize > 0)
                memcpy(stab->data, tab, tsize * sizeof(u16));
 
-       spin_lock(&qdisc_stab_lock);
        list_add_tail(&stab->list, &qdisc_stab_list);
-       spin_unlock(&qdisc_stab_lock);
 
        return stab;
 }
@@ -514,14 +506,10 @@ void qdisc_put_stab(struct qdisc_size_table *tab)
        if (!tab)
                return;
 
-       spin_lock(&qdisc_stab_lock);
-
        if (--tab->refcnt == 0) {
                list_del(&tab->list);
                call_rcu_bh(&tab->rcu, stab_kfree_rcu);
        }
-
-       spin_unlock(&qdisc_stab_lock);
 }
 EXPORT_SYMBOL(qdisc_put_stab);
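
Dropping qdisc_stab_lock works because every path touching qdisc_stab_list (qdisc_get_stab() from qdisc create/change, qdisc_put_stab() from the destroy paths) already runs under the RTNL mutex, which serialises all qdisc configuration. If one wanted to document that invariant in code, a hypothetical assertion would look like:

    static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
    {
            ASSERT_RTNL();  /* illustrative: the list is RTNL-protected now */
            /* ... */
    }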
 
@@ -1019,6 +1007,8 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 
                return sch;
        }
+       /* ops->init() failed; call ->destroy() just as qdisc_create_dflt() does */
+       ops->destroy(sch);
 err_out3:
        dev_put(dev);
        kfree((char *) sch - sch->padded);
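
This ops->destroy() call centralises init-failure cleanup: qdisc_create() now behaves like qdisc_create_dflt() and tears the qdisc down itself when ->init() fails. That is why the hhf, sfq, mq and mqprio hunks below delete their own destroy-on-error calls; keeping them would run the destroy path twice. The contract, sketched:

    /* inside qdisc_create(), when ops->init(sch, ...) returns err < 0: */
    ops->destroy(sch);      /* the core does the cleanup, exactly once */
    /* ... so an individual qdisc's ->init() must only return the error,
     * leaving state that its ->destroy() can safely tear down. */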
@@ -1861,6 +1851,7 @@ int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 {
        __be16 protocol = tc_skb_protocol(skb);
 #ifdef CONFIG_NET_CLS_ACT
+       const int max_reclassify_loop = 4;
        const struct tcf_proto *old_tp = tp;
        int limit = 0;
 
@@ -1885,7 +1876,7 @@ reclassify:
        return TC_ACT_UNSPEC; /* signal: continue lookup */
 #ifdef CONFIG_NET_CLS_ACT
 reset:
-       if (unlikely(limit++ >= MAX_REC_LOOP)) {
+       if (unlikely(limit++ >= max_reclassify_loop)) {
                net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
                                       tp->q->ops->id, tp->prio & 0xffff,
                                       ntohs(tp->protocol));
@@ -1899,28 +1890,6 @@ reset:
 }
 EXPORT_SYMBOL(tc_classify);
 
-bool tcf_destroy(struct tcf_proto *tp, bool force)
-{
-       if (tp->ops->destroy(tp, force)) {
-               module_put(tp->ops->owner);
-               kfree_rcu(tp, rcu);
-               return true;
-       }
-
-       return false;
-}
-
-void tcf_destroy_chain(struct tcf_proto __rcu **fl)
-{
-       struct tcf_proto *tp;
-
-       while ((tp = rtnl_dereference(*fl)) != NULL) {
-               RCU_INIT_POINTER(*fl, tp->next);
-               tcf_destroy(tp, true);
-       }
-}
-EXPORT_SYMBOL(tcf_destroy_chain);
-
 #ifdef CONFIG_PROC_FS
 static int psched_show(struct seq_file *seq, void *v)
 {
index 481e4f12aeb4c05e17f6985764cd901c9fa59331..2209c2ddacbfb0b1f222cb593477891d7124d9e3 100644
@@ -15,6 +15,7 @@
 #include <linux/file.h>                /* for fput */
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 /*
  * The ATM queuing discipline provides a framework for invoking classifiers
index f1207582cbf3005f08af70275d6c1b217f60f26a..d6ca18dc04c3e9e72efedd44088e95118a06b711 100644
@@ -19,6 +19,7 @@
 #include <linux/skbuff.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 
 /*     Class-Based Queueing (CBQ) algorithm.
index 3b6d5bd691015f0dbfa285218e2453a8587b8043..3b86a97bc67c3e953cb181eddcb5c0c16bf3b27f 100644
@@ -16,6 +16,7 @@
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 #include <net/inet_ecn.h>
 #include <net/red.h>
 #include <net/flow_dissector.h>
index 1308bbf460f7ca7e496fe9d9c48da7eb2b0d282d..802ac7c2e5e87eed1341ba4c09d3e5d70bc75876 100644
@@ -13,6 +13,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/bitops.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 #include <net/dsfield.h>
 #include <net/inet_ecn.h>
 #include <asm/byteorder.h>
index a5ea0e9b6be485c383251636bbe681379f975ed1..9f3a884d15903fd9012c01b5eee802e02f9f709e 100644
@@ -23,6 +23,7 @@
 #include <linux/vmalloc.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 #include <net/codel.h>
 #include <net/codel_impl.h>
 #include <net/codel_qdisc.h>
@@ -57,7 +58,6 @@ struct fq_codel_sched_data {
        struct fq_codel_flow *flows;    /* Flows table [flows_cnt] */
        u32             *backlogs;      /* backlog table [flows_cnt] */
        u32             flows_cnt;      /* number of flows */
-       u32             perturbation;   /* hash perturbation */
        u32             quantum;        /* psched_mtu(qdisc_dev(sch)); */
        u32             drop_batch_size;
        u32             memory_limit;
@@ -75,9 +75,7 @@ struct fq_codel_sched_data {
 static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
                                  struct sk_buff *skb)
 {
-       u32 hash = skb_get_hash_perturb(skb, q->perturbation);
-
-       return reciprocal_scale(hash, q->flows_cnt);
+       return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
 }
 
 static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -482,7 +480,6 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
        q->memory_limit = 32 << 20; /* 32 MBytes */
        q->drop_batch_size = 64;
        q->quantum = psched_mtu(qdisc_dev(sch));
-       q->perturbation = prandom_u32();
        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
        codel_params_init(&q->cparams);
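
fq_codel now feeds skb_get_hash() straight into reciprocal_scale() instead of keeping a private hash perturbation, so the flow a packet maps to is stable across the qdisc's lifetime and consistent with the skb's flow hash used elsewhere. reciprocal_scale() itself avoids a divide by mapping a 32-bit hash onto [0, n) with a multiply-and-shift; a standalone sketch of the arithmetic:

    #include <stdint.h>

    /* maps val uniformly onto [0, ep_ro), assuming val is uniform on [0, 2^32) */
    static inline uint32_t reciprocal_scale_sketch(uint32_t val, uint32_t ep_ro)
    {
            return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

For example, with ep_ro = 1024 flows, a hash of 0x80000000 lands in flow 512.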
index 6eb9c8e88519a36fc3ef82be7c7f1dbe0ef1e13c..b052b27a984e39c244c94132f1162a7033e5cc63 100644
@@ -247,7 +247,7 @@ static inline int qdisc_restart(struct Qdisc *q, int *packets)
 
 void __qdisc_run(struct Qdisc *q)
 {
-       int quota = weight_p;
+       int quota = dev_tx_weight;
        int packets;
 
        while (qdisc_restart(q, &packets)) {
index e3d0458af17ba32cb203d4a5bed952baf9d22588..2fae8b5f1b80c017c4ae60df54c9143f82de4e9d 100644
@@ -627,7 +627,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
                        q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
                                                      sizeof(u32));
                        if (!q->hhf_arrays[i]) {
-                               hhf_destroy(sch);
+                               /* Note: hhf_destroy() will be called
+                                * by our caller.
+                                */
                                return -ENOMEM;
                        }
                }
@@ -638,7 +640,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
                        q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
                                                          BITS_PER_BYTE);
                        if (!q->hhf_valid_bits[i]) {
-                               hhf_destroy(sch);
+                               /* Note: hhf_destroy() will be called
+                                * by our caller.
+                                */
                                return -ENOMEM;
                        }
                }
index 760f39e7caeeb91b6c34d561f64f1ed97c884a32..4cd5fb134bc9e2dbcdd61b51fb951f94301ed54c 100644
@@ -40,6 +40,7 @@
 #include <net/netlink.h>
 #include <net/sch_generic.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 /* HTB algorithm.
     Author: devik@cdi.cz
index 8fe6999b642ac3c039f40621f4ca123d129718c4..3bab5f66c39291fb4f20888dbdb779dc7edc2776 100644
@@ -16,6 +16,7 @@
 
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
 {
index 2bc8d7f8df161005bc89245ca5ebc52f3360e3af..20b7f1646f69270e08d8b7588759a0146f262e89 100644
@@ -52,7 +52,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
        /* pre-allocate qdiscs, attachment can't fail */
        priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
                               GFP_KERNEL);
-       if (priv->qdiscs == NULL)
+       if (!priv->qdiscs)
                return -ENOMEM;
 
        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
@@ -60,18 +60,14 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
                qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(ntx + 1)));
-               if (qdisc == NULL)
-                       goto err;
+               if (!qdisc)
+                       return -ENOMEM;
                priv->qdiscs[ntx] = qdisc;
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        }
 
        sch->flags |= TCQ_F_MQROOT;
        return 0;
-
-err:
-       mq_destroy(sch);
-       return -ENOMEM;
 }
 
 static void mq_attach(struct Qdisc *sch)
index b5c502c781439c0440c088b000a7b859271e50b2..922683418e53853cb71747d8d30ab0e4a989254b 100644
@@ -118,10 +118,8 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
        /* pre-allocate qdisc, attachment can't fail */
        priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
                               GFP_KERNEL);
-       if (priv->qdiscs == NULL) {
-               err = -ENOMEM;
-               goto err;
-       }
+       if (!priv->qdiscs)
+               return -ENOMEM;
 
        for (i = 0; i < dev->num_tx_queues; i++) {
                dev_queue = netdev_get_tx_queue(dev, i);
@@ -129,10 +127,9 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
                                          get_default_qdisc_ops(dev, i),
                                          TC_H_MAKE(TC_H_MAJ(sch->handle),
                                                    TC_H_MIN(i + 1)));
-               if (qdisc == NULL) {
-                       err = -ENOMEM;
-                       goto err;
-               }
+               if (!qdisc)
+                       return -ENOMEM;
+
                priv->qdiscs[i] = qdisc;
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
        }
@@ -148,7 +145,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
                priv->hw_owned = 1;
                err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc);
                if (err)
-                       goto err;
+                       return err;
        } else {
                netdev_set_num_tc(dev, qopt->num_tc);
                for (i = 0; i < qopt->num_tc; i++)
@@ -162,10 +159,6 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
 
        sch->flags |= TCQ_F_MQROOT;
        return 0;
-
-err:
-       mqprio_destroy(sch);
-       return err;
 }
 
 static void mqprio_attach(struct Qdisc *sch)
index 9ffbb025b37e70bc526f54735a9b16573efb0ff4..e7839a0d0eaa52572f675fdb1dfc590c2a70ac76 100644
@@ -25,7 +25,7 @@
 #include <linux/skbuff.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
-
+#include <net/pkt_cls.h>
 
 struct multiq_sched_data {
        u16 bands;
index bcfadfdea8e0928ba6f9615e919c5411dfd948d3..c8bb62a1e7449344a0fd81241fe0102ea2f9c0f9 100644
@@ -626,7 +626,7 @@ deliver:
                         * If it's at ingress let's pretend the delay is
                         * from the network (tstamp will be updated).
                         */
-                       if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
+                       if (skb->tc_redirected && skb->tc_from_ingress)
                                skb->tstamp = 0;
 #endif
 
index 8f575899adfa61cb985c0c645d265e1c9381c9d9..d4d7db267b6edfa56582ca4a588590e0ded9fe66 100644
@@ -20,7 +20,7 @@
 #include <linux/skbuff.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
-
+#include <net/pkt_cls.h>
 
 struct prio_sched_data {
        int bands;
index 20a350bd1b1dc8283c7e352ce0a953ce0eea2544..fe6963d2151956c508b510edec680b89201173ce 100644
@@ -25,6 +25,7 @@
 #include <linux/jhash.h>
 #include <net/ip.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 #include <net/inet_ecn.h>
 
 /*
index 7f195ed4d568c14c618c59f8d746cdd29f8dae21..42e8c8615e6563a2deabbb3c3437e3985d01ae14 100644
@@ -23,6 +23,7 @@
 #include <linux/vmalloc.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 #include <net/red.h>
 
 
@@ -742,9 +743,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
        q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
        q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
        if (!q->ht || !q->slots) {
-               sfq_destroy(sch);
+               /* Note: sfq_destroy() will be called by our caller */
                return -ENOMEM;
        }
+
        for (i = 0; i < q->divisor; i++)
                q->ht[i] = SFQ_EMPTY_SLOT;
 
index b0196366d58dd751972d65baf32b071745826088..9fe6b427afed01dc383e5fce1908c1dce62a7ca5 100644
@@ -401,8 +401,8 @@ static int teql_master_close(struct net_device *dev)
        return 0;
 }
 
-static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
-                                                    struct rtnl_link_stats64 *stats)
+static void teql_master_stats64(struct net_device *dev,
+                               struct rtnl_link_stats64 *stats)
 {
        struct teql_master *m = netdev_priv(dev);
 
@@ -410,7 +410,6 @@ static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
        stats->tx_bytes         = m->tx_bytes;
        stats->tx_errors        = m->tx_errors;
        stats->tx_dropped       = m->tx_dropped;
-       return stats;
 }
 
 static int teql_master_mtu(struct net_device *dev, int new_mtu)
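
teql is adapting here to the tree-wide change that makes ndo_get_stats64 return void: the core always passes in the storage, so handing the pointer back was redundant. New-style implementations simply fill the structure in place (illustrative driver; foo_priv is a hypothetical name):

    static void foo_get_stats64(struct net_device *dev,
                                struct rtnl_link_stats64 *stats)
    {
            struct foo_priv *p = netdev_priv(dev);

            stats->tx_bytes  = p->tx_bytes;
            stats->tx_errors = p->tx_errors;
    }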
index 6c4f7496cec612b52e1e69664a209b4d58763be5..70f1b570bab9764d692f1c2e605d76d056cda2cd 100644
@@ -11,7 +11,7 @@ sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
          transport.o chunk.o sm_make_chunk.o ulpevent.o \
          inqueue.o outqueue.o ulpqueue.o \
          tsnmap.o bind_addr.o socket.o primitive.o \
-         output.o input.o debug.o ssnmap.o auth.o \
+         output.o input.o debug.o stream.o auth.o \
          offload.o
 
 sctp_probe-y := probe.o
index d3cc30c25c41091c2bf18022506dff4145d29944..2a6835b4562b61cff52425a530524f1c48bc7919 100644
@@ -207,6 +207,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
         * association to the same value as the initial TSN.
         */
        asoc->addip_serial = asoc->c.initial_tsn;
+       asoc->strreset_outseq = asoc->c.initial_tsn;
 
        INIT_LIST_HEAD(&asoc->addip_chunk_list);
        INIT_LIST_HEAD(&asoc->asconf_ack_list);
@@ -269,6 +270,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
 
        asoc->active_key_id = ep->active_key_id;
        asoc->prsctp_enable = ep->prsctp_enable;
+       asoc->reconf_enable = ep->reconf_enable;
+       asoc->strreset_enable = ep->strreset_enable;
 
        /* Save the hmacs and chunks list into this association */
        if (ep->auth_hmacs_list)
@@ -358,8 +361,11 @@ void sctp_association_free(struct sctp_association *asoc)
 
        sctp_tsnmap_free(&asoc->peer.tsn_map);
 
-       /* Free ssnmap storage. */
-       sctp_ssnmap_free(asoc->ssnmap);
+       /* Free stream information. */
+       sctp_stream_free(asoc->stream);
+
+       if (asoc->strreset_chunk)
+               sctp_chunk_free(asoc->strreset_chunk);
 
        /* Clean up the bound address list. */
        sctp_bind_addr_free(&asoc->base.bind_addr);
@@ -519,6 +525,12 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
        if (asoc->peer.last_data_from == peer)
                asoc->peer.last_data_from = transport;
 
+       if (asoc->strreset_chunk &&
+           asoc->strreset_chunk->transport == peer) {
+               asoc->strreset_chunk->transport = transport;
+               sctp_transport_reset_reconf_timer(transport);
+       }
+
        /* If we remove the transport an INIT was last sent to, set it to
         * NULL. Combined with the update of the retran path above, this
         * will cause the next INIT to be sent to the next available
@@ -820,8 +832,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
                if (transport->state != SCTP_UNCONFIRMED)
                        transport->state = SCTP_INACTIVE;
                else {
-                       dst_release(transport->dst);
-                       transport->dst = NULL;
+                       sctp_transport_dst_release(transport);
                        ulp_notify = false;
                }
 
@@ -1137,7 +1148,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
                /* Reinitialize SSN for both local streams
                 * and peer's streams.
                 */
-               sctp_ssnmap_clear(asoc->ssnmap);
+               sctp_stream_clear(asoc->stream);
 
                /* Flush the ULP reassembly and ordered queue.
                 * Any data there will now be stale and will
@@ -1162,10 +1173,9 @@ void sctp_assoc_update(struct sctp_association *asoc,
 
                asoc->ctsn_ack_point = asoc->next_tsn - 1;
                asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
-               if (!asoc->ssnmap) {
-                       /* Move the ssnmap. */
-                       asoc->ssnmap = new->ssnmap;
-                       new->ssnmap = NULL;
+               if (!asoc->stream) {
+                       asoc->stream = new->stream;
+                       new->stream = NULL;
                }
 
                if (!asoc->assoc_id) {
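
The association changes above are part of replacing sctp_ssnmap with the richer sctp_stream, which the stream-reconfiguration work (RE-CONFIG, RFC 6525) needs: besides the per-stream SSNs it tracks per-outgoing-stream state such as SCTP_STREAM_CLOSED while a reset is pending (see the sctp_outq_flush hunk below). Roughly the shape introduced by this series (a sketch; consult include/net/sctp/structs.h in this tree for the authoritative layout):

    struct sctp_stream_out {
            __u16 ssn;      /* next stream sequence number to assign */
            __u8  state;    /* e.g. SCTP_STREAM_OPEN / SCTP_STREAM_CLOSED */
    };

    struct sctp_stream_in {
            __u16 ssn;
    };

    struct sctp_stream {
            struct sctp_stream_out *out;
            struct sctp_stream_in  *in;
            __u16 outcnt;
            __u16 incnt;
    };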
index 615f0ddd41dfb1ff46a9d4e564716de8e7b60ea6..e3621cb4827fadb5f5cb41ebe8455dfa3300a765 100644
@@ -165,14 +165,12 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
                                            struct sctp_sndrcvinfo *sinfo,
                                            struct iov_iter *from)
 {
-       int max, whole, i, offset, over, err;
-       int len, first_len;
-       int max_data;
+       size_t len, first_len, max_data, remaining;
+       size_t msg_len = iov_iter_count(from);
+       struct list_head *pos, *temp;
        struct sctp_chunk *chunk;
        struct sctp_datamsg *msg;
-       struct list_head *pos, *temp;
-       size_t msg_len = iov_iter_count(from);
-       __u8 frag;
+       int err;
 
        msg = sctp_datamsg_new(GFP_KERNEL);
        if (!msg)
@@ -185,7 +183,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
            (SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags) ||
             !SCTP_PR_POLICY(sinfo->sinfo_flags)))
                msg->expires_at = jiffies +
-                                   msecs_to_jiffies(sinfo->sinfo_timetolive);
+                                 msecs_to_jiffies(sinfo->sinfo_timetolive);
 
        /* This is the biggest possible DATA chunk that can fit into
         * the packet
@@ -195,7 +193,6 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
                   sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk);
        max_data = SCTP_TRUNC4(max_data);
 
-       max = asoc->frag_point;
        /* If the peer requested that we authenticate DATA chunks
         * we need to account for bundling of the AUTH chunks along with
         * DATA.
@@ -208,12 +205,11 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
                                              hmac_desc->hmac_len);
        }
 
-       /* Now, check if we need to reduce our max */
-       if (max > max_data)
-               max = max_data;
+       /* Check what's our max considering the above */
+       max_data = min_t(size_t, max_data, asoc->frag_point);
 
-       whole = 0;
-       first_len = max;
+       /* Set first_len and then account for possible bundles on first frag */
+       first_len = max_data;
 
        /* Check to see if we have a pending SACK and try to let it be bundled
         * with this message.  Do this if we don't have any data queued already.
@@ -224,40 +220,38 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
        if (timer_pending(&asoc->timers[SCTP_EVENT_TIMEOUT_SACK]) &&
            asoc->outqueue.out_qlen == 0 &&
            list_empty(&asoc->outqueue.retransmit) &&
-           msg_len > max)
-               max_data -= SCTP_PAD4(sizeof(sctp_sack_chunk_t));
+           msg_len > max_data)
+               first_len -= SCTP_PAD4(sizeof(sctp_sack_chunk_t));
 
        /* Encourage Cookie-ECHO bundling. */
        if (asoc->state < SCTP_STATE_COOKIE_ECHOED)
-               max_data -= SCTP_ARBITRARY_COOKIE_ECHO_LEN;
-
-       /* Now that we adjusted completely, reset first_len */
-       if (first_len > max_data)
-               first_len = max_data;
+               first_len -= SCTP_ARBITRARY_COOKIE_ECHO_LEN;
 
        /* Account for a different sized first fragment */
        if (msg_len >= first_len) {
-               msg_len -= first_len;
-               whole = 1;
                msg->can_delay = 0;
-       }
-
-       /* How many full sized?  How many bytes leftover? */
-       whole += msg_len / max;
-       over = msg_len % max;
-       offset = 0;
-
-       if ((whole > 1) || (whole && over))
                SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
+       } else {
+               /* Which may be the only one... */
+               first_len = msg_len;
+       }
 
-       /* Create chunks for all the full sized DATA chunks. */
-       for (i = 0, len = first_len; i < whole; i++) {
-               frag = SCTP_DATA_MIDDLE_FRAG;
+       /* Create chunks for all DATA chunks. */
+       for (remaining = msg_len; remaining; remaining -= len) {
+               u8 frag = SCTP_DATA_MIDDLE_FRAG;
 
-               if (0 == i)
+               if (remaining == msg_len) {
+                       /* First frag, which may also be the last */
                        frag |= SCTP_DATA_FIRST_FRAG;
+                       len = first_len;
+               } else {
+                       /* Middle frags */
+                       len = max_data;
+               }
 
-               if ((i == (whole - 1)) && !over) {
+               if (len >= remaining) {
+                       /* Last frag, which may also be the first */
+                       len = remaining;
                        frag |= SCTP_DATA_LAST_FRAG;
 
                        /* The application requests to set the I-bit of the
@@ -271,7 +265,6 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 
                chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag,
                                                 0, GFP_KERNEL);
-
                if (!chunk) {
                        err = -ENOMEM;
                        goto errout;
@@ -282,45 +275,8 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
                        goto errout_chunk_free;
 
                /* Put the chunk->skb back into the form expected by send.  */
-               __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
-                          - (__u8 *)chunk->skb->data);
-
-               sctp_datamsg_assign(msg, chunk);
-               list_add_tail(&chunk->frag_list, &msg->chunks);
-
-               /* The first chunk, the first chunk was likely short
-                * to allow bundling, so reset to full size.
-                */
-               if (0 == i)
-                       len = max;
-       }
-
-       /* .. now the leftover bytes. */
-       if (over) {
-               if (!whole)
-                       frag = SCTP_DATA_NOT_FRAG;
-               else
-                       frag = SCTP_DATA_LAST_FRAG;
-
-               if ((sinfo->sinfo_flags & SCTP_EOF) ||
-                   (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY))
-                       frag |= SCTP_DATA_SACK_IMM;
-
-               chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag,
-                                                0, GFP_KERNEL);
-
-               if (!chunk) {
-                       err = -ENOMEM;
-                       goto errout;
-               }
-
-               err = sctp_user_addto_chunk(chunk, over, from);
-
-               /* Put the chunk->skb back into the form expected by send.  */
-               __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr
-                          - (__u8 *)chunk->skb->data);
-               if (err < 0)
-                       goto errout_chunk_free;
+               __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr -
+                                      chunk->skb->data);
 
                sctp_datamsg_assign(msg, chunk);
                list_add_tail(&chunk->frag_list, &msg->chunks);
@@ -338,6 +294,7 @@ errout:
                sctp_chunk_free(chunk);
        }
        sctp_datamsg_put(msg);
+
        return ERR_PTR(err);
 }
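
The rewritten sctp_datamsg_from_user() replaces the old whole/over bookkeeping (a count of full-size fragments plus a leftover) with a single loop driven by the bytes remaining, which also removes the duplicated chunk-building code for the trailing fragment. The control flow, condensed into one sketch using the names from this diff:

    /* first_len may be smaller than max_data, to leave room for a bundled
     * SACK or COOKIE-ECHO on the first fragment */
    for (remaining = msg_len; remaining; remaining -= len) {
            u8 frag = SCTP_DATA_MIDDLE_FRAG;

            if (remaining == msg_len) {     /* first fragment ... */
                    frag |= SCTP_DATA_FIRST_FRAG;
                    len = first_len;
            } else {
                    len = max_data;
            }
            if (len >= remaining) {         /* ... and/or last fragment */
                    len = remaining;
                    frag |= SCTP_DATA_LAST_FRAG;
            }
            /* build a chunk of len bytes carrying frag, and queue it */
    }

A message that fits in first_len makes exactly one pass and gets both the FIRST and LAST flags, i.e. an unfragmented DATA chunk.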
 
index 95d7b15dad2143dc6a74003125be719e72aeee24..2e47eb2f05cbfdbad49f819e4acc4513622c1005 100644
@@ -159,6 +159,7 @@ static const char *const sctp_timer_tbl[] = {
        "TIMEOUT_T4_RTO",
        "TIMEOUT_T5_SHUTDOWN_GUARD",
        "TIMEOUT_HEARTBEAT",
+       "TIMEOUT_RECONF",
        "TIMEOUT_SACK",
        "TIMEOUT_AUTOCLOSE",
 };
@@ -166,7 +167,9 @@ static const char *const sctp_timer_tbl[] = {
 /* Lookup timer debug name. */
 const char *sctp_tname(const sctp_subtype_t id)
 {
-       if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX)
+       BUILD_BUG_ON(SCTP_EVENT_TIMEOUT_MAX + 1 != ARRAY_SIZE(sctp_timer_tbl));
+
+       if (id.timeout < ARRAY_SIZE(sctp_timer_tbl))
                return sctp_timer_tbl[id.timeout];
        return "unknown_timer";
 }
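
Adding TIMEOUT_RECONF to the name table came with a useful hardening: the BUILD_BUG_ON turns any future enum/table drift into a compile error, and the bounds check switched from the enum maximum to ARRAY_SIZE so the two can never disagree. The idiom, generalised with hypothetical names:

    static const char *const foo_name_tbl[] = { "A", "B", "C" };

    /* fails to compile if the enum grows without the table following */
    BUILD_BUG_ON(FOO_MAX + 1 != ARRAY_SIZE(foo_name_tbl));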
index 410ddc1e344389cae97cb86327309516b0487c96..8c589230794f9394406d2a0ad2157b7a47d75757 100644
@@ -164,6 +164,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
        ep->auth_hmacs_list = auth_hmacs;
        ep->auth_chunk_list = auth_chunks;
        ep->prsctp_enable = net->sctp.prsctp_enable;
+       ep->reconf_enable = net->sctp.reconf_enable;
 
        return ep;
 
index 458e506ef84bae3c53c239d6cf89a9349faafb11..704ad19c1565fe5d8a8aad8e73be124f4c21e0f1 100644
@@ -1229,13 +1229,26 @@ static struct sctp_association *__sctp_rcv_lookup(struct net *net,
        struct sctp_association *asoc;
 
        asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
+       if (asoc)
+               goto out;
 
        /* Further lookup for INIT/INIT-ACK packets.
         * SCTP Implementors Guide, 2.18 Handling of address
         * parameters within the INIT or INIT-ACK.
         */
-       if (!asoc)
-               asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp);
+       asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp);
+       if (asoc)
+               goto out;
 
+       if (paddr->sa.sa_family == AF_INET)
+               pr_debug("sctp: asoc not found for src:%pI4:%d dst:%pI4:%d\n",
+                        &laddr->v4.sin_addr, ntohs(laddr->v4.sin_port),
+                        &paddr->v4.sin_addr, ntohs(paddr->v4.sin_port));
+       else
+               pr_debug("sctp: asoc not found for src:%pI6:%d dst:%pI6:%d\n",
+                        &laddr->v6.sin6_addr, ntohs(laddr->v6.sin6_port),
+                        &paddr->v6.sin6_addr, ntohs(paddr->v6.sin6_port));
+
+out:
        return asoc;
 }
index 64dfd35ccdcccbf35b2f6273565f2dbf89f941c5..063baac5b9fe4048e9d7b41e848a33f0f73c61d4 100644
@@ -413,22 +413,20 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
 static void sctp_v6_from_skb(union sctp_addr *addr, struct sk_buff *skb,
                             int is_saddr)
 {
-       __be16 *port;
-       struct sctphdr *sh;
+       /* Always called on head skb, so this is safe */
+       struct sctphdr *sh = sctp_hdr(skb);
+       struct sockaddr_in6 *sa = &addr->v6;
 
-       port = &addr->v6.sin6_port;
        addr->v6.sin6_family = AF_INET6;
        addr->v6.sin6_flowinfo = 0; /* FIXME */
        addr->v6.sin6_scope_id = ((struct inet6_skb_parm *)skb->cb)->iif;
 
-       /* Always called on head skb, so this is safe */
-       sh = sctp_hdr(skb);
        if (is_saddr) {
-               *port  = sh->source;
-               addr->v6.sin6_addr = ipv6_hdr(skb)->saddr;
+               sa->sin6_port = sh->source;
+               sa->sin6_addr = ipv6_hdr(skb)->saddr;
        } else {
-               *port = sh->dest;
-               addr->v6.sin6_addr = ipv6_hdr(skb)->daddr;
+               sa->sin6_port = sh->dest;
+               sa->sin6_addr = ipv6_hdr(skb)->daddr;
        }
 }
 
index 40e7fac96c41123eef4c1197b472f0a1051752cd..105ac3327b289cbb88494c116a3a3ffa32e4b60b 100644
@@ -51,7 +51,6 @@ SCTP_DBG_OBJCNT(bind_addr);
 SCTP_DBG_OBJCNT(bind_bucket);
 SCTP_DBG_OBJCNT(chunk);
 SCTP_DBG_OBJCNT(addr);
-SCTP_DBG_OBJCNT(ssnmap);
 SCTP_DBG_OBJCNT(datamsg);
 SCTP_DBG_OBJCNT(keys);
 
@@ -67,7 +66,6 @@ static sctp_dbg_objcnt_entry_t sctp_dbg_objcnt[] = {
        SCTP_DBG_OBJCNT_ENTRY(bind_addr),
        SCTP_DBG_OBJCNT_ENTRY(bind_bucket),
        SCTP_DBG_OBJCNT_ENTRY(addr),
-       SCTP_DBG_OBJCNT_ENTRY(ssnmap),
        SCTP_DBG_OBJCNT_ENTRY(datamsg),
        SCTP_DBG_OBJCNT_ENTRY(keys),
 };
index f5320a87341e160d46b1160edf4c38b569e7e79b..814eac047467c5b9fe8fdb4dd6b21a57812b3e1f 100644
@@ -81,8 +81,8 @@ static void sctp_packet_reset(struct sctp_packet *packet)
 /* Config a packet.
  * This appears to be a followup set of initializations.
  */
-struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
-                                      __u32 vtag, int ecn_capable)
+void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
+                       int ecn_capable)
 {
        struct sctp_transport *tp = packet->transport;
        struct sctp_association *asoc = tp->asoc;
@@ -123,14 +123,12 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
                if (chunk)
                        sctp_packet_append_chunk(packet, chunk);
        }
-
-       return packet;
 }
 
 /* Initialize the packet structure. */
-struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
-                                    struct sctp_transport *transport,
-                                    __u16 sport, __u16 dport)
+void sctp_packet_init(struct sctp_packet *packet,
+                     struct sctp_transport *transport,
+                     __u16 sport, __u16 dport)
 {
        struct sctp_association *asoc = transport->asoc;
        size_t overhead;
@@ -151,8 +149,6 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
        packet->overhead = overhead;
        sctp_packet_reset(packet);
        packet->vtag = 0;
-
-       return packet;
 }
 
 /* Free a packet.  */
@@ -550,6 +546,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
        struct sctp_association *asoc = tp->asoc;
        struct sctp_chunk *chunk, *tmp;
        int pkt_count, gso = 0;
+       int confirm;
        struct dst_entry *dst;
        struct sk_buff *head;
        struct sctphdr *sh;
@@ -628,7 +625,14 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
                        asoc->peer.last_sent_to = tp;
        }
        head->ignore_df = packet->ipfragok;
-       tp->af_specific->sctp_xmit(head, tp);
+       confirm = tp->dst_pending_confirm;
+       if (confirm)
+               skb_set_dst_pending_confirm(head, 1);
+       /* neighbour should be confirmed on successful transmission or
+        * positive error
+        */
+       if (tp->af_specific->sctp_xmit(head, tp) >= 0 && confirm)
+               tp->dst_pending_confirm = 0;
 
 out:
        list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
index 34efaa4ef2f6acfbed9b490f948f214e91a5606c..db352e5d61f8980dc461a162959643d872997217 100644
@@ -915,22 +915,28 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                case SCTP_CID_ECN_ECNE:
                case SCTP_CID_ASCONF:
                case SCTP_CID_FWD_TSN:
+               case SCTP_CID_RECONF:
                        status = sctp_packet_transmit_chunk(packet, chunk,
                                                            one_packet, gfp);
                        if (status  != SCTP_XMIT_OK) {
                                /* put the chunk back */
                                list_add(&chunk->list, &q->control_chunk_list);
-                       } else {
-                               asoc->stats.octrlchunks++;
-                               /* PR-SCTP C5) If a FORWARD TSN is sent, the
-                                * sender MUST assure that at least one T3-rtx
-                                * timer is running.
-                                */
-                               if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
-                                       sctp_transport_reset_t3_rtx(transport);
-                                       transport->last_time_sent = jiffies;
-                               }
+                               break;
+                       }
+
+                       asoc->stats.octrlchunks++;
+                       /* PR-SCTP C5) If a FORWARD TSN is sent, the
+                        * sender MUST assure that at least one T3-rtx
+                        * timer is running.
+                        */
+                       if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+                               sctp_transport_reset_t3_rtx(transport);
+                               transport->last_time_sent = jiffies;
                        }
+
+                       if (chunk == asoc->strreset_chunk)
+                               sctp_transport_reset_reconf_timer(transport);
+
                        break;
 
                default:
@@ -1016,6 +1022,8 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 
                /* Finally, transmit new packets.  */
                while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
+                       __u32 sid = ntohs(chunk->subh.data_hdr->stream);
+
                        /* RFC 2960 6.5 Every DATA chunk MUST carry a valid
                         * stream identifier.
                         */
@@ -1038,6 +1046,11 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                                continue;
                        }
 
+                       if (asoc->stream->out[sid].state == SCTP_STREAM_CLOSED) {
+                               sctp_outq_head_data(q, chunk);
+                               goto sctp_flush_out;
+                       }
+
                        /* If there is a specified transport, use it.
                         * Otherwise, we want to use the active path.
                         */
@@ -1641,7 +1654,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
 
                if (forward_progress) {
                        if (transport->dst)
-                               dst_confirm(transport->dst);
+                               sctp_transport_dst_confirm(transport);
                }
        }
 
index ab8d9f96a177da9d900e01568cafc56b6b4cf1cb..f0553a022859235ffed49dc190c319995a143798 100644
@@ -211,3 +211,6 @@ DECLARE_PRIMITIVE(REQUESTHEARTBEAT);
 */
 
 DECLARE_PRIMITIVE(ASCONF);
+
+/* RE-CONFIG 5.1 */
+DECLARE_PRIMITIVE(RECONF);
index 616a9428e0c4f3ba2b2cf910f339074f79488e62..8227bbbd077a4ad04df53886558d34c4d6a3daea 100644
@@ -237,23 +237,19 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
 static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb,
                             int is_saddr)
 {
-       void *from;
-       __be16 *port;
-       struct sctphdr *sh;
+       /* Always called on head skb, so this is safe */
+       struct sctphdr *sh = sctp_hdr(skb);
+       struct sockaddr_in *sa = &addr->v4;
 
-       port = &addr->v4.sin_port;
        addr->v4.sin_family = AF_INET;
 
-       /* Always called on head skb, so this is safe */
-       sh = sctp_hdr(skb);
        if (is_saddr) {
-               *port  = sh->source;
-               from = &ip_hdr(skb)->saddr;
+               sa->sin_port = sh->source;
+               sa->sin_addr.s_addr = ip_hdr(skb)->saddr;
        } else {
-               *port = sh->dest;
-               from = &ip_hdr(skb)->daddr;
+               sa->sin_port = sh->dest;
+               sa->sin_addr.s_addr = ip_hdr(skb)->daddr;
        }
-       memcpy(&addr->v4.sin_addr.s_addr, from, sizeof(struct in_addr));
 }
 
 /* Initialize an sctp_addr from a socket. */
@@ -1262,6 +1258,9 @@ static int __net_init sctp_defaults_init(struct net *net)
        /* Enable PR-SCTP by default. */
        net->sctp.prsctp_enable = 1;
 
+       /* Disable RECONF by default. */
+       net->sctp.reconf_enable = 0;
+
        /* Disable AUTH by default. */
        net->sctp.auth_enable = 0;
 
index 9e9690b7afe118636eb64751f9c58637be56c5e8..7f8dbf2c6ceed0ef463781531fe157e7241e9610 100644
@@ -270,6 +270,11 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
                num_ext += 2;
        }
 
+       if (asoc->reconf_enable) {
+               extensions[num_ext] = SCTP_CID_RECONF;
+               num_ext += 1;
+       }
+
        if (sp->adaptation_ind)
                chunksize += sizeof(aiparam);
 
@@ -434,6 +439,11 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
                num_ext += 2;
        }
 
+       if (asoc->peer.reconf_capable) {
+               extensions[num_ext] = SCTP_CID_RECONF;
+               num_ext += 1;
+       }
+
        if (sp->adaptation_ind)
                chunksize += sizeof(aiparam);
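
These two hunks advertise RECONF during association setup: an INIT lists SCTP_CID_RECONF among its supported extensions when the local endpoint enables reconf, and the INIT-ACK echoes it for a reconf-capable peer. The negotiation closes in sctp_process_ext_param() further down; in shape, the capability only becomes usable when both sides opt in:

    case SCTP_CID_RECONF:
            if (asoc->reconf_enable && !asoc->peer.reconf_capable)
                    asoc->peer.reconf_capable = 1;
            break;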
 
@@ -1536,7 +1546,7 @@ void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
 
        /* All fragments will be on the same stream */
        sid = ntohs(chunk->subh.data_hdr->stream);
-       stream = &chunk->asoc->ssnmap->out;
+       stream = chunk->asoc->stream;
 
        /* Now assign the sequence number to the entire message.
         * All fragments must have the same stream sequence number.
@@ -1547,9 +1557,9 @@ void sctp_chunk_assign_ssn(struct sctp_chunk *chunk)
                        ssn = 0;
                } else {
                        if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG)
-                               ssn = sctp_ssn_next(stream, sid);
+                               ssn = sctp_ssn_next(stream, out, sid);
                        else
-                               ssn = sctp_ssn_peek(stream, sid);
+                               ssn = sctp_ssn_peek(stream, out, sid);
                }
 
                lchunk->subh.data_hdr->ssn = htons(ssn);
@@ -1844,6 +1854,7 @@ no_hmac:
        retval->next_tsn = retval->c.initial_tsn;
        retval->ctsn_ack_point = retval->next_tsn - 1;
        retval->addip_serial = retval->c.initial_tsn;
+       retval->strreset_outseq = retval->c.initial_tsn;
        retval->adv_peer_ack_point = retval->ctsn_ack_point;
        retval->peer.prsctp_capable = retval->c.prsctp_capable;
        retval->peer.adaptation_ind = retval->c.adaptation_ind;
@@ -2011,6 +2022,11 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
 
        for (i = 0; i < num_ext; i++) {
                switch (param.ext->chunks[i]) {
+               case SCTP_CID_RECONF:
+                       if (asoc->reconf_enable &&
+                           !asoc->peer.reconf_capable)
+                               asoc->peer.reconf_capable = 1;
+                       break;
                case SCTP_CID_FWD_TSN:
                        if (asoc->prsctp_enable && !asoc->peer.prsctp_capable)
                                asoc->peer.prsctp_capable = 1;
@@ -2387,6 +2403,8 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
        asoc->peer.i.initial_tsn =
                ntohl(peer_init->init_hdr.initial_tsn);
 
+       asoc->strreset_inseq = asoc->peer.i.initial_tsn;
+
        /* Apply the upper bounds for output streams based on peer's
         * number of inbound streams.
         */
@@ -2444,9 +2462,9 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
        if (!asoc->temp) {
                int error;
 
-               asoc->ssnmap = sctp_ssnmap_new(asoc->c.sinit_max_instreams,
+               asoc->stream = sctp_stream_new(asoc->c.sinit_max_instreams,
                                               asoc->c.sinit_num_ostreams, gfp);
-               if (!asoc->ssnmap)
+               if (!asoc->stream)
                        goto clean_up;
 
                error = sctp_assoc_set_id(asoc, gfp);
@@ -3210,7 +3228,6 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
        union sctp_params param;
        sctp_addiphdr_t         *hdr;
        union sctp_addr_param   *addr_param;
-       sctp_addip_param_t      *asconf_param;
        struct sctp_chunk       *asconf_ack;
        __be16  err_code;
        int     length = 0;
@@ -3230,7 +3247,6 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
         * asconf parameter.
         */
        length = ntohs(addr_param->p.length);
-       asconf_param = (void *)addr_param + length;
        chunk_len -= length;
 
        /* create an ASCONF_ACK chunk.
@@ -3317,8 +3333,7 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
                local_bh_enable();
                list_for_each_entry(transport, &asoc->peer.transport_addr_list,
                                transports) {
-                       dst_release(transport->dst);
-                       transport->dst = NULL;
+                       sctp_transport_dst_release(transport);
                }
                break;
        case SCTP_PARAM_DEL_IP:
@@ -3332,8 +3347,7 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
                local_bh_enable();
                list_for_each_entry(transport, &asoc->peer.transport_addr_list,
                                transports) {
-                       dst_release(transport->dst);
-                       transport->dst = NULL;
+                       sctp_transport_dst_release(transport);
                }
                break;
        default:
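
The final hunk below adds the RE-CONFIG chunk builders. One detail worth spelling out in sctp_make_strreset_req(): the out/in booleans are multiplied into the parameter lengths, so a disabled direction contributes zero bytes, and the arithmetic lines up with the wire format shown in the ASCII art. With N = stream_num:

    outlen = sizeof(outreq) + 2*N = 16 + 2*N  /* RE-CONFIG 4.1 Parameter Length */
    inlen  = sizeof(inreq)  + 2*N =  8 + 2*N  /* RE-CONFIG 4.2 Parameter Length */

since sctp_strreset_outreq is a 4-byte parameter header plus three 32-bit sequence/TSN fields, and sctp_strreset_inreq is the header plus one. Note also that the caller's stream list is byte-swapped to network order in place, appended (possibly to both parameters), then swapped back, so the array is unchanged on return.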
@@ -3526,3 +3540,196 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
 
        return retval;
 }
+
+/* RE-CONFIG 3.1 (RE-CONFIG chunk)
+ *   0                   1                   2                   3
+ *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  | Type = 130    |  Chunk Flags  |      Chunk Length             |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  \                                                               \
+ *  /                  Re-configuration Parameter                   /
+ *  \                                                               \
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  \                                                               \
+ *  /             Re-configuration Parameter (optional)             /
+ *  \                                                               \
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+static struct sctp_chunk *sctp_make_reconf(
+                               const struct sctp_association *asoc,
+                               int length)
+{
+       struct sctp_reconf_chunk *reconf;
+       struct sctp_chunk *retval;
+
+       retval = sctp_make_control(asoc, SCTP_CID_RECONF, 0, length,
+                                  GFP_ATOMIC);
+       if (!retval)
+               return NULL;
+
+       reconf = (struct sctp_reconf_chunk *)retval->chunk_hdr;
+       retval->param_hdr.v = reconf->params;
+
+       return retval;
+}
+
+/* RE-CONFIG 4.1 (STREAM OUT RESET)
+ *   0                   1                   2                   3
+ *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |     Parameter Type = 13       | Parameter Length = 16 + 2 * N |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |           Re-configuration Request Sequence Number            |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |           Re-configuration Response Sequence Number           |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |                Sender's Last Assigned TSN                     |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |  Stream Number 1 (optional)   |    Stream Number 2 (optional) |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  /                            ......                             /
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |  Stream Number N-1 (optional) |    Stream Number N (optional) |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * RE-CONFIG 4.2 (STREAM IN RESET)
+ *   0                   1                   2                   3
+ *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |     Parameter Type = 14       |  Parameter Length = 8 + 2 * N |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |          Re-configuration Request Sequence Number             |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |  Stream Number 1 (optional)   |    Stream Number 2 (optional) |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  /                            ......                             /
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |  Stream Number N-1 (optional) |    Stream Number N (optional) |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct sctp_chunk *sctp_make_strreset_req(
+                               const struct sctp_association *asoc,
+                               __u16 stream_num, __u16 *stream_list,
+                               bool out, bool in)
+{
+       struct sctp_strreset_outreq outreq;
+       __u16 stream_len = stream_num * 2;
+       struct sctp_strreset_inreq inreq;
+       struct sctp_chunk *retval;
+       __u16 outlen, inlen, i;
+
+       outlen = (sizeof(outreq) + stream_len) * out;
+       inlen = (sizeof(inreq) + stream_len) * in;
+
+       retval = sctp_make_reconf(asoc, outlen + inlen);
+       if (!retval)
+               return NULL;
+
+       for (i = 0; i < stream_num; i++)
+               stream_list[i] = htons(stream_list[i]);
+
+       if (outlen) {
+               outreq.param_hdr.type = SCTP_PARAM_RESET_OUT_REQUEST;
+               outreq.param_hdr.length = htons(outlen);
+               outreq.request_seq = htonl(asoc->strreset_outseq);
+               outreq.response_seq = htonl(asoc->strreset_inseq - 1);
+               outreq.send_reset_at_tsn = htonl(asoc->next_tsn - 1);
+
+               sctp_addto_chunk(retval, sizeof(outreq), &outreq);
+
+               if (stream_len)
+                       sctp_addto_chunk(retval, stream_len, stream_list);
+       }
+
+       if (inlen) {
+               inreq.param_hdr.type = SCTP_PARAM_RESET_IN_REQUEST;
+               inreq.param_hdr.length = htons(inlen);
+               inreq.request_seq = htonl(asoc->strreset_outseq + out);
+
+               sctp_addto_chunk(retval, sizeof(inreq), &inreq);
+
+               if (stream_len)
+                       sctp_addto_chunk(retval, stream_len, stream_list);
+       }
+
+       for (i = 0; i < stream_num; i++)
+               stream_list[i] = ntohs(stream_list[i]);
+
+       return retval;
+}
+
+/* RE-CONFIG 4.3 (SSN/TSN RESET ALL)
+ *   0                   1                   2                   3
+ *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |     Parameter Type = 15       |      Parameter Length = 8     |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |         Re-configuration Request Sequence Number              |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct sctp_chunk *sctp_make_strreset_tsnreq(
+                               const struct sctp_association *asoc)
+{
+       struct sctp_strreset_tsnreq tsnreq;
+       __u16 length = sizeof(tsnreq);
+       struct sctp_chunk *retval;
+
+       retval = sctp_make_reconf(asoc, length);
+       if (!retval)
+               return NULL;
+
+       tsnreq.param_hdr.type = SCTP_PARAM_RESET_TSN_REQUEST;
+       tsnreq.param_hdr.length = htons(length);
+       tsnreq.request_seq = htonl(asoc->strreset_outseq);
+
+       sctp_addto_chunk(retval, sizeof(tsnreq), &tsnreq);
+
+       return retval;
+}
+
+/* RE-CONFIG 4.5/4.6 (ADD STREAM)
+ *   0                   1                   2                   3
+ *   0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |     Parameter Type = 17       |      Parameter Length = 12    |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |          Re-configuration Request Sequence Number             |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |      Number of new streams    |         Reserved              |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct sctp_chunk *sctp_make_strreset_addstrm(
+                               const struct sctp_association *asoc,
+                               __u16 out, __u16 in)
+{
+       struct sctp_strreset_addstrm addstrm;
+       __u16 size = sizeof(addstrm);
+       struct sctp_chunk *retval;
+
+       retval = sctp_make_reconf(asoc, (!!out + !!in) * size);
+       if (!retval)
+               return NULL;
+
+       if (out) {
+               addstrm.param_hdr.type = SCTP_PARAM_RESET_ADD_OUT_STREAMS;
+               addstrm.param_hdr.length = htons(size);
+               addstrm.number_of_streams = htons(out);
+               addstrm.request_seq = htonl(asoc->strreset_outseq);
+               addstrm.reserved = 0;
+
+               sctp_addto_chunk(retval, size, &addstrm);
+       }
+
+       if (in) {
+               addstrm.param_hdr.type = SCTP_PARAM_RESET_ADD_IN_STREAMS;
+               addstrm.param_hdr.length = htons(size);
+               addstrm.number_of_streams = htons(in);
+               addstrm.request_seq = htonl(asoc->strreset_outseq + !!out);
+               addstrm.reserved = 0;
+
+               sctp_addto_chunk(retval, size, &addstrm);
+       }
+
+       return retval;
+}
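
As a concrete check of the length arithmetic in sctp_make_strreset_req() above, the parameter sizes match the RE-CONFIG diagrams exactly: the outgoing-reset header is 16 bytes plus 2 per listed stream, the incoming-reset header 8 bytes plus 2 per stream. A standalone sketch (plain C, not kernel code; sizeof(outreq) == 16 and sizeof(inreq) == 8 are read off the diagram fields):

#include <stdio.h>

/* Reproduce the outlen/inlen computation for a request that resets
 * n streams in both directions, matching "16 + 2 * N" and "8 + 2 * N"
 * from the diagrams above.
 */
int main(void)
{
	unsigned int n = 3;                    /* streams in the request */
	unsigned int stream_len = n * 2;       /* one __u16 per stream */
	unsigned int outlen = 16 + stream_len; /* sizeof(outreq) == 16 */
	unsigned int inlen = 8 + stream_len;   /* sizeof(inreq) == 8 */

	printf("outlen=%u inlen=%u reconf body=%u\n",
	       outlen, inlen, outlen + inlen);
	return 0;	/* prints: outlen=22 inlen=14 reconf body=36 */
}
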
index c345bf153bed2393479b3e5e471d8c987e908960..51abcc90fe75d47ab16d717102b82780022d88a7 100644 (file)
@@ -436,6 +436,37 @@ out_unlock:
        sctp_association_put(asoc);
 }
 
+/* Handle the timeout of the RE-CONFIG timer. */
+void sctp_generate_reconf_event(unsigned long data)
+{
+       struct sctp_transport *transport = (struct sctp_transport *)data;
+       struct sctp_association *asoc = transport->asoc;
+       struct sock *sk = asoc->base.sk;
+       struct net *net = sock_net(sk);
+       int error = 0;
+
+       bh_lock_sock(sk);
+       if (sock_owned_by_user(sk)) {
+               pr_debug("%s: sock is busy\n", __func__);
+
+               /* Try again later.  */
+               if (!mod_timer(&transport->reconf_timer, jiffies + (HZ / 20)))
+                       sctp_transport_hold(transport);
+               goto out_unlock;
+       }
+
+       error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
+                          SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
+                          asoc->state, asoc->ep, asoc,
+                          transport, GFP_ATOMIC);
+
+       if (error)
+               sk->sk_err = -error;
+
+out_unlock:
+       bh_unlock_sock(sk);
+       sctp_transport_put(transport);
+}
 
 /* Inject a SACK Timeout event into the state machine.  */
 static void sctp_generate_sack_event(unsigned long data)
@@ -453,6 +484,7 @@ sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
        sctp_generate_t4_rto_event,
        sctp_generate_t5_shutdown_guard_event,
        NULL,
+       NULL,
        sctp_generate_sack_event,
        sctp_generate_autoclose_event,
 };
@@ -723,7 +755,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
         * forward progress.
         */
        if (t->dst)
-               dst_confirm(t->dst);
+               sctp_transport_dst_confirm(t);
 
        /* The receiver of the HEARTBEAT ACK should also perform an
         * RTT measurement for that destination transport address
index 8ec20a64a3f8055a0c3576627c5ec5dad7e99ca8..d8798ddda726176ac66ed36f8234ed89109e8c20 100644 (file)
@@ -160,23 +160,22 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
 /* Small helper function that checks whether the chunk is of the
  * appropriate length.  The 'required_length' argument
  * is set to be the size of a specific chunk we are testing.
- * Return Values:  1 = Valid length
- *                0 = Invalid length
+ * Return Values:  true  = Valid length
+ *                false = Invalid length
  *
  */
-static inline int
-sctp_chunk_length_valid(struct sctp_chunk *chunk,
-                          __u16 required_length)
+static inline bool
+sctp_chunk_length_valid(struct sctp_chunk *chunk, __u16 required_length)
 {
        __u16 chunk_length = ntohs(chunk->chunk_hdr->length);
 
        /* Previously already marked? */
        if (unlikely(chunk->pdiscard))
-               return 0;
+               return false;
        if (unlikely(chunk_length < required_length))
-               return 0;
+               return false;
 
-       return 1;
+       return true;
 }
 
 /**********************************************************
@@ -1022,6 +1021,34 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net,
        return SCTP_DISPOSITION_CONSUME;
 }
 
+/* Resend the asoc's strreset_chunk when the RECONF timer expires.  */
+sctp_disposition_t sctp_sf_send_reconf(struct net *net,
+                                      const struct sctp_endpoint *ep,
+                                      const struct sctp_association *asoc,
+                                      const sctp_subtype_t type, void *arg,
+                                      sctp_cmd_seq_t *commands)
+{
+       struct sctp_transport *transport = arg;
+
+       if (asoc->overall_error_count >= asoc->max_retrans) {
+               sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+                               SCTP_ERROR(ETIMEDOUT));
+               /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
+               sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+                               SCTP_PERR(SCTP_ERROR_NO_ERROR));
+               SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+               SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+               return SCTP_DISPOSITION_DELETE_TCB;
+       }
+
+       sctp_chunk_hold(asoc->strreset_chunk);
+       sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+                       SCTP_CHUNK(asoc->strreset_chunk));
+       sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport));
+
+       return SCTP_DISPOSITION_CONSUME;
+}
+
 /*
  * Process a heartbeat request.
  *
@@ -3237,36 +3264,34 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net,
        struct sctp_chunk *abort;
 
        packet = sctp_ootb_pkt_new(net, asoc, chunk);
+       if (!packet)
+               return SCTP_DISPOSITION_NOMEM;
 
-       if (packet) {
-               /* Make an ABORT. The T bit will be set if the asoc
-                * is NULL.
-                */
-               abort = sctp_make_abort(asoc, chunk, 0);
-               if (!abort) {
-                       sctp_ootb_pkt_free(packet);
-                       return SCTP_DISPOSITION_NOMEM;
-               }
-
-               /* Reflect vtag if T-Bit is set */
-               if (sctp_test_T_bit(abort))
-                       packet->vtag = ntohl(chunk->sctp_hdr->vtag);
+       /* Make an ABORT. The T bit will be set if the asoc
+        * is NULL.
+        */
+       abort = sctp_make_abort(asoc, chunk, 0);
+       if (!abort) {
+               sctp_ootb_pkt_free(packet);
+               return SCTP_DISPOSITION_NOMEM;
+       }
 
-               /* Set the skb to the belonging sock for accounting.  */
-               abort->skb->sk = ep->base.sk;
+       /* Reflect vtag if T-Bit is set */
+       if (sctp_test_T_bit(abort))
+               packet->vtag = ntohl(chunk->sctp_hdr->vtag);
 
-               sctp_packet_append_chunk(packet, abort);
+       /* Set the skb to the belonging sock for accounting.  */
+       abort->skb->sk = ep->base.sk;
 
-               sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
-                               SCTP_PACKET(packet));
+       sctp_packet_append_chunk(packet, abort);
 
-               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+       sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
+                       SCTP_PACKET(packet));
 
-               sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
-               return SCTP_DISPOSITION_CONSUME;
-       }
+       SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
-       return SCTP_DISPOSITION_NOMEM;
+       sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+       return SCTP_DISPOSITION_CONSUME;
 }
 
 /*
@@ -3503,45 +3528,43 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net,
        struct sctp_chunk *shut;
 
        packet = sctp_ootb_pkt_new(net, asoc, chunk);
+       if (!packet)
+               return SCTP_DISPOSITION_NOMEM;
 
-       if (packet) {
-               /* Make an SHUTDOWN_COMPLETE.
-                * The T bit will be set if the asoc is NULL.
-                */
-               shut = sctp_make_shutdown_complete(asoc, chunk);
-               if (!shut) {
-                       sctp_ootb_pkt_free(packet);
-                       return SCTP_DISPOSITION_NOMEM;
-               }
-
-               /* Reflect vtag if T-Bit is set */
-               if (sctp_test_T_bit(shut))
-                       packet->vtag = ntohl(chunk->sctp_hdr->vtag);
+       /* Make a SHUTDOWN_COMPLETE.
+        * The T bit will be set if the asoc is NULL.
+        */
+       shut = sctp_make_shutdown_complete(asoc, chunk);
+       if (!shut) {
+               sctp_ootb_pkt_free(packet);
+               return SCTP_DISPOSITION_NOMEM;
+       }
 
-               /* Set the skb to the belonging sock for accounting.  */
-               shut->skb->sk = ep->base.sk;
+       /* Reflect vtag if T-Bit is set */
+       if (sctp_test_T_bit(shut))
+               packet->vtag = ntohl(chunk->sctp_hdr->vtag);
 
-               sctp_packet_append_chunk(packet, shut);
+       /* Set the skb to the belonging sock for accounting.  */
+       shut->skb->sk = ep->base.sk;
 
-               sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
-                               SCTP_PACKET(packet));
+       sctp_packet_append_chunk(packet, shut);
 
-               SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+       sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
+                       SCTP_PACKET(packet));
 
-               /* If the chunk length is invalid, we don't want to process
-                * the reset of the packet.
-                */
-               if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
-                       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+       SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
 
-               /* We need to discard the rest of the packet to prevent
-                * potential bomming attacks from additional bundled chunks.
-                * This is documented in SCTP Threats ID.
-                */
+       /* If the chunk length is invalid, we don't want to process
+        * the rest of the packet.
+        */
+       if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
                return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
-       }
 
-       return SCTP_DISPOSITION_NOMEM;
+       /* We need to discard the rest of the packet to prevent
+        * potential bombing attacks from additional bundled chunks.
+        * This is documented in the SCTP Threats I-D.
+        */
+       return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 }
 
 /*
@@ -3844,6 +3867,9 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
                return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
+       if (!asoc->peer.prsctp_capable)
+               return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
+
        /* Make sure that the FORWARD_TSN chunk has valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
                return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
@@ -3912,6 +3938,9 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
                return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
 
+       if (!asoc->peer.prsctp_capable)
+               return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
+
        /* Make sure that the FORWARD_TSN chunk has a valid length.  */
        if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
                return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
@@ -5162,6 +5191,19 @@ sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net,
        return SCTP_DISPOSITION_CONSUME;
 }
 
+/* RE-CONFIG Section 5.1 RECONF Chunk Procedures */
+sctp_disposition_t sctp_sf_do_prm_reconf(struct net *net,
+                                        const struct sctp_endpoint *ep,
+                                        const struct sctp_association *asoc,
+                                        const sctp_subtype_t type,
+                                        void *arg, sctp_cmd_seq_t *commands)
+{
+       struct sctp_chunk *chunk = arg;
+
+       sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk));
+       return SCTP_DISPOSITION_CONSUME;
+}
+
 /*
  * Ignore the primitive event
  *
@@ -6036,8 +6078,9 @@ static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
        sctp_transport_route(transport, (union sctp_addr *)&chunk->dest,
                             sctp_sk(net->sctp.ctl_sock));
 
-       packet = sctp_packet_init(&transport->packet, transport, sport, dport);
-       packet = sctp_packet_config(packet, vtag, 0);
+       packet = &transport->packet;
+       sctp_packet_init(packet, transport, sport, dport);
+       sctp_packet_config(packet, vtag, 0);
 
        return packet;
 
@@ -6278,9 +6321,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
         * and is invalid.
         */
        ssn = ntohs(data_hdr->ssn);
-       if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->ssnmap->in, sid))) {
+       if (ordered && SSN_lt(ssn, sctp_ssn_peek(asoc->stream, in, sid)))
                return SCTP_IERROR_PROTO_VIOLATION;
-       }
 
        /* Send the data up to the user.  Note:  Schedule  the
         * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
index a987d54b379c5c57513ef2f0fac046c8cae2bf75..b5438b4f6c1ee349161fdc12e4230db954b45b7d 100644 (file)
@@ -643,6 +643,25 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
        TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
 } /* TYPE_SCTP_PRIMITIVE_ASCONF */
 
+#define TYPE_SCTP_PRIMITIVE_RECONF { \
+       /* SCTP_STATE_CLOSED */ \
+       TYPE_SCTP_FUNC(sctp_sf_error_closed), \
+       /* SCTP_STATE_COOKIE_WAIT */ \
+       TYPE_SCTP_FUNC(sctp_sf_error_closed), \
+       /* SCTP_STATE_COOKIE_ECHOED */ \
+       TYPE_SCTP_FUNC(sctp_sf_error_closed), \
+       /* SCTP_STATE_ESTABLISHED */ \
+       TYPE_SCTP_FUNC(sctp_sf_do_prm_reconf), \
+       /* SCTP_STATE_SHUTDOWN_PENDING */ \
+       TYPE_SCTP_FUNC(sctp_sf_do_prm_reconf), \
+       /* SCTP_STATE_SHUTDOWN_SENT */ \
+       TYPE_SCTP_FUNC(sctp_sf_do_prm_reconf), \
+       /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+       TYPE_SCTP_FUNC(sctp_sf_do_prm_reconf), \
+       /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+       TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
+} /* TYPE_SCTP_PRIMITIVE_RECONF */
+
 /* The primary index for this table is the primitive type.
  * The secondary index for this table is the state.
  */
@@ -653,6 +672,7 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE
        TYPE_SCTP_PRIMITIVE_SEND,
        TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT,
        TYPE_SCTP_PRIMITIVE_ASCONF,
+       TYPE_SCTP_PRIMITIVE_RECONF,
 };
 
 #define TYPE_SCTP_OTHER_NO_PENDING_TSN  { \
@@ -888,6 +908,25 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
        TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 }
 
+#define TYPE_SCTP_EVENT_TIMEOUT_RECONF { \
+       /* SCTP_STATE_CLOSED */ \
+       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+       /* SCTP_STATE_COOKIE_WAIT */ \
+       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+       /* SCTP_STATE_COOKIE_ECHOED */ \
+       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+       /* SCTP_STATE_ESTABLISHED */ \
+       TYPE_SCTP_FUNC(sctp_sf_send_reconf), \
+       /* SCTP_STATE_SHUTDOWN_PENDING */ \
+       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+       /* SCTP_STATE_SHUTDOWN_SENT */ \
+       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+       /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+       /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+       TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+}
+
 static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = {
        TYPE_SCTP_EVENT_TIMEOUT_NONE,
        TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE,
@@ -897,6 +936,7 @@ static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][S
        TYPE_SCTP_EVENT_TIMEOUT_T4_RTO,
        TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD,
        TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT,
+       TYPE_SCTP_EVENT_TIMEOUT_RECONF,
        TYPE_SCTP_EVENT_TIMEOUT_SACK,
        TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE,
 };
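
Both new rows slot into the existing two-dimensional dispatch: the primary index is the event subtype, the secondary index the association state. A minimal sketch of that lookup (illustrative only; the kernel's actual helper, sctp_sm_lookup_event(), also validates the indices):

static const sctp_sm_table_entry_t *
reconf_timeout_entry(sctp_state_t state)
{
	/* Per the table above, ESTABLISHED resolves to
	 * sctp_sf_send_reconf(); every other state resolves to
	 * sctp_sf_timer_ignore().
	 */
	return &timeout_event_table[SCTP_EVENT_TIMEOUT_RECONF][state];
}
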
index 1b5d669e30292a57ed57dd920d81be2a57f97b22..75f35cea43713856a613e04a933696fb870ba171 100644 (file)
@@ -364,7 +364,7 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
                }
        }
 
-       if (snum && snum < PROT_SOCK &&
+       if (snum && snum < inet_prot_sock(net) &&
            !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
                return -EACCES;
 
@@ -592,7 +592,7 @@ static int sctp_send_asconf_add_ip(struct sock              *sk,
                        list_for_each_entry(trans,
                            &asoc->peer.transport_addr_list, transports) {
                                /* Clear the source and route cache */
-                               dst_release(trans->dst);
+                               sctp_transport_dst_release(trans);
                                trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
                                    2*asoc->pathmtu, 4380));
                                trans->ssthresh = asoc->peer.i.a_rwnd;
@@ -843,7 +843,7 @@ skip_mkasconf:
                 */
                list_for_each_entry(transport, &asoc->peer.transport_addr_list,
                                        transports) {
-                       dst_release(transport->dst);
+                       sctp_transport_dst_release(transport);
                        sctp_transport_route(transport, NULL,
                                             sctp_sk(asoc->base.sk));
                }
@@ -1156,8 +1156,10 @@ static int __sctp_connect(struct sock *sk,
                                 * accept new associations, but it SHOULD NOT
                                 * be permitted to open new associations.
                                 */
-                               if (ep->base.bind_addr.port < PROT_SOCK &&
-                                   !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
+                               if (ep->base.bind_addr.port <
+                                   inet_prot_sock(net) &&
+                                   !ns_capable(net->user_ns,
+                                   CAP_NET_BIND_SERVICE)) {
                                        err = -EACCES;
                                        goto out_free;
                                }
@@ -1822,7 +1824,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
                         * but it SHOULD NOT be permitted to open new
                         * associations.
                         */
-                       if (ep->base.bind_addr.port < PROT_SOCK &&
+                       if (ep->base.bind_addr.port < inet_prot_sock(net) &&
                            !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
                                err = -EACCES;
                                goto out_unlock;
@@ -2434,7 +2436,6 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
                        sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
                } else if (asoc) {
                        asoc->pathmtu = params->spp_pathmtu;
-                       sctp_frag_point(asoc, params->spp_pathmtu);
                } else {
                        sp->pathmtu = params->spp_pathmtu;
                }
@@ -3755,6 +3756,120 @@ out:
        return retval;
 }
 
+static int sctp_setsockopt_enable_strreset(struct sock *sk,
+                                          char __user *optval,
+                                          unsigned int optlen)
+{
+       struct sctp_assoc_value params;
+       struct sctp_association *asoc;
+       int retval = -EINVAL;
+
+       if (optlen != sizeof(params))
+               goto out;
+
+       if (copy_from_user(&params, optval, optlen)) {
+               retval = -EFAULT;
+               goto out;
+       }
+
+       if (params.assoc_value & (~SCTP_ENABLE_STRRESET_MASK))
+               goto out;
+
+       asoc = sctp_id2assoc(sk, params.assoc_id);
+       if (asoc) {
+               asoc->strreset_enable = params.assoc_value;
+       } else if (!params.assoc_id) {
+               struct sctp_sock *sp = sctp_sk(sk);
+
+               sp->ep->strreset_enable = params.assoc_value;
+       } else {
+               goto out;
+       }
+
+       retval = 0;
+
+out:
+       return retval;
+}
+
+static int sctp_setsockopt_reset_streams(struct sock *sk,
+                                        char __user *optval,
+                                        unsigned int optlen)
+{
+       struct sctp_reset_streams *params;
+       struct sctp_association *asoc;
+       int retval = -EINVAL;
+
+       if (optlen < sizeof(struct sctp_reset_streams))
+               return -EINVAL;
+
+       params = memdup_user(optval, optlen);
+       if (IS_ERR(params))
+               return PTR_ERR(params);
+
+       asoc = sctp_id2assoc(sk, params->srs_assoc_id);
+       if (!asoc)
+               goto out;
+
+       retval = sctp_send_reset_streams(asoc, params);
+
+out:
+       kfree(params);
+       return retval;
+}
+
+static int sctp_setsockopt_reset_assoc(struct sock *sk,
+                                      char __user *optval,
+                                      unsigned int optlen)
+{
+       struct sctp_association *asoc;
+       sctp_assoc_t associd;
+       int retval = -EINVAL;
+
+       if (optlen != sizeof(associd))
+               goto out;
+
+       if (copy_from_user(&associd, optval, optlen)) {
+               retval = -EFAULT;
+               goto out;
+       }
+
+       asoc = sctp_id2assoc(sk, associd);
+       if (!asoc)
+               goto out;
+
+       retval = sctp_send_reset_assoc(asoc);
+
+out:
+       return retval;
+}
+
+static int sctp_setsockopt_add_streams(struct sock *sk,
+                                      char __user *optval,
+                                      unsigned int optlen)
+{
+       struct sctp_association *asoc;
+       struct sctp_add_streams params;
+       int retval = -EINVAL;
+
+       if (optlen != sizeof(params))
+               goto out;
+
+       if (copy_from_user(&params, optval, optlen)) {
+               retval = -EFAULT;
+               goto out;
+       }
+
+       asoc = sctp_id2assoc(sk, params.sas_assoc_id);
+       if (!asoc)
+               goto out;
+
+       retval = sctp_send_add_streams(asoc, &params);
+
+out:
+       return retval;
+}
+
 /* API 6.2 setsockopt(), getsockopt()
  *
  * Applications use setsockopt() and getsockopt() to set or retrieve
@@ -3921,6 +4036,18 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
        case SCTP_DEFAULT_PRINFO:
                retval = sctp_setsockopt_default_prinfo(sk, optval, optlen);
                break;
+       case SCTP_ENABLE_STREAM_RESET:
+               retval = sctp_setsockopt_enable_strreset(sk, optval, optlen);
+               break;
+       case SCTP_RESET_STREAMS:
+               retval = sctp_setsockopt_reset_streams(sk, optval, optlen);
+               break;
+       case SCTP_RESET_ASSOC:
+               retval = sctp_setsockopt_reset_assoc(sk, optval, optlen);
+               break;
+       case SCTP_ADD_STREAMS:
+               retval = sctp_setsockopt_add_streams(sk, optval, optlen);
+               break;
        default:
                retval = -ENOPROTOOPT;
                break;
@@ -6405,6 +6532,47 @@ out:
        return retval;
 }
 
+static int sctp_getsockopt_enable_strreset(struct sock *sk, int len,
+                                          char __user *optval,
+                                          int __user *optlen)
+{
+       struct sctp_assoc_value params;
+       struct sctp_association *asoc;
+       int retval = -EFAULT;
+
+       if (len < sizeof(params)) {
+               retval = -EINVAL;
+               goto out;
+       }
+
+       len = sizeof(params);
+       if (copy_from_user(&params, optval, len))
+               goto out;
+
+       asoc = sctp_id2assoc(sk, params.assoc_id);
+       if (asoc) {
+               params.assoc_value = asoc->strreset_enable;
+       } else if (!params.assoc_id) {
+               struct sctp_sock *sp = sctp_sk(sk);
+
+               params.assoc_value = sp->ep->strreset_enable;
+       } else {
+               retval = -EINVAL;
+               goto out;
+       }
+
+       if (put_user(len, optlen))
+               goto out;
+
+       if (copy_to_user(optval, &params, len))
+               goto out;
+
+       retval = 0;
+
+out:
+       return retval;
+}
+
 static int sctp_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen)
 {
@@ -6572,6 +6740,10 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
                retval = sctp_getsockopt_pr_assocstatus(sk, len, optval,
                                                        optlen);
                break;
+       case SCTP_ENABLE_STREAM_RESET:
+               retval = sctp_getsockopt_enable_strreset(sk, len, optval,
+                                                        optlen);
+               break;
        default:
                retval = -ENOPROTOOPT;
                break;
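
Taken together, the four new socket options form the userspace entry points for RFC 6525 reconfiguration. A hedged sketch of the expected call sequence (it assumes the sctp_assoc_value and sctp_reset_streams layouts handled above are exported through <linux/sctp.h>; fd and assoc are placeholders):

#include <stdint.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sctp.h>		/* assumed to carry the new UAPI */

/* Enable stream-reset requests, then reset outgoing streams 0 and 1. */
static int reset_two_out_streams(int fd, sctp_assoc_t assoc)
{
	struct sctp_assoc_value enable = {
		.assoc_id    = assoc,
		.assoc_value = SCTP_ENABLE_RESET_STREAM_REQ,
	};
	struct sctp_reset_streams *srs;
	size_t len = sizeof(*srs) + 2 * sizeof(uint16_t);
	int rc;

	if (setsockopt(fd, IPPROTO_SCTP, SCTP_ENABLE_STREAM_RESET,
		       &enable, sizeof(enable)) < 0)
		return -1;

	/* Two trailing stream numbers follow the fixed header. */
	srs = calloc(1, len);
	if (!srs)
		return -1;
	srs->srs_assoc_id = assoc;
	srs->srs_flags = SCTP_STREAM_RESET_OUTGOING;
	srs->srs_number_streams = 2;
	srs->srs_stream_list[0] = 0;
	srs->srs_stream_list[1] = 1;

	rc = setsockopt(fd, IPPROTO_SCTP, SCTP_RESET_STREAMS, srs, len);
	free(srs);
	return rc;
}
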
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
deleted file mode 100644 (file)
index b9c8521..0000000
+++ /dev/null
@@ -1,125 +0,0 @@
-/* SCTP kernel implementation
- * Copyright (c) 2003 International Business Machines, Corp.
- *
- * This file is part of the SCTP kernel implementation
- *
- * These functions manipulate sctp SSN tracker.
- *
- * This SCTP implementation is free software;
- * you can redistribute it and/or modify it under the terms of
- * the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This SCTP implementation is distributed in the hope that it
- * will be useful, but WITHOUT ANY WARRANTY; without even the implied
- *                 ************************
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING.  If not, see
- * <http://www.gnu.org/licenses/>.
- *
- * Please send any bug reports or fixes you make to the
- * email address(es):
- *    lksctp developers <linux-sctp@vger.kernel.org>
- *
- * Written or modified by:
- *    Jon Grimm             <jgrimm@us.ibm.com>
- */
-
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <net/sctp/sctp.h>
-#include <net/sctp/sm.h>
-
-static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
-                                           __u16 out);
-
-/* Storage size needed for map includes 2 headers and then the
- * specific needs of in or out streams.
- */
-static inline size_t sctp_ssnmap_size(__u16 in, __u16 out)
-{
-       return sizeof(struct sctp_ssnmap) + (in + out) * sizeof(__u16);
-}
-
-
-/* Create a new sctp_ssnmap.
- * Allocate room to store at least 'len' contiguous TSNs.
- */
-struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
-                                   gfp_t gfp)
-{
-       struct sctp_ssnmap *retval;
-       int size;
-
-       size = sctp_ssnmap_size(in, out);
-       if (size <= KMALLOC_MAX_SIZE)
-               retval = kmalloc(size, gfp);
-       else
-               retval = (struct sctp_ssnmap *)
-                         __get_free_pages(gfp, get_order(size));
-       if (!retval)
-               goto fail;
-
-       if (!sctp_ssnmap_init(retval, in, out))
-               goto fail_map;
-
-       SCTP_DBG_OBJCNT_INC(ssnmap);
-
-       return retval;
-
-fail_map:
-       if (size <= KMALLOC_MAX_SIZE)
-               kfree(retval);
-       else
-               free_pages((unsigned long)retval, get_order(size));
-fail:
-       return NULL;
-}
-
-
-/* Initialize a block of memory as a ssnmap.  */
-static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in,
-                                           __u16 out)
-{
-       memset(map, 0x00, sctp_ssnmap_size(in, out));
-
-       /* Start 'in' stream just after the map header. */
-       map->in.ssn = (__u16 *)&map[1];
-       map->in.len = in;
-
-       /* Start 'out' stream just after 'in'. */
-       map->out.ssn = &map->in.ssn[in];
-       map->out.len = out;
-
-       return map;
-}
-
-/* Clear out the ssnmap streams.  */
-void sctp_ssnmap_clear(struct sctp_ssnmap *map)
-{
-       size_t size;
-
-       size = (map->in.len + map->out.len) * sizeof(__u16);
-       memset(map->in.ssn, 0x00, size);
-}
-
-/* Dispose of a ssnmap.  */
-void sctp_ssnmap_free(struct sctp_ssnmap *map)
-{
-       int size;
-
-       if (unlikely(!map))
-               return;
-
-       size = sctp_ssnmap_size(map->in.len, map->out.len);
-       if (size <= KMALLOC_MAX_SIZE)
-               kfree(map);
-       else
-               free_pages((unsigned long)map, get_order(size));
-
-       SCTP_DBG_OBJCNT_DEC(ssnmap);
-}
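
net/sctp/stream.c (added below) replaces this per-association SSN map with per-stream arrays. The replacement structure itself is outside this excerpt; reconstructed from its accessors in the diff, it plausibly has the following shape (a hypothetical sketch; the authoritative definition lives in include/net/sctp/structs.h):

/* Presumed shape, inferred from stream->out[i].ssn / .state and
 * stream->in[i].ssn usage below; field types are assumptions.
 */
struct sctp_stream_out {
	__u16 ssn;
	__u8  state;	/* SCTP_STREAM_OPEN or SCTP_STREAM_CLOSED */
};

struct sctp_stream_in {
	__u16 ssn;
};

struct sctp_stream {
	struct sctp_stream_out *out;	/* outcnt entries */
	struct sctp_stream_in *in;	/* incnt entries */
	__u16 outcnt;
	__u16 incnt;
};
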
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
new file mode 100644 (file)
index 0000000..eb02490
--- /dev/null
@@ -0,0 +1,296 @@
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions manipulate sctp stream structures.
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ *                 ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ *    lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ *    Xin Long <lucien.xin@gmail.com>
+ */
+
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp)
+{
+       struct sctp_stream *stream;
+       int i;
+
+       stream = kzalloc(sizeof(*stream), gfp);
+       if (!stream)
+               return NULL;
+
+       stream->outcnt = outcnt;
+       stream->out = kcalloc(stream->outcnt, sizeof(*stream->out), gfp);
+       if (!stream->out) {
+               kfree(stream);
+               return NULL;
+       }
+       for (i = 0; i < stream->outcnt; i++)
+               stream->out[i].state = SCTP_STREAM_OPEN;
+
+       stream->incnt = incnt;
+       stream->in = kcalloc(stream->incnt, sizeof(*stream->in), gfp);
+       if (!stream->in) {
+               kfree(stream->out);
+               kfree(stream);
+               return NULL;
+       }
+
+       return stream;
+}
+
+void sctp_stream_free(struct sctp_stream *stream)
+{
+       if (unlikely(!stream))
+               return;
+
+       kfree(stream->out);
+       kfree(stream->in);
+       kfree(stream);
+}
+
+void sctp_stream_clear(struct sctp_stream *stream)
+{
+       int i;
+
+       for (i = 0; i < stream->outcnt; i++)
+               stream->out[i].ssn = 0;
+
+       for (i = 0; i < stream->incnt; i++)
+               stream->in[i].ssn = 0;
+}
+
+static int sctp_send_reconf(struct sctp_association *asoc,
+                           struct sctp_chunk *chunk)
+{
+       struct net *net = sock_net(asoc->base.sk);
+       int retval = 0;
+
+       retval = sctp_primitive_RECONF(net, asoc, chunk);
+       if (retval)
+               sctp_chunk_free(chunk);
+
+       return retval;
+}
+
+int sctp_send_reset_streams(struct sctp_association *asoc,
+                           struct sctp_reset_streams *params)
+{
+       struct sctp_stream *stream = asoc->stream;
+       __u16 i, str_nums, *str_list;
+       struct sctp_chunk *chunk;
+       int retval = -EINVAL;
+       bool out, in;
+
+       if (!asoc->peer.reconf_capable ||
+           !(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) {
+               retval = -ENOPROTOOPT;
+               goto out;
+       }
+
+       if (asoc->strreset_outstanding) {
+               retval = -EINPROGRESS;
+               goto out;
+       }
+
+       out = params->srs_flags & SCTP_STREAM_RESET_OUTGOING;
+       in  = params->srs_flags & SCTP_STREAM_RESET_INCOMING;
+       if (!out && !in)
+               goto out;
+
+       str_nums = params->srs_number_streams;
+       str_list = params->srs_stream_list;
+       if (out && str_nums)
+               for (i = 0; i < str_nums; i++)
+                       if (str_list[i] >= stream->outcnt)
+                               goto out;
+
+       if (in && str_nums)
+               for (i = 0; i < str_nums; i++)
+                       if (str_list[i] >= stream->incnt)
+                               goto out;
+
+       chunk = sctp_make_strreset_req(asoc, str_nums, str_list, out, in);
+       if (!chunk) {
+               retval = -ENOMEM;
+               goto out;
+       }
+
+       if (out) {
+               if (str_nums)
+                       for (i = 0; i < str_nums; i++)
+                               stream->out[str_list[i]].state =
+                                                      SCTP_STREAM_CLOSED;
+               else
+                       for (i = 0; i < stream->outcnt; i++)
+                               stream->out[i].state = SCTP_STREAM_CLOSED;
+       }
+
+       asoc->strreset_chunk = chunk;
+       sctp_chunk_hold(asoc->strreset_chunk);
+
+       retval = sctp_send_reconf(asoc, chunk);
+       if (retval) {
+               sctp_chunk_put(asoc->strreset_chunk);
+               asoc->strreset_chunk = NULL;
+               if (!out)
+                       goto out;
+
+               if (str_nums)
+                       for (i = 0; i < str_nums; i++)
+                               stream->out[str_list[i]].state =
+                                                      SCTP_STREAM_OPEN;
+               else
+                       for (i = 0; i < stream->outcnt; i++)
+                               stream->out[i].state = SCTP_STREAM_OPEN;
+
+               goto out;
+       }
+
+       asoc->strreset_outstanding = out + in;
+
+out:
+       return retval;
+}
+
+int sctp_send_reset_assoc(struct sctp_association *asoc)
+{
+       struct sctp_chunk *chunk = NULL;
+       int retval;
+       __u16 i;
+
+       if (!asoc->peer.reconf_capable ||
+           !(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
+               return -ENOPROTOOPT;
+
+       if (asoc->strreset_outstanding)
+               return -EINPROGRESS;
+
+       chunk = sctp_make_strreset_tsnreq(asoc);
+       if (!chunk)
+               return -ENOMEM;
+
+       /* Block further xmit of data until this request is completed */
+       for (i = 0; i < asoc->stream->outcnt; i++)
+               asoc->stream->out[i].state = SCTP_STREAM_CLOSED;
+
+       asoc->strreset_chunk = chunk;
+       sctp_chunk_hold(asoc->strreset_chunk);
+
+       retval = sctp_send_reconf(asoc, chunk);
+       if (retval) {
+               sctp_chunk_put(asoc->strreset_chunk);
+               asoc->strreset_chunk = NULL;
+
+               for (i = 0; i < asoc->stream->outcnt; i++)
+                       asoc->stream->out[i].state = SCTP_STREAM_OPEN;
+
+               return retval;
+       }
+
+       asoc->strreset_outstanding = 1;
+
+       return 0;
+}
+
+int sctp_send_add_streams(struct sctp_association *asoc,
+                         struct sctp_add_streams *params)
+{
+       struct sctp_stream *stream = asoc->stream;
+       struct sctp_chunk *chunk = NULL;
+       int retval = -ENOMEM;
+       __u32 outcnt, incnt;
+       __u16 out, in;
+
+       if (!asoc->peer.reconf_capable ||
+           !(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
+               retval = -ENOPROTOOPT;
+               goto out;
+       }
+
+       if (asoc->strreset_outstanding) {
+               retval = -EINPROGRESS;
+               goto out;
+       }
+
+       out = params->sas_outstrms;
+       in  = params->sas_instrms;
+       outcnt = stream->outcnt + out;
+       incnt = stream->incnt + in;
+       if (outcnt > SCTP_MAX_STREAM || incnt > SCTP_MAX_STREAM ||
+           (!out && !in)) {
+               retval = -EINVAL;
+               goto out;
+       }
+
+       if (out) {
+               struct sctp_stream_out *streamout;
+
+               streamout = krealloc(stream->out, outcnt * sizeof(*streamout),
+                                    GFP_KERNEL);
+               if (!streamout)
+                       goto out;
+
+               memset(streamout + stream->outcnt, 0, out * sizeof(*streamout));
+               stream->out = streamout;
+       }
+
+       if (in) {
+               struct sctp_stream_in *streamin;
+
+               streamin = krealloc(stream->in, incnt * sizeof(*streamin),
+                                   GFP_KERNEL);
+               if (!streamin)
+                       goto out;
+
+               memset(streamin + stream->incnt, 0, in * sizeof(*streamin));
+               stream->in = streamin;
+       }
+
+       chunk = sctp_make_strreset_addstrm(asoc, out, in);
+       if (!chunk)
+               goto out;
+
+       asoc->strreset_chunk = chunk;
+       sctp_chunk_hold(asoc->strreset_chunk);
+
+       retval = sctp_send_reconf(asoc, chunk);
+       if (retval) {
+               sctp_chunk_put(asoc->strreset_chunk);
+               asoc->strreset_chunk = NULL;
+               goto out;
+       }
+
+       stream->incnt = incnt;
+       stream->outcnt = outcnt;
+
+       asoc->strreset_outstanding = !!out + !!in;
+
+out:
+       return retval;
+}
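
A matching userspace sketch for the add-streams path (same assumptions as the earlier setsockopt example; the sas_* field names are taken from sctp_send_add_streams() above):

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sctp.h>		/* assumed to carry the new UAPI */

/* Ask the peer to accept two additional outbound streams. */
static int add_two_out_streams(int fd, sctp_assoc_t assoc)
{
	struct sctp_add_streams sas = {
		.sas_assoc_id = assoc,
		.sas_outstrms = 2,
		.sas_instrms  = 0,
	};

	return setsockopt(fd, IPPROTO_SCTP, SCTP_ADD_STREAMS,
			  &sas, sizeof(sas));
}
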
index a1652ab63918940be605eaf002b5506f5e4e6673..5b63ceb3bf3758f441a3240d7b516c5e543bfc98 100644 (file)
@@ -88,9 +88,11 @@ static struct sctp_transport *sctp_transport_init(struct net *net,
        INIT_LIST_HEAD(&peer->transports);
 
        setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event,
-                       (unsigned long)peer);
+                   (unsigned long)peer);
        setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
-                       (unsigned long)peer);
+                   (unsigned long)peer);
+       setup_timer(&peer->reconf_timer, sctp_generate_reconf_event,
+                   (unsigned long)peer);
        setup_timer(&peer->proto_unreach_timer,
                    sctp_generate_proto_unreach_event, (unsigned long)peer);
 
@@ -144,6 +146,9 @@ void sctp_transport_free(struct sctp_transport *transport)
        if (del_timer(&transport->T3_rtx_timer))
                sctp_transport_put(transport);
 
+       if (del_timer(&transport->reconf_timer))
+               sctp_transport_put(transport);
+
        /* Delete the ICMP proto unreachable timer if it's active. */
        if (del_timer(&transport->proto_unreach_timer))
                sctp_association_put(transport->asoc);
@@ -211,6 +216,14 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
                sctp_transport_hold(transport);
 }
 
+void sctp_transport_reset_reconf_timer(struct sctp_transport *transport)
+{
+       if (!timer_pending(&transport->reconf_timer))
+               if (!mod_timer(&transport->reconf_timer,
+                              jiffies + transport->rto))
+                       sctp_transport_hold(transport);
+}
+
 /* This transport has been assigned to an association.
  * Initialize fields from the association or from the sock itself.
  * Register the reference count in the association.
@@ -227,7 +240,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
 {
        /* If we don't have a fresh route, look one up */
        if (!transport->dst || transport->dst->obsolete) {
-               dst_release(transport->dst);
+               sctp_transport_dst_release(transport);
                transport->af_specific->get_dst(transport, &transport->saddr,
                                                &transport->fl, sk);
        }
@@ -659,3 +672,17 @@ void sctp_transport_immediate_rtx(struct sctp_transport *t)
                        sctp_transport_hold(t);
        }
 }
+
+/* Drop the cached dst and clear the pending-confirm flag */
+void sctp_transport_dst_release(struct sctp_transport *t)
+{
+       dst_release(t->dst);
+       t->dst = NULL;
+       t->dst_pending_confirm = 0;
+}
+
+/* Schedule neighbour confirm */
+void sctp_transport_dst_confirm(struct sctp_transport *t)
+{
+       t->dst_pending_confirm = 1;
+}
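
These two helpers centralize a pattern used throughout this diff: every drop of the cached route also clears the pending-confirm flag, and neighbour confirmation is recorded as intent rather than done eagerly via dst_confirm(). A hypothetical caller (not from this diff), to show the intended pairing:

/* Release the route when it changes, and mark the neighbour for
 * confirmation on a HEARTBEAT ACK so the transmit path can confirm
 * it on the next real packet.
 */
static void example_path_event(struct sctp_transport *t,
			       bool route_changed, bool hb_acked)
{
	if (route_changed)
		sctp_transport_dst_release(t);	/* also clears the flag */
	if (hb_acked)
		sctp_transport_dst_confirm(t);	/* consumed at xmit time */
}
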
index 84d0fdaf7de9d9b14c2d3072b32919580b9cd1f0..aa3624d50278086633ae6cdb17936731cd10f464 100644 (file)
@@ -760,11 +760,11 @@ static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
        struct sk_buff_head *event_list;
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
-       struct sctp_stream *in;
+       struct sctp_stream *stream;
        __u16 sid, csid, cssn;
 
        sid = event->stream;
-       in  = &ulpq->asoc->ssnmap->in;
+       stream  = ulpq->asoc->stream;
 
        event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
 
@@ -782,11 +782,11 @@ static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
                if (csid < sid)
                        continue;
 
-               if (cssn != sctp_ssn_peek(in, sid))
+               if (cssn != sctp_ssn_peek(stream, in, sid))
                        break;
 
-               /* Found it, so mark in the ssnmap. */
-               sctp_ssn_next(in, sid);
+               /* Found it, so mark in the stream. */
+               sctp_ssn_next(stream, in, sid);
 
                __skb_unlink(pos, &ulpq->lobby);
 
@@ -849,7 +849,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
 {
        __u16 sid, ssn;
-       struct sctp_stream *in;
+       struct sctp_stream *stream;
 
        /* Check if this message needs ordering.  */
        if (SCTP_DATA_UNORDERED & event->msg_flags)
@@ -858,10 +858,10 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
        /* Note: The stream ID must be verified before this routine.  */
        sid = event->stream;
        ssn = event->ssn;
-       in  = &ulpq->asoc->ssnmap->in;
+       stream  = ulpq->asoc->stream;
 
        /* Is this the expected SSN for this stream ID?  */
-       if (ssn != sctp_ssn_peek(in, sid)) {
+       if (ssn != sctp_ssn_peek(stream, in, sid)) {
                /* We've received something out of order, so find where it
                 * needs to be placed.  We order by stream and then by SSN.
                 */
@@ -870,7 +870,7 @@ static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
        }
 
        /* Mark that the next chunk has been found.  */
-       sctp_ssn_next(in, sid);
+       sctp_ssn_next(stream, in, sid);
 
        /* Go find any other chunks that were waiting for
         * ordering.
@@ -888,12 +888,12 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_ulpevent *event;
-       struct sctp_stream *in;
+       struct sctp_stream *stream;
        struct sk_buff_head temp;
        struct sk_buff_head *lobby = &ulpq->lobby;
        __u16 csid, cssn;
 
-       in  = &ulpq->asoc->ssnmap->in;
+       stream = ulpq->asoc->stream;
 
        /* We are holding the chunks by stream, by SSN.  */
        skb_queue_head_init(&temp);
@@ -912,7 +912,7 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
                        continue;
 
                /* see if this ssn has been marked by skipping */
-               if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+               if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
                        break;
 
                __skb_unlink(pos, lobby);
@@ -932,8 +932,8 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
                csid = cevent->stream;
                cssn = cevent->ssn;
 
-               if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
-                       sctp_ssn_next(in, csid);
+               if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
+                       sctp_ssn_next(stream, in, csid);
                        __skb_unlink(pos, lobby);
                        __skb_queue_tail(&temp, pos);
                        event = sctp_skb2event(pos);
@@ -955,17 +955,17 @@ static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
  */
 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 {
-       struct sctp_stream *in;
+       struct sctp_stream *stream;
 
        /* Note: The stream ID must be verified before this routine.  */
-       in  = &ulpq->asoc->ssnmap->in;
+       stream  = ulpq->asoc->stream;
 
        /* Is this an old SSN?  If so ignore. */
-       if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
+       if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
                return;
 
        /* Mark that we are no longer expecting this SSN or lower. */
-       sctp_ssn_skip(in, sid, ssn);
+       sctp_ssn_skip(stream, in, sid, ssn);
 
        /* Go find any other chunks that were waiting for
         * ordering and deliver them if needed.
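
The ulpqueue conversion compiles because the old struct-pointer helpers became macros over the new struct sctp_stream. Reconstructed from the call sites above (a hypothetical sketch; the real definitions sit in the SCTP headers, outside this excerpt):

/* Presumed accessor shapes; 'type' is pasted in as the .in/.out member. */
#define sctp_ssn_peek(stream, type, sid)	((stream)->type[sid].ssn)
#define sctp_ssn_next(stream, type, sid)	((stream)->type[sid].ssn++)
#define sctp_ssn_skip(stream, type, sid, ssn) \
	((stream)->type[sid].ssn = (ssn) + 1)
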
diff --git a/net/smc/Kconfig b/net/smc/Kconfig
new file mode 100644 (file)
index 0000000..c717ef0
--- /dev/null
@@ -0,0 +1,20 @@
+config SMC
+       tristate "SMC socket protocol family"
+       depends on INET && INFINIBAND
+       ---help---
+         SMC-R provides a "sockets over RDMA" solution making use of
+         RDMA over Converged Ethernet (RoCE) technology to upgrade
+         AF_INET TCP connections transparently.
+         The Linux implementation of the SMC-R solution is designed as
+         a separate socket family SMC.
+
+         Select this option if you want to run SMC socket applications.
+
+config SMC_DIAG
+       tristate "SMC: socket monitoring interface"
+       depends on SMC
+       ---help---
+         Support for SMC socket monitoring interface used by tools such as
+         smcss.
+
+         If unsure, say Y.
diff --git a/net/smc/Makefile b/net/smc/Makefile
new file mode 100644 (file)
index 0000000..1881046
--- /dev/null
@@ -0,0 +1,4 @@
+obj-$(CONFIG_SMC)      += smc.o
+obj-$(CONFIG_SMC_DIAG) += smc_diag.o
+smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o
+smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
new file mode 100644 (file)
index 0000000..5d4208a
--- /dev/null
@@ -0,0 +1,1407 @@
+/*
+ *  Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ *  AF_SMC protocol family socket handler keeping the AF_INET sock address type
+ *  applies to SOCK_STREAM sockets only
+ *  offers an alternative communication option for TCP-protocol sockets
+ *  applicable with RoCE-cards only
+ *
+ *  Initial restrictions:
+ *    - non-blocking connect postponed
+ *    - IPv6 support postponed
+ *    - support for alternate links postponed
+ *    - partial support for non-blocking sockets only
+ *    - support for urgent data postponed
+ *
+ *  Copyright IBM Corp. 2016
+ *
+ *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ *              based on prototype from Frank Blaschka
+ */
+
+#define KMSG_COMPONENT "smc"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/socket.h>
+#include <linux/inetdevice.h>
+#include <linux/workqueue.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/smc.h>
+
+#include "smc.h"
+#include "smc_clc.h"
+#include "smc_llc.h"
+#include "smc_cdc.h"
+#include "smc_core.h"
+#include "smc_ib.h"
+#include "smc_pnet.h"
+#include "smc_tx.h"
+#include "smc_rx.h"
+#include "smc_close.h"
+
+static DEFINE_MUTEX(smc_create_lgr_pending);   /* serialize link group
+                                                * creation
+                                                */
+
+struct smc_lgr_list smc_lgr_list = {           /* established link groups */
+       .lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
+       .list = LIST_HEAD_INIT(smc_lgr_list.list),
+};
+
+static void smc_tcp_listen_work(struct work_struct *);
+
+static void smc_set_keepalive(struct sock *sk, int val)
+{
+       struct smc_sock *smc = smc_sk(sk);
+
+       smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
+}
+
+static struct smc_hashinfo smc_v4_hashinfo = {
+       .lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
+};
+
+int smc_hash_sk(struct sock *sk)
+{
+       struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
+       struct hlist_head *head;
+
+       head = &h->ht;
+
+       write_lock_bh(&h->lock);
+       sk_add_node(sk, head);
+       sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+       write_unlock_bh(&h->lock);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(smc_hash_sk);
+
+void smc_unhash_sk(struct sock *sk)
+{
+       struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
+
+       write_lock_bh(&h->lock);
+       if (sk_del_node_init(sk))
+               sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+       write_unlock_bh(&h->lock);
+}
+EXPORT_SYMBOL_GPL(smc_unhash_sk);
+
+struct proto smc_proto = {
+       .name           = "SMC",
+       .owner          = THIS_MODULE,
+       .keepalive      = smc_set_keepalive,
+       .hash           = smc_hash_sk,
+       .unhash         = smc_unhash_sk,
+       .obj_size       = sizeof(struct smc_sock),
+       .h.smc_hash     = &smc_v4_hashinfo,
+       .slab_flags     = SLAB_DESTROY_BY_RCU,
+};
+EXPORT_SYMBOL_GPL(smc_proto);
+
+static int smc_release(struct socket *sock)
+{
+       struct sock *sk = sock->sk;
+       struct smc_sock *smc;
+       int rc = 0;
+
+       if (!sk)
+               goto out;
+
+       smc = smc_sk(sk);
+       sock_hold(sk);
+       if (sk->sk_state == SMC_LISTEN)
+               /* smc_close_non_accepted() is called and acquires
+                * sock lock for child sockets again
+                */
+               lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+       else
+               lock_sock(sk);
+
+       if (smc->use_fallback) {
+               sk->sk_state = SMC_CLOSED;
+               sk->sk_state_change(sk);
+       } else {
+               rc = smc_close_active(smc);
+               sock_set_flag(sk, SOCK_DEAD);
+               sk->sk_shutdown |= SHUTDOWN_MASK;
+       }
+       if (smc->clcsock) {
+               sock_release(smc->clcsock);
+               smc->clcsock = NULL;
+       }
+
+       /* detach socket */
+       sock_orphan(sk);
+       sock->sk = NULL;
+       if (smc->use_fallback) {
+               schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
+       } else if (sk->sk_state == SMC_CLOSED) {
+               smc_conn_free(&smc->conn);
+               schedule_delayed_work(&smc->sock_put_work,
+                                     SMC_CLOSE_SOCK_PUT_DELAY);
+       }
+       sk->sk_prot->unhash(sk);
+       release_sock(sk);
+
+       sock_put(sk);
+out:
+       return rc;
+}
+
+static void smc_destruct(struct sock *sk)
+{
+       if (sk->sk_state != SMC_CLOSED)
+               return;
+       if (!sock_flag(sk, SOCK_DEAD))
+               return;
+
+       sk_refcnt_debug_dec(sk);
+}
+
+static struct sock *smc_sock_alloc(struct net *net, struct socket *sock)
+{
+       struct smc_sock *smc;
+       struct sock *sk;
+
+       sk = sk_alloc(net, PF_SMC, GFP_KERNEL, &smc_proto, 0);
+       if (!sk)
+               return NULL;
+
+       sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
+       sk->sk_state = SMC_INIT;
+       sk->sk_destruct = smc_destruct;
+       sk->sk_protocol = SMCPROTO_SMC;
+       smc = smc_sk(sk);
+       INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
+       INIT_LIST_HEAD(&smc->accept_q);
+       spin_lock_init(&smc->accept_q_lock);
+       INIT_DELAYED_WORK(&smc->sock_put_work, smc_close_sock_put_work);
+       sk->sk_prot->hash(sk);
+       sk_refcnt_debug_inc(sk);
+
+       return sk;
+}
+
+static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
+                   int addr_len)
+{
+       struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
+       struct sock *sk = sock->sk;
+       struct smc_sock *smc;
+       int rc;
+
+       smc = smc_sk(sk);
+
+       /* replicate tests from inet_bind(), to be safe wrt. future changes */
+       rc = -EINVAL;
+       if (addr_len < sizeof(struct sockaddr_in))
+               goto out;
+
+       rc = -EAFNOSUPPORT;
+       /* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
+       if ((addr->sin_family != AF_INET) &&
+           ((addr->sin_family != AF_UNSPEC) ||
+            (addr->sin_addr.s_addr != htonl(INADDR_ANY))))
+               goto out;
+
+       lock_sock(sk);
+
+       /* Check if socket is already active */
+       rc = -EINVAL;
+       if (sk->sk_state != SMC_INIT)
+               goto out_rel;
+
+       smc->clcsock->sk->sk_reuse = sk->sk_reuse;
+       rc = kernel_bind(smc->clcsock, uaddr, addr_len);
+
+out_rel:
+       release_sock(sk);
+out:
+       return rc;
+}
+
+static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
+                                  unsigned long mask)
+{
+       /* options we don't get control of via setsockopt */
+       nsk->sk_type = osk->sk_type;
+       nsk->sk_sndbuf = osk->sk_sndbuf;
+       nsk->sk_rcvbuf = osk->sk_rcvbuf;
+       nsk->sk_sndtimeo = osk->sk_sndtimeo;
+       nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
+       nsk->sk_mark = osk->sk_mark;
+       nsk->sk_priority = osk->sk_priority;
+       nsk->sk_rcvlowat = osk->sk_rcvlowat;
+       nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
+       nsk->sk_err = osk->sk_err;
+
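+       /* within 'mask', nsk's flags are replaced by osk's; e.g. with
+        * mask == (1UL << SOCK_KEEPOPEN) only the keepalive flag is copied,
+        * flags outside the mask stay untouched
+        */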
+       nsk->sk_flags &= ~mask;
+       nsk->sk_flags |= osk->sk_flags & mask;
+}
+
+#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
+                            (1UL << SOCK_KEEPOPEN) | \
+                            (1UL << SOCK_LINGER) | \
+                            (1UL << SOCK_BROADCAST) | \
+                            (1UL << SOCK_TIMESTAMP) | \
+                            (1UL << SOCK_DBG) | \
+                            (1UL << SOCK_RCVTSTAMP) | \
+                            (1UL << SOCK_RCVTSTAMPNS) | \
+                            (1UL << SOCK_LOCALROUTE) | \
+                            (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
+                            (1UL << SOCK_RXQ_OVFL) | \
+                            (1UL << SOCK_WIFI_STATUS) | \
+                            (1UL << SOCK_NOFCS) | \
+                            (1UL << SOCK_FILTER_LOCKED))
+/* copy only relevant settings and flags of SOL_SOCKET level from smc to
+ * clc socket (since smc is not called for these options from net/core)
+ */
+static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
+{
+       smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
+}
+
+#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
+                            (1UL << SOCK_KEEPOPEN) | \
+                            (1UL << SOCK_LINGER) | \
+                            (1UL << SOCK_DBG))
+/* copy only settings and flags relevant for smc from clc to smc socket */
+static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
+{
+       smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
+}
+
+/* determine subnet and mask of internal TCP socket */
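+/* (for illustration: a clc socket bound to 192.168.1.5 on a device with
+ * netmask 255.255.255.0 yields *subnet == 192.168.1.0 and *prefix_len == 24)
+ */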
+int smc_netinfo_by_tcpsk(struct socket *clcsock,
+                        __be32 *subnet, u8 *prefix_len)
+{
+       struct dst_entry *dst = sk_dst_get(clcsock->sk);
+       struct sockaddr_in addr;
+       int rc = -ENOENT;
+       int len;
+
+       if (!dst) {
+               rc = -ENOTCONN;
+               goto out;
+       }
+       if (!dst->dev) {
+               rc = -ENODEV;
+               goto out_rel;
+       }
+
+       /* get address to which the internal TCP socket is bound */
+       kernel_getsockname(clcsock, (struct sockaddr *)&addr, &len);
+       /* analyze IPv4 specific data of net_device belonging to TCP socket */
+       for_ifa(dst->dev->ip_ptr) {
+               if (ifa->ifa_address != addr.sin_addr.s_addr)
+                       continue;
+               *prefix_len = inet_mask_len(ifa->ifa_mask);
+               *subnet = ifa->ifa_address & ifa->ifa_mask;
+               rc = 0;
+               break;
+       } endfor_ifa(dst->dev->ip_ptr);
+
+out_rel:
+       dst_release(dst);
+out:
+       return rc;
+}
+
+static int smc_clnt_conf_first_link(struct smc_sock *smc, union ib_gid *gid)
+{
+       struct smc_link_group *lgr = smc->conn.lgr;
+       struct smc_link *link;
+       int rest;
+       int rc;
+
+       link = &lgr->lnk[SMC_SINGLE_LINK];
+       /* receive CONFIRM LINK request from server over RoCE fabric */
+       rest = wait_for_completion_interruptible_timeout(
+               &link->llc_confirm,
+               SMC_LLC_WAIT_FIRST_TIME);
+       if (rest <= 0) {
+               struct smc_clc_msg_decline dclc;
+
+               rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
+                                     SMC_CLC_DECLINE);
+               return rc;
+       }
+
+       rc = smc_ib_modify_qp_rts(link);
+       if (rc)
+               return SMC_CLC_DECL_INTERR;
+
+       smc_wr_remember_qp_attr(link);
+       /* send CONFIRM LINK response over RoCE fabric */
+       rc = smc_llc_send_confirm_link(link,
+                                      link->smcibdev->mac[link->ibport - 1],
+                                      gid, SMC_LLC_RESP);
+       if (rc < 0)
+               return SMC_CLC_DECL_TCL;
+
+       return rc;
+}
+
+static void smc_conn_save_peer_info(struct smc_sock *smc,
+                                   struct smc_clc_msg_accept_confirm *clc)
+{
+       smc->conn.peer_conn_idx = clc->conn_idx;
+       smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token);
+       smc->conn.peer_rmbe_size = smc_uncompress_bufsize(clc->rmbe_size);
+       atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
+}
+
+static void smc_link_save_peer_info(struct smc_link *link,
+                                   struct smc_clc_msg_accept_confirm *clc)
+{
+       link->peer_qpn = ntoh24(clc->qpn);
+       memcpy(link->peer_gid, clc->lcl.gid, SMC_GID_SIZE);
+       memcpy(link->peer_mac, clc->lcl.mac, sizeof(link->peer_mac));
+       link->peer_psn = ntoh24(clc->psn);
+       link->peer_mtu = clc->qp_mtu;
+}
+
+/* setup for RDMA connection of client */
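+/* (outline: CLC proposal / accept / confirm messages are exchanged over the
+ * internal TCP socket; on a first contact a link group is created and the
+ * new link is confirmed via LLC messages over the RoCE fabric)
+ */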
+static int smc_connect_rdma(struct smc_sock *smc)
+{
+       struct sockaddr_in *inaddr = (struct sockaddr_in *)smc->addr;
+       struct smc_clc_msg_accept_confirm aclc;
+       int local_contact = SMC_FIRST_CONTACT;
+       struct smc_ib_device *smcibdev;
+       struct smc_link *link;
+       u8 srv_first_contact;
+       int reason_code = 0;
+       int rc = 0;
+       u8 ibport;
+
+       /* IPSec connections opt out of SMC-R optimizations */
+       if (using_ipsec(smc)) {
+               reason_code = SMC_CLC_DECL_IPSEC;
+               goto decline_rdma;
+       }
+
+       /* PNET table lookup: search active ib_device and port
+        * within same PNETID that also contains the ethernet device
+        * used for the internal TCP socket
+        */
+       smc_pnet_find_roce_resource(smc->clcsock->sk, &smcibdev, &ibport);
+       if (!smcibdev) {
+               reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
+               goto decline_rdma;
+       }
+
+       /* do inband token exchange */
+       reason_code = smc_clc_send_proposal(smc, smcibdev, ibport);
+       if (reason_code < 0) {
+               rc = reason_code;
+               goto out_err;
+       }
+       if (reason_code > 0) /* configuration error */
+               goto decline_rdma;
+       /* receive SMC Accept CLC message */
+       reason_code = smc_clc_wait_msg(smc, &aclc, sizeof(aclc),
+                                      SMC_CLC_ACCEPT);
+       if (reason_code < 0) {
+               rc = reason_code;
+               goto out_err;
+       }
+       if (reason_code > 0)
+               goto decline_rdma;
+
+       srv_first_contact = aclc.hdr.flag;
+       mutex_lock(&smc_create_lgr_pending);
+       local_contact = smc_conn_create(smc, inaddr->sin_addr.s_addr, smcibdev,
+                                       ibport, &aclc.lcl, srv_first_contact);
+       if (local_contact < 0) {
+               rc = local_contact;
+               if (rc == -ENOMEM)
+                       reason_code = SMC_CLC_DECL_MEM; /* insufficient memory */
+               else if (rc == -ENOLINK)
+                       reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */
+               goto decline_rdma_unlock;
+       }
+       link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK];
+
+       smc_conn_save_peer_info(smc, &aclc);
+
+       rc = smc_sndbuf_create(smc);
+       if (rc) {
+               reason_code = SMC_CLC_DECL_MEM;
+               goto decline_rdma_unlock;
+       }
+       rc = smc_rmb_create(smc);
+       if (rc) {
+               reason_code = SMC_CLC_DECL_MEM;
+               goto decline_rdma_unlock;
+       }
+
+       if (local_contact == SMC_FIRST_CONTACT)
+               smc_link_save_peer_info(link, &aclc);
+
+       rc = smc_rmb_rtoken_handling(&smc->conn, &aclc);
+       if (rc) {
+               reason_code = SMC_CLC_DECL_INTERR;
+               goto decline_rdma_unlock;
+       }
+
+       if (local_contact == SMC_FIRST_CONTACT) {
+               rc = smc_ib_ready_link(link);
+               if (rc) {
+                       reason_code = SMC_CLC_DECL_INTERR;
+                       goto decline_rdma_unlock;
+               }
+       }
+
+       rc = smc_clc_send_confirm(smc);
+       if (rc)
+               goto out_err_unlock;
+
+       if (local_contact == SMC_FIRST_CONTACT) {
+               /* QP confirmation over RoCE fabric */
+               reason_code = smc_clnt_conf_first_link(
+                       smc, &smcibdev->gid[ibport - 1]);
+               if (reason_code < 0) {
+                       rc = reason_code;
+                       goto out_err_unlock;
+               }
+               if (reason_code > 0)
+                       goto decline_rdma_unlock;
+       }
+
+       mutex_unlock(&smc_create_lgr_pending);
+       smc_tx_init(smc);
+       smc_rx_init(smc);
+
+out_connected:
+       smc_copy_sock_settings_to_clc(smc);
+       if (smc->sk.sk_state == SMC_INIT)
+               smc->sk.sk_state = SMC_ACTIVE;
+
+       return rc ? rc : local_contact;
+
+decline_rdma_unlock:
+       mutex_unlock(&smc_create_lgr_pending);
+       smc_conn_free(&smc->conn);
+decline_rdma:
+       /* RDMA setup failed, switch back to TCP */
+       smc->use_fallback = true;
+       if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
+               rc = smc_clc_send_decline(smc, reason_code, 0);
+               if (rc < sizeof(struct smc_clc_msg_decline))
+                       goto out_err;
+       }
+       goto out_connected;
+
+out_err_unlock:
+       mutex_unlock(&smc_create_lgr_pending);
+       smc_conn_free(&smc->conn);
+out_err:
+       return rc;
+}
+
+static int smc_connect(struct socket *sock, struct sockaddr *addr,
+                      int alen, int flags)
+{
+       struct sock *sk = sock->sk;
+       struct smc_sock *smc;
+       int rc = -EINVAL;
+
+       smc = smc_sk(sk);
+
+       /* separate smc parameter checking to be safe */
+       if (alen < sizeof(addr->sa_family))
+               goto out_err;
+       if (addr->sa_family != AF_INET)
+               goto out_err;
+       smc->addr = addr;       /* needed for nonblocking connect */
+
+       lock_sock(sk);
+       switch (sk->sk_state) {
+       default:
+               goto out;
+       case SMC_ACTIVE:
+               rc = -EISCONN;
+               goto out;
+       case SMC_INIT:
+               rc = 0;
+               break;
+       }
+
+       smc_copy_sock_settings_to_clc(smc);
+       rc = kernel_connect(smc->clcsock, addr, alen, flags);
+       if (rc)
+               goto out;
+
+       /* setup RDMA connection */
+       rc = smc_connect_rdma(smc);
+       if (rc < 0)
+               goto out;
+       else
+               rc = 0; /* success cases including fallback */
+
+out:
+       release_sock(sk);
+out_err:
+       return rc;
+}
+
+static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
+{
+       struct sock *sk = &lsmc->sk;
+       struct socket *new_clcsock;
+       struct sock *new_sk;
+       int rc;
+
+       release_sock(&lsmc->sk);
+       new_sk = smc_sock_alloc(sock_net(sk), NULL);
+       if (!new_sk) {
+               rc = -ENOMEM;
+               lsmc->sk.sk_err = ENOMEM;
+               *new_smc = NULL;
+               lock_sock(&lsmc->sk);
+               goto out;
+       }
+       *new_smc = smc_sk(new_sk);
+
+       rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+       lock_sock(&lsmc->sk);
+       if (rc < 0) {
+               lsmc->sk.sk_err = -rc;
+               new_sk->sk_state = SMC_CLOSED;
+               sock_set_flag(new_sk, SOCK_DEAD);
+               sk->sk_prot->unhash(new_sk);
+               sock_put(new_sk);
+               *new_smc = NULL;
+               goto out;
+       }
+       if (lsmc->sk.sk_state == SMC_CLOSED) {
+               if (new_clcsock)
+                       sock_release(new_clcsock);
+               new_sk->sk_state = SMC_CLOSED;
+               sock_set_flag(new_sk, SOCK_DEAD);
+               sk->sk_prot->unhash(new_sk);
+               sock_put(new_sk);
+               *new_smc = NULL;
+               goto out;
+       }
+
+       (*new_smc)->clcsock = new_clcsock;
+out:
+       return rc;
+}
+
+/* add a just created sock to the accept queue of the listen sock as
+ * candidate for a following socket accept call from user space
+ */
+static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
+{
+       struct smc_sock *par = smc_sk(parent);
+
+       sock_hold(sk);
+       spin_lock(&par->accept_q_lock);
+       list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
+       spin_unlock(&par->accept_q_lock);
+       sk_acceptq_added(parent);
+}
+
+/* remove a socket from the accept queue of its parental listening socket */
+static void smc_accept_unlink(struct sock *sk)
+{
+       struct smc_sock *par = smc_sk(sk)->listen_smc;
+
+       spin_lock(&par->accept_q_lock);
+       list_del_init(&smc_sk(sk)->accept_q);
+       spin_unlock(&par->accept_q_lock);
+       sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
+       sock_put(sk);
+}
+
+/* remove a sock from the accept queue to bind it to a new socket created
+ * for a socket accept call from user space
+ */
+struct sock *smc_accept_dequeue(struct sock *parent,
+                               struct socket *new_sock)
+{
+       struct smc_sock *isk, *n;
+       struct sock *new_sk;
+
+       list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
+               new_sk = (struct sock *)isk;
+
+               smc_accept_unlink(new_sk);
+               if (new_sk->sk_state == SMC_CLOSED) {
+                       /* tbd in follow-on patch: close this sock */
+                       continue;
+               }
+               if (new_sock)
+                       sock_graft(new_sk, new_sock);
+               return new_sk;
+       }
+       return NULL;
+}
+
+/* clean up for a created but never accepted sock */
+void smc_close_non_accepted(struct sock *sk)
+{
+       struct smc_sock *smc = smc_sk(sk);
+
+       sock_hold(sk);
+       lock_sock(sk);
+       if (!sk->sk_lingertime)
+               /* wait for peer closing */
+               sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
+       if (!smc->use_fallback)
+               smc_close_active(smc);
+       if (smc->clcsock) {
+               struct socket *tcp;
+
+               tcp = smc->clcsock;
+               smc->clcsock = NULL;
+               sock_release(tcp);
+       }
+       sock_set_flag(sk, SOCK_DEAD);
+       sk->sk_shutdown |= SHUTDOWN_MASK;
+       if (smc->use_fallback) {
+               schedule_delayed_work(&smc->sock_put_work, TCP_TIMEWAIT_LEN);
+       } else {
+               smc_conn_free(&smc->conn);
+               schedule_delayed_work(&smc->sock_put_work,
+                                     SMC_CLOSE_SOCK_PUT_DELAY);
+       }
+       release_sock(sk);
+       sock_put(sk);
+}
+
+static int smc_serv_conf_first_link(struct smc_sock *smc)
+{
+       struct smc_link_group *lgr = smc->conn.lgr;
+       struct smc_link *link;
+       int rest;
+       int rc;
+
+       link = &lgr->lnk[SMC_SINGLE_LINK];
+       /* send CONFIRM LINK request to client over the RoCE fabric */
+       rc = smc_llc_send_confirm_link(link,
+                                      link->smcibdev->mac[link->ibport - 1],
+                                      &link->smcibdev->gid[link->ibport - 1],
+                                      SMC_LLC_REQ);
+       if (rc < 0)
+               return SMC_CLC_DECL_TCL;
+
+       /* receive CONFIRM LINK response from client over the RoCE fabric */
+       rest = wait_for_completion_interruptible_timeout(
+               &link->llc_confirm_resp,
+               SMC_LLC_WAIT_FIRST_TIME);
+       if (rest <= 0) {
+               struct smc_clc_msg_decline dclc;
+
+               rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
+                                     SMC_CLC_DECLINE);
+       }
+
+       return rc;
+}
+
+/* setup for RDMA connection of server */
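+/* (outline: the server-side counterpart of smc_connect_rdma() - wait for the
+ * CLC proposal, send accept, wait for confirm; on a first contact the LLC
+ * CONFIRM LINK exchange is run with the server as requester)
+ */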
+static void smc_listen_work(struct work_struct *work)
+{
+       struct smc_sock *new_smc = container_of(work, struct smc_sock,
+                                               smc_listen_work);
+       struct socket *newclcsock = new_smc->clcsock;
+       struct smc_sock *lsmc = new_smc->listen_smc;
+       struct smc_clc_msg_accept_confirm cclc;
+       int local_contact = SMC_REUSE_CONTACT;
+       struct sock *newsmcsk = &new_smc->sk;
+       struct smc_clc_msg_proposal pclc;
+       struct smc_ib_device *smcibdev;
+       struct sockaddr_in peeraddr;
+       struct smc_link *link;
+       int reason_code = 0;
+       int rc = 0, len;
+       __be32 subnet;
+       u8 prefix_len;
+       u8 ibport;
+
+       /* do inband token exchange -
+        * wait for and receive SMC Proposal CLC message
+        */
+       reason_code = smc_clc_wait_msg(new_smc, &pclc, sizeof(pclc),
+                                      SMC_CLC_PROPOSAL);
+       if (reason_code < 0)
+               goto out_err;
+       if (reason_code > 0)
+               goto decline_rdma;
+
+       /* IPSec connections opt out of SMC-R optimizations */
+       if (using_ipsec(new_smc)) {
+               reason_code = SMC_CLC_DECL_IPSEC;
+               goto decline_rdma;
+       }
+
+       /* PNET table lookup: search active ib_device and port
+        * within same PNETID that also contains the ethernet device
+        * used for the internal TCP socket
+        */
+       smc_pnet_find_roce_resource(newclcsock->sk, &smcibdev, &ibport);
+       if (!smcibdev) {
+               reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
+               goto decline_rdma;
+       }
+
+       /* determine subnet and mask from internal TCP socket */
+       rc = smc_netinfo_by_tcpsk(newclcsock, &subnet, &prefix_len);
+       if (rc) {
+               reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
+               goto decline_rdma;
+       }
+       if ((pclc.outgoing_subnet != subnet) ||
+           (pclc.prefix_len != prefix_len)) {
+               reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
+               goto decline_rdma;
+       }
+
+       /* get address of the peer connected to the internal TCP socket */
+       kernel_getpeername(newclcsock, (struct sockaddr *)&peeraddr, &len);
+
+       /* allocate connection / link group */
+       mutex_lock(&smc_create_lgr_pending);
+       local_contact = smc_conn_create(new_smc, peeraddr.sin_addr.s_addr,
+                                       smcibdev, ibport, &pclc.lcl, 0);
+       if (local_contact == SMC_REUSE_CONTACT)
+               /* lock no longer needed, release it before the following
+                * (potentially long-blocking) smc_clc_wait_msg() call
+                */
+               mutex_unlock(&smc_create_lgr_pending);
+       if (local_contact < 0) {
+               rc = local_contact;
+               if (rc == -ENOMEM)
+                       reason_code = SMC_CLC_DECL_MEM; /* insufficient memory */
+               else if (rc == -ENOLINK)
+                       reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */
+               goto decline_rdma;
+       }
+       link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
+
+       rc = smc_sndbuf_create(new_smc);
+       if (rc) {
+               reason_code = SMC_CLC_DECL_MEM;
+               goto decline_rdma;
+       }
+       rc = smc_rmb_create(new_smc);
+       if (rc) {
+               reason_code = SMC_CLC_DECL_MEM;
+               goto decline_rdma;
+       }
+
+       rc = smc_clc_send_accept(new_smc, local_contact);
+       if (rc)
+               goto out_err;
+
+       /* receive SMC Confirm CLC message */
+       reason_code = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
+                                      SMC_CLC_CONFIRM);
+       if (reason_code < 0)
+               goto out_err;
+       if (reason_code > 0)
+               goto decline_rdma;
+       smc_conn_save_peer_info(new_smc, &cclc);
+       if (local_contact == SMC_FIRST_CONTACT)
+               smc_link_save_peer_info(link, &cclc);
+
+       rc = smc_rmb_rtoken_handling(&new_smc->conn, &cclc);
+       if (rc) {
+               reason_code = SMC_CLC_DECL_INTERR;
+               goto decline_rdma;
+       }
+
+       if (local_contact == SMC_FIRST_CONTACT) {
+               rc = smc_ib_ready_link(link);
+               if (rc) {
+                       reason_code = SMC_CLC_DECL_INTERR;
+                       goto decline_rdma;
+               }
+               /* QP confirmation over RoCE fabric */
+               reason_code = smc_serv_conf_first_link(new_smc);
+               if (reason_code < 0) {
+                       /* peer is not aware of a problem */
+                       rc = reason_code;
+                       goto out_err;
+               }
+               if (reason_code > 0)
+                       goto decline_rdma;
+       }
+
+       smc_tx_init(new_smc);
+       smc_rx_init(new_smc);
+
+out_connected:
+       sk_refcnt_debug_inc(newsmcsk);
+       if (newsmcsk->sk_state == SMC_INIT)
+               newsmcsk->sk_state = SMC_ACTIVE;
+enqueue:
+       if (local_contact == SMC_FIRST_CONTACT)
+               mutex_unlock(&smc_create_lgr_pending);
+       lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
+       if (lsmc->sk.sk_state == SMC_LISTEN) {
+               smc_accept_enqueue(&lsmc->sk, newsmcsk);
+       } else { /* no longer listening */
+               smc_close_non_accepted(newsmcsk);
+       }
+       release_sock(&lsmc->sk);
+
+       /* Wake up accept */
+       lsmc->sk.sk_data_ready(&lsmc->sk);
+       sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
+       return;
+
+decline_rdma:
+       /* RDMA setup failed, switch back to TCP */
+       smc_conn_free(&new_smc->conn);
+       new_smc->use_fallback = true;
+       if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
+               rc = smc_clc_send_decline(new_smc, reason_code, 0);
+               if (rc < sizeof(struct smc_clc_msg_decline))
+                       goto out_err;
+       }
+       goto out_connected;
+
+out_err:
+       newsmcsk->sk_state = SMC_CLOSED;
+       smc_conn_free(&new_smc->conn);
+       goto enqueue; /* queue new sock with sk_err set */
+}
+
+static void smc_tcp_listen_work(struct work_struct *work)
+{
+       struct smc_sock *lsmc = container_of(work, struct smc_sock,
+                                            tcp_listen_work);
+       struct smc_sock *new_smc;
+       int rc = 0;
+
+       lock_sock(&lsmc->sk);
+       while (lsmc->sk.sk_state == SMC_LISTEN) {
+               rc = smc_clcsock_accept(lsmc, &new_smc);
+               if (rc)
+                       goto out;
+               if (!new_smc)
+                       continue;
+
+               new_smc->listen_smc = lsmc;
+               new_smc->use_fallback = false; /* assume rdma capability first */
+               sock_hold(&lsmc->sk); /* sock_put in smc_listen_work */
+               INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
+               smc_copy_sock_settings_to_smc(new_smc);
+               schedule_work(&new_smc->smc_listen_work);
+       }
+
+out:
+       release_sock(&lsmc->sk);
+       lsmc->sk.sk_data_ready(&lsmc->sk); /* no more listening, wake accept */
+}
+
+static int smc_listen(struct socket *sock, int backlog)
+{
+       struct sock *sk = sock->sk;
+       struct smc_sock *smc;
+       int rc;
+
+       smc = smc_sk(sk);
+       lock_sock(sk);
+
+       rc = -EINVAL;
+       if ((sk->sk_state != SMC_INIT) && (sk->sk_state != SMC_LISTEN))
+               goto out;
+
+       rc = 0;
+       if (sk->sk_state == SMC_LISTEN) {
+               sk->sk_max_ack_backlog = backlog;
+               goto out;
+       }
+       /* some socket options are handled in core, so we cannot apply
+        * them to the clc socket -- copy smc socket options to clc socket
+        */
+       smc_copy_sock_settings_to_clc(smc);
+
+       rc = kernel_listen(smc->clcsock, backlog);
+       if (rc)
+               goto out;
+       sk->sk_max_ack_backlog = backlog;
+       sk->sk_ack_backlog = 0;
+       sk->sk_state = SMC_LISTEN;
+       INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
+       schedule_work(&smc->tcp_listen_work);
+
+out:
+       release_sock(sk);
+       return rc;
+}
+
+static int smc_accept(struct socket *sock, struct socket *new_sock,
+                     int flags)
+{
+       struct sock *sk = sock->sk, *nsk;
+       DECLARE_WAITQUEUE(wait, current);
+       struct smc_sock *lsmc;
+       long timeo;
+       int rc = 0;
+
+       lsmc = smc_sk(sk);
+       lock_sock(sk);
+
+       if (lsmc->sk.sk_state != SMC_LISTEN) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* Wait for an incoming connection */
+       timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+       add_wait_queue_exclusive(sk_sleep(sk), &wait);
+       while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               if (!timeo) {
+                       rc = -EAGAIN;
+                       break;
+               }
+               release_sock(sk);
+               timeo = schedule_timeout(timeo);
+               /* wakeup by sk_data_ready in smc_listen_work() */
+               sched_annotate_sleep();
+               lock_sock(sk);
+               if (signal_pending(current)) {
+                       rc = sock_intr_errno(timeo);
+                       break;
+               }
+       }
+       set_current_state(TASK_RUNNING);
+       remove_wait_queue(sk_sleep(sk), &wait);
+
+       if (!rc)
+               rc = sock_error(nsk);
+
+out:
+       release_sock(sk);
+       return rc;
+}
+
+static int smc_getname(struct socket *sock, struct sockaddr *addr,
+                      int *len, int peer)
+{
+       struct smc_sock *smc;
+
+       if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
+           (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
+               return -ENOTCONN;
+
+       smc = smc_sk(sock->sk);
+
+       return smc->clcsock->ops->getname(smc->clcsock, addr, len, peer);
+}
+
+static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+{
+       struct sock *sk = sock->sk;
+       struct smc_sock *smc;
+       int rc = -EPIPE;
+
+       smc = smc_sk(sk);
+       lock_sock(sk);
+       if ((sk->sk_state != SMC_ACTIVE) &&
+           (sk->sk_state != SMC_APPCLOSEWAIT1) &&
+           (sk->sk_state != SMC_INIT))
+               goto out;
+       if (smc->use_fallback)
+               rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
+       else
+               rc = smc_tx_sendmsg(smc, msg, len);
+out:
+       release_sock(sk);
+       return rc;
+}
+
+static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+                      int flags)
+{
+       struct sock *sk = sock->sk;
+       struct smc_sock *smc;
+       int rc = -ENOTCONN;
+
+       smc = smc_sk(sk);
+       lock_sock(sk);
+       if ((sk->sk_state == SMC_INIT) ||
+           (sk->sk_state == SMC_LISTEN) ||
+           (sk->sk_state == SMC_CLOSED))
+               goto out;
+
+       if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
+               rc = 0;
+               goto out;
+       }
+
+       if (smc->use_fallback)
+               rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
+       else
+               rc = smc_rx_recvmsg(smc, msg, len, flags);
+
+out:
+       release_sock(sk);
+       return rc;
+}
+
+static unsigned int smc_accept_poll(struct sock *parent)
+{
+       struct smc_sock *isk;
+       struct sock *sk;
+
+       lock_sock(parent);
+       list_for_each_entry(isk, &smc_sk(parent)->accept_q, accept_q) {
+               sk = (struct sock *)isk;
+
+               if (sk->sk_state == SMC_ACTIVE) {
+                       release_sock(parent);
+                       return POLLIN | POLLRDNORM;
+               }
+       }
+       release_sock(parent);
+
+       return 0;
+}
+
+static unsigned int smc_poll(struct file *file, struct socket *sock,
+                            poll_table *wait)
+{
+       struct sock *sk = sock->sk;
+       unsigned int mask = 0;
+       struct smc_sock *smc;
+       int rc;
+
+       smc = smc_sk(sock->sk);
+       if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
+               /* delegate to CLC child sock */
+               mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
+               /* if non-blocking connect finished ... */
+               lock_sock(sk);
+               if ((sk->sk_state == SMC_INIT) && (mask & POLLOUT)) {
+                       sk->sk_err = smc->clcsock->sk->sk_err;
+                       if (sk->sk_err) {
+                               mask |= POLLERR;
+                       } else {
+                               rc = smc_connect_rdma(smc);
+                               if (rc < 0)
+                                       mask |= POLLERR;
+                               else
+                                       /* success cases including fallback */
+                                       mask |= POLLOUT | POLLWRNORM;
+                       }
+               }
+               release_sock(sk);
+       } else {
+               sock_poll_wait(file, sk_sleep(sk), wait);
+               if (sk->sk_state == SMC_LISTEN)
+                       /* woken up by sk_data_ready in smc_listen_work() */
+                       mask |= smc_accept_poll(sk);
+               if (sk->sk_err)
+                       mask |= POLLERR;
+               if (atomic_read(&smc->conn.sndbuf_space) ||
+                   (sk->sk_shutdown & SEND_SHUTDOWN)) {
+                       mask |= POLLOUT | POLLWRNORM;
+               } else {
+                       sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+                       set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+               }
+               if (atomic_read(&smc->conn.bytes_to_rcv))
+                       mask |= POLLIN | POLLRDNORM;
+               if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
+                   (sk->sk_state == SMC_CLOSED))
+                       mask |= POLLHUP;
+               if (sk->sk_shutdown & RCV_SHUTDOWN)
+                       mask |= POLLIN | POLLRDNORM | POLLRDHUP;
+               if (sk->sk_state == SMC_APPCLOSEWAIT1)
+                       mask |= POLLIN;
+       }
+
+       return mask;
+}
+
+static int smc_shutdown(struct socket *sock, int how)
+{
+       struct sock *sk = sock->sk;
+       struct smc_sock *smc;
+       int rc = -EINVAL;
+       int rc1 = 0;
+
+       smc = smc_sk(sk);
+
+       if ((how < SHUT_RD) || (how > SHUT_RDWR))
+               return rc;
+
+       lock_sock(sk);
+
+       rc = -ENOTCONN;
+       if ((sk->sk_state != SMC_LISTEN) &&
+           (sk->sk_state != SMC_ACTIVE) &&
+           (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
+           (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
+           (sk->sk_state != SMC_APPCLOSEWAIT1) &&
+           (sk->sk_state != SMC_APPCLOSEWAIT2) &&
+           (sk->sk_state != SMC_APPFINCLOSEWAIT))
+               goto out;
+       if (smc->use_fallback) {
+               rc = kernel_sock_shutdown(smc->clcsock, how);
+               sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
+               if (sk->sk_shutdown == SHUTDOWN_MASK)
+                       sk->sk_state = SMC_CLOSED;
+               goto out;
+       }
+       switch (how) {
+       case SHUT_RDWR:         /* shutdown in both directions */
+               rc = smc_close_active(smc);
+               break;
+       case SHUT_WR:
+               rc = smc_close_shutdown_write(smc);
+               break;
+       case SHUT_RD:
+               if (sk->sk_state == SMC_LISTEN)
+                       rc = smc_close_active(smc);
+               else
+                       /* nothing more to do because peer is not involved */
+                       rc = 0;
+               break;
+       }
+       rc1 = kernel_sock_shutdown(smc->clcsock, how);
+       /* map sock_shutdown_cmd constants to sk_shutdown value range */
+       sk->sk_shutdown |= how + 1;
+
+out:
+       release_sock(sk);
+       return rc ? rc : rc1;
+}
+
+static int smc_setsockopt(struct socket *sock, int level, int optname,
+                         char __user *optval, unsigned int optlen)
+{
+       struct sock *sk = sock->sk;
+       struct smc_sock *smc;
+
+       smc = smc_sk(sk);
+
+       /* generic setsockopts reaching us here always apply to the
+        * CLC socket
+        */
+       return smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
+                                            optval, optlen);
+}
+
+static int smc_getsockopt(struct socket *sock, int level, int optname,
+                         char __user *optval, int __user *optlen)
+{
+       struct smc_sock *smc;
+
+       smc = smc_sk(sock->sk);
+       /* socket options apply to the CLC socket */
+       return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
+                                            optval, optlen);
+}
+
+static int smc_ioctl(struct socket *sock, unsigned int cmd,
+                    unsigned long arg)
+{
+       struct smc_sock *smc;
+
+       smc = smc_sk(sock->sk);
+       if (smc->use_fallback)
+               return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
+       else
+               return sock_no_ioctl(sock, cmd, arg);
+}
+
+static ssize_t smc_sendpage(struct socket *sock, struct page *page,
+                           int offset, size_t size, int flags)
+{
+       struct sock *sk = sock->sk;
+       struct smc_sock *smc;
+       int rc = -EPIPE;
+
+       smc = smc_sk(sk);
+       lock_sock(sk);
+       if (sk->sk_state != SMC_ACTIVE)
+               goto out;
+       if (smc->use_fallback)
+               rc = kernel_sendpage(smc->clcsock, page, offset,
+                                    size, flags);
+       else
+               rc = sock_no_sendpage(sock, page, offset, size, flags);
+
+out:
+       release_sock(sk);
+       return rc;
+}
+
+static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
+                              struct pipe_inode_info *pipe, size_t len,
+                              unsigned int flags)
+{
+       struct sock *sk = sock->sk;
+       struct smc_sock *smc;
+       int rc = -ENOTCONN;
+
+       smc = smc_sk(sk);
+       lock_sock(sk);
+       if ((sk->sk_state != SMC_ACTIVE) && (sk->sk_state != SMC_CLOSED))
+               goto out;
+       if (smc->use_fallback) {
+               rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
+                                                   pipe, len, flags);
+       } else {
+               rc = -EOPNOTSUPP;
+       }
+out:
+       release_sock(sk);
+       return rc;
+}
+
+/* must look like tcp */
+static const struct proto_ops smc_sock_ops = {
+       .family         = PF_SMC,
+       .owner          = THIS_MODULE,
+       .release        = smc_release,
+       .bind           = smc_bind,
+       .connect        = smc_connect,
+       .socketpair     = sock_no_socketpair,
+       .accept         = smc_accept,
+       .getname        = smc_getname,
+       .poll           = smc_poll,
+       .ioctl          = smc_ioctl,
+       .listen         = smc_listen,
+       .shutdown       = smc_shutdown,
+       .setsockopt     = smc_setsockopt,
+       .getsockopt     = smc_getsockopt,
+       .sendmsg        = smc_sendmsg,
+       .recvmsg        = smc_recvmsg,
+       .mmap           = sock_no_mmap,
+       .sendpage       = smc_sendpage,
+       .splice_read    = smc_splice_read,
+};
+
+static int smc_create(struct net *net, struct socket *sock, int protocol,
+                     int kern)
+{
+       struct smc_sock *smc;
+       struct sock *sk;
+       int rc;
+
+       rc = -ESOCKTNOSUPPORT;
+       if (sock->type != SOCK_STREAM)
+               goto out;
+
+       rc = -EPROTONOSUPPORT;
+       if ((protocol != IPPROTO_IP) && (protocol != IPPROTO_TCP))
+               goto out;
+
+       rc = -ENOBUFS;
+       sock->ops = &smc_sock_ops;
+       sk = smc_sock_alloc(net, sock);
+       if (!sk)
+               goto out;
+
+       /* create internal TCP socket for CLC handshake and fallback */
+       smc = smc_sk(sk);
+       smc->use_fallback = false; /* assume rdma capability first */
+       rc = sock_create_kern(net, PF_INET, SOCK_STREAM,
+                             IPPROTO_TCP, &smc->clcsock);
+       if (rc) {
+               sk_common_release(sk);
+               goto out; /* clcsock creation failed, do not dereference it */
+       }
+       smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
+       smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
+
+out:
+       return rc;
+}
+
+static const struct net_proto_family smc_sock_family_ops = {
+       .family = PF_SMC,
+       .owner  = THIS_MODULE,
+       .create = smc_create,
+};
+
+static int __init smc_init(void)
+{
+       int rc;
+
+       rc = smc_pnet_init();
+       if (rc)
+               return rc;
+
+       rc = smc_llc_init();
+       if (rc) {
+               pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
+               goto out_pnet;
+       }
+
+       rc = smc_cdc_init();
+       if (rc) {
+               pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
+               goto out_pnet;
+       }
+
+       rc = proto_register(&smc_proto, 1);
+       if (rc) {
+               pr_err("%s: proto_register fails with %d\n", __func__, rc);
+               goto out_pnet;
+       }
+
+       rc = sock_register(&smc_sock_family_ops);
+       if (rc) {
+               pr_err("%s: sock_register fails with %d\n", __func__, rc);
+               goto out_proto;
+       }
+       INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
+
+       rc = smc_ib_register_client();
+       if (rc) {
+               pr_err("%s: ib_register fails with %d\n", __func__, rc);
+               goto out_sock;
+       }
+
+       return 0;
+
+out_sock:
+       sock_unregister(PF_SMC);
+out_proto:
+       proto_unregister(&smc_proto);
+out_pnet:
+       smc_pnet_exit();
+       return rc;
+}
+
+static void __exit smc_exit(void)
+{
+       struct smc_link_group *lgr, *lg;
+       LIST_HEAD(lgr_freeing_list);
+
+       spin_lock_bh(&smc_lgr_list.lock);
+       if (!list_empty(&smc_lgr_list.list))
+               list_splice_init(&smc_lgr_list.list, &lgr_freeing_list);
+       spin_unlock_bh(&smc_lgr_list.lock);
+       list_for_each_entry_safe(lgr, lg, &lgr_freeing_list, list) {
+               list_del_init(&lgr->list);
+               smc_lgr_free(lgr); /* free link group */
+       }
+       smc_ib_unregister_client();
+       sock_unregister(PF_SMC);
+       proto_unregister(&smc_proto);
+       smc_pnet_exit();
+}
+
+module_init(smc_init);
+module_exit(smc_exit);
+
+MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("smc socket address family");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(PF_SMC);
diff --git a/net/smc/smc.h b/net/smc/smc.h
new file mode 100644 (file)
index 0000000..ee5fbea
--- /dev/null
@@ -0,0 +1,274 @@
+/*
+ *  Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ *  Definitions for the SMC module (socket related)
+ *
+ *  Copyright IBM Corp. 2016
+ *
+ *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+#ifndef __SMC_H
+#define __SMC_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+#include <linux/compiler.h> /* __aligned */
+#include <net/sock.h>
+
+#include "smc_ib.h"
+
+#define SMCPROTO_SMC           0       /* SMC protocol */
+
+#define SMC_MAX_PORTS          2       /* Max # of ports */
+
+extern struct proto smc_proto;
+
+#ifdef ATOMIC64_INIT
+#define KERNEL_HAS_ATOMIC64
+#endif
+
+enum smc_state {               /* possible states of an SMC socket */
+       SMC_ACTIVE      = 1,
+       SMC_INIT        = 2,
+       SMC_CLOSED      = 7,
+       SMC_LISTEN      = 10,
+       /* normal close */
+       SMC_PEERCLOSEWAIT1      = 20,
+       SMC_PEERCLOSEWAIT2      = 21,
+       SMC_APPFINCLOSEWAIT     = 24,
+       SMC_APPCLOSEWAIT1       = 22,
+       SMC_APPCLOSEWAIT2       = 23,
+       SMC_PEERFINCLOSEWAIT    = 25,
+       /* abnormal close */
+       SMC_PEERABORTWAIT       = 26,
+       SMC_PROCESSABORT        = 27,
+};
+
+struct smc_link_group;
+
+struct smc_wr_rx_hdr { /* common prefix part of LLC and CDC to demultiplex */
+       u8                      type;
+} __aligned(1);
+
+struct smc_cdc_conn_state_flags {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u8      peer_done_writing : 1;  /* Sending done indicator */
+       u8      peer_conn_closed : 1;   /* Peer connection closed indicator */
+       u8      peer_conn_abort : 1;    /* Abnormal close indicator */
+       u8      reserved : 5;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u8      reserved : 5;
+       u8      peer_conn_abort : 1;
+       u8      peer_conn_closed : 1;
+       u8      peer_done_writing : 1;
+#endif
+};
+
+struct smc_cdc_producer_flags {
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u8      write_blocked : 1;      /* Writing Blocked, no rx buf space */
+       u8      urg_data_pending : 1;   /* Urgent Data Pending */
+       u8      urg_data_present : 1;   /* Urgent Data Present */
+       u8      cons_curs_upd_req : 1;  /* cursor update requested */
+       u8      failover_validation : 1; /* message replay due to failover */
+       u8      reserved : 3;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u8      reserved : 3;
+       u8      failover_validation : 1;
+       u8      cons_curs_upd_req : 1;
+       u8      urg_data_present : 1;
+       u8      urg_data_pending : 1;
+       u8      write_blocked : 1;
+#endif
+};
+
+/* in host byte order */
+union smc_host_cursor {        /* SMC cursor - an offset in an RMBE */
+       struct {
+               u16     reserved;
+               u16     wrap;           /* window wrap sequence number */
+               u32     count;          /* cursor (= offset) part */
+       };
+#ifdef KERNEL_HAS_ATOMIC64
+       atomic64_t              acurs;  /* for atomic processing */
+#else
+       u64                     acurs;  /* for atomic processing */
+#endif
+} __aligned(8);
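+/* (note: the union overlays the (wrap, count) pair with one 64-bit value so
+ * that, where atomic64_t exists, a cursor can be read and written atomically;
+ * otherwise acurs_lock in struct smc_connection serializes cursor access)
+ */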
+
+/* in host byte order, except for flag bitfields in network byte order */
+struct smc_host_cdc_msg {              /* Connection Data Control message */
+       struct smc_wr_rx_hdr            common; /* .type = 0xFE */
+       u8                              len;    /* length = 44 */
+       u16                             seqno;  /* connection seq # */
+       u32                             token;  /* alert_token */
+       union smc_host_cursor           prod;           /* producer cursor */
+       union smc_host_cursor           cons;           /* consumer cursor,
+                                                        * piggybacked "ack"
+                                                        */
+       struct smc_cdc_producer_flags   prod_flags;     /* conn. tx/rx status */
+       struct smc_cdc_conn_state_flags conn_state_flags; /* peer conn. status */
+       u8                              reserved[18];
+} __aligned(8);
+
+struct smc_connection {
+       struct rb_node          alert_node;
+       struct smc_link_group   *lgr;           /* link group of connection */
+       u32                     alert_token_local; /* unique conn. id */
+       u8                      peer_conn_idx;  /* from tcp handshake */
+       int                     peer_rmbe_size; /* size of peer rx buffer */
+       atomic_t                peer_rmbe_space; /* remaining free bytes in peer
+                                                * rmbe
+                                                */
+       int                     rtoken_idx;     /* idx to peer RMB rkey/addr */
+
+       struct smc_buf_desc     *sndbuf_desc;   /* send buffer descriptor */
+       int                     sndbuf_size;    /* sndbuf size <== sock wmem */
+       struct smc_buf_desc     *rmb_desc;      /* RMBE descriptor */
+       int                     rmbe_size;      /* RMBE size <== sock rmem */
+       int                     rmbe_size_short; /* compressed notation */
+       int                     rmbe_update_limit;
+                                               /* lower limit for consumer
+                                                * cursor update
+                                                */
+
+       struct smc_host_cdc_msg local_tx_ctrl;  /* host byte order staging
+                                                * buffer for CDC msg send
+                                                * .prod cf. TCP snd_nxt
+                                                * .cons cf. TCP sends ack
+                                                */
+       union smc_host_cursor   tx_curs_prep;   /* tx - prepared data
+                                                * snd_max..wmem_alloc
+                                                */
+       union smc_host_cursor   tx_curs_sent;   /* tx - sent data
+                                                * snd_nxt ?
+                                                */
+       union smc_host_cursor   tx_curs_fin;    /* tx - confirmed by peer
+                                                * snd-wnd-begin ?
+                                                */
+       atomic_t                sndbuf_space;   /* remaining space in sndbuf */
+       u16                     tx_cdc_seq;     /* sequence # for CDC send */
+       spinlock_t              send_lock;      /* protect wr_sends */
+       struct work_struct      tx_work;        /* retry of smc_cdc_msg_send */
+
+       struct smc_host_cdc_msg local_rx_ctrl;  /* filled during event_handl.
+                                                * .prod cf. TCP rcv_nxt
+                                                * .cons cf. TCP snd_una
+                                                */
+       union smc_host_cursor   rx_curs_confirmed; /* confirmed to peer
+                                                   * source of snd_una ?
+                                                   */
+       atomic_t                bytes_to_rcv;   /* arrived data,
+                                                * not yet received
+                                                */
+#ifndef KERNEL_HAS_ATOMIC64
+       spinlock_t              acurs_lock;     /* protect cursors */
+#endif
+};
+
+struct smc_sock {                              /* smc sock container */
+       struct sock             sk;
+       struct socket           *clcsock;       /* internal tcp socket */
+       struct smc_connection   conn;           /* smc connection */
+       struct sockaddr         *addr;          /* inet connect address */
+       struct smc_sock         *listen_smc;    /* listen parent */
+       struct work_struct      tcp_listen_work; /* handle tcp socket accepts */
+       struct work_struct      smc_listen_work; /* prepare new accept socket */
+       struct list_head        accept_q;       /* sockets to be accepted */
+       spinlock_t              accept_q_lock;  /* protects accept_q */
+       struct delayed_work     sock_put_work;  /* final socket freeing */
+       bool                    use_fallback;   /* fallback to tcp */
+       u8                      wait_close_tx_prepared : 1;
+                                               /* shutdown wr or close
+                                                * started, waiting for unsent
+                                                * data to be sent
+                                                */
+};
+
+static inline struct smc_sock *smc_sk(const struct sock *sk)
+{
+       return (struct smc_sock *)sk;
+}
+
+#define SMC_SYSTEMID_LEN               8
+
+extern u8      local_systemid[SMC_SYSTEMID_LEN]; /* unique system identifier */
+
+/* convert a u32 value into network byte order and store it in a 3 byte field */
+static inline void hton24(u8 *net, u32 host)
+{
+       __be32 t;
+
+       t = cpu_to_be32(host);
+       memcpy(net, ((u8 *)&t) + 1, 3);
+}
+
+/* convert a received 3 byte field into host byte order */
+static inline u32 ntoh24(u8 *net)
+{
+       __be32 t = 0;
+
+       memcpy(((u8 *)&t) + 1, net, 3);
+       return be32_to_cpu(t);
+}
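+/* (for illustration: hton24(net, 0x123456) stores {0x12, 0x34, 0x56} and
+ * ntoh24() of that field returns 0x123456 again; only the low 24 bits of
+ * the host value survive the round trip)
+ */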
+
+#define SMC_BUF_MIN_SIZE 16384         /* minimum size of an RMB */
+
+#define SMC_RMBE_SIZES 16      /* number of distinct sizes for an RMBE */
+/* the RFC states that the largest size would be 512K, i.e. compressed
+ * value 5 and thus only 6 distinct sizes (0..5), even though
+ * struct smc_clc_msg_accept_confirm.rmbe_size is a 4 bit value (0..15)
+ */
+
+/* convert the RMB size into the compressed notation - minimum 16K.
+ * In contrast to plain ilog2, this rounds towards the next power of 2,
+ * so the socket application gets at least its desired sndbuf / rcvbuf size.
+ */
+static inline u8 smc_compress_bufsize(int size)
+{
+       u8 compressed;
+
+       if (size <= SMC_BUF_MIN_SIZE)
+               return 0;
+
+       size = (size - 1) >> 14;
+       compressed = ilog2(size) + 1;
+       if (compressed >= SMC_RMBE_SIZES)
+               compressed = SMC_RMBE_SIZES - 1;
+       return compressed;
+}
+
+/* convert the RMB size from compressed notation into integer */
+static inline int smc_uncompress_bufsize(u8 compressed)
+{
+       u32 size;
+
+       size = 0x00000001 << (((int)compressed) + 14);
+       return (int)size;
+}
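+/* (for illustration: smc_compress_bufsize(20000) rounds up to 32K and yields
+ * 1, smc_uncompress_bufsize(1) returns 32768; in general a compressed value c
+ * stands for 2^(c + 14) bytes)
+ */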
+
+#ifdef CONFIG_XFRM
+static inline bool using_ipsec(struct smc_sock *smc)
+{
+       return smc->clcsock->sk->sk_policy[0] ||
+              smc->clcsock->sk->sk_policy[1];
+}
+#else
+static inline bool using_ipsec(struct smc_sock *smc)
+{
+       return false;
+}
+#endif
+
+struct smc_clc_msg_local;
+
+int smc_netinfo_by_tcpsk(struct socket *clcsock, __be32 *subnet,
+                        u8 *prefix_len);
+void smc_conn_free(struct smc_connection *conn);
+int smc_conn_create(struct smc_sock *smc, __be32 peer_in_addr,
+                   struct smc_ib_device *smcibdev, u8 ibport,
+                   struct smc_clc_msg_local *lcl, int srv_first_contact);
+struct sock *smc_accept_dequeue(struct sock *parent, struct socket *new_sock);
+void smc_close_non_accepted(struct sock *sk);
+
+#endif /* __SMC_H */
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
new file mode 100644 (file)
index 0000000..5a33949
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Connection Data Control (CDC)
+ * handles flow control
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#include <linux/spinlock.h>
+
+#include "smc.h"
+#include "smc_wr.h"
+#include "smc_cdc.h"
+#include "smc_tx.h"
+#include "smc_rx.h"
+#include "smc_close.h"
+
+/********************************** send *************************************/
+
+struct smc_cdc_tx_pend {
+       struct smc_connection   *conn;          /* socket connection */
+       union smc_host_cursor   cursor; /* tx sndbuf cursor sent */
+       union smc_host_cursor   p_cursor;       /* rx RMBE cursor produced */
+       u16                     ctrl_seq;       /* conn. tx sequence # */
+};
+
+/* handler for send/transmission completion of a CDC msg */
+static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
+                              struct smc_link *link,
+                              enum ib_wc_status wc_status)
+{
+       struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
+       struct smc_sock *smc;
+       int diff;
+
+       if (!cdcpend->conn)
+               /* already dismissed */
+               return;
+
+       smc = container_of(cdcpend->conn, struct smc_sock, conn);
+       bh_lock_sock(&smc->sk);
+       if (!wc_status) {
+               diff = smc_curs_diff(cdcpend->conn->sndbuf_size,
+                                    &cdcpend->conn->tx_curs_fin,
+                                    &cdcpend->cursor);
+               /* sndbuf_space is decreased in smc_sendmsg */
+               smp_mb__before_atomic();
+               atomic_add(diff, &cdcpend->conn->sndbuf_space);
+               /* guarantee 0 <= sndbuf_space <= sndbuf_size */
+               smp_mb__after_atomic();
+               smc_curs_write(&cdcpend->conn->tx_curs_fin,
+                              smc_curs_read(&cdcpend->cursor, cdcpend->conn),
+                              cdcpend->conn);
+       }
+       smc_tx_sndbuf_nonfull(smc);
+       if (smc->sk.sk_state != SMC_ACTIVE)
+               /* wake up smc_close_wait_tx_pends() */
+               smc->sk.sk_state_change(&smc->sk);
+       bh_unlock_sock(&smc->sk);
+}
+
+int smc_cdc_get_free_slot(struct smc_link *link,
+                         struct smc_wr_buf **wr_buf,
+                         struct smc_cdc_tx_pend **pend)
+{
+       return smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
+                                      (struct smc_wr_tx_pend_priv **)pend);
+}
+
+static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
+                                           struct smc_cdc_tx_pend *pend)
+{
+       BUILD_BUG_ON_MSG(
+               sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
+               "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
+       BUILD_BUG_ON_MSG(
+               offsetof(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE,
+               "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
+       BUILD_BUG_ON_MSG(
+               sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
+               "must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_cdc_tx_pend)");
+       pend->conn = conn;
+       pend->cursor = conn->tx_curs_sent;
+       pend->p_cursor = conn->local_tx_ctrl.prod;
+       pend->ctrl_seq = conn->tx_cdc_seq;
+}
+
+int smc_cdc_msg_send(struct smc_connection *conn,
+                    struct smc_wr_buf *wr_buf,
+                    struct smc_cdc_tx_pend *pend)
+{
+       struct smc_link *link;
+       int rc;
+
+       link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+
+       smc_cdc_add_pending_send(conn, pend);
+
+       conn->tx_cdc_seq++;
+       conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
+       smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf,
+                           &conn->local_tx_ctrl, conn);
+       rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
+       if (!rc)
+               smc_curs_write(&conn->rx_curs_confirmed,
+                              smc_curs_read(&conn->local_tx_ctrl.cons, conn),
+                              conn);
+
+       return rc;
+}
+
+int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
+{
+       struct smc_cdc_tx_pend *pend;
+       struct smc_wr_buf *wr_buf;
+       int rc;
+
+       rc = smc_cdc_get_free_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], &wr_buf,
+                                  &pend);
+       if (rc)
+               return rc;
+
+       return smc_cdc_msg_send(conn, wr_buf, pend);
+}
+
+static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend,
+                             unsigned long data)
+{
+       struct smc_connection *conn = (struct smc_connection *)data;
+       struct smc_cdc_tx_pend *cdc_pend =
+               (struct smc_cdc_tx_pend *)tx_pend;
+
+       return cdc_pend->conn == conn;
+}
+
+static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend)
+{
+       struct smc_cdc_tx_pend *cdc_pend =
+               (struct smc_cdc_tx_pend *)tx_pend;
+
+       cdc_pend->conn = NULL;
+}
+
+void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
+{
+       struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+
+       smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE,
+                               smc_cdc_tx_filter, smc_cdc_tx_dismisser,
+                               (unsigned long)conn);
+}
+
+bool smc_cdc_tx_has_pending(struct smc_connection *conn)
+{
+       struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+
+       return smc_wr_tx_has_pending(link, SMC_CDC_MSG_TYPE,
+                                    smc_cdc_tx_filter, (unsigned long)conn);
+}
+
+/********************************* receive ***********************************/
+
+static inline bool smc_cdc_before(u16 seq1, u16 seq2)
+{
+       return (s16)(seq1 - seq2) < 0;
+}
+
+static void smc_cdc_msg_recv_action(struct smc_sock *smc,
+                                   struct smc_link *link,
+                                   struct smc_cdc_msg *cdc)
+{
+       union smc_host_cursor cons_old, prod_old;
+       struct smc_connection *conn = &smc->conn;
+       int diff_cons, diff_prod;
+
+       if (!cdc->prod_flags.failover_validation) {
+               if (smc_cdc_before(ntohs(cdc->seqno),
+                                  conn->local_rx_ctrl.seqno))
+                       /* received seqno is old */
+                       return;
+       }
+       smc_curs_write(&prod_old,
+                      smc_curs_read(&conn->local_rx_ctrl.prod, conn),
+                      conn);
+       smc_curs_write(&cons_old,
+                      smc_curs_read(&conn->local_rx_ctrl.cons, conn),
+                      conn);
+       smc_cdc_msg_to_host(&conn->local_rx_ctrl, cdc, conn);
+
+       diff_cons = smc_curs_diff(conn->peer_rmbe_size, &cons_old,
+                                 &conn->local_rx_ctrl.cons);
+       if (diff_cons) {
+               /* peer_rmbe_space is decreased during data transfer with RDMA
+                * write
+                */
+               smp_mb__before_atomic();
+               atomic_add(diff_cons, &conn->peer_rmbe_space);
+               /* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
+               smp_mb__after_atomic();
+       }
+
+       diff_prod = smc_curs_diff(conn->rmbe_size, &prod_old,
+                                 &conn->local_rx_ctrl.prod);
+       if (diff_prod) {
+               /* bytes_to_rcv is decreased in smc_recvmsg */
+               smp_mb__before_atomic();
+               atomic_add(diff_prod, &conn->bytes_to_rcv);
+               /* guarantee 0 <= bytes_to_rcv <= rmbe_size */
+               smp_mb__after_atomic();
+               smc->sk.sk_data_ready(&smc->sk);
+       }
+
+       if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
+               smc->sk.sk_err = ECONNRESET;
+               conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
+       }
+       if (smc_cdc_rxed_any_close_or_senddone(conn))
+               smc_close_passive_received(smc);
+
+       /* piggy backed tx info */
+       /* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
+       if (diff_cons && smc_tx_prepared_sends(conn)) {
+               smc_tx_sndbuf_nonempty(conn);
+               /* trigger socket release if connection closed */
+               smc_close_wake_tx_prepared(smc);
+       }
+
+       /* subsequent patch: trigger socket release if connection closed */
+
+       /* socket connected but not accepted */
+       if (!smc->sk.sk_socket)
+               return;
+
+       /* data available */
+       if ((conn->local_rx_ctrl.prod_flags.write_blocked) ||
+           (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req))
+               smc_tx_consumer_update(conn);
+}
+
+/* called under tasklet context */
+static inline void smc_cdc_msg_recv(struct smc_cdc_msg *cdc,
+                                   struct smc_link *link, u64 wr_id)
+{
+       struct smc_link_group *lgr = container_of(link, struct smc_link_group,
+                                                 lnk[SMC_SINGLE_LINK]);
+       struct smc_connection *connection;
+       struct smc_sock *smc;
+
+       /* lookup connection */
+       read_lock_bh(&lgr->conns_lock);
+       connection = smc_lgr_find_conn(ntohl(cdc->token), lgr);
+       if (!connection) {
+               read_unlock_bh(&lgr->conns_lock);
+               return;
+       }
+       smc = container_of(connection, struct smc_sock, conn);
+       sock_hold(&smc->sk);
+       read_unlock_bh(&lgr->conns_lock);
+       bh_lock_sock(&smc->sk);
+       smc_cdc_msg_recv_action(smc, link, cdc);
+       bh_unlock_sock(&smc->sk);
+       sock_put(&smc->sk); /* no free sk in softirq-context */
+}
+
+/***************************** init, exit, misc ******************************/
+
+static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf)
+{
+       struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
+       struct smc_cdc_msg *cdc = buf;
+
+       if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved))
+               return; /* short message */
+       if (cdc->len != sizeof(*cdc))
+               return; /* invalid message */
+       smc_cdc_msg_recv(cdc, link, wc->wr_id);
+}
+
+static struct smc_wr_rx_handler smc_cdc_rx_handlers[] = {
+       {
+               .handler        = smc_cdc_rx_handler,
+               .type           = SMC_CDC_MSG_TYPE
+       },
+       {
+               .handler        = NULL,
+       }
+};
+
+int __init smc_cdc_init(void)
+{
+       struct smc_wr_rx_handler *handler;
+       int rc = 0;
+
+       for (handler = smc_cdc_rx_handlers; handler->handler; handler++) {
+               INIT_HLIST_NODE(&handler->list);
+               rc = smc_wr_rx_register_handler(handler);
+               if (rc)
+                       break;
+       }
+       return rc;
+}
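
As an aside: the wrapping comparison implemented by smc_cdc_before() above is standard serial-number arithmetic. Casting the unsigned 16-bit difference to a signed type makes a just-wrapped sequence number compare as newer. A standalone user-space sketch, not part of this patch (seq16_before() is an illustrative name):

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors smc_cdc_before(): true if seq1 is older than seq2 */
    static int seq16_before(uint16_t seq1, uint16_t seq2)
    {
            return (int16_t)(seq1 - seq2) < 0;
    }

    int main(void)
    {
            printf("%d\n", seq16_before(0xFFFF, 0x0001)); /* 1: pre-wrap value is older */
            printf("%d\n", seq16_before(0x0001, 0xFFFF)); /* 0: post-wrap value is newer */
            return 0;
    }
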
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h
new file mode 100644 (file)
index 0000000..8e1d76f
--- /dev/null
@@ -0,0 +1,218 @@
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Connection Data Control (CDC)
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#ifndef SMC_CDC_H
+#define SMC_CDC_H
+
+#include <linux/kernel.h> /* max_t */
+#include <linux/atomic.h>
+#include <linux/in.h>
+#include <linux/compiler.h>
+
+#include "smc.h"
+#include "smc_core.h"
+#include "smc_wr.h"
+
+#define        SMC_CDC_MSG_TYPE                0xFE
+
+/* in network byte order */
+union smc_cdc_cursor {         /* SMC cursor */
+       struct {
+               __be16  reserved;
+               __be16  wrap;
+               __be32  count;
+       };
+#ifdef KERNEL_HAS_ATOMIC64
+       atomic64_t      acurs;          /* for atomic processing */
+#else
+       u64             acurs;          /* for atomic processing */
+#endif
+} __aligned(8);
+
+/* in network byte order */
+struct smc_cdc_msg {
+       struct smc_wr_rx_hdr            common; /* .type = 0xFE */
+       u8                              len;    /* 44 */
+       __be16                          seqno;
+       __be32                          token;
+       union smc_cdc_cursor            prod;
+       union smc_cdc_cursor            cons;   /* piggy backed "ack" */
+       struct smc_cdc_producer_flags   prod_flags;
+       struct smc_cdc_conn_state_flags conn_state_flags;
+       u8                              reserved[18];
+} __aligned(8);
+
+static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
+{
+       return conn->local_rx_ctrl.conn_state_flags.peer_conn_abort ||
+              conn->local_rx_ctrl.conn_state_flags.peer_conn_closed;
+}
+
+static inline bool smc_cdc_rxed_any_close_or_senddone(
+       struct smc_connection *conn)
+{
+       return smc_cdc_rxed_any_close(conn) ||
+              conn->local_rx_ctrl.conn_state_flags.peer_done_writing;
+}
+
+static inline void smc_curs_add(int size, union smc_host_cursor *curs,
+                               int value)
+{
+       curs->count += value;
+       if (curs->count >= size) {
+               curs->wrap++;
+               curs->count -= size;
+       }
+}
+
+/* SMC cursors are 8 bytes long and require atomic reading and writing */
+static inline u64 smc_curs_read(union smc_host_cursor *curs,
+                               struct smc_connection *conn)
+{
+#ifndef KERNEL_HAS_ATOMIC64
+       unsigned long flags;
+       u64 ret;
+
+       spin_lock_irqsave(&conn->acurs_lock, flags);
+       ret = curs->acurs;
+       spin_unlock_irqrestore(&conn->acurs_lock, flags);
+       return ret;
+#else
+       return atomic64_read(&curs->acurs);
+#endif
+}
+
+static inline u64 smc_curs_read_net(union smc_cdc_cursor *curs,
+                                   struct smc_connection *conn)
+{
+#ifndef KERNEL_HAS_ATOMIC64
+       unsigned long flags;
+       u64 ret;
+
+       spin_lock_irqsave(&conn->acurs_lock, flags);
+       ret = curs->acurs;
+       spin_unlock_irqrestore(&conn->acurs_lock, flags);
+       return ret;
+#else
+       return atomic64_read(&curs->acurs);
+#endif
+}
+
+static inline void smc_curs_write(union smc_host_cursor *curs, u64 val,
+                                 struct smc_connection *conn)
+{
+#ifndef KERNEL_HAS_ATOMIC64
+       unsigned long flags;
+
+       spin_lock_irqsave(&conn->acurs_lock, flags);
+       curs->acurs = val;
+       spin_unlock_irqrestore(&conn->acurs_lock, flags);
+#else
+       atomic64_set(&curs->acurs, val);
+#endif
+}
+
+static inline void smc_curs_write_net(union smc_cdc_cursor *curs, u64 val,
+                                     struct smc_connection *conn)
+{
+#ifndef KERNEL_HAS_ATOMIC64
+       unsigned long flags;
+
+       spin_lock_irqsave(&conn->acurs_lock, flags);
+       curs->acurs = val;
+       spin_unlock_irqrestore(&conn->acurs_lock, flags);
+#else
+       atomic64_set(&curs->acurs, val);
+#endif
+}
+
+/* calculate cursor difference between old and new, where old <= new */
+static inline int smc_curs_diff(unsigned int size,
+                               union smc_host_cursor *old,
+                               union smc_host_cursor *new)
+{
+       if (old->wrap != new->wrap)
+               return max_t(int, 0,
+                            ((size - old->count) + new->count));
+
+       return max_t(int, 0, (new->count - old->count));
+}
+
+static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer,
+                                         union smc_host_cursor *local,
+                                         struct smc_connection *conn)
+{
+       union smc_host_cursor temp;
+
+       smc_curs_write(&temp, smc_curs_read(local, conn), conn);
+       peer->count = htonl(temp.count);
+       peer->wrap = htons(temp.wrap);
+       /* peer->reserved = htons(0); must be ensured by caller */
+}
+
+static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer,
+                                      struct smc_host_cdc_msg *local,
+                                      struct smc_connection *conn)
+{
+       peer->common.type = local->common.type;
+       peer->len = local->len;
+       peer->seqno = htons(local->seqno);
+       peer->token = htonl(local->token);
+       smc_host_cursor_to_cdc(&peer->prod, &local->prod, conn);
+       smc_host_cursor_to_cdc(&peer->cons, &local->cons, conn);
+       peer->prod_flags = local->prod_flags;
+       peer->conn_state_flags = local->conn_state_flags;
+}
+
+static inline void smc_cdc_cursor_to_host(union smc_host_cursor *local,
+                                         union smc_cdc_cursor *peer,
+                                         struct smc_connection *conn)
+{
+       union smc_host_cursor temp, old;
+       union smc_cdc_cursor net;
+
+       smc_curs_write(&old, smc_curs_read(local, conn), conn);
+       smc_curs_write_net(&net, smc_curs_read_net(peer, conn), conn);
+       temp.count = ntohl(net.count);
+       temp.wrap = ntohs(net.wrap);
+       if ((old.wrap > temp.wrap) && temp.wrap)
+               return;
+       if ((old.wrap == temp.wrap) &&
+           (old.count > temp.count))
+               return;
+       smc_curs_write(local, smc_curs_read(&temp, conn), conn);
+}
+
+static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
+                                      struct smc_cdc_msg *peer,
+                                      struct smc_connection *conn)
+{
+       local->common.type = peer->common.type;
+       local->len = peer->len;
+       local->seqno = ntohs(peer->seqno);
+       local->token = ntohl(peer->token);
+       smc_cdc_cursor_to_host(&local->prod, &peer->prod, conn);
+       smc_cdc_cursor_to_host(&local->cons, &peer->cons, conn);
+       local->prod_flags = peer->prod_flags;
+       local->conn_state_flags = peer->conn_state_flags;
+}
+
+struct smc_cdc_tx_pend;
+
+int smc_cdc_get_free_slot(struct smc_link *link, struct smc_wr_buf **wr_buf,
+                         struct smc_cdc_tx_pend **pend);
+void smc_cdc_tx_dismiss_slots(struct smc_connection *conn);
+int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf,
+                    struct smc_cdc_tx_pend *pend);
+int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn);
+bool smc_cdc_tx_has_pending(struct smc_connection *conn);
+int smc_cdc_init(void) __init;
+
+#endif /* SMC_CDC_H */
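
The (wrap, count) cursor helpers in this header can be exercised outside the kernel. The following standalone sketch models smc_curs_add() and smc_curs_diff() with a plain struct; it is illustrative only and omits the atomic access the kernel variants provide:

    #include <stdio.h>

    /* user-space stand-in for union smc_host_cursor (illustrative) */
    struct cursor {
            unsigned short wrap;    /* number of times the ring wrapped */
            unsigned int count;     /* byte offset within the ring */
    };

    static void curs_add(int size, struct cursor *curs, int value)
    {
            curs->count += value;
            if (curs->count >= (unsigned int)size) {
                    curs->wrap++;
                    curs->count -= size;
            }
    }

    /* cursor difference for old <= new, as in smc_curs_diff() */
    static int curs_diff(unsigned int size, const struct cursor *old,
                         const struct cursor *new)
    {
            int d;

            if (old->wrap != new->wrap)
                    d = (int)((size - old->count) + new->count);
            else
                    d = (int)(new->count - old->count);
            return d > 0 ? d : 0;   /* clamp, as the kernel's max_t(int, 0, ...) does */
    }

    int main(void)
    {
            struct cursor old = { 0, 900 }, cur = { 0, 900 };

            curs_add(1024, &cur, 300);      /* wraps: wrap=1, count=176 */
            printf("diff=%d\n", curs_diff(1024, &old, &cur));  /* diff=300 */
            return 0;
    }
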
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
new file mode 100644 (file)
index 0000000..cc6b6f8
--- /dev/null
@@ -0,0 +1,280 @@
+/*
+ *  Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ *  CLC (connection layer control) handshake over initial TCP socket to
+ *  prepare for RDMA traffic
+ *
+ *  Copyright IBM Corp. 2016
+ *
+ *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+
+#include "smc.h"
+#include "smc_core.h"
+#include "smc_clc.h"
+#include "smc_ib.h"
+
+/* Wait for data on the TCP socket and analyze the received data.
+ * Returns:
+ * 0 on success, if the received message was not a decline.
+ * SMC_CLC_DECL_REPLY if a decline was received (fallback; no further
+ * decline is sent).
+ * A clcsock error, -EINTR, -ECONNRESET, or -EPROTO otherwise.
+ */
+int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
+                    u8 expected_type)
+{
+       struct sock *clc_sk = smc->clcsock->sk;
+       struct smc_clc_msg_hdr *clcm = buf;
+       struct msghdr msg = {NULL, 0};
+       int reason_code = 0;
+       struct kvec vec;
+       int len, datlen;
+       int krflags;
+
+       /* peek the first few bytes to determine length of data to receive
+        * so we don't consume any subsequent CLC message or payload data
+        * in the TCP byte stream
+        */
+       vec.iov_base = buf;
+       vec.iov_len = buflen;
+       krflags = MSG_PEEK | MSG_WAITALL;
+       smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
+       len = kernel_recvmsg(smc->clcsock, &msg, &vec, 1,
+                            sizeof(struct smc_clc_msg_hdr), krflags);
+       if (signal_pending(current)) {
+               reason_code = -EINTR;
+               clc_sk->sk_err = EINTR;
+               smc->sk.sk_err = EINTR;
+               goto out;
+       }
+       if (clc_sk->sk_err) {
+               reason_code = -clc_sk->sk_err;
+               smc->sk.sk_err = clc_sk->sk_err;
+               goto out;
+       }
+       if (!len) { /* peer has performed orderly shutdown */
+               smc->sk.sk_err = ECONNRESET;
+               reason_code = -ECONNRESET;
+               goto out;
+       }
+       if (len < 0) {
+               smc->sk.sk_err = -len;
+               reason_code = len;
+               goto out;
+       }
+       datlen = ntohs(clcm->length);
+       if ((len < sizeof(struct smc_clc_msg_hdr)) ||
+           (datlen < sizeof(struct smc_clc_msg_decline)) ||
+           (datlen > sizeof(struct smc_clc_msg_accept_confirm)) ||
+           memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) ||
+           ((clcm->type != SMC_CLC_DECLINE) &&
+            (clcm->type != expected_type))) {
+               smc->sk.sk_err = EPROTO;
+               reason_code = -EPROTO;
+               goto out;
+       }
+
+       /* receive the complete CLC message */
+       vec.iov_base = buf;
+       vec.iov_len = buflen;
+       memset(&msg, 0, sizeof(struct msghdr));
+       krflags = MSG_WAITALL;
+       smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
+       len = kernel_recvmsg(smc->clcsock, &msg, &vec, 1, datlen, krflags);
+       if (len < datlen) {
+               smc->sk.sk_err = EPROTO;
+               reason_code = -EPROTO;
+               goto out;
+       }
+       if (clcm->type == SMC_CLC_DECLINE) {
+               reason_code = SMC_CLC_DECL_REPLY;
+               if (ntohl(((struct smc_clc_msg_decline *)buf)->peer_diagnosis)
+                       == SMC_CLC_DECL_SYNCERR)
+                       smc->conn.lgr->sync_err = true;
+       }
+
+out:
+       return reason_code;
+}
+
+/* send CLC DECLINE message across internal TCP socket */
+int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info,
+                        u8 out_of_sync)
+{
+       struct smc_clc_msg_decline dclc;
+       struct msghdr msg;
+       struct kvec vec;
+       int len;
+
+       memset(&dclc, 0, sizeof(dclc));
+       memcpy(dclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+       dclc.hdr.type = SMC_CLC_DECLINE;
+       dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
+       dclc.hdr.version = SMC_CLC_V1;
+       dclc.hdr.flag = out_of_sync ? 1 : 0;
+       memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid));
+       dclc.peer_diagnosis = htonl(peer_diag_info);
+       memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+
+       memset(&msg, 0, sizeof(msg));
+       vec.iov_base = &dclc;
+       vec.iov_len = sizeof(struct smc_clc_msg_decline);
+       len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
+                            sizeof(struct smc_clc_msg_decline));
+       if (len < sizeof(struct smc_clc_msg_decline))
+               smc->sk.sk_err = EPROTO;
+       if (len < 0)
+               smc->sk.sk_err = -len;
+       return len;
+}
+
+/* send CLC PROPOSAL message across internal TCP socket */
+int smc_clc_send_proposal(struct smc_sock *smc,
+                         struct smc_ib_device *smcibdev,
+                         u8 ibport)
+{
+       struct smc_clc_msg_proposal pclc;
+       int reason_code = 0;
+       struct msghdr msg;
+       struct kvec vec;
+       int len, rc;
+
+       /* send SMC Proposal CLC message */
+       memset(&pclc, 0, sizeof(pclc));
+       memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+       pclc.hdr.type = SMC_CLC_PROPOSAL;
+       pclc.hdr.length = htons(sizeof(pclc));
+       pclc.hdr.version = SMC_CLC_V1;          /* SMC version */
+       memcpy(pclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
+       memcpy(&pclc.lcl.gid, &smcibdev->gid[ibport - 1], SMC_GID_SIZE);
+       memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1], ETH_ALEN);
+
+       /* determine subnet and mask from internal TCP socket */
+       rc = smc_netinfo_by_tcpsk(smc->clcsock, &pclc.outgoing_subnet,
+                                 &pclc.prefix_len);
+       if (rc)
+               return SMC_CLC_DECL_CNFERR; /* configuration error */
+       memcpy(pclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+       memset(&msg, 0, sizeof(msg));
+       vec.iov_base = &pclc;
+       vec.iov_len = sizeof(pclc);
+       /* due to the few bytes needed for the CLC handshake this cannot block */
+       len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, sizeof(pclc));
+       if (len < sizeof(pclc)) {
+               if (len >= 0) {
+                       reason_code = -ENETUNREACH;
+                       smc->sk.sk_err = -reason_code;
+               } else {
+                       smc->sk.sk_err = smc->clcsock->sk->sk_err;
+                       reason_code = -smc->sk.sk_err;
+               }
+       }
+
+       return reason_code;
+}
+
+/* send CLC CONFIRM message across internal TCP socket */
+int smc_clc_send_confirm(struct smc_sock *smc)
+{
+       struct smc_connection *conn = &smc->conn;
+       struct smc_clc_msg_accept_confirm cclc;
+       struct smc_link *link;
+       int reason_code = 0;
+       struct msghdr msg;
+       struct kvec vec;
+       int len;
+
+       link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+       /* send SMC Confirm CLC msg */
+       memset(&cclc, 0, sizeof(cclc));
+       memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+       cclc.hdr.type = SMC_CLC_CONFIRM;
+       cclc.hdr.length = htons(sizeof(cclc));
+       cclc.hdr.version = SMC_CLC_V1;          /* SMC version */
+       memcpy(cclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
+       memcpy(&cclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1],
+              SMC_GID_SIZE);
+       memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
+       hton24(cclc.qpn, link->roce_qp->qp_num);
+       cclc.rmb_rkey =
+               htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
+       cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */
+       cclc.rmbe_alert_token = htonl(conn->alert_token_local);
+       cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
+       cclc.rmbe_size = conn->rmbe_size_short;
+       cclc.rmb_dma_addr =
+               cpu_to_be64((u64)conn->rmb_desc->dma_addr[SMC_SINGLE_LINK]);
+       hton24(cclc.psn, link->psn_initial);
+
+       memcpy(cclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+
+       memset(&msg, 0, sizeof(msg));
+       vec.iov_base = &cclc;
+       vec.iov_len = sizeof(cclc);
+       len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, sizeof(cclc));
+       if (len < sizeof(cclc)) {
+               if (len >= 0) {
+                       reason_code = -ENETUNREACH;
+                       smc->sk.sk_err = -reason_code;
+               } else {
+                       smc->sk.sk_err = smc->clcsock->sk->sk_err;
+                       reason_code = -smc->sk.sk_err;
+               }
+       }
+       return reason_code;
+}
+
+/* send CLC ACCEPT message across internal TCP socket */
+int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
+{
+       struct smc_connection *conn = &new_smc->conn;
+       struct smc_clc_msg_accept_confirm aclc;
+       struct smc_link *link;
+       struct msghdr msg;
+       struct kvec vec;
+       int rc = 0;
+       int len;
+
+       link = &conn->lgr->lnk[SMC_SINGLE_LINK];
+       memset(&aclc, 0, sizeof(aclc));
+       memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+       aclc.hdr.type = SMC_CLC_ACCEPT;
+       aclc.hdr.length = htons(sizeof(aclc));
+       aclc.hdr.version = SMC_CLC_V1;          /* SMC version */
+       if (srv_first_contact)
+               aclc.hdr.flag = 1;
+       memcpy(aclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
+       memcpy(&aclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1],
+              SMC_GID_SIZE);
+       memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
+       hton24(aclc.qpn, link->roce_qp->qp_num);
+       aclc.rmb_rkey =
+               htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
+       aclc.conn_idx = 1;                      /* as long as 1 RMB = 1 RMBE */
+       aclc.rmbe_alert_token = htonl(conn->alert_token_local);
+       aclc.qp_mtu = link->path_mtu;
+       aclc.rmbe_size = conn->rmbe_size_short;
+       aclc.rmb_dma_addr =
+               cpu_to_be64((u64)conn->rmb_desc->dma_addr[SMC_SINGLE_LINK]);
+       hton24(aclc.psn, link->psn_initial);
+       memcpy(aclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+
+       memset(&msg, 0, sizeof(msg));
+       vec.iov_base = &aclc;
+       vec.iov_len = sizeof(aclc);
+       len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1, sizeof(aclc));
+       if (len < sizeof(aclc)) {
+               if (len >= 0)
+                       new_smc->sk.sk_err = EPROTO;
+               else
+                       new_smc->sk.sk_err = new_smc->clcsock->sk->sk_err;
+               rc = sock_error(&new_smc->sk);
+       }
+
+       return rc;
+}
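
The receive path in smc_clc_wait_msg() uses a generic two-phase pattern for framed messages on a TCP byte stream: peek the fixed-size header first, then consume exactly one message so that no following message is swallowed. A hypothetical user-space sketch of the same pattern (struct hdr and recv_one_msg() are illustrative, not kernel API):

    #include <stdint.h>
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>

    /* hypothetical length-prefixed header, standing in for
     * struct smc_clc_msg_hdr
     */
    struct hdr {
            uint8_t  type;
            uint8_t  rsvd;
            uint16_t length;        /* total message length, network order */
    };

    static ssize_t recv_one_msg(int fd, void *buf, size_t buflen)
    {
            struct hdr h;
            size_t datlen;
            ssize_t n;

            /* phase 1: peek the header; nothing is consumed yet */
            n = recv(fd, &h, sizeof(h), MSG_PEEK | MSG_WAITALL);
            if (n < (ssize_t)sizeof(h))
                    return -1;
            datlen = ntohs(h.length);
            if (datlen < sizeof(h) || datlen > buflen)
                    return -1;      /* malformed or too large */
            /* phase 2: consume exactly one complete message */
            return recv(fd, buf, datlen, MSG_WAITALL);
    }
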
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
new file mode 100644 (file)
index 0000000..13db8ce
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ *  Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ *  CLC (connection layer control) handshake over initial TCP socket to
+ *  prepare for RDMA traffic
+ *
+ *  Copyright IBM Corp. 2016
+ *
+ *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#ifndef _SMC_CLC_H
+#define _SMC_CLC_H
+
+#include <rdma/ib_verbs.h>
+
+#include "smc.h"
+
+#define SMC_CLC_PROPOSAL       0x01
+#define SMC_CLC_ACCEPT         0x02
+#define SMC_CLC_CONFIRM                0x03
+#define SMC_CLC_DECLINE                0x04
+
+/* eye catcher "SMCR" EBCDIC for CLC messages */
+static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
+
+#define SMC_CLC_V1             0x1             /* SMC version                */
+#define CLC_WAIT_TIME          (6 * HZ)        /* max. wait time on clcsock  */
+#define SMC_CLC_DECL_MEM       0x01010000  /* insufficient memory resources  */
+#define SMC_CLC_DECL_TIMEOUT   0x02000000  /* timeout                        */
+#define SMC_CLC_DECL_CNFERR    0x03000000  /* configuration error            */
+#define SMC_CLC_DECL_IPSEC     0x03030000  /* IPsec usage                    */
+#define SMC_CLC_DECL_SYNCERR   0x04000000  /* synchronization error          */
+#define SMC_CLC_DECL_REPLY     0x06000000  /* reply to a received decline    */
+#define SMC_CLC_DECL_INTERR    0x99990000  /* internal error                 */
+#define SMC_CLC_DECL_TCL       0x02040000  /* timeout w4 QP confirm          */
+#define SMC_CLC_DECL_SEND      0x07000000  /* sending problem                */
+
+struct smc_clc_msg_hdr {       /* header1 of clc messages */
+       u8 eyecatcher[4];       /* eye catcher */
+       u8 type;                /* proposal / accept / confirm / decline */
+       __be16 length;
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u8 version : 4,
+          flag    : 1,
+          rsvd    : 3;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u8 rsvd    : 3,
+          flag    : 1,
+          version : 4;
+#endif
+} __packed;                    /* format defined in RFC7609 */
+
+struct smc_clc_msg_trail {     /* trailer of clc messages */
+       u8 eyecatcher[4];
+};
+
+struct smc_clc_msg_local {     /* header2 of clc messages */
+       u8 id_for_peer[SMC_SYSTEMID_LEN]; /* unique system id */
+       u8 gid[16];             /* gid of ib_device port */
+       u8 mac[6];              /* mac of ib_device port */
+};
+
+struct smc_clc_msg_proposal {  /* clc proposal message */
+       struct smc_clc_msg_hdr hdr;
+       struct smc_clc_msg_local lcl;
+       __be16 iparea_offset;   /* offset to IP address information area */
+       __be32 outgoing_subnet; /* subnet mask */
+       u8 prefix_len;          /* number of significant bits in mask */
+       u8 reserved[2];
+       u8 ipv6_prefixes_cnt;   /* number of IPv6 prefixes in prefix array */
+       struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */
+} __aligned(4);
+
+struct smc_clc_msg_accept_confirm {    /* clc accept / confirm message */
+       struct smc_clc_msg_hdr hdr;
+       struct smc_clc_msg_local lcl;
+       u8 qpn[3];              /* QP number */
+       __be32 rmb_rkey;        /* RMB rkey */
+       u8 conn_idx;            /* Connection index, which RMBE in RMB */
+       __be32 rmbe_alert_token;/* unique connection id */
+#if defined(__BIG_ENDIAN_BITFIELD)
+       u8 rmbe_size : 4,       /* RMBE buf size (compressed notation) */
+          qp_mtu   : 4;        /* QP mtu */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+       u8 qp_mtu   : 4,
+          rmbe_size : 4;
+#endif
+       u8 reserved;
+       __be64 rmb_dma_addr;    /* RMB virtual address */
+       u8 reserved2;
+       u8 psn[3];              /* initial packet sequence number */
+       struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */
+} __packed;                    /* format defined in RFC7609 */
+
+struct smc_clc_msg_decline {   /* clc decline message */
+       struct smc_clc_msg_hdr hdr;
+       u8 id_for_peer[SMC_SYSTEMID_LEN]; /* sender peer_id */
+       __be32 peer_diagnosis;  /* diagnosis information */
+       u8 reserved2[4];
+       struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */
+} __aligned(4);
+
+struct smc_sock;
+struct smc_ib_device;
+
+int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
+                    u8 expected_type);
+int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info,
+                        u8 out_of_sync);
+int smc_clc_send_proposal(struct smc_sock *smc, struct smc_ib_device *smcibdev,
+                         u8 ibport);
+int smc_clc_send_confirm(struct smc_sock *smc);
+int smc_clc_send_accept(struct smc_sock *smc, int srv_first_contact);
+
+#endif
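
The eyecatcher bytes 0xe2 0xd4 0xc3 0xd9 spell "SMCR" in EBCDIC; every CLC message carries them in both header and trailer, and smc_clc_wait_msg() drops anything without them. A minimal standalone check (illustrative only):

    #include <stdio.h>
    #include <string.h>

    static const char eyecatcher[4] = { '\xe2', '\xd4', '\xc3', '\xd9' };

    /* mirrors the memcmp() check in smc_clc_wait_msg() */
    static int clc_eyecatcher_ok(const void *msg)
    {
            return memcmp(msg, eyecatcher, sizeof(eyecatcher)) == 0;
    }

    int main(void)
    {
            unsigned char good[4] = { 0xe2, 0xd4, 0xc3, 0xd9 };
            unsigned char bad[4]  = { 'S', 'M', 'C', 'R' };  /* ASCII, not EBCDIC */

            printf("%d %d\n", clc_eyecatcher_ok(good), clc_eyecatcher_ok(bad)); /* 1 0 */
            return 0;
    }
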
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
new file mode 100644 (file)
index 0000000..03dfcc6
--- /dev/null
@@ -0,0 +1,442 @@
+/*
+ *  Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ *  Socket Closing - normal and abnormal
+ *
+ *  Copyright IBM Corp. 2016
+ *
+ *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#include <linux/workqueue.h>
+#include <net/sock.h>
+
+#include "smc.h"
+#include "smc_tx.h"
+#include "smc_cdc.h"
+#include "smc_close.h"
+
+#define SMC_CLOSE_WAIT_TX_PENDS_TIME           (5 * HZ)
+
+static void smc_close_cleanup_listen(struct sock *parent)
+{
+       struct sock *sk;
+
+       /* Close non-accepted connections */
+       while ((sk = smc_accept_dequeue(parent, NULL)))
+               smc_close_non_accepted(sk);
+}
+
+static void smc_close_wait_tx_pends(struct smc_sock *smc)
+{
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
+       struct sock *sk = &smc->sk;
+       signed long timeout;
+
+       timeout = SMC_CLOSE_WAIT_TX_PENDS_TIME;
+       add_wait_queue(sk_sleep(sk), &wait);
+       while (!signal_pending(current) && timeout) {
+               int rc;
+
+               rc = sk_wait_event(sk, &timeout,
+                                  !smc_cdc_tx_has_pending(&smc->conn),
+                                  &wait);
+               if (rc)
+                       break;
+       }
+       remove_wait_queue(sk_sleep(sk), &wait);
+}
+
+/* wait for sndbuf data being transmitted */
+static void smc_close_stream_wait(struct smc_sock *smc, long timeout)
+{
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
+       struct sock *sk = &smc->sk;
+
+       if (!timeout)
+               return;
+
+       if (!smc_tx_prepared_sends(&smc->conn))
+               return;
+
+       smc->wait_close_tx_prepared = 1;
+       add_wait_queue(sk_sleep(sk), &wait);
+       while (!signal_pending(current) && timeout) {
+               int rc;
+
+               rc = sk_wait_event(sk, &timeout,
+                                  !smc_tx_prepared_sends(&smc->conn) ||
+                                  (sk->sk_err == ECONNABORTED) ||
+                                  (sk->sk_err == ECONNRESET),
+                                  &wait);
+               if (rc)
+                       break;
+       }
+       remove_wait_queue(sk_sleep(sk), &wait);
+       smc->wait_close_tx_prepared = 0;
+}
+
+void smc_close_wake_tx_prepared(struct smc_sock *smc)
+{
+       if (smc->wait_close_tx_prepared)
+               /* wake up socket closing */
+               smc->sk.sk_state_change(&smc->sk);
+}
+
+static int smc_close_wr(struct smc_connection *conn)
+{
+       conn->local_tx_ctrl.conn_state_flags.peer_done_writing = 1;
+
+       return smc_cdc_get_slot_and_msg_send(conn);
+}
+
+static int smc_close_final(struct smc_connection *conn)
+{
+       if (atomic_read(&conn->bytes_to_rcv))
+               conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
+       else
+               conn->local_tx_ctrl.conn_state_flags.peer_conn_closed = 1;
+
+       return smc_cdc_get_slot_and_msg_send(conn);
+}
+
+static int smc_close_abort(struct smc_connection *conn)
+{
+       conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
+
+       return smc_cdc_get_slot_and_msg_send(conn);
+}
+
+/* terminate smc socket abnormally - active abort
+ * RDMA communication no longer possible
+ */
+void smc_close_active_abort(struct smc_sock *smc)
+{
+       struct smc_cdc_conn_state_flags *txflags =
+               &smc->conn.local_tx_ctrl.conn_state_flags;
+
+       bh_lock_sock(&smc->sk);
+       smc->sk.sk_err = ECONNABORTED;
+       if (smc->clcsock && smc->clcsock->sk) {
+               smc->clcsock->sk->sk_err = ECONNABORTED;
+               smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
+       }
+       switch (smc->sk.sk_state) {
+       case SMC_INIT:
+               smc->sk.sk_state = SMC_PEERABORTWAIT;
+               break;
+       case SMC_APPCLOSEWAIT1:
+       case SMC_APPCLOSEWAIT2:
+               txflags->peer_conn_abort = 1;
+               sock_release(smc->clcsock);
+               if (!smc_cdc_rxed_any_close(&smc->conn))
+                       smc->sk.sk_state = SMC_PEERABORTWAIT;
+               else
+                       smc->sk.sk_state = SMC_CLOSED;
+               break;
+       case SMC_PEERCLOSEWAIT1:
+       case SMC_PEERCLOSEWAIT2:
+               if (!txflags->peer_conn_closed) {
+                       smc->sk.sk_state = SMC_PEERABORTWAIT;
+                       txflags->peer_conn_abort = 1;
+                       sock_release(smc->clcsock);
+               } else {
+                       smc->sk.sk_state = SMC_CLOSED;
+               }
+               break;
+       case SMC_PROCESSABORT:
+       case SMC_APPFINCLOSEWAIT:
+               if (!txflags->peer_conn_closed) {
+                       txflags->peer_conn_abort = 1;
+                       sock_release(smc->clcsock);
+               }
+               smc->sk.sk_state = SMC_CLOSED;
+               break;
+       case SMC_PEERFINCLOSEWAIT:
+       case SMC_PEERABORTWAIT:
+       case SMC_CLOSED:
+               break;
+       }
+
+       sock_set_flag(&smc->sk, SOCK_DEAD);
+       bh_unlock_sock(&smc->sk);
+       smc->sk.sk_state_change(&smc->sk);
+}
+
+int smc_close_active(struct smc_sock *smc)
+{
+       struct smc_cdc_conn_state_flags *txflags =
+               &smc->conn.local_tx_ctrl.conn_state_flags;
+       long timeout = SMC_MAX_STREAM_WAIT_TIMEOUT;
+       struct smc_connection *conn = &smc->conn;
+       struct sock *sk = &smc->sk;
+       int old_state;
+       int rc = 0;
+
+       if (sock_flag(sk, SOCK_LINGER) &&
+           !(current->flags & PF_EXITING))
+               timeout = sk->sk_lingertime;
+
+again:
+       old_state = sk->sk_state;
+       switch (old_state) {
+       case SMC_INIT:
+               sk->sk_state = SMC_CLOSED;
+               if (smc->smc_listen_work.func)
+                       flush_work(&smc->smc_listen_work);
+               sock_put(sk);
+               break;
+       case SMC_LISTEN:
+               sk->sk_state = SMC_CLOSED;
+               sk->sk_state_change(sk); /* wake up accept */
+               if (smc->clcsock && smc->clcsock->sk) {
+                       rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
+                       /* wake up kernel_accept of smc_tcp_listen_worker */
+                       smc->clcsock->sk->sk_data_ready(smc->clcsock->sk);
+               }
+               release_sock(sk);
+               smc_close_cleanup_listen(sk);
+               flush_work(&smc->tcp_listen_work);
+               lock_sock(sk);
+               break;
+       case SMC_ACTIVE:
+               smc_close_stream_wait(smc, timeout);
+               release_sock(sk);
+               cancel_work_sync(&conn->tx_work);
+               lock_sock(sk);
+               if (sk->sk_state == SMC_ACTIVE) {
+                       /* send close request */
+                       rc = smc_close_final(conn);
+                       sk->sk_state = SMC_PEERCLOSEWAIT1;
+               } else {
+                       /* peer event has changed the state */
+                       goto again;
+               }
+               break;
+       case SMC_APPFINCLOSEWAIT:
+               /* socket already shut down for write or both (active close) */
+               if (txflags->peer_done_writing &&
+                   !txflags->peer_conn_closed) {
+                       /* shutdown for write already done; send close request */
+                       rc = smc_close_final(conn);
+               }
+               sk->sk_state = SMC_CLOSED;
+               smc_close_wait_tx_pends(smc);
+               break;
+       case SMC_APPCLOSEWAIT1:
+       case SMC_APPCLOSEWAIT2:
+               if (!smc_cdc_rxed_any_close(conn))
+                       smc_close_stream_wait(smc, timeout);
+               release_sock(sk);
+               cancel_work_sync(&conn->tx_work);
+               lock_sock(sk);
+               if (sk->sk_err != ECONNABORTED) {
+                       /* confirm close from peer */
+                       rc = smc_close_final(conn);
+                       if (rc)
+                               break;
+               }
+               if (smc_cdc_rxed_any_close(conn))
+                       /* peer has closed the socket already */
+                       sk->sk_state = SMC_CLOSED;
+               else
+                       /* peer has just issued a shutdown write */
+                       sk->sk_state = SMC_PEERFINCLOSEWAIT;
+               smc_close_wait_tx_pends(smc);
+               break;
+       case SMC_PEERCLOSEWAIT1:
+       case SMC_PEERCLOSEWAIT2:
+       case SMC_PEERFINCLOSEWAIT:
+               /* peer sending PeerConnectionClosed will cause transition */
+               break;
+       case SMC_PROCESSABORT:
+               cancel_work_sync(&conn->tx_work);
+               smc_close_abort(conn);
+               sk->sk_state = SMC_CLOSED;
+               smc_close_wait_tx_pends(smc);
+               break;
+       case SMC_PEERABORTWAIT:
+       case SMC_CLOSED:
+               /* nothing to do, add tracing in future patch */
+               break;
+       }
+
+       if (old_state != sk->sk_state)
+               sk->sk_state_change(&smc->sk);
+       return rc;
+}
+
+static void smc_close_passive_abort_received(struct smc_sock *smc)
+{
+       struct smc_cdc_conn_state_flags *txflags =
+               &smc->conn.local_tx_ctrl.conn_state_flags;
+       struct sock *sk = &smc->sk;
+
+       switch (sk->sk_state) {
+       case SMC_ACTIVE:
+       case SMC_APPFINCLOSEWAIT:
+       case SMC_APPCLOSEWAIT1:
+       case SMC_APPCLOSEWAIT2:
+               smc_close_abort(&smc->conn);
+               sk->sk_state = SMC_PROCESSABORT;
+               break;
+       case SMC_PEERCLOSEWAIT1:
+       case SMC_PEERCLOSEWAIT2:
+               if (txflags->peer_done_writing &&
+                   !txflags->peer_conn_closed) {
+                       /* just shutdown, but not yet closed locally */
+                       smc_close_abort(&smc->conn);
+                       sk->sk_state = SMC_PROCESSABORT;
+               } else {
+                       sk->sk_state = SMC_CLOSED;
+               }
+               break;
+       case SMC_PEERFINCLOSEWAIT:
+       case SMC_PEERABORTWAIT:
+               sk->sk_state = SMC_CLOSED;
+               break;
+       case SMC_INIT:
+       case SMC_PROCESSABORT:
+               /* nothing to do, add tracing in future patch */
+               break;
+       }
+}
+
+/* Some kind of closing has been received: peer_conn_closed, peer_conn_abort,
+ * or peer_done_writing.
+ * Called under tasklet context.
+ */
+void smc_close_passive_received(struct smc_sock *smc)
+{
+       struct smc_cdc_conn_state_flags *rxflags =
+               &smc->conn.local_rx_ctrl.conn_state_flags;
+       struct sock *sk = &smc->sk;
+       int old_state;
+
+       sk->sk_shutdown |= RCV_SHUTDOWN;
+       if (smc->clcsock && smc->clcsock->sk)
+               smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
+       sock_set_flag(&smc->sk, SOCK_DONE);
+
+       old_state = sk->sk_state;
+
+       if (rxflags->peer_conn_abort) {
+               smc_close_passive_abort_received(smc);
+               goto wakeup;
+       }
+
+       switch (sk->sk_state) {
+       case SMC_INIT:
+               if (atomic_read(&smc->conn.bytes_to_rcv) ||
+                   (rxflags->peer_done_writing &&
+                    !rxflags->peer_conn_closed))
+                       sk->sk_state = SMC_APPCLOSEWAIT1;
+               else
+                       sk->sk_state = SMC_CLOSED;
+               break;
+       case SMC_ACTIVE:
+               sk->sk_state = SMC_APPCLOSEWAIT1;
+               break;
+       case SMC_PEERCLOSEWAIT1:
+               if (rxflags->peer_done_writing)
+                       sk->sk_state = SMC_PEERCLOSEWAIT2;
+               /* fall through to check for closing */
+       case SMC_PEERCLOSEWAIT2:
+       case SMC_PEERFINCLOSEWAIT:
+               if (!smc_cdc_rxed_any_close(&smc->conn))
+                       break;
+               if (sock_flag(sk, SOCK_DEAD) &&
+                   (sk->sk_shutdown == SHUTDOWN_MASK)) {
+                       /* smc_release has already been called locally */
+                       sk->sk_state = SMC_CLOSED;
+               } else {
+                       /* just shutdown, but not yet closed locally */
+                       sk->sk_state = SMC_APPFINCLOSEWAIT;
+               }
+               break;
+       case SMC_APPCLOSEWAIT1:
+       case SMC_APPCLOSEWAIT2:
+       case SMC_APPFINCLOSEWAIT:
+       case SMC_PEERABORTWAIT:
+       case SMC_PROCESSABORT:
+       case SMC_CLOSED:
+               /* nothing to do, add tracing in future patch */
+               break;
+       }
+
+wakeup:
+       if (old_state != sk->sk_state)
+               sk->sk_state_change(sk);
+       sk->sk_data_ready(sk); /* wakeup blocked rcvbuf consumers */
+       sk->sk_write_space(sk); /* wakeup blocked sndbuf producers */
+
+       if ((sk->sk_state == SMC_CLOSED) &&
+           (sock_flag(sk, SOCK_DEAD) || (old_state == SMC_INIT))) {
+               smc_conn_free(&smc->conn);
+               schedule_delayed_work(&smc->sock_put_work,
+                                     SMC_CLOSE_SOCK_PUT_DELAY);
+       }
+}
+
+void smc_close_sock_put_work(struct work_struct *work)
+{
+       struct smc_sock *smc = container_of(to_delayed_work(work),
+                                           struct smc_sock,
+                                           sock_put_work);
+
+       smc->sk.sk_prot->unhash(&smc->sk);
+       sock_put(&smc->sk);
+}
+
+int smc_close_shutdown_write(struct smc_sock *smc)
+{
+       struct smc_connection *conn = &smc->conn;
+       long timeout = SMC_MAX_STREAM_WAIT_TIMEOUT;
+       struct sock *sk = &smc->sk;
+       int old_state;
+       int rc = 0;
+
+       if (sock_flag(sk, SOCK_LINGER))
+               timeout = sk->sk_lingertime;
+
+again:
+       old_state = sk->sk_state;
+       switch (old_state) {
+       case SMC_ACTIVE:
+               smc_close_stream_wait(smc, timeout);
+               release_sock(sk);
+               cancel_work_sync(&conn->tx_work);
+               lock_sock(sk);
+               /* send close wr request */
+               rc = smc_close_wr(conn);
+               if (sk->sk_state == SMC_ACTIVE)
+                       sk->sk_state = SMC_PEERCLOSEWAIT1;
+               else
+                       goto again;
+               break;
+       case SMC_APPCLOSEWAIT1:
+               /* passive close */
+               if (!smc_cdc_rxed_any_close(conn))
+                       smc_close_stream_wait(smc, timeout);
+               release_sock(sk);
+               cancel_work_sync(&conn->tx_work);
+               lock_sock(sk);
+               /* confirm close from peer */
+               rc = smc_close_wr(conn);
+               sk->sk_state = SMC_APPCLOSEWAIT2;
+               break;
+       case SMC_APPCLOSEWAIT2:
+       case SMC_PEERFINCLOSEWAIT:
+       case SMC_PEERCLOSEWAIT1:
+       case SMC_PEERCLOSEWAIT2:
+       case SMC_APPFINCLOSEWAIT:
+       case SMC_PROCESSABORT:
+       case SMC_PEERABORTWAIT:
+               /* nothing to do, add tracing in future patch */
+               break;
+       }
+
+       if (old_state != sk->sk_state)
+               sk->sk_state_change(&smc->sk);
+       return rc;
+}
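
The functions above implement a close state machine over sk->sk_state. A toy user-space model of the orderly active-close path, SMC_ACTIVE to SMC_PEERCLOSEWAIT1 to SMC_CLOSED (state names mirror the kernel's; the logic is deliberately reduced and handles no aborts):

    #include <stdio.h>

    enum state { ACTIVE, PEERCLOSEWAIT1, CLOSED };

    /* one transition step: local close request, then wait for the peer */
    static enum state close_step(enum state s, int peer_closed)
    {
            switch (s) {
            case ACTIVE:
                    return PEERCLOSEWAIT1;  /* close request sent */
            case PEERCLOSEWAIT1:
                    return peer_closed ? CLOSED : PEERCLOSEWAIT1;
            default:
                    return CLOSED;
            }
    }

    int main(void)
    {
            enum state s = ACTIVE;

            s = close_step(s, 0);   /* application calls close() */
            s = close_step(s, 1);   /* peer's peer_conn_closed arrives */
            printf("%s\n", s == CLOSED ? "CLOSED" : "still waiting");
            return 0;
    }
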
diff --git a/net/smc/smc_close.h b/net/smc/smc_close.h
new file mode 100644 (file)
index 0000000..bc9a2df
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Socket Closing
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#ifndef SMC_CLOSE_H
+#define SMC_CLOSE_H
+
+#include <linux/workqueue.h>
+
+#include "smc.h"
+
+#define SMC_MAX_STREAM_WAIT_TIMEOUT            (2 * HZ)
+#define SMC_CLOSE_SOCK_PUT_DELAY               HZ
+
+void smc_close_wake_tx_prepared(struct smc_sock *smc);
+void smc_close_active_abort(struct smc_sock *smc);
+int smc_close_active(struct smc_sock *smc);
+void smc_close_passive_received(struct smc_sock *smc);
+void smc_close_sock_put_work(struct work_struct *work);
+int smc_close_shutdown_write(struct smc_sock *smc);
+
+#endif /* SMC_CLOSE_H */
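
Both smc_close_wait_tx_pends() and smc_close_stream_wait() use the same bounded-wait idiom: sleep until a predicate holds or a timeout expires, whichever comes first. A user-space analogue with pthreads (illustrative; the kernel versions use sk_wait_event() on the socket wait queue instead):

    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static bool tx_pending = true;  /* the predicate being waited on */

    /* returns true if the predicate became false before the timeout */
    static bool wait_tx_drained(unsigned int timeout_sec)
    {
            struct timespec deadline;
            bool drained;

            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec += timeout_sec;

            pthread_mutex_lock(&lock);
            while (tx_pending) {
                    if (pthread_cond_timedwait(&cond, &lock, &deadline))
                            break;  /* ETIMEDOUT: give up, like the 5*HZ cap */
            }
            drained = !tx_pending;
            pthread_mutex_unlock(&lock);
            return drained;
    }

    int main(void)
    {
            /* nothing ever clears tx_pending here, so this times out */
            return wait_tx_drained(1) ? 0 : 1;
    }
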
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
new file mode 100644 (file)
index 0000000..0eac633
--- /dev/null
@@ -0,0 +1,682 @@
+/*
+ *  Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ *  Basic Transport Functions exploiting Infiniband API
+ *
+ *  Copyright IBM Corp. 2016
+ *
+ *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#include <linux/socket.h>
+#include <linux/if_vlan.h>
+#include <linux/random.h>
+#include <linux/workqueue.h>
+#include <net/tcp.h>
+#include <net/sock.h>
+#include <rdma/ib_verbs.h>
+
+#include "smc.h"
+#include "smc_clc.h"
+#include "smc_core.h"
+#include "smc_ib.h"
+#include "smc_wr.h"
+#include "smc_llc.h"
+#include "smc_cdc.h"
+#include "smc_close.h"
+
+#define SMC_LGR_NUM_INCR       256
+#define SMC_LGR_FREE_DELAY     (600 * HZ)
+
+static u32 smc_lgr_num;                        /* unique link group number */
+
+/* Register connection's alert token in our lookup structure.
+ * To use rbtrees we have to implement our own insert core.
+ * Requires @conns_lock
+ * @conn       connection to register
+ */
+static void smc_lgr_add_alert_token(struct smc_connection *conn)
+{
+       struct rb_node **link, *parent = NULL;
+       u32 token = conn->alert_token_local;
+
+       link = &conn->lgr->conns_all.rb_node;
+       while (*link) {
+               struct smc_connection *cur = rb_entry(*link,
+                                       struct smc_connection, alert_node);
+
+               parent = *link;
+               if (cur->alert_token_local > token)
+                       link = &parent->rb_left;
+               else
+                       link = &parent->rb_right;
+       }
+       /* Put the new node there */
+       rb_link_node(&conn->alert_node, parent, link);
+       rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
+}
+
+/* Register connection in link group by assigning an alert token
+ * registered in a search tree.
+ * Requires @conns_lock
+ * Note that '0' is a reserved value and not assigned.
+ */
+static void smc_lgr_register_conn(struct smc_connection *conn)
+{
+       struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+       static atomic_t nexttoken = ATOMIC_INIT(0);
+
+       /* find a new alert_token_local value not yet used by some connection
+        * in this link group
+        */
+       sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
+       while (!conn->alert_token_local) {
+               conn->alert_token_local = atomic_inc_return(&nexttoken);
+               if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
+                       conn->alert_token_local = 0;
+       }
+       smc_lgr_add_alert_token(conn);
+       conn->lgr->conns_num++;
+}
+
+/* Unregister connection and reset the alert token of the given connection.
+ */
+static void __smc_lgr_unregister_conn(struct smc_connection *conn)
+{
+       struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+       struct smc_link_group *lgr = conn->lgr;
+
+       rb_erase(&conn->alert_node, &lgr->conns_all);
+       lgr->conns_num--;
+       conn->alert_token_local = 0;
+       conn->lgr = NULL;
+       sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
+}
+
+/* Unregister connection and trigger lgr freeing if applicable
+ */
+static void smc_lgr_unregister_conn(struct smc_connection *conn)
+{
+       struct smc_link_group *lgr = conn->lgr;
+       int reduced = 0;
+
+       write_lock_bh(&lgr->conns_lock);
+       if (conn->alert_token_local) {
+               reduced = 1;
+               __smc_lgr_unregister_conn(conn);
+       }
+       write_unlock_bh(&lgr->conns_lock);
+       if (reduced && !lgr->conns_num)
+               schedule_delayed_work(&lgr->free_work, SMC_LGR_FREE_DELAY);
+}
+
+static void smc_lgr_free_work(struct work_struct *work)
+{
+       struct smc_link_group *lgr = container_of(to_delayed_work(work),
+                                                 struct smc_link_group,
+                                                 free_work);
+       bool conns;
+
+       spin_lock_bh(&smc_lgr_list.lock);
+       read_lock_bh(&lgr->conns_lock);
+       conns = RB_EMPTY_ROOT(&lgr->conns_all);
+       read_unlock_bh(&lgr->conns_lock);
+       if (!conns) { /* number of lgr connections is no longer zero */
+               spin_unlock_bh(&smc_lgr_list.lock);
+               return;
+       }
+       list_del_init(&lgr->list); /* remove from smc_lgr_list */
+       spin_unlock_bh(&smc_lgr_list.lock);
+       smc_lgr_free(lgr);
+}
+
+/* create a new SMC link group */
+static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr,
+                         struct smc_ib_device *smcibdev, u8 ibport,
+                         char *peer_systemid, unsigned short vlan_id)
+{
+       struct smc_link_group *lgr;
+       struct smc_link *lnk;
+       u8 rndvec[3];
+       int rc = 0;
+       int i;
+
+       lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
+       if (!lgr) {
+               rc = -ENOMEM;
+               goto out;
+       }
+       lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
+       lgr->sync_err = false;
+       lgr->daddr = peer_in_addr;
+       memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN);
+       lgr->vlan_id = vlan_id;
+       rwlock_init(&lgr->sndbufs_lock);
+       rwlock_init(&lgr->rmbs_lock);
+       for (i = 0; i < SMC_RMBE_SIZES; i++) {
+               INIT_LIST_HEAD(&lgr->sndbufs[i]);
+               INIT_LIST_HEAD(&lgr->rmbs[i]);
+       }
+       smc_lgr_num += SMC_LGR_NUM_INCR;
+       memcpy(&lgr->id, (u8 *)&smc_lgr_num, SMC_LGR_ID_SIZE);
+       INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
+       lgr->conns_all = RB_ROOT;
+
+       lnk = &lgr->lnk[SMC_SINGLE_LINK];
+       /* initialize link */
+       lnk->smcibdev = smcibdev;
+       lnk->ibport = ibport;
+       lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
+       if (!smcibdev->initialized)
+               smc_ib_setup_per_ibdev(smcibdev);
+       get_random_bytes(rndvec, sizeof(rndvec));
+       lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16);
+       rc = smc_wr_alloc_link_mem(lnk);
+       if (rc)
+               goto free_lgr;
+       init_waitqueue_head(&lnk->wr_tx_wait);
+       rc = smc_ib_create_protection_domain(lnk);
+       if (rc)
+               goto free_link_mem;
+       rc = smc_ib_create_queue_pair(lnk);
+       if (rc)
+               goto dealloc_pd;
+       rc = smc_wr_create_link(lnk);
+       if (rc)
+               goto destroy_qp;
+       init_completion(&lnk->llc_confirm);
+       init_completion(&lnk->llc_confirm_resp);
+
+       smc->conn.lgr = lgr;
+       rwlock_init(&lgr->conns_lock);
+       spin_lock_bh(&smc_lgr_list.lock);
+       list_add(&lgr->list, &smc_lgr_list.list);
+       spin_unlock_bh(&smc_lgr_list.lock);
+       return 0;
+
+destroy_qp:
+       smc_ib_destroy_queue_pair(lnk);
+dealloc_pd:
+       smc_ib_dealloc_protection_domain(lnk);
+free_link_mem:
+       smc_wr_free_link_mem(lnk);
+free_lgr:
+       kfree(lgr);
+out:
+       return rc;
+}
+
+static void smc_sndbuf_unuse(struct smc_connection *conn)
+{
+       if (conn->sndbuf_desc) {
+               conn->sndbuf_desc->used = 0;
+               conn->sndbuf_size = 0;
+       }
+}
+
+static void smc_rmb_unuse(struct smc_connection *conn)
+{
+       if (conn->rmb_desc) {
+               conn->rmb_desc->used = 0;
+               conn->rmbe_size = 0;
+       }
+}
+
+/* remove a finished connection from its link group */
+void smc_conn_free(struct smc_connection *conn)
+{
+       struct smc_link_group *lgr = conn->lgr;
+
+       if (!lgr)
+               return;
+       smc_cdc_tx_dismiss_slots(conn);
+       smc_lgr_unregister_conn(conn);
+       smc_rmb_unuse(conn);
+       smc_sndbuf_unuse(conn);
+}
+
+static void smc_link_clear(struct smc_link *lnk)
+{
+       lnk->peer_qpn = 0;
+       smc_ib_modify_qp_reset(lnk);
+       smc_wr_free_link(lnk);
+       smc_ib_destroy_queue_pair(lnk);
+       smc_ib_dealloc_protection_domain(lnk);
+       smc_wr_free_link_mem(lnk);
+}
+
+static void smc_lgr_free_sndbufs(struct smc_link_group *lgr)
+{
+       struct smc_buf_desc *sndbuf_desc, *bf_desc;
+       int i;
+
+       for (i = 0; i < SMC_RMBE_SIZES; i++) {
+               list_for_each_entry_safe(sndbuf_desc, bf_desc, &lgr->sndbufs[i],
+                                        list) {
+                       list_del(&sndbuf_desc->list);
+                       smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
+                                        smc_uncompress_bufsize(i),
+                                        sndbuf_desc, DMA_TO_DEVICE);
+                       kfree(sndbuf_desc->cpu_addr);
+                       kfree(sndbuf_desc);
+               }
+       }
+}
+
+static void smc_lgr_free_rmbs(struct smc_link_group *lgr)
+{
+       struct smc_buf_desc *rmb_desc, *bf_desc;
+       struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
+       int i;
+
+       for (i = 0; i < SMC_RMBE_SIZES; i++) {
+               list_for_each_entry_safe(rmb_desc, bf_desc, &lgr->rmbs[i],
+                                        list) {
+                       list_del(&rmb_desc->list);
+                       smc_ib_buf_unmap(lnk->smcibdev,
+                                        smc_uncompress_bufsize(i),
+                                        rmb_desc, DMA_FROM_DEVICE);
+                       kfree(rmb_desc->cpu_addr);
+                       kfree(rmb_desc);
+               }
+       }
+}
+
+/* remove a link group */
+void smc_lgr_free(struct smc_link_group *lgr)
+{
+       smc_lgr_free_rmbs(lgr);
+       smc_lgr_free_sndbufs(lgr);
+       smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
+       kfree(lgr);
+}
+
+/* terminate link group abnormally */
+void smc_lgr_terminate(struct smc_link_group *lgr)
+{
+       struct smc_connection *conn;
+       struct smc_sock *smc;
+       struct rb_node *node;
+
+       spin_lock_bh(&smc_lgr_list.lock);
+       if (list_empty(&lgr->list)) {
+               /* termination already triggered */
+               spin_unlock_bh(&smc_lgr_list.lock);
+               return;
+       }
+       /* do not use this link group for new connections */
+       list_del_init(&lgr->list);
+       spin_unlock_bh(&smc_lgr_list.lock);
+
+       write_lock_bh(&lgr->conns_lock);
+       node = rb_first(&lgr->conns_all);
+       while (node) {
+               conn = rb_entry(node, struct smc_connection, alert_node);
+               smc = container_of(conn, struct smc_sock, conn);
+               sock_hold(&smc->sk);
+               __smc_lgr_unregister_conn(conn);
+               smc_close_active_abort(smc);
+               sock_put(&smc->sk);
+               node = rb_first(&lgr->conns_all);
+       }
+       write_unlock_bh(&lgr->conns_lock);
+}
+
+/* Determine vlan of internal TCP socket.
+ * @vlan_id: address to store the determined vlan id into
+ */
+static int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
+{
+       struct dst_entry *dst = sk_dst_get(clcsock->sk);
+       int rc = 0;
+
+       *vlan_id = 0;
+       if (!dst) {
+               rc = -ENOTCONN;
+               goto out;
+       }
+       if (!dst->dev) {
+               rc = -ENODEV;
+               goto out_rel;
+       }
+
+       if (is_vlan_dev(dst->dev))
+               *vlan_id = vlan_dev_vlan_id(dst->dev);
+
+out_rel:
+       dst_release(dst);
+out:
+       return rc;
+}
+
+/* determine the link gid matching the vlan id of the link group */
+static int smc_link_determine_gid(struct smc_link_group *lgr)
+{
+       struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
+       struct ib_gid_attr gattr;
+       union ib_gid gid;
+       int i;
+
+       if (!lgr->vlan_id) {
+               lnk->gid = lnk->smcibdev->gid[lnk->ibport - 1];
+               return 0;
+       }
+
+       for (i = 0; i < lnk->smcibdev->pattr[lnk->ibport - 1].gid_tbl_len;
+            i++) {
+               if (ib_query_gid(lnk->smcibdev->ibdev, lnk->ibport, i, &gid,
+                                &gattr))
+                       continue;
+               if (gattr.ndev &&
+                   (vlan_dev_vlan_id(gattr.ndev) == lgr->vlan_id)) {
+                       lnk->gid = gid;
+                       return 0;
+               }
+       }
+       return -ENODEV;
+}
+
+/* create a new SMC connection (and a new link group if necessary) */
+int smc_conn_create(struct smc_sock *smc, __be32 peer_in_addr,
+                   struct smc_ib_device *smcibdev, u8 ibport,
+                   struct smc_clc_msg_local *lcl, int srv_first_contact)
+{
+       struct smc_connection *conn = &smc->conn;
+       struct smc_link_group *lgr;
+       unsigned short vlan_id;
+       enum smc_lgr_role role;
+       int local_contact = SMC_FIRST_CONTACT;
+       int rc = 0;
+
+       role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
+       rc = smc_vlan_by_tcpsk(smc->clcsock, &vlan_id);
+       if (rc)
+               return rc;
+
+       if ((role == SMC_CLNT) && srv_first_contact)
+               /* create new link group as well */
+               goto create;
+
+       /* determine if an existing link group can be reused */
+       spin_lock_bh(&smc_lgr_list.lock);
+       list_for_each_entry(lgr, &smc_lgr_list.list, list) {
+               write_lock_bh(&lgr->conns_lock);
+               if (!memcmp(lgr->peer_systemid, lcl->id_for_peer,
+                           SMC_SYSTEMID_LEN) &&
+                   !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
+                           SMC_GID_SIZE) &&
+                   !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
+                           sizeof(lcl->mac)) &&
+                   !lgr->sync_err &&
+                   (lgr->role == role) &&
+                   (lgr->vlan_id == vlan_id) &&
+                   ((role == SMC_CLNT) ||
+                    (lgr->conns_num < SMC_RMBS_PER_LGR_MAX))) {
+                       /* link group found */
+                       local_contact = SMC_REUSE_CONTACT;
+                       conn->lgr = lgr;
+                       smc_lgr_register_conn(conn); /* add smc conn to lgr */
+                       write_unlock_bh(&lgr->conns_lock);
+                       break;
+               }
+               write_unlock_bh(&lgr->conns_lock);
+       }
+       spin_unlock_bh(&smc_lgr_list.lock);
+
+       if (role == SMC_CLNT && !srv_first_contact &&
+           (local_contact == SMC_FIRST_CONTACT)) {
+               /* The server reuses a link group, but the client wants to
+                * start a new one; send an out_of_sync decline with reason
+                * "synchronization error".
+                */
+               return -ENOLINK;
+       }
+
+create:
+       if (local_contact == SMC_FIRST_CONTACT) {
+               rc = smc_lgr_create(smc, peer_in_addr, smcibdev, ibport,
+                                   lcl->id_for_peer, vlan_id);
+               if (rc)
+                       goto out;
+               smc_lgr_register_conn(conn); /* add smc conn to lgr */
+               rc = smc_link_determine_gid(conn->lgr);
+       }
+       conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
+       conn->local_tx_ctrl.len = sizeof(struct smc_cdc_msg);
+#ifndef KERNEL_HAS_ATOMIC64
+       spin_lock_init(&conn->acurs_lock);
+#endif
+
+out:
+       return rc ? rc : local_contact;
+}
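
smc_conn_create() folds two results into one return value: a negative errno
on failure, otherwise the local contact state. A hypothetical call site (the
real callers live in af_smc.c, outside this hunk; variable names below are
illustrative only) would decode it like this:

        rc = smc_conn_create(smc, peer_in_addr, smcibdev, ibport,
                             lcl, srv_first_contact);
        if (rc < 0)
                return rc;      /* e.g. -ENOLINK, or an smc_lgr_create() error */
        local_contact = rc;     /* SMC_FIRST_CONTACT or SMC_REUSE_CONTACT */
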
+
+/* try to reuse a sndbuf description slot of the sndbufs list for a certain
+ * buf_size; if not available, return NULL
+ */
+static inline
+struct smc_buf_desc *smc_sndbuf_get_slot(struct smc_link_group *lgr,
+                                        int compressed_bufsize)
+{
+       struct smc_buf_desc *sndbuf_slot;
+
+       read_lock_bh(&lgr->sndbufs_lock);
+       list_for_each_entry(sndbuf_slot, &lgr->sndbufs[compressed_bufsize],
+                           list) {
+               if (cmpxchg(&sndbuf_slot->used, 0, 1) == 0) {
+                       read_unlock_bh(&lgr->sndbufs_lock);
+                       return sndbuf_slot;
+               }
+       }
+       read_unlock_bh(&lgr->sndbufs_lock);
+       return NULL;
+}
+
+/* try to reuse an rmb description slot of the rmbs list for a certain
+ * rmbe_size; if not available, return NULL
+ */
+static inline
+struct smc_buf_desc *smc_rmb_get_slot(struct smc_link_group *lgr,
+                                     int compressed_bufsize)
+{
+       struct smc_buf_desc *rmb_slot;
+
+       read_lock_bh(&lgr->rmbs_lock);
+       list_for_each_entry(rmb_slot, &lgr->rmbs[compressed_bufsize],
+                           list) {
+               if (cmpxchg(&rmb_slot->used, 0, 1) == 0) {
+                       read_unlock_bh(&lgr->rmbs_lock);
+                       return rmb_slot;
+               }
+       }
+       read_unlock_bh(&lgr->rmbs_lock);
+       return NULL;
+}
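
In both get_slot helpers the read lock only keeps the list stable during
traversal; exclusive ownership of an individual descriptor comes from the
atomic cmpxchg() of "used" from 0 to 1, which succeeds for at most one
contender. A standalone C11 analogue of the claim step (a sketch, not the
kernel code):

        #include <stdatomic.h>

        struct buf_desc {
                _Atomic int used;       /* 0 = free, 1 = claimed */
        };

        /* returns nonzero iff the caller won the descriptor */
        static int try_claim(struct buf_desc *d)
        {
                int expected = 0;

                /* like cmpxchg(&d->used, 0, 1) == 0 above */
                return atomic_compare_exchange_strong(&d->used,
                                                      &expected, 1);
        }
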
+
+/* one of the conditions for announcing a receiver's current window size is
+ * that it "results in a minimum increase in the window size of 10% of the
+ * receive buffer space" [RFC7609]
+ */
+static inline int smc_rmb_wnd_update_limit(int rmbe_size)
+{
+       return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
+}
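
Two worked cases, assuming for illustration that SOCK_MIN_SNDBUF (defined in
net/sock.h) is 4608 bytes: for a 16 KB RMB the 10% rule yields 1638 bytes
and wins the min_t(); for a 256 KB RMB the 10% rule would give 26214 bytes,
so the cap of SOCK_MIN_SNDBUF / 2 = 2304 bytes applies instead.

        smc_rmb_wnd_update_limit(16384);        /* -> 1638 (10% rule) */
        smc_rmb_wnd_update_limit(262144);       /* -> 2304 (capped)   */
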
+
+/* create the tx buffer for an SMC socket */
+int smc_sndbuf_create(struct smc_sock *smc)
+{
+       struct smc_connection *conn = &smc->conn;
+       struct smc_link_group *lgr = conn->lgr;
+       int tmp_bufsize, tmp_bufsize_short;
+       struct smc_buf_desc *sndbuf_desc;
+       int rc;
+
+       /* use socket send buffer size (w/o overhead) as start value */
+       for (tmp_bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2);
+            tmp_bufsize_short >= 0; tmp_bufsize_short--) {
+               tmp_bufsize = smc_uncompress_bufsize(tmp_bufsize_short);
+               /* check for reusable sndbuf_slot in the link group */
+               sndbuf_desc = smc_sndbuf_get_slot(lgr, tmp_bufsize_short);
+               if (sndbuf_desc) {
+                       memset(sndbuf_desc->cpu_addr, 0, tmp_bufsize);
+                       break; /* found reusable slot */
+               }
+               /* try to alloc a new send buffer */
+               sndbuf_desc = kzalloc(sizeof(*sndbuf_desc), GFP_KERNEL);
+               if (!sndbuf_desc)
+                       break; /* give up with -ENOMEM */
+               sndbuf_desc->cpu_addr = kzalloc(tmp_bufsize,
+                                               GFP_KERNEL | __GFP_NOWARN |
+                                               __GFP_NOMEMALLOC |
+                                               __GFP_NORETRY);
+               if (!sndbuf_desc->cpu_addr) {
+                       kfree(sndbuf_desc);
+                       sndbuf_desc = NULL;
+                       /* if send buffer allocation has failed,
+                        * try a smaller one
+                        */
+                       continue;
+               }
+               rc = smc_ib_buf_map(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
+                                   tmp_bufsize, sndbuf_desc,
+                                   DMA_TO_DEVICE);
+               if (rc) {
+                       kfree(sndbuf_desc->cpu_addr);
+                       kfree(sndbuf_desc);
+                       sndbuf_desc = NULL;
+                       continue; /* if mapping failed, try smaller one */
+               }
+               sndbuf_desc->used = 1;
+               write_lock_bh(&lgr->sndbufs_lock);
+               list_add(&sndbuf_desc->list,
+                        &lgr->sndbufs[tmp_bufsize_short]);
+               write_unlock_bh(&lgr->sndbufs_lock);
+               break;
+       }
+       if (sndbuf_desc && sndbuf_desc->cpu_addr) {
+               conn->sndbuf_desc = sndbuf_desc;
+               conn->sndbuf_size = tmp_bufsize;
+               smc->sk.sk_sndbuf = tmp_bufsize * 2;
+               atomic_set(&conn->sndbuf_space, tmp_bufsize);
+               return 0;
+       } else {
+               return -ENOMEM;
+       }
+}
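
The descending retry loop here (and in smc_rmb_create() below) relies on the
compressed buffer-size encoding; smc_compress_bufsize() and
smc_uncompress_bufsize() are defined earlier in smc_core.c and are not part
of this hunk. A sketch of the assumed mapping, following RFC 7609's
power-of-two sizes with a 16 KB minimum:

        /* assumed encoding: index i represents a buffer of 2^(i + 14)
         * bytes, i.e. i = 0 -> 16 KB, i = 1 -> 32 KB, ...; decrementing
         * tmp_bufsize_short therefore halves the attempted size
         */
        static int uncompress_bufsize_sketch(int i)
        {
                return 1 << (i + 14);
        }
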
+
+/* create the RMB for an SMC socket (even though the SMC protocol
+ * allows more than one RMB-element per RMB, the Linux implementation
+ * uses just one RMB-element per RMB, i.e. uses an extra RMB for every
+ * connection in a link group)
+ */
+int smc_rmb_create(struct smc_sock *smc)
+{
+       struct smc_connection *conn = &smc->conn;
+       struct smc_link_group *lgr = conn->lgr;
+       int tmp_bufsize, tmp_bufsize_short;
+       struct smc_buf_desc *rmb_desc;
+       int rc;
+
+       /* use socket recv buffer size (w/o overhead) as start value */
+       for (tmp_bufsize_short = smc_compress_bufsize(smc->sk.sk_rcvbuf / 2);
+            tmp_bufsize_short >= 0; tmp_bufsize_short--) {
+               tmp_bufsize = smc_uncompress_bufsize(tmp_bufsize_short);
+               /* check for reusable rmb_slot in the link group */
+               rmb_desc = smc_rmb_get_slot(lgr, tmp_bufsize_short);
+               if (rmb_desc) {
+                       memset(rmb_desc->cpu_addr, 0, tmp_bufsize);
+                       break; /* found reusable slot */
+               }
+               /* try to alloc a new RMB */
+               rmb_desc = kzalloc(sizeof(*rmb_desc), GFP_KERNEL);
+               if (!rmb_desc)
+                       break; /* give up with -ENOMEM */
+               rmb_desc->cpu_addr = kzalloc(tmp_bufsize,
+                                            GFP_KERNEL | __GFP_NOWARN |
+                                            __GFP_NOMEMALLOC |
+                                            __GFP_NORETRY);
+               if (!rmb_desc->cpu_addr) {
+                       kfree(rmb_desc);
+                       rmb_desc = NULL;
+                       /* if RMB allocation has failed,
+                        * try a smaller one
+                        */
+                       continue;
+               }
+               rc = smc_ib_buf_map(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
+                                   tmp_bufsize, rmb_desc,
+                                   DMA_FROM_DEVICE);
+               if (rc) {
+                       kfree(rmb_desc->cpu_addr);
+                       kfree(rmb_desc);
+                       rmb_desc = NULL;
+                       continue; /* if mapping failed, try smaller one */
+               }
+               rc = smc_ib_get_memory_region(lgr->lnk[SMC_SINGLE_LINK].roce_pd,
+                                             IB_ACCESS_REMOTE_WRITE |
+                                             IB_ACCESS_LOCAL_WRITE,
+                                            &rmb_desc->mr_rx[SMC_SINGLE_LINK]);
+               if (rc) {
+                       smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
+                                        tmp_bufsize, rmb_desc,
+                                        DMA_FROM_DEVICE);
+                       kfree(rmb_desc->cpu_addr);
+                       kfree(rmb_desc);
+                       rmb_desc = NULL;
+                       continue;
+               }
+               rmb_desc->used = 1;
+               write_lock_bh(&lgr->rmbs_lock);
+               list_add(&rmb_desc->list,
+                        &lgr->rmbs[tmp_bufsize_short]);
+               write_unlock_bh(&lgr->rmbs_lock);
+               break;
+       }
+       if (rmb_desc && rmb_desc->cpu_addr) {
+               conn->rmb_desc = rmb_desc;
+               conn->rmbe_size = tmp_bufsize;
+               conn->rmbe_size_short = tmp_bufsize_short;
+               smc->sk.sk_rcvbuf = tmp_bufsize * 2;
+               atomic_set(&conn->bytes_to_rcv, 0);
+               conn->rmbe_update_limit = smc_rmb_wnd_update_limit(tmp_bufsize);
+               return 0;
+       } else {
+               return -ENOMEM;
+       }
+}
+
+static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
+{
+       int i;
+
+       for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
+               if (!test_and_set_bit(i, lgr->rtokens_used_mask))
+                       return i;
+       }
+       return -ENOSPC;
+}
+
+/* save rkey and dma_addr received from peer during clc handshake */
+int smc_rmb_rtoken_handling(struct smc_connection *conn,
+                           struct smc_clc_msg_accept_confirm *clc)
+{
+       u64 dma_addr = be64_to_cpu(clc->rmb_dma_addr);
+       struct smc_link_group *lgr = conn->lgr;
+       u32 rkey = ntohl(clc->rmb_rkey);
+       int i;
+
+       for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
+               if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
+                   test_bit(i, lgr->rtokens_used_mask)) {
+                       conn->rtoken_idx = i;
+                       return 0;
+               }
+       }
+       conn->rtoken_idx = smc_rmb_reserve_rtoken_idx(lgr);
+       if (conn->rtoken_idx < 0)
+               return conn->rtoken_idx;
+       lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey = rkey;
+       lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr = dma_addr;
+       return 0;
+}
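
smc_rmb_reserve_rtoken_idx() above pairs a plain scan (for_each_clear_bit())
with an atomic claim (test_and_set_bit()): a bit that becomes set between
scan and claim simply makes test_and_set_bit() return true and the scan
moves on, so two racing connections can never be handed the same index. A
standalone analogue using GCC/Clang builtins (a sketch, not the kernel
helpers):

        /* reserve the first free index in a one-word bitmap, atomically */
        static int reserve_idx(unsigned long *mask, int max)
        {
                int i;

                for (i = 0; i < max && i < 8 * (int)sizeof(*mask); i++) {
                        unsigned long bit = 1UL << i;

                        /* fetch-or returns the old word: if our bit was
                         * clear, we are the one who set it
                         */
                        if (!(__atomic_fetch_or(mask, bit,
                                                __ATOMIC_SEQ_CST) & bit))
                                return i;
                }
                return -1;      /* the kernel version returns -ENOSPC */
        }
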
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
new file mode 100644 (file)
index 0000000..27eb380
--- /dev/null
@@ -0,0 +1,181 @@
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ *  Definitions for SMC Connections, Link Groups and Links
+ *
+ *  Copyright IBM Corp. 2016
+ *
+ *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#ifndef _SMC_CORE_H
+#define _SMC_CORE_H
+
+#include <linux/atomic.h>
+#include <rdma/ib_verbs.h>
+
+#include "smc.h"
+#include "smc_ib.h"
+
+#define SMC_RMBS_PER_LGR_MAX   255     /* max. # of RMBs per link group */
+
+struct smc_lgr_list {                  /* anchor for list of link groups */
+       struct list_head        list;
+       spinlock_t              lock;   /* protects list of link groups */
+};
+
+extern struct smc_lgr_list     smc_lgr_list; /* list of link groups */
+
+enum smc_lgr_role {            /* possible roles of a link group */
+       SMC_CLNT,       /* client */
+       SMC_SERV        /* server */
+};
+
+#define SMC_WR_BUF_SIZE                48      /* size of work request buffer */
+
+struct smc_wr_buf {
+       u8      raw[SMC_WR_BUF_SIZE];
+};
+
+struct smc_link {
+       struct smc_ib_device    *smcibdev;      /* ib-device */
+       u8                      ibport;         /* port - values 1 | 2 */
+       struct ib_pd            *roce_pd;       /* IB protection domain,
+                                                * unique for every RoCE QP
+                                                */
+       struct ib_qp            *roce_qp;       /* IB queue pair */
+       struct ib_qp_attr       qp_attr;        /* IB queue pair attributes */
+
+       struct smc_wr_buf       *wr_tx_bufs;    /* WR send payload buffers */
+       struct ib_send_wr       *wr_tx_ibs;     /* WR send meta data */
+       struct ib_sge           *wr_tx_sges;    /* WR send gather meta data */
+       struct smc_wr_tx_pend   *wr_tx_pends;   /* WR send waiting for CQE */
+       /* above four vectors have wr_tx_cnt elements and use the same index */
+       dma_addr_t              wr_tx_dma_addr; /* DMA address of wr_tx_bufs */
+       atomic_long_t           wr_tx_id;       /* seq # of last sent WR */
+       unsigned long           *wr_tx_mask;    /* bit mask of used indexes */
+       u32                     wr_tx_cnt;      /* number of WR send buffers */
+       wait_queue_head_t       wr_tx_wait;     /* wait for free WR send buf */
+
+       struct smc_wr_buf       *wr_rx_bufs;    /* WR recv payload buffers */
+       struct ib_recv_wr       *wr_rx_ibs;     /* WR recv meta data */
+       struct ib_sge           *wr_rx_sges;    /* WR recv scatter meta data */
+       /* above three vectors have wr_rx_cnt elements and use the same index */
+       dma_addr_t              wr_rx_dma_addr; /* DMA address of wr_rx_bufs */
+       u64                     wr_rx_id;       /* seq # of last recv WR */
+       u32                     wr_rx_cnt;      /* number of WR recv buffers */
+
+       union ib_gid            gid;            /* gid matching used vlan id */
+       u32                     peer_qpn;       /* QP number of peer */
+       enum ib_mtu             path_mtu;       /* used mtu */
+       enum ib_mtu             peer_mtu;       /* mtu size of peer */
+       u32                     psn_initial;    /* QP tx initial packet seqno */
+       u32                     peer_psn;       /* QP rx initial packet seqno */
+       u8                      peer_mac[ETH_ALEN];     /* = gid[8:10||13:15] */
+       u8                      peer_gid[sizeof(union ib_gid)]; /* gid of peer*/
+       u8                      link_id;        /* unique # within link group */
+       struct completion       llc_confirm;    /* wait for rx of conf link */
+       struct completion       llc_confirm_resp; /* wait for rx of conf link rsp */
+};
+
+/* For now we just allow one parallel link per link group. The SMC protocol
+ * allows more (up to 8).
+ */
+#define SMC_LINKS_PER_LGR_MAX  1
+#define SMC_SINGLE_LINK                0
+
+#define SMC_FIRST_CONTACT      1               /* first contact to a peer */
+#define SMC_REUSE_CONTACT      0               /* follow-on contact to a peer*/
+
+/* tx/rx buffer list element for sndbufs list and rmbs list of a lgr */
+struct smc_buf_desc {
+       struct list_head        list;
+       u64                     dma_addr[SMC_LINKS_PER_LGR_MAX];
+                                               /* mapped address of buffer */
+       void                    *cpu_addr;      /* virtual address of buffer */
+       struct ib_mr            *mr_rx[SMC_LINKS_PER_LGR_MAX];
+                                               /* for rmb only:
+                                                * rkey provided to peer
+                                                */
+       u32                     used;           /* currently used / unused */
+};
+
+struct smc_rtoken {                            /* address/key of remote RMB */
+       u64                     dma_addr;
+       u32                     rkey;
+};
+
+#define SMC_LGR_ID_SIZE                4
+
+struct smc_link_group {
+       struct list_head        list;
+       enum smc_lgr_role       role;           /* client or server */
+       __be32                  daddr;          /* destination ip address */
+       struct smc_link         lnk[SMC_LINKS_PER_LGR_MAX];     /* smc link */
+       char                    peer_systemid[SMC_SYSTEMID_LEN];
+                                               /* unique system_id of peer */
+       struct rb_root          conns_all;      /* connection tree */
+       rwlock_t                conns_lock;     /* protects conns_all */
+       unsigned int            conns_num;      /* current # of connections */
+       unsigned short          vlan_id;        /* vlan id of link group */
+
+       struct list_head        sndbufs[SMC_RMBE_SIZES];/* tx buffers */
+       rwlock_t                sndbufs_lock;   /* protects tx buffers */
+       struct list_head        rmbs[SMC_RMBE_SIZES];   /* rx buffers */
+       rwlock_t                rmbs_lock;      /* protects rx buffers */
+       struct smc_rtoken       rtokens[SMC_RMBS_PER_LGR_MAX]
+                                      [SMC_LINKS_PER_LGR_MAX];
+                                               /* remote addr/key pairs */
+       unsigned long           rtokens_used_mask[BITS_TO_LONGS(
+                                                       SMC_RMBS_PER_LGR_MAX)];
+                                               /* used rtoken elements */
+
+       u8                      id[SMC_LGR_ID_SIZE];    /* unique lgr id */
+       struct delayed_work     free_work;      /* delayed freeing of an lgr */
+       bool                    sync_err;       /* lgr no longer fits to peer */
+};
+
+/* Find the connection associated with the given alert token in the link group.
+ * To use rbtrees we have to implement our own search core.
+ * Requires @conns_lock
+ * @token      alert token to search for
+ * @lgr        link group to search in
+ * Returns connection associated with token if found, NULL otherwise.
+ */
+static inline struct smc_connection *smc_lgr_find_conn(
+       u32 token, struct smc_link_group *lgr)
+{
+       struct smc_connection *res = NULL;
+       struct rb_node *node;
+
+       node = lgr->conns_all.rb_node;
+       while (node) {
+               struct smc_connection *cur = rb_entry(node,
+                                       struct smc_connection, alert_node);
+
+               if (cur->alert_token_local > token) {
+                       node = node->rb_left;
+               } else {
+                       if (cur->alert_token_local < token) {
+                               node = node->rb_right;
+                       } else {
+                               res = cur;
+                               break;
+                       }
+               }
+       }
+
+       return res;
+}
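
smc_lgr_find_conn() is the lookup half of the usual open-coded kernel rbtree
pattern, keyed and ordered by alert_token_local. The registration side
(smc_lgr_register_conn() in smc_core.c, not shown in this hunk) presumably
descends the same way and links the new node in; a sketch of that insert
path using the standard rb_link_node()/rb_insert_color() idiom, ordered
consistently with the search above:

        static void smc_lgr_add_conn_sketch(struct smc_connection *conn,
                                            struct smc_link_group *lgr)
        {
                struct rb_node **link = &lgr->conns_all.rb_node;
                struct rb_node *parent = NULL;

                while (*link) {
                        struct smc_connection *cur = rb_entry(*link,
                                        struct smc_connection, alert_node);

                        parent = *link;
                        if (cur->alert_token_local > conn->alert_token_local)
                                link = &parent->rb_left;
                        else
                                link = &parent->rb_right;
                }
                rb_link_node(&conn->alert_node, parent, link);
                rb_insert_color(&conn->alert_node, &lgr->conns_all);
        }
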
+
+struct smc_sock;
+struct smc_clc_msg_accept_confirm;
+
+void smc_lgr_free(struct smc_link_group *lgr);
+void smc_lgr_terminate(struct smc_link_group *lgr);
+int smc_sndbuf_create(struct smc_sock *smc);
+int smc_rmb_create(struct smc_sock *smc);
+int smc_rmb_rtoken_handling(struct smc_connection *conn,
+                           struct smc_clc_msg_accept_confirm *clc);
+
+#endif
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
new file mode 100644 (file)
index 0000000..d2d01cf
--- /dev/null
@@ -0,0 +1,215 @@
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Monitoring SMC transport protocol sockets
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/sock_diag.h>
+#include <linux/inet_diag.h>
+#include <linux/smc_diag.h>
+#include <net/netlink.h>
+#include <net/smc.h>
+
+#include "smc.h"
+#include "smc_core.h"
+
+static void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw)
+{
+       sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
+               be16_to_cpu(((__be16 *)gid_raw)[0]),
+               be16_to_cpu(((__be16 *)gid_raw)[1]),
+               be16_to_cpu(((__be16 *)gid_raw)[2]),
+               be16_to_cpu(((__be16 *)gid_raw)[3]),
+               be16_to_cpu(((__be16 *)gid_raw)[4]),
+               be16_to_cpu(((__be16 *)gid_raw)[5]),
+               be16_to_cpu(((__be16 *)gid_raw)[6]),
+               be16_to_cpu(((__be16 *)gid_raw)[7]));
+}
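
smc_gid_be16_convert() renders the 16 raw GID bytes as eight colon-separated
16-bit big-endian groups, so the destination must hold at least
8 * 4 + 7 + 1 = 40 bytes (the uapi gid fields filled in below are assumed to
be sized accordingly). With illustrative input bytes:

        __u8 buf[40];   /* 32 hex digits + 7 colons + NUL */

        smc_gid_be16_convert(buf, gid_raw);
        /* buf now reads e.g. "fe80:0000:0000:0000:9abc:deff:fe12:3456" */
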
+
+static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
+{
+       struct smc_sock *smc = smc_sk(sk);
+
+       r->diag_family = sk->sk_family;
+       if (!smc->clcsock)
+               return;
+       r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
+       r->id.idiag_dport = smc->clcsock->sk->sk_dport;
+       r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
+       sock_diag_save_cookie(sk, r->id.idiag_cookie);
+       memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+       memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+       r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
+       r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
+}
+
+static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
+                                  struct smc_diag_msg *r,
+                                  struct user_namespace *user_ns)
+{
+       if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown))
+               return 1;
+
+       r->diag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
+       r->diag_inode = sock_i_ino(sk);
+       return 0;
+}
+
+static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
+                          struct netlink_callback *cb,
+                          const struct smc_diag_req *req,
+                          struct nlattr *bc)
+{
+       struct smc_sock *smc = smc_sk(sk);
+       struct user_namespace *user_ns;
+       struct smc_diag_msg *r;
+       struct nlmsghdr *nlh;
+
+       nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+                       cb->nlh->nlmsg_type, sizeof(*r), NLM_F_MULTI);
+       if (!nlh)
+               return -EMSGSIZE;
+
+       r = nlmsg_data(nlh);
+       smc_diag_msg_common_fill(r, sk);
+       r->diag_state = sk->sk_state;
+       r->diag_fallback = smc->use_fallback;
+       user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk);
+       if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns))
+               goto errout;
+
+       if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) && smc->conn.lgr) {
+               struct smc_connection *conn = &smc->conn;
+               struct smc_diag_conninfo cinfo = {
+                       .token = conn->alert_token_local,
+                       .sndbuf_size = conn->sndbuf_size,
+                       .rmbe_size = conn->rmbe_size,
+                       .peer_rmbe_size = conn->peer_rmbe_size,
+
+                       .rx_prod.wrap = conn->local_rx_ctrl.prod.wrap,
+                       .rx_prod.count = conn->local_rx_ctrl.prod.count,
+                       .rx_cons.wrap = conn->local_rx_ctrl.cons.wrap,
+                       .rx_cons.count = conn->local_rx_ctrl.cons.count,
+
+                       .tx_prod.wrap = conn->local_tx_ctrl.prod.wrap,
+                       .tx_prod.count = conn->local_tx_ctrl.prod.count,
+                       .tx_cons.wrap = conn->local_tx_ctrl.cons.wrap,
+                       .tx_cons.count = conn->local_tx_ctrl.cons.count,
+
+                       .tx_prod_flags =
+                               *(u8 *)&conn->local_tx_ctrl.prod_flags,
+                       .tx_conn_state_flags =
+                               *(u8 *)&conn->local_tx_ctrl.conn_state_flags,
+                       .rx_prod_flags = *(u8 *)&conn->local_rx_ctrl.prod_flags,
+                       .rx_conn_state_flags =
+                               *(u8 *)&conn->local_rx_ctrl.conn_state_flags,
+
+                       .tx_prep.wrap = conn->tx_curs_prep.wrap,
+                       .tx_prep.count = conn->tx_curs_prep.count,
+                       .tx_sent.wrap = conn->tx_curs_sent.wrap,
+                       .tx_sent.count = conn->tx_curs_sent.count,
+                       .tx_fin.wrap = conn->tx_curs_fin.wrap,
+                       .tx_fin.count = conn->tx_curs_fin.count,
+               };
+
+               if (nla_put(skb, SMC_DIAG_CONNINFO, sizeof(cinfo), &cinfo) < 0)
+                       goto errout;
+       }
+
+       if ((req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) && smc->conn.lgr) {
+               struct smc_diag_lgrinfo linfo = {
+                       .role = smc->conn.lgr->role,
+                       .lnk[0].ibport = smc->conn.lgr->lnk[0].ibport,
+                       .lnk[0].link_id = smc->conn.lgr->lnk[0].link_id,
+               };
+
+               memcpy(linfo.lnk[0].ibname,
+                      smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
+                      sizeof(smc->conn.lgr->lnk[0].smcibdev->ibdev->name));
+               smc_gid_be16_convert(linfo.lnk[0].gid,
+                                    smc->conn.lgr->lnk[0].gid.raw);
+               smc_gid_be16_convert(linfo.lnk[0].peer_gid,
+                                    smc->conn.lgr->lnk[0].peer_gid);
+
+               if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0)
+                       goto errout;
+       }
+
+       nlmsg_end(skb, nlh);
+       return 0;
+
+errout:
+       nlmsg_cancel(skb, nlh);
+       return -EMSGSIZE;
+}
+
+static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct net *net = sock_net(skb->sk);
+       struct nlattr *bc = NULL;
+       struct hlist_head *head;
+       struct sock *sk;
+       int rc = 0;
+
+       read_lock(&smc_proto.h.smc_hash->lock);
+       head = &smc_proto.h.smc_hash->ht;
+       if (hlist_empty(head))
+               goto out;
+
+       sk_for_each(sk, head) {
+               if (!net_eq(sock_net(sk), net))
+                       continue;
+               rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc);
+               if (rc)
+                       break;
+       }
+
+out:
+       read_unlock(&smc_proto.h.smc_hash->lock);
+       return rc;
+}
+
+static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+{
+       struct net *net = sock_net(skb->sk);
+
+       if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
+           h->nlmsg_flags & NLM_F_DUMP) {
+               struct netlink_dump_control c = {
+                       .dump = smc_diag_dump,
+                       .min_dump_alloc = SKB_WITH_OVERHEAD(32768),
+               };
+
+               return netlink_dump_start(net->diag_nlsk, skb, h, &c);
+       }
+       return 0;
+}
+
+static const struct sock_diag_handler smc_diag_handler = {
+       .family = AF_SMC,
+       .dump = smc_diag_handler_dump,
+};
+
+static int __init smc_diag_init(void)
+{
+       return sock_diag_register(&smc_diag_handler);
+}
+
+static void __exit smc_diag_exit(void)
+{
+       sock_diag_unregister(&smc_diag_handler);
+}
+
+module_init(smc_diag_init);
+module_exit(smc_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 43 /* AF_SMC */);
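
Userspace reaches this dumper through the generic sock_diag machinery: a
SOCK_DIAG_BY_FAMILY request with the family set to AF_SMC, sent over a
NETLINK_SOCK_DIAG socket. A minimal request sketch (error handling and the
reply-parsing loop are omitted; struct smc_diag_req comes from the uapi
header added by this patch set):

        #include <linux/netlink.h>
        #include <linux/sock_diag.h>
        #include <linux/smc_diag.h>
        #include <sys/socket.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
                struct {
                        struct nlmsghdr nlh;
                        struct smc_diag_req req;
                } msg = {
                        .nlh = {
                                .nlmsg_len = sizeof(msg),
                                .nlmsg_type = SOCK_DIAG_BY_FAMILY,
                                .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
                        },
                        .req = {
                                .diag_family = 43,      /* AF_SMC */
                                .diag_ext = 1 << (SMC_DIAG_CONNINFO - 1),
                        },
                };

                if (fd < 0)
                        return 1;
                send(fd, &msg, sizeof(msg), 0);
                /* recv() NLM_F_MULTI replies carrying struct smc_diag_msg */
                close(fd);
                return 0;
        }
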
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
new file mode 100644 (file)
index 0000000..e6743c0
--- /dev/null
@@ -0,0 +1,466 @@
+/*
+ *  Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ *  IB infrastructure:
+ *  Establish SMC-R as an Infiniband Client to be notified about added and
+ *  removed IB devices of type RDMA.
+ *  Determine device and port characteristics for these IB devices.
+ *
+ *  Copyright IBM Corp. 2016
+ *
+ *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#include <linux/random.h>
+#include <linux/workqueue.h>
+#include <rdma/ib_verbs.h>
+
+#include "smc_pnet.h"
+#include "smc_ib.h"
+#include "smc_core.h"
+#include "smc_wr.h"
+#include "smc.h"
+
+#define SMC_QP_MIN_RNR_TIMER           5
+#define SMC_QP_TIMEOUT                 15      /* local ack timeout = 4.096 usec * 2^15 */
+#define SMC_QP_RETRY_CNT               7       /* 7: infinite */
+#define SMC_QP_RNR_RETRY               7       /* 7: infinite */
+
+struct smc_ib_devices smc_ib_devices = {       /* smc-registered ib devices */
+       .lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
+       .list = LIST_HEAD_INIT(smc_ib_devices.list),
+};
+
+#define SMC_LOCAL_SYSTEMID_RESET       "%%%%%%%"
+
+u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET;
+                                       /* unique system identifier */
+
+int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
+                            struct ib_mr **mr)
+{
+       int rc;
+
+       if (*mr)
+               return 0; /* already done */
+
+       /* obtain unique key -
+        * next invocation of get_dma_mr returns a different key!
+        */
+       *mr = pd->device->get_dma_mr(pd, access_flags);
+       rc = PTR_ERR_OR_ZERO(*mr);
+       if (IS_ERR(*mr))
+               *mr = NULL;
+       return rc;
+}
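
The PTR_ERR_OR_ZERO()/IS_ERR() pairing used here (and again for the PD, QP
and CQs below) is shorthand for the usual error-pointer handling; written
out, it is equivalent to:

        if (IS_ERR(*mr)) {
                rc = PTR_ERR(*mr);      /* negative errno encoded in the
                                         * pointer */
                *mr = NULL;
        } else {
                rc = 0;
        }
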
+
+static int smc_ib_modify_qp_init(struct smc_link *lnk)
+{
+       struct ib_qp_attr qp_attr;
+
+       memset(&qp_attr, 0, sizeof(qp_attr));
+       qp_attr.qp_state = IB_QPS_INIT;
+       qp_attr.pkey_index = 0;
+       qp_attr.port_num = lnk->ibport;
+       qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE
+                               | IB_ACCESS_REMOTE_WRITE;
+       return ib_modify_qp(lnk->roce_qp, &qp_attr,
+                           IB_QP_STATE | IB_QP_PKEY_INDEX |
+                           IB_QP_ACCESS_FLAGS | IB_QP_PORT);
+}
+
+static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
+{
+       enum ib_qp_attr_mask qp_attr_mask =
+               IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
+               IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
+       struct ib_qp_attr qp_attr;
+
+       memset(&qp_attr, 0, sizeof(qp_attr));
+       qp_attr.qp_state = IB_QPS_RTR;
+       qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
+       qp_attr.ah_attr.port_num = lnk->ibport;
+       qp_attr.ah_attr.ah_flags = IB_AH_GRH;
+       qp_attr.ah_attr.grh.hop_limit = 1;
+       memcpy(&qp_attr.ah_attr.grh.dgid, lnk->peer_gid,
+              sizeof(lnk->peer_gid));
+       memcpy(&qp_attr.ah_attr.dmac, lnk->peer_mac,
+              sizeof(lnk->peer_mac));
+       qp_attr.dest_qp_num = lnk->peer_qpn;
+       qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
+       qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
+                                        * requests
+                                        */
+       qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;
+
+       return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
+}
+
+int smc_ib_modify_qp_rts(struct smc_link *lnk)
+{
+       struct ib_qp_attr qp_attr;
+
+       memset(&qp_attr, 0, sizeof(qp_attr));
+       qp_attr.qp_state = IB_QPS_RTS;
+       qp_attr.timeout = SMC_QP_TIMEOUT;       /* local ack timeout */
+       qp_attr.retry_cnt = SMC_QP_RETRY_CNT;   /* retry count */
+       qp_attr.rnr_retry = SMC_QP_RNR_RETRY;   /* RNR retries, 7=infinite */
+       qp_attr.sq_psn = lnk->psn_initial;      /* starting send packet seq # */
+       qp_attr.max_rd_atomic = 1;      /* # of outstanding RDMA reads and
+                                        * atomic ops allowed
+                                        */
+       return ib_modify_qp(lnk->roce_qp, &qp_attr,
+                           IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
+                           IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
+                           IB_QP_MAX_QP_RD_ATOMIC);
+}
+
+int smc_ib_modify_qp_reset(struct smc_link *lnk)
+{
+       struct ib_qp_attr qp_attr;
+
+       memset(&qp_attr, 0, sizeof(qp_attr));
+       qp_attr.qp_state = IB_QPS_RESET;
+       return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
+}
+
+int smc_ib_ready_link(struct smc_link *lnk)
+{
+       struct smc_link_group *lgr =
+               container_of(lnk, struct smc_link_group, lnk[0]);
+       int rc = 0;
+
+       rc = smc_ib_modify_qp_init(lnk);
+       if (rc)
+               goto out;
+
+       rc = smc_ib_modify_qp_rtr(lnk);
+       if (rc)
+               goto out;
+       smc_wr_remember_qp_attr(lnk);
+       rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
+                             IB_CQ_SOLICITED_MASK);
+       if (rc)
+               goto out;
+       rc = smc_wr_rx_post_init(lnk);
+       if (rc)
+               goto out;
+       smc_wr_remember_qp_attr(lnk);
+
+       if (lgr->role == SMC_SERV) {
+               rc = smc_ib_modify_qp_rts(lnk);
+               if (rc)
+                       goto out;
+               smc_wr_remember_qp_attr(lnk);
+       }
+out:
+       return rc;
+}
+
+/* process context wrapper for might_sleep smc_ib_remember_port_attr */
+static void smc_ib_port_event_work(struct work_struct *work)
+{
+       struct smc_ib_device *smcibdev = container_of(
+               work, struct smc_ib_device, port_event_work);
+       u8 port_idx;
+
+       for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
+               smc_ib_remember_port_attr(smcibdev, port_idx + 1);
+               clear_bit(port_idx, &smcibdev->port_event_mask);
+       }
+}
+
+/* can be called in IRQ context */
+static void smc_ib_global_event_handler(struct ib_event_handler *handler,
+                                       struct ib_event *ibevent)
+{
+       struct smc_ib_device *smcibdev;
+       u8 port_idx;
+
+       smcibdev = container_of(handler, struct smc_ib_device, event_handler);
+       if (!smc_pnet_find_ib(smcibdev->ibdev->name))
+               return;
+
+       switch (ibevent->event) {
+       case IB_EVENT_PORT_ERR:
+               port_idx = ibevent->element.port_num - 1;
+               set_bit(port_idx, &smcibdev->port_event_mask);
+               schedule_work(&smcibdev->port_event_work);
+               /* fall through */
+       case IB_EVENT_DEVICE_FATAL:
+               /* tbd in follow-on patch:
+                * abnormal close of corresponding connections
+                */
+               break;
+       case IB_EVENT_PORT_ACTIVE:
+               port_idx = ibevent->element.port_num - 1;
+               set_bit(port_idx, &smcibdev->port_event_mask);
+               schedule_work(&smcibdev->port_event_work);
+               break;
+       default:
+               break;
+       }
+}
+
+void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
+{
+       ib_dealloc_pd(lnk->roce_pd);
+       lnk->roce_pd = NULL;
+}
+
+int smc_ib_create_protection_domain(struct smc_link *lnk)
+{
+       int rc;
+
+       lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
+       rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
+       if (IS_ERR(lnk->roce_pd))
+               lnk->roce_pd = NULL;
+       return rc;
+}
+
+static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
+{
+       switch (ibevent->event) {
+       case IB_EVENT_DEVICE_FATAL:
+       case IB_EVENT_GID_CHANGE:
+       case IB_EVENT_PORT_ERR:
+       case IB_EVENT_QP_ACCESS_ERR:
+               /* tbd in follow-on patch:
+                * abnormal close of corresponding connections
+                */
+               break;
+       default:
+               break;
+       }
+}
+
+void smc_ib_destroy_queue_pair(struct smc_link *lnk)
+{
+       ib_destroy_qp(lnk->roce_qp);
+       lnk->roce_qp = NULL;
+}
+
+/* create a queue pair within the protection domain for a link */
+int smc_ib_create_queue_pair(struct smc_link *lnk)
+{
+       struct ib_qp_init_attr qp_attr = {
+               .event_handler = smc_ib_qp_event_handler,
+               .qp_context = lnk,
+               .send_cq = lnk->smcibdev->roce_cq_send,
+               .recv_cq = lnk->smcibdev->roce_cq_recv,
+               .srq = NULL,
+               .cap = {
+                       .max_send_wr = SMC_WR_BUF_CNT,
+                               /* include unsolicited rdma_writes as well,
+                                * there are max. 2 RDMA_WRITE per 1 WR_SEND
+                                */
+                       .max_recv_wr = SMC_WR_BUF_CNT * 3,
+                       .max_send_sge = SMC_IB_MAX_SEND_SGE,
+                       .max_recv_sge = 1,
+                       .max_inline_data = SMC_WR_TX_SIZE,
+               },
+               .sq_sig_type = IB_SIGNAL_REQ_WR,
+               .qp_type = IB_QPT_RC,
+       };
+       int rc;
+
+       lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
+       rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
+       if (IS_ERR(lnk->roce_qp))
+               lnk->roce_qp = NULL;
+       else
+               smc_wr_remember_qp_attr(lnk);
+       return rc;
+}
+
+/* map a new TX or RX buffer to DMA */
+int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size,
+                  struct smc_buf_desc *buf_slot,
+                  enum dma_data_direction data_direction)
+{
+       int rc = 0;
+
+       if (buf_slot->dma_addr[SMC_SINGLE_LINK])
+               return rc; /* already mapped */
+       buf_slot->dma_addr[SMC_SINGLE_LINK] =
+               ib_dma_map_single(smcibdev->ibdev, buf_slot->cpu_addr,
+                                 buf_size, data_direction);
+       if (ib_dma_mapping_error(smcibdev->ibdev,
+                                buf_slot->dma_addr[SMC_SINGLE_LINK]))
+               rc = -EIO;
+       return rc;
+}
+
+void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int buf_size,
+                     struct smc_buf_desc *buf_slot,
+                     enum dma_data_direction data_direction)
+{
+       if (!buf_slot->dma_addr[SMC_SINGLE_LINK])
+               return; /* already unmapped */
+       ib_dma_unmap_single(smcibdev->ibdev,
+                           buf_slot->dma_addr[SMC_SINGLE_LINK],
+                           buf_size, data_direction);
+       buf_slot->dma_addr[SMC_SINGLE_LINK] = 0;
+}
+
+static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport)
+{
+       struct net_device *ndev;
+       int rc;
+
+       rc = ib_query_gid(smcibdev->ibdev, ibport, 0,
+                         &smcibdev->gid[ibport - 1], NULL);
+       /* the SMC protocol requires specification of the RoCE MAC address;
+        * if the net_device cannot be determined, it can be derived from gid 0
+        */
+       ndev = smcibdev->ibdev->get_netdev(smcibdev->ibdev, ibport);
+       if (ndev) {
+               memcpy(&smcibdev->mac[ibport - 1][0], ndev->dev_addr, ETH_ALEN);
+               dev_put(ndev);  /* get_netdev() returns a referenced netdev */
+       } else if (!rc) {
+               memcpy(&smcibdev->mac[ibport - 1][0],
+                      &smcibdev->gid[ibport - 1].raw[8], 3);
+               memcpy(&smcibdev->mac[ibport - 1][3],
+                      &smcibdev->gid[ibport - 1].raw[13], 3);
+               smcibdev->mac[ibport - 1][0] &= ~0x02;
+       }
+       return rc;
+}
+
+/* Create an identifier unique for this instance of SMC-R.
+ * The MAC-address of the first active registered IB device
+ * plus a random 2-byte number is used to create this identifier.
+ * This name is delivered to the peer during connection initialization.
+ */
+static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
+                                               u8 ibport)
+{
+       memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
+              sizeof(smcibdev->mac[ibport - 1]));
+       get_random_bytes(&local_systemid[0], 2);
+}
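
Together with smc_ib_fill_gid_and_mac() above, this yields the following
layout (assuming SMC_SYSTEMID_LEN is 8, i.e. 2 random bytes plus ETH_ALEN):

        /* local_systemid layout:
         *   bytes 0..1: random, regenerated whenever the id is (re)defined
         *   bytes 2..7: MAC address of the first active registered port
         */
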
+
+bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
+{
+       return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
+}
+
+int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
+{
+       int rc;
+
+       memset(&smcibdev->pattr[ibport - 1], 0,
+              sizeof(smcibdev->pattr[ibport - 1]));
+       rc = ib_query_port(smcibdev->ibdev, ibport,
+                          &smcibdev->pattr[ibport - 1]);
+       if (rc)
+               goto out;
+       rc = smc_ib_fill_gid_and_mac(smcibdev, ibport);
+       if (rc)
+               goto out;
+       if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET,
+                    sizeof(local_systemid)) &&
+           smc_ib_port_active(smcibdev, ibport))
+               /* create unique system identifier */
+               smc_ib_define_local_systemid(smcibdev, ibport);
+out:
+       return rc;
+}
+
+long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
+{
+       struct ib_cq_init_attr cqattr = {
+               .cqe = SMC_WR_MAX_CQE, .comp_vector = 0 };
+       long rc;
+
+       smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
+                                             smc_wr_tx_cq_handler, NULL,
+                                             smcibdev, &cqattr);
+       rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
+       if (IS_ERR(smcibdev->roce_cq_send)) {
+               smcibdev->roce_cq_send = NULL;
+               return rc;
+       }
+       smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
+                                             smc_wr_rx_cq_handler, NULL,
+                                             smcibdev, &cqattr);
+       rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
+       if (IS_ERR(smcibdev->roce_cq_recv)) {
+               smcibdev->roce_cq_recv = NULL;
+               goto err;
+       }
+       INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
+                             smc_ib_global_event_handler);
+       ib_register_event_handler(&smcibdev->event_handler);
+       smc_wr_add_dev(smcibdev);
+       smcibdev->initialized = 1;
+       return rc;
+
+err:
+       ib_destroy_cq(smcibdev->roce_cq_send);
+       return rc;
+}
+
+static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
+{
+       if (!smcibdev->initialized)
+               return;
+       smc_wr_remove_dev(smcibdev);
+       ib_unregister_event_handler(&smcibdev->event_handler);
+       ib_destroy_cq(smcibdev->roce_cq_recv);
+       ib_destroy_cq(smcibdev->roce_cq_send);
+}
+
+static struct ib_client smc_ib_client;
+
+/* callback function for ib_register_client() */
+static void smc_ib_add_dev(struct ib_device *ibdev)
+{
+       struct smc_ib_device *smcibdev;
+
+       if (ibdev->node_type != RDMA_NODE_IB_CA)
+               return;
+
+       smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
+       if (!smcibdev)
+               return;
+
+       smcibdev->ibdev = ibdev;
+       INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
+
+       spin_lock(&smc_ib_devices.lock);
+       list_add_tail(&smcibdev->list, &smc_ib_devices.list);
+       spin_unlock(&smc_ib_devices.lock);
+       ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
+}
+
+/* callback function for ib_register_client() */
+static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
+{
+       struct smc_ib_device *smcibdev;
+
+       smcibdev = ib_get_client_data(ibdev, &smc_ib_client);
+       ib_set_client_data(ibdev, &smc_ib_client, NULL);
+       spin_lock(&smc_ib_devices.lock);
+       list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
+       spin_unlock(&smc_ib_devices.lock);
+       smc_pnet_remove_by_ibdev(smcibdev);
+       smc_ib_cleanup_per_ibdev(smcibdev);
+       kfree(smcibdev);
+}
+
+static struct ib_client smc_ib_client = {
+       .name   = "smc_ib",
+       .add    = smc_ib_add_dev,
+       .remove = smc_ib_remove_dev,
+};
+
+int __init smc_ib_register_client(void)
+{
+       return ib_register_client(&smc_ib_client);
+}
+
+void smc_ib_unregister_client(void)
+{
+       ib_unregister_client(&smc_ib_client);
+}
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
new file mode 100644 (file)
index 0000000..a95f74b
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ *  Definitions for IB environment
+ *
+ *  Copyright IBM Corp. 2016
+ *
+ *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#ifndef _SMC_IB_H
+#define _SMC_IB_H
+
+#include <linux/if_ether.h>
+#include <rdma/ib_verbs.h>
+
+#define SMC_MAX_PORTS                  2       /* Max # of ports */
+#define SMC_GID_SIZE                   sizeof(union ib_gid)
+
+#define SMC_IB_MAX_SEND_SGE            2
+
+struct smc_ib_devices {                        /* anchor for list of smc ib devices */
+       struct list_head        list;
+       spinlock_t              lock;   /* protects list of smc ib devices */
+};
+
+extern struct smc_ib_devices   smc_ib_devices; /* list of smc ib devices */
+
+struct smc_ib_device {                         /* ib-device infos for smc */
+       struct list_head        list;
+       struct ib_device        *ibdev;
+       struct ib_port_attr     pattr[SMC_MAX_PORTS];   /* ib dev. port attrs */
+       struct ib_event_handler event_handler;  /* global ib_event handler */
+       struct ib_cq            *roce_cq_send;  /* send completion queue */
+       struct ib_cq            *roce_cq_recv;  /* recv completion queue */
+       struct tasklet_struct   send_tasklet;   /* called by send cq handler */
+       struct tasklet_struct   recv_tasklet;   /* called by recv cq handler */
+       char                    mac[SMC_MAX_PORTS][ETH_ALEN];
+                                               /* mac address per port*/
+       union ib_gid            gid[SMC_MAX_PORTS]; /* gid per port */
+       u8                      initialized : 1; /* ib dev CQ, evthdl done */
+       struct work_struct      port_event_work;
+       unsigned long           port_event_mask;
+};
+
+struct smc_buf_desc;
+struct smc_link;
+
+int smc_ib_register_client(void) __init;
+void smc_ib_unregister_client(void);
+bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport);
+int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport);
+int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size,
+                  struct smc_buf_desc *buf_slot,
+                  enum dma_data_direction data_direction);
+void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int bufsize,
+                     struct smc_buf_desc *buf_slot,
+                     enum dma_data_direction data_direction);
+void smc_ib_dealloc_protection_domain(struct smc_link *lnk);
+int smc_ib_create_protection_domain(struct smc_link *lnk);
+void smc_ib_destroy_queue_pair(struct smc_link *lnk);
+int smc_ib_create_queue_pair(struct smc_link *lnk);
+int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
+                            struct ib_mr **mr);
+int smc_ib_ready_link(struct smc_link *lnk);
+int smc_ib_modify_qp_rts(struct smc_link *lnk);
+int smc_ib_modify_qp_reset(struct smc_link *lnk);
+long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
+
+#endif
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
new file mode 100644 (file)
index 0000000..c2f9165
--- /dev/null
@@ -0,0 +1,158 @@
+/*
+ *  Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ *  Link Layer Control (LLC)
+ *
+ *  For now, we only support the necessary "confirm link" functionality
+ *  which happens for the first RoCE link after successful CLC handshake.
+ *
+ *  Copyright IBM Corp. 2016
+ *
+ *  Author(s):  Klaus Wacker <Klaus.Wacker@de.ibm.com>
+ *              Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#include <net/tcp.h>
+#include <rdma/ib_verbs.h>
+
+#include "smc.h"
+#include "smc_core.h"
+#include "smc_clc.h"
+#include "smc_llc.h"
+
+/********************************** send *************************************/
+
+struct smc_llc_tx_pend {
+};
+
+/* handler for send/transmission completion of an LLC msg */
+static void smc_llc_tx_handler(struct smc_wr_tx_pend_priv *pend,
+                              struct smc_link *link,
+                              enum ib_wc_status wc_status)
+{
+       /* future work: handle wc_status error for recovery and failover */
+}
+
+/**
+ * smc_llc_add_pending_send() - add LLC control message to pending WQE transmits
+ * @link: Pointer to SMC link used for sending LLC control message.
+ * @wr_buf: Out variable returning pointer to work request payload buffer.
+ * @pend: Out variable returning pointer to private pending WR tracking.
+ *       It's the context the transmit complete handler will get.
+ *
+ * Reserves and pre-fills an entry for a pending work request send/tx.
+ * Used by mid-level smc_llc_send_msg() to prepare for later actual send/tx.
+ * Can sleep in smc_wr_tx_get_free_slot() (if not in softirq context).
+ *
+ * Return: 0 on success, otherwise an error value.
+ */
+static int smc_llc_add_pending_send(struct smc_link *link,
+                                   struct smc_wr_buf **wr_buf,
+                                   struct smc_wr_tx_pend_priv **pend)
+{
+       int rc;
+
+       rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, pend);
+       if (rc < 0)
+               return rc;
+       BUILD_BUG_ON_MSG(
+               sizeof(union smc_llc_msg) > SMC_WR_BUF_SIZE,
+               "must increase SMC_WR_BUF_SIZE to at least sizeof(union smc_llc_msg)");
+       BUILD_BUG_ON_MSG(
+               sizeof(union smc_llc_msg) != SMC_WR_TX_SIZE,
+               "must adapt SMC_WR_TX_SIZE to sizeof(union smc_llc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
+       BUILD_BUG_ON_MSG(
+               sizeof(struct smc_llc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
+               "must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_llc_tx_pend)");
+       return 0;
+}
+
+/* high-level API to send LLC confirm link */
+int smc_llc_send_confirm_link(struct smc_link *link, u8 mac[],
+                             union ib_gid *gid,
+                             enum smc_llc_reqresp reqresp)
+{
+       struct smc_link_group *lgr = container_of(link, struct smc_link_group,
+                                                 lnk[SMC_SINGLE_LINK]);
+       struct smc_llc_msg_confirm_link *confllc;
+       struct smc_wr_tx_pend_priv *pend;
+       struct smc_wr_buf *wr_buf;
+       int rc;
+
+       rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
+       if (rc)
+               return rc;
+       confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
+       memset(confllc, 0, sizeof(*confllc));
+       confllc->hd.common.type = SMC_LLC_CONFIRM_LINK;
+       confllc->hd.length = sizeof(struct smc_llc_msg_confirm_link);
+       if (reqresp == SMC_LLC_RESP)
+               confllc->hd.flags |= SMC_LLC_FLAG_RESP;
+       memcpy(confllc->sender_mac, mac, ETH_ALEN);
+       memcpy(confllc->sender_gid, gid, SMC_GID_SIZE);
+       hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
+       /* confllc->link_num = SMC_SINGLE_LINK; already done by memset above */
+       memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE);
+       confllc->max_links = SMC_LINKS_PER_LGR_MAX;
+       /* send llc message */
+       rc = smc_wr_tx_send(link, pend);
+       return rc;
+}
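
hton24() stores the 24-bit QP number (qp_num fits in 24 bits on the wire) in
network byte order; the helper itself lives in smc.h and is not part of this
hunk. A sketch consistent with its use here:

        /* sketch; the real helper is expected in net/smc/smc.h */
        static inline void hton24_sketch(u8 *net, u32 host)
        {
                __be32 t = cpu_to_be32(host);

                memcpy(net, ((u8 *)&t) + 1, 3); /* keep the low 24 bits */
        }
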
+
+/********************************* receive ***********************************/
+
+static void smc_llc_rx_confirm_link(struct smc_link *link,
+                                   struct smc_llc_msg_confirm_link *llc)
+{
+       struct smc_link_group *lgr;
+
+       lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]);
+       if (llc->hd.flags & SMC_LLC_FLAG_RESP) {
+               if (lgr->role == SMC_SERV)
+                       complete(&link->llc_confirm_resp);
+       } else {
+               if (lgr->role == SMC_CLNT) {
+                       link->link_id = llc->link_num;
+                       complete(&link->llc_confirm);
+               }
+       }
+}
+
+static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
+{
+       struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
+       union smc_llc_msg *llc = buf;
+
+       if (wc->byte_len < sizeof(*llc))
+               return; /* short message */
+       if (llc->raw.hdr.length != sizeof(*llc))
+               return; /* invalid message */
+       if (llc->raw.hdr.common.type == SMC_LLC_CONFIRM_LINK)
+               smc_llc_rx_confirm_link(link, &llc->confirm_link);
+}
+
+/***************************** init, exit, misc ******************************/
+
+static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
+       {
+               .handler        = smc_llc_rx_handler,
+               .type           = SMC_LLC_CONFIRM_LINK
+       },
+       {
+               .handler        = NULL,
+       }
+};
+
+int __init smc_llc_init(void)
+{
+       struct smc_wr_rx_handler *handler;
+       int rc = 0;
+
+       for (handler = smc_llc_rx_handlers; handler->handler; handler++) {
+               INIT_HLIST_NODE(&handler->list);
+               rc = smc_wr_rx_register_handler(handler);
+               if (rc)
+                       break;
+       }
+       return rc;
+}
diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h
new file mode 100644 (file)
index 0000000..b472f85
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ *  Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ *  Definitions for LLC (link layer control) message handling
+ *
+ *  Copyright IBM Corp. 2016
+ *
+ *  Author(s):  Klaus Wacker <Klaus.Wacker@de.ibm.com>
+ *              Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#ifndef SMC_LLC_H
+#define SMC_LLC_H
+
+#include "smc_wr.h"
+
+#define SMC_LLC_FLAG_RESP              0x80
+
+#define SMC_LLC_WAIT_FIRST_TIME                (5 * HZ)
+
+enum smc_llc_reqresp {
+       SMC_LLC_REQ,
+       SMC_LLC_RESP
+};
+
+enum smc_llc_msg_type {
+       SMC_LLC_CONFIRM_LINK            = 0x01,
+};
+
+#define SMC_LLC_DATA_LEN               40
+
+struct smc_llc_hdr {
+       struct smc_wr_rx_hdr common;
+       u8 length;      /* 44 */
+       u8 reserved;
+       u8 flags;
+};
+
+struct smc_llc_msg_confirm_link {      /* type 0x01 */
+       struct smc_llc_hdr hd;
+       u8 sender_mac[ETH_ALEN];
+       u8 sender_gid[SMC_GID_SIZE];
+       u8 sender_qp_num[3];
+       u8 link_num;
+       u8 link_uid[SMC_LGR_ID_SIZE];
+       u8 max_links;
+       u8 reserved[9];
+};
+
+union smc_llc_msg {
+       struct smc_llc_msg_confirm_link confirm_link;
+       struct {
+               struct smc_llc_hdr hdr;
+               u8 data[SMC_LLC_DATA_LEN];
+       } raw;
+};
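
Assuming struct smc_wr_rx_hdr (from smc_wr.h) is a single demultiplexing
type byte, the header contributes 4 bytes and the union totals
4 + SMC_LLC_DATA_LEN = 44 bytes; that is the value recorded in hd.length
above and pinned against SMC_WR_TX_SIZE by the BUILD_BUG_ON_MSG() checks in
smc_llc.c. A one-line restatement of that contract (sketch):

        _Static_assert(4 + SMC_LLC_DATA_LEN == 44,
                       "LLC wire format is 44 bytes");
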
+
+/* transmit */
+int smc_llc_send_confirm_link(struct smc_link *lnk, u8 mac[], union ib_gid *gid,
+                             enum smc_llc_reqresp reqresp);
+int smc_llc_init(void) __init;
+
+#endif /* SMC_LLC_H */
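
Because smc_llc_rx_handler() above drops any message whose hdr.length differs from sizeof(union smc_llc_msg), the structure layout must match the 44 bytes noted next to the length field: a 4-byte smc_llc_hdr plus SMC_LLC_DATA_LEN (40) payload bytes. A hedged compile-time check one could add (assumption, not in this patch):

/* Illustrative size assertions: 4-byte header + 40 data bytes == 44,
 * and the CONFIRM LINK message (6 + 16 + 3 + 1 + 4 + 1 + 9 = 40
 * payload bytes) fills the union exactly.
 */
static inline void smc_llc_assert_sizes(void)
{
	BUILD_BUG_ON(sizeof(union smc_llc_msg) != 44);
	BUILD_BUG_ON(sizeof(struct smc_llc_msg_confirm_link) !=
		     sizeof(union smc_llc_msg));
}
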
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
new file mode 100644 (file)
index 0000000..9d3e7fb
--- /dev/null
@@ -0,0 +1,534 @@
+/*
+ *  Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ *  Generic netlink support functions to configure an SMC-R PNET table
+ *
+ *  Copyright IBM Corp. 2016
+ *
+ *  Author(s):  Thomas Richter <tmricht@linux.vnet.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/if.h>
+#include <uapi/linux/smc.h>
+
+#include <rdma/ib_verbs.h>
+
+#include "smc_pnet.h"
+#include "smc_ib.h"
+
+#define SMC_MAX_PNET_ID_LEN    16      /* Max. length of PNET id */
+
+static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = {
+       [SMC_PNETID_NAME] = {
+               .type = NLA_NUL_STRING,
+               .len = SMC_MAX_PNET_ID_LEN - 1
+       },
+       [SMC_PNETID_ETHNAME] = {
+               .type = NLA_NUL_STRING,
+               .len = IFNAMSIZ - 1
+       },
+       [SMC_PNETID_IBNAME] = {
+               .type = NLA_NUL_STRING,
+               .len = IB_DEVICE_NAME_MAX - 1
+       },
+       [SMC_PNETID_IBPORT] = { .type = NLA_U8 }
+};
+
+static struct genl_family smc_pnet_nl_family;
+
+/**
+ * struct smc_pnettable - SMC PNET table anchor
+ * @lock: Lock protecting pnetlist
+ * @pnetlist: List of PNETIDs
+ */
+static struct smc_pnettable {
+       rwlock_t lock;
+       struct list_head pnetlist;
+} smc_pnettable = {
+       .pnetlist = LIST_HEAD_INIT(smc_pnettable.pnetlist),
+       .lock = __RW_LOCK_UNLOCKED(smc_pnettable.lock)
+};
+
+/**
+ * struct smc_pnetentry - pnet identifier name entry
+ * @list: List node
+ * @pnet_name: PNET identifier name
+ * @ndev: Pointer to network device
+ * @smcibdev: Pointer to IB device
+ * @ib_port: Port of the IB device
+ */
+struct smc_pnetentry {
+       struct list_head list;
+       char pnet_name[SMC_MAX_PNET_ID_LEN + 1];
+       struct net_device *ndev;
+       struct smc_ib_device *smcibdev;
+       u8 ib_port;
+};
+
+/* Check if two RDMA device entries are identical. Use device name and port
+ * number for comparison.
+ */
+static bool smc_pnet_same_ibname(struct smc_pnetentry *pnetelem, char *ibname,
+                                u8 ibport)
+{
+       return pnetelem->ib_port == ibport &&
+              !strncmp(pnetelem->smcibdev->ibdev->name, ibname,
+                       sizeof(pnetelem->smcibdev->ibdev->name));
+}
+
+/* Find a pnetid in the pnet table.
+ */
+static struct smc_pnetentry *smc_pnet_find_pnetid(char *pnet_name)
+{
+       struct smc_pnetentry *pnetelem, *found_pnetelem = NULL;
+
+       read_lock(&smc_pnettable.lock);
+       list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) {
+               if (!strncmp(pnetelem->pnet_name, pnet_name,
+                            sizeof(pnetelem->pnet_name))) {
+                       found_pnetelem = pnetelem;
+                       break;
+               }
+       }
+       read_unlock(&smc_pnettable.lock);
+       return found_pnetelem;
+}
+
+/* Remove a pnetid from the pnet table.
+ */
+static int smc_pnet_remove_by_pnetid(char *pnet_name)
+{
+       struct smc_pnetentry *pnetelem, *tmp_pe;
+       int rc = -ENOENT;
+
+       write_lock(&smc_pnettable.lock);
+       list_for_each_entry_safe(pnetelem, tmp_pe, &smc_pnettable.pnetlist,
+                                list) {
+               if (!strncmp(pnetelem->pnet_name, pnet_name,
+                            sizeof(pnetelem->pnet_name))) {
+                       list_del(&pnetelem->list);
+                       dev_put(pnetelem->ndev);
+                       kfree(pnetelem);
+                       rc = 0;
+                       break;
+               }
+       }
+       write_unlock(&smc_pnettable.lock);
+       return rc;
+}
+
+/* Remove a pnet table entry that references a given network device.
+ */
+static int smc_pnet_remove_by_ndev(struct net_device *ndev)
+{
+       struct smc_pnetentry *pnetelem, *tmp_pe;
+       int rc = -ENOENT;
+
+       write_lock(&smc_pnettable.lock);
+       list_for_each_entry_safe(pnetelem, tmp_pe, &smc_pnettable.pnetlist,
+                                list) {
+               if (pnetelem->ndev == ndev) {
+                       list_del(&pnetelem->list);
+                       dev_put(pnetelem->ndev);
+                       kfree(pnetelem);
+                       rc = 0;
+                       break;
+               }
+       }
+       write_unlock(&smc_pnettable.lock);
+       return rc;
+}
+
+/* Remove a pnet table entry that references a given IB device.
+ */
+int smc_pnet_remove_by_ibdev(struct smc_ib_device *ibdev)
+{
+       struct smc_pnetentry *pnetelem, *tmp_pe;
+       int rc = -ENOENT;
+
+       write_lock(&smc_pnettable.lock);
+       list_for_each_entry_safe(pnetelem, tmp_pe, &smc_pnettable.pnetlist,
+                                list) {
+               if (pnetelem->smcibdev == ibdev) {
+                       list_del(&pnetelem->list);
+                       dev_put(pnetelem->ndev);
+                       kfree(pnetelem);
+                       rc = 0;
+                       break;
+               }
+       }
+       write_unlock(&smc_pnettable.lock);
+       return rc;
+}
+
+/* Append a pnetid to the end of the pnet table if its name, Ethernet
+ * device, and IB device/port are not already used by another entry.
+ */
+static int smc_pnet_enter(struct smc_pnetentry *new_pnetelem)
+{
+       struct smc_pnetentry *pnetelem;
+       int rc = -EEXIST;
+
+       write_lock(&smc_pnettable.lock);
+       list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) {
+               if (!strncmp(pnetelem->pnet_name, new_pnetelem->pnet_name,
+                            sizeof(new_pnetelem->pnet_name)) ||
+                   !strncmp(pnetelem->ndev->name, new_pnetelem->ndev->name,
+                            sizeof(new_pnetelem->ndev->name)) ||
+                   smc_pnet_same_ibname(pnetelem,
+                                        new_pnetelem->smcibdev->ibdev->name,
+                                        new_pnetelem->ib_port))
+                       goto found;
+       }
+       list_add_tail(&new_pnetelem->list, &smc_pnettable.pnetlist);
+       rc = 0;
+found:
+       write_unlock(&smc_pnettable.lock);
+       return rc;
+}
+
+/* A pnetid may be at most 16 characters long.
+ * Valid characters are (single-byte character set) a-z, A-Z and 0-9;
+ * lower case letters are converted to upper case.
+ * Leading and trailing blanks are stripped; interior blanks are rejected.
+ */
+static bool smc_pnetid_valid(const char *pnet_name, char *pnetid)
+{
+       char *bf = skip_spaces(pnet_name);
+       size_t len = strlen(bf);
+       char *end = bf + len;
+
+       if (!len)
+               return false;
+       while (--end >= bf && isspace(*end))
+               ;
+       if (end - bf >= SMC_MAX_PNET_ID_LEN)
+               return false;
+       while (bf <= end) {
+               if (!isalnum(*bf))
+                       return false;
+               *pnetid++ = islower(*bf) ? toupper(*bf) : *bf;
+               bf++;
+       }
+       *pnetid = '\0';
+       return true;
+}
+
+/* Find an InfiniBand device by name. Returns NULL if no such device exists. */
+struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
+{
+       struct smc_ib_device *ibdev;
+
+       spin_lock(&smc_ib_devices.lock);
+       list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
+               if (!strncmp(ibdev->ibdev->name, ib_name,
+                            sizeof(ibdev->ibdev->name))) {
+                       goto out;
+               }
+       }
+       ibdev = NULL;
+out:
+       spin_unlock(&smc_ib_devices.lock);
+       return ibdev;
+}
+
+/* Parse the supplied netlink attributes and fill a pnetentry structure.
+ * For Ethernet and InfiniBand device names, verify that the devices exist.
+ */
+static int smc_pnet_fill_entry(struct net *net, struct smc_pnetentry *pnetelem,
+                              struct nlattr *tb[])
+{
+       char *string, *ibname = NULL;
+       int rc = 0;
+
+       memset(pnetelem, 0, sizeof(*pnetelem));
+       INIT_LIST_HEAD(&pnetelem->list);
+       if (tb[SMC_PNETID_NAME]) {
+               string = (char *)nla_data(tb[SMC_PNETID_NAME]);
+               if (!smc_pnetid_valid(string, pnetelem->pnet_name)) {
+                       rc = -EINVAL;
+                       goto error;
+               }
+       }
+       if (tb[SMC_PNETID_ETHNAME]) {
+               string = (char *)nla_data(tb[SMC_PNETID_ETHNAME]);
+               pnetelem->ndev = dev_get_by_name(net, string);
+               if (!pnetelem->ndev)
+                       return -ENOENT;
+       }
+       if (tb[SMC_PNETID_IBNAME]) {
+               ibname = (char *)nla_data(tb[SMC_PNETID_IBNAME]);
+               ibname = strim(ibname);
+               pnetelem->smcibdev = smc_pnet_find_ib(ibname);
+               if (!pnetelem->smcibdev) {
+                       rc = -ENOENT;
+                       goto error;
+               }
+       }
+       if (tb[SMC_PNETID_IBPORT]) {
+               pnetelem->ib_port = nla_get_u8(tb[SMC_PNETID_IBPORT]);
+               if (pnetelem->ib_port > SMC_MAX_PORTS) {
+                       rc = -EINVAL;
+                       goto error;
+               }
+       }
+       return 0;
+
+error:
+       if (pnetelem->ndev)
+               dev_put(pnetelem->ndev);
+       return rc;
+}
+
+/* Convert an smc_pnetentry to a netlink attribute sequence */
+static int smc_pnet_set_nla(struct sk_buff *msg, struct smc_pnetentry *pnetelem)
+{
+       if (nla_put_string(msg, SMC_PNETID_NAME, pnetelem->pnet_name) ||
+           nla_put_string(msg, SMC_PNETID_ETHNAME, pnetelem->ndev->name) ||
+           nla_put_string(msg, SMC_PNETID_IBNAME,
+                          pnetelem->smcibdev->ibdev->name) ||
+           nla_put_u8(msg, SMC_PNETID_IBPORT, pnetelem->ib_port))
+               return -1;
+       return 0;
+}
+
+/* Retrieve one PNETID entry */
+static int smc_pnet_get(struct sk_buff *skb, struct genl_info *info)
+{
+       struct smc_pnetentry *pnetelem;
+       struct sk_buff *msg;
+       void *hdr;
+       int rc;
+
+       pnetelem = smc_pnet_find_pnetid(
+                               (char *)nla_data(info->attrs[SMC_PNETID_NAME]));
+       if (!pnetelem)
+               return -ENOENT;
+       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       if (!msg)
+               return -ENOMEM;
+
+       hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
+                         &smc_pnet_nl_family, 0, SMC_PNETID_GET);
+       if (!hdr) {
+               rc = -EMSGSIZE;
+               goto err_out;
+       }
+
+       if (smc_pnet_set_nla(msg, pnetelem)) {
+               rc = -ENOBUFS;
+               goto err_out;
+       }
+
+       genlmsg_end(msg, hdr);
+       return genlmsg_reply(msg, info);
+
+err_out:
+       nlmsg_free(msg);
+       return rc;
+}
+
+static int smc_pnet_add(struct sk_buff *skb, struct genl_info *info)
+{
+       struct net *net = genl_info_net(info);
+       struct smc_pnetentry *pnetelem;
+       int rc;
+
+       pnetelem = kzalloc(sizeof(*pnetelem), GFP_KERNEL);
+       if (!pnetelem)
+               return -ENOMEM;
+       rc = smc_pnet_fill_entry(net, pnetelem, info->attrs);
+       if (!rc)
+               rc = smc_pnet_enter(pnetelem);
+       if (rc) {
+               kfree(pnetelem);
+               return rc;
+       }
+       rc = smc_ib_remember_port_attr(pnetelem->smcibdev, pnetelem->ib_port);
+       if (rc)
+               smc_pnet_remove_by_pnetid(pnetelem->pnet_name);
+       return rc;
+}
+
+static int smc_pnet_del(struct sk_buff *skb, struct genl_info *info)
+{
+       return smc_pnet_remove_by_pnetid(
+                               (char *)nla_data(info->attrs[SMC_PNETID_NAME]));
+}
+
+static int smc_pnet_dump_start(struct netlink_callback *cb)
+{
+       cb->args[0] = 0;
+       return 0;
+}
+
+static int smc_pnet_dumpinfo(struct sk_buff *skb,
+                            u32 portid, u32 seq, u32 flags,
+                            struct smc_pnetentry *pnetelem)
+{
+       void *hdr;
+
+       hdr = genlmsg_put(skb, portid, seq, &smc_pnet_nl_family,
+                         flags, SMC_PNETID_GET);
+       if (!hdr)
+               return -ENOMEM;
+       if (smc_pnet_set_nla(skb, pnetelem) < 0) {
+               genlmsg_cancel(skb, hdr);
+               return -EMSGSIZE;
+       }
+       genlmsg_end(skb, hdr);
+       return 0;
+}
+
+static int smc_pnet_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct smc_pnetentry *pnetelem;
+       int idx = 0;
+
+       read_lock(&smc_pnettable.lock);
+       list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) {
+               if (idx++ < cb->args[0])
+                       continue;
+               if (smc_pnet_dumpinfo(skb, NETLINK_CB(cb->skb).portid,
+                                     cb->nlh->nlmsg_seq, NLM_F_MULTI,
+                                     pnetelem)) {
+                       --idx;
+                       break;
+               }
+       }
+       cb->args[0] = idx;
+       read_unlock(&smc_pnettable.lock);
+       return skb->len;
+}
+
+/* Remove and free all pnetid entries from the pnet table.
+ */
+static int smc_pnet_flush(struct sk_buff *skb, struct genl_info *info)
+{
+       struct smc_pnetentry *pnetelem, *tmp_pe;
+
+       write_lock(&smc_pnettable.lock);
+       list_for_each_entry_safe(pnetelem, tmp_pe, &smc_pnettable.pnetlist,
+                                list) {
+               list_del(&pnetelem->list);
+               dev_put(pnetelem->ndev);
+               kfree(pnetelem);
+       }
+       write_unlock(&smc_pnettable.lock);
+       return 0;
+}
+
+/* SMC_PNETID generic netlink operation definition */
+static const struct genl_ops smc_pnet_ops[] = {
+       {
+               .cmd = SMC_PNETID_GET,
+               .flags = GENL_ADMIN_PERM,
+               .policy = smc_pnet_policy,
+               .doit = smc_pnet_get,
+               .dumpit = smc_pnet_dump,
+               .start = smc_pnet_dump_start
+       },
+       {
+               .cmd = SMC_PNETID_ADD,
+               .flags = GENL_ADMIN_PERM,
+               .policy = smc_pnet_policy,
+               .doit = smc_pnet_add
+       },
+       {
+               .cmd = SMC_PNETID_DEL,
+               .flags = GENL_ADMIN_PERM,
+               .policy = smc_pnet_policy,
+               .doit = smc_pnet_del
+       },
+       {
+               .cmd = SMC_PNETID_FLUSH,
+               .flags = GENL_ADMIN_PERM,
+               .policy = smc_pnet_policy,
+               .doit = smc_pnet_flush
+       }
+};
+
+/* SMC_PNETID family definition */
+static struct genl_family smc_pnet_nl_family = {
+       .hdrsize = 0,
+       .name = SMCR_GENL_FAMILY_NAME,
+       .version = SMCR_GENL_FAMILY_VERSION,
+       .maxattr = SMC_PNETID_MAX,
+       .netnsok = true,
+       .module = THIS_MODULE,
+       .ops = smc_pnet_ops,
+       .n_ops =  ARRAY_SIZE(smc_pnet_ops)
+};
+
+static int smc_pnet_netdev_event(struct notifier_block *this,
+                                unsigned long event, void *ptr)
+{
+       struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+
+       switch (event) {
+       case NETDEV_REBOOT:
+       case NETDEV_UNREGISTER:
+               smc_pnet_remove_by_ndev(event_dev);
+       default:
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block smc_netdev_notifier = {
+       .notifier_call = smc_pnet_netdev_event
+};
+
+int __init smc_pnet_init(void)
+{
+       int rc;
+
+       rc = genl_register_family(&smc_pnet_nl_family);
+       if (rc)
+               return rc;
+       rc = register_netdevice_notifier(&smc_netdev_notifier);
+       if (rc)
+               genl_unregister_family(&smc_pnet_nl_family);
+       return rc;
+}
+
+void smc_pnet_exit(void)
+{
+       smc_pnet_flush(NULL, NULL);
+       unregister_netdevice_notifier(&smc_netdev_notifier);
+       genl_unregister_family(&smc_pnet_nl_family);
+}
+
+/* PNET table lookup for a given sock:
+ * Determine the ib_device and port that belong to the Ethernet interface
+ * used by the internal TCP socket.
+ */
+void smc_pnet_find_roce_resource(struct sock *sk,
+                                struct smc_ib_device **smcibdev, u8 *ibport)
+{
+       struct dst_entry *dst = sk_dst_get(sk);
+       struct smc_pnetentry *pnetelem;
+
+       *smcibdev = NULL;
+       *ibport = 0;
+
+       if (!dst)
+               return;
+       if (!dst->dev)
+               goto out_rel;
+       read_lock(&smc_pnettable.lock);
+       list_for_each_entry(pnetelem, &smc_pnettable.pnetlist, list) {
+               if (dst->dev == pnetelem->ndev) {
+                       *smcibdev = pnetelem->smcibdev;
+                       *ibport = pnetelem->ib_port;
+                       break;
+               }
+       }
+       read_unlock(&smc_pnettable.lock);
+out_rel:
+       dst_release(dst);
+}
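
All four operations registered in smc_pnet_ops require GENL_ADMIN_PERM, i.e. CAP_NET_ADMIN. A minimal userspace sketch of adding one table entry with libnl-3's generic-netlink API (illustrative only: it assumes libnl-genl-3 is available, takes the SMC_PNETID_* constants from the uapi header <linux/smc.h>, and skips reply parsing):

/* cc pnet_add.c $(pkg-config --cflags --libs libnl-genl-3.0) */
#include <errno.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/smc.h>

int smc_pnet_add_entry(const char *pnetid, const char *ethname,
		       const char *ibname, unsigned char ibport)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family, rc;

	if (!sk)
		return -ENOMEM;
	rc = genl_connect(sk);
	if (rc < 0)
		goto out;
	family = genl_ctrl_resolve(sk, SMCR_GENL_FAMILY_NAME);
	if (family < 0) {
		rc = family;
		goto out;
	}
	rc = -ENOMEM;
	msg = nlmsg_alloc();
	if (!msg)
		goto out;
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    SMC_PNETID_ADD, SMCR_GENL_FAMILY_VERSION);
	nla_put_string(msg, SMC_PNETID_NAME, pnetid);
	nla_put_string(msg, SMC_PNETID_ETHNAME, ethname);
	nla_put_string(msg, SMC_PNETID_IBNAME, ibname);
	nla_put_u8(msg, SMC_PNETID_IBPORT, ibport);
	rc = nl_send_auto(sk, msg);	/* bytes sent, or negative error */
	nlmsg_free(msg);
out:
	nl_socket_free(sk);
	return rc < 0 ? rc : 0;
}
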
diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h
new file mode 100644 (file)
index 0000000..32ab3df
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ *  PNET table queries
+ *
+ *  Copyright IBM Corp. 2016
+ *
+ *  Author(s):  Thomas Richter <tmricht@linux.vnet.ibm.com>
+ */
+
+#ifndef _SMC_PNET_H
+#define _SMC_PNET_H
+
+struct smc_ib_device;
+
+int smc_pnet_init(void) __init;
+void smc_pnet_exit(void);
+int smc_pnet_remove_by_ibdev(struct smc_ib_device *ibdev);
+struct smc_ib_device *smc_pnet_find_ib(char *ib_name);
+void smc_pnet_find_roce_resource(struct sock *sk,
+                                struct smc_ib_device **smcibdev, u8 *ibport);
+
+#endif
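
A hedged caller-side sketch of how connection setup might consume smc_pnet_find_roce_resource() (the wrapper and its fallback policy are assumptions, not part of this header):

/* Map the internal TCP socket's egress Ethernet device to a RoCE
 * device/port via the PNET table; without a match the caller is
 * expected to fall back to plain TCP.
 */
static int smc_resolve_roce(struct sock *tcp_sk,
			    struct smc_ib_device **ibdev, u8 *ibport)
{
	smc_pnet_find_roce_resource(tcp_sk, ibdev, ibport);
	if (!*ibdev || !*ibport)
		return -ENOENT;	/* no PNET match */
	return 0;
}
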
diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
new file mode 100644 (file)
index 0000000..5d18787
--- /dev/null
@@ -0,0 +1,217 @@
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Manage RMBE
+ * copy new RMBE data into user space
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#include <linux/net.h>
+#include <linux/rcupdate.h>
+#include <net/sock.h>
+
+#include "smc.h"
+#include "smc_core.h"
+#include "smc_cdc.h"
+#include "smc_tx.h" /* smc_tx_consumer_update() */
+#include "smc_rx.h"
+
+/* callback implementation for sk.sk_data_ready()
+ * to wake up rcvbuf consumers that blocked in smc_rx_wait_data().
+ * Indirectly called by smc_cdc_msg_recv_action().
+ */
+static void smc_rx_data_ready(struct sock *sk)
+{
+       struct socket_wq *wq;
+
+       /* derived from sock_def_readable() */
+       /* called already in smc_listen_work() */
+       rcu_read_lock();
+       wq = rcu_dereference(sk->sk_wq);
+       if (skwq_has_sleeper(wq))
+               wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
+                                               POLLRDNORM | POLLRDBAND);
+       if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
+           (sk->sk_state == SMC_CLOSED))
+               sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
+       else
+               sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+       rcu_read_unlock();
+}
+
+/* blocks rcvbuf consumer until data is available, or timeout/interruption
+ *   @smc    smc socket
+ *   @timeo  pointer to remaining wait time in jiffies; pointing to the
+ *           value 0 means no timeout
+ * Returns:
+ * 1 if at least 1 byte is available in the rcvbuf, or on socket
+ *   error/shutdown.
+ * 0 otherwise (no data and the wait timed out or was interrupted).
+ */
+static int smc_rx_wait_data(struct smc_sock *smc, long *timeo)
+{
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
+       struct smc_connection *conn = &smc->conn;
+       struct sock *sk = &smc->sk;
+       int rc;
+
+       if (atomic_read(&conn->bytes_to_rcv))
+               return 1;
+       sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+       add_wait_queue(sk_sleep(sk), &wait);
+       rc = sk_wait_event(sk, timeo,
+                          sk->sk_err ||
+                          sk->sk_shutdown & RCV_SHUTDOWN ||
+                          sock_flag(sk, SOCK_DONE) ||
+                          atomic_read(&conn->bytes_to_rcv) ||
+                          smc_cdc_rxed_any_close_or_senddone(conn),
+                          &wait);
+       remove_wait_queue(sk_sleep(sk), &wait);
+       sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+       return rc;
+}
+
+/* rcvbuf consumer: main API called by socket layer.
+ * called under sk lock.
+ */
+int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
+                  int flags)
+{
+       size_t copylen, read_done = 0, read_remaining = len;
+       size_t chunk_len, chunk_off, chunk_len_sum;
+       struct smc_connection *conn = &smc->conn;
+       union smc_host_cursor cons;
+       int readable, chunk;
+       char *rcvbuf_base;
+       struct sock *sk;
+       long timeo;
+       int target;             /* Read at least this many bytes */
+       int rc;
+
+       if (unlikely(flags & MSG_ERRQUEUE))
+               return -EINVAL; /* future work for sk.sk_family == AF_SMC */
+       if (flags & MSG_OOB)
+               return -EINVAL; /* future work */
+
+       sk = &smc->sk;
+       if (sk->sk_state == SMC_LISTEN)
+               return -ENOTCONN;
+       timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+       target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+
+       msg->msg_namelen = 0;
+       /* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */
+       rcvbuf_base = conn->rmb_desc->cpu_addr;
+
+       do { /* while (read_remaining) */
+               if (read_done >= target)
+                       break;
+
+               if (atomic_read(&conn->bytes_to_rcv))
+                       goto copy;
+
+               if (read_done) {
+                       if (sk->sk_err ||
+                           sk->sk_state == SMC_CLOSED ||
+                           (sk->sk_shutdown & RCV_SHUTDOWN) ||
+                           !timeo ||
+                           signal_pending(current) ||
+                           smc_cdc_rxed_any_close_or_senddone(conn) ||
+                           conn->local_tx_ctrl.conn_state_flags.
+                           peer_conn_abort)
+                               break;
+               } else {
+                       if (sock_flag(sk, SOCK_DONE))
+                               break;
+                       if (sk->sk_err) {
+                               read_done = sock_error(sk);
+                               break;
+                       }
+                       if (sk->sk_shutdown & RCV_SHUTDOWN ||
+                           smc_cdc_rxed_any_close_or_senddone(conn) ||
+                           conn->local_tx_ctrl.conn_state_flags.
+                           peer_conn_abort)
+                               break;
+                       if (sk->sk_state == SMC_CLOSED) {
+                               if (!sock_flag(sk, SOCK_DONE)) {
+                                       /* This occurs when the user tries
+                                        * to read from a never-connected
+                                        * socket.
+                                        */
+                                       read_done = -ENOTCONN;
+                                       break;
+                               }
+                               break;
+                       }
+                       if (signal_pending(current)) {
+                               read_done = sock_intr_errno(timeo);
+                               break;
+                       }
+               }
+
+               if (!atomic_read(&conn->bytes_to_rcv)) {
+                       smc_rx_wait_data(smc, &timeo);
+                       continue;
+               }
+
+copy:
+               /* initialize variables for 1st iteration of subsequent loop */
+               /* could be just 1 byte, even after smc_rx_wait_data above */
+               readable = atomic_read(&conn->bytes_to_rcv);
+               /* not more than what user space asked for */
+               copylen = min_t(size_t, read_remaining, readable);
+               smc_curs_write(&cons,
+                              smc_curs_read(&conn->local_tx_ctrl.cons, conn),
+                              conn);
+               /* determine chunks where to read from rcvbuf */
+               /* either unwrapped case, or 1st chunk of wrapped case */
+               chunk_len = min_t(size_t,
+                                 copylen, conn->rmbe_size - cons.count);
+               chunk_len_sum = chunk_len;
+               chunk_off = cons.count;
+               for (chunk = 0; chunk < 2; chunk++) {
+                       if (!(flags & MSG_TRUNC)) {
+                               rc = memcpy_to_msg(msg, rcvbuf_base + chunk_off,
+                                                  chunk_len);
+                               if (rc) {
+                                       if (!read_done)
+                                               read_done = -EFAULT;
+                                       goto out;
+                               }
+                       }
+                       read_remaining -= chunk_len;
+                       read_done += chunk_len;
+
+                       if (chunk_len_sum == copylen)
+                               break; /* either on 1st or 2nd iteration */
+                       /* prepare next (== 2nd) iteration */
+                       chunk_len = copylen - chunk_len; /* remainder */
+                       chunk_len_sum += chunk_len;
+                       chunk_off = 0; /* modulo offset in recv ring buffer */
+               }
+
+               /* update cursors */
+               if (!(flags & MSG_PEEK)) {
+                       smc_curs_add(conn->rmbe_size, &cons, copylen);
+                       /* increased in recv tasklet smc_cdc_msg_rcv() */
+                       smp_mb__before_atomic();
+                       atomic_sub(copylen, &conn->bytes_to_rcv);
+                       /* guarantee 0 <= bytes_to_rcv <= rmbe_size */
+                       smp_mb__after_atomic();
+                       smc_curs_write(&conn->local_tx_ctrl.cons,
+                                      smc_curs_read(&cons, conn),
+                                      conn);
+                       /* send consumer cursor update if required */
+                       /* similar to advertising new TCP rcv_wnd if required */
+                       smc_tx_consumer_update(conn);
+               }
+       } while (read_remaining);
+out:
+       return read_done;
+}
+
+/* Initialize receive properties on connection establishment. NB: not __init! */
+void smc_rx_init(struct smc_sock *smc)
+{
+       smc->sk.sk_data_ready = smc_rx_data_ready;
+}
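
The receive path's two-iteration chunk loop is the standard split of a ring-buffer read at the wrap point. A self-contained userspace analogue (illustrative, not kernel code; it assumes copylen bytes are known to be available, as the kernel loop guarantees via bytes_to_rcv):

#include <string.h>
#include <stddef.h>

/* Copy copylen bytes starting at consumer offset cons_off out of a
 * ring of ring_size bytes, in at most two chunks; returns the new
 * consumer offset. Mirrors the chunk_len/chunk_off logic above.
 */
static size_t ring_read(char *dst, const char *ring, size_t ring_size,
			size_t cons_off, size_t copylen)
{
	size_t first = ring_size - cons_off < copylen ?
		       ring_size - cons_off : copylen;

	memcpy(dst, ring + cons_off, first);	    /* up to buffer end */
	memcpy(dst + first, ring, copylen - first); /* wrapped remainder */
	return (cons_off + copylen) % ring_size;
}
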
diff --git a/net/smc/smc_rx.h b/net/smc/smc_rx.h
new file mode 100644 (file)
index 0000000..b5b80e1
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Manage RMBE
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#ifndef SMC_RX_H
+#define SMC_RX_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+#include "smc.h"
+
+void smc_rx_init(struct smc_sock *smc);
+int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, size_t len,
+                  int flags);
+
+#endif /* SMC_RX_H */
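
Per the comment in smc_rx.c, smc_rx_recvmsg() must run under the socket lock; a hypothetical wrapper showing the expected calling convention in the AF_SMC recvmsg path (assumption, not part of this header):

static int smc_recvmsg_locked(struct smc_sock *smc, struct msghdr *msg,
			      size_t len, int flags)
{
	int rc;

	lock_sock(&smc->sk);		/* smc_rx_recvmsg() requires it */
	rc = smc_rx_recvmsg(smc, msg, len, flags);
	release_sock(&smc->sk);
	return rc;
}
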
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
new file mode 100644 (file)
index 0000000..6e73b28
--- /dev/null
@@ -0,0 +1,483 @@
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Manage send buffer.
+ * Producer:
+ * Copy user space data into send buffer, if send buffer space available.
+ * Consumer:
+ * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available.
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#include <linux/net.h>
+#include <linux/rcupdate.h>
+#include <linux/workqueue.h>
+#include <net/sock.h>
+
+#include "smc.h"
+#include "smc_wr.h"
+#include "smc_cdc.h"
+#include "smc_tx.h"
+
+/***************************** sndbuf producer *******************************/
+
+/* callback implementation for sk.sk_write_space()
+ * to wake up sndbuf producers that blocked in smc_tx_wait_memory().
+ * Called under the sk_socket lock.
+ */
+static void smc_tx_write_space(struct sock *sk)
+{
+       struct socket *sock = sk->sk_socket;
+       struct smc_sock *smc = smc_sk(sk);
+       struct socket_wq *wq;
+
+       /* similar to sk_stream_write_space */
+       if (atomic_read(&smc->conn.sndbuf_space) && sock) {
+               clear_bit(SOCK_NOSPACE, &sock->flags);
+               rcu_read_lock();
+               wq = rcu_dereference(sk->sk_wq);
+               if (skwq_has_sleeper(wq))
+                       wake_up_interruptible_poll(&wq->wait,
+                                                  POLLOUT | POLLWRNORM |
+                                                  POLLWRBAND);
+               if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
+                       sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
+               rcu_read_unlock();
+       }
+}
+
+/* Wake up sndbuf producers that blocked in smc_tx_wait_memory().
+ * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
+ */
+void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
+{
+       if (smc->sk.sk_socket &&
+           test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
+               smc->sk.sk_write_space(&smc->sk);
+}
+
+/* blocks sndbuf producer until at least one byte of free space is available */
+static int smc_tx_wait_memory(struct smc_sock *smc, int flags)
+{
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
+       struct smc_connection *conn = &smc->conn;
+       struct sock *sk = &smc->sk;
+       bool noblock;
+       long timeo;
+       int rc = 0;
+
+       /* similar to sk_stream_wait_memory */
+       timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+       noblock = timeo ? false : true;
+       add_wait_queue(sk_sleep(sk), &wait);
+       while (1) {
+               sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+               if (sk->sk_err ||
+                   (sk->sk_shutdown & SEND_SHUTDOWN) ||
+                   conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
+                       rc = -EPIPE;
+                       break;
+               }
+               if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
+                       rc = -ECONNRESET;
+                       break;
+               }
+               if (!timeo) {
+                       if (noblock)
+                               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+                       rc = -EAGAIN;
+                       break;
+               }
+               if (signal_pending(current)) {
+                       rc = sock_intr_errno(timeo);
+                       break;
+               }
+               sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+               if (atomic_read(&conn->sndbuf_space))
+                       break; /* at least 1 byte of free space available */
+               set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+               sk->sk_write_pending++;
+               sk_wait_event(sk, &timeo,
+                             sk->sk_err ||
+                             (sk->sk_shutdown & SEND_SHUTDOWN) ||
+                             smc_cdc_rxed_any_close_or_senddone(conn) ||
+                             atomic_read(&conn->sndbuf_space),
+                             &wait);
+               sk->sk_write_pending--;
+       }
+       remove_wait_queue(sk_sleep(sk), &wait);
+       return rc;
+}
+
+/* sndbuf producer: main API called by socket layer.
+ * called under sock lock.
+ */
+int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
+{
+       size_t copylen, send_done = 0, send_remaining = len;
+       size_t chunk_len, chunk_off, chunk_len_sum;
+       struct smc_connection *conn = &smc->conn;
+       union smc_host_cursor prep;
+       struct sock *sk = &smc->sk;
+       char *sndbuf_base;
+       int tx_cnt_prep;
+       int writespace;
+       int rc, chunk;
+
+       /* This should be in poll */
+       sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
+
+       if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
+               rc = -EPIPE;
+               goto out_err;
+       }
+
+       while (msg_data_left(msg)) {
+               if (sk->sk_state == SMC_INIT)
+                       return -ENOTCONN;
+               if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
+                   (smc->sk.sk_err == ECONNABORTED) ||
+                   conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
+                       return -EPIPE;
+               if (smc_cdc_rxed_any_close(conn))
+                       return send_done ?: -ECONNRESET;
+
+               if (!atomic_read(&conn->sndbuf_space)) {
+                       rc = smc_tx_wait_memory(smc, msg->msg_flags);
+                       if (rc) {
+                               if (send_done)
+                                       return send_done;
+                               goto out_err;
+                       }
+                       continue;
+               }
+
+               /* initialize variables for 1st iteration of subsequent loop */
+               /* could be just 1 byte, even after smc_tx_wait_memory above */
+               writespace = atomic_read(&conn->sndbuf_space);
+               /* not more than what user space asked for */
+               copylen = min_t(size_t, send_remaining, writespace);
+               /* determine start of sndbuf */
+               sndbuf_base = conn->sndbuf_desc->cpu_addr;
+               smc_curs_write(&prep,
+                              smc_curs_read(&conn->tx_curs_prep, conn),
+                              conn);
+               tx_cnt_prep = prep.count;
+               /* determine chunks where to write into sndbuf */
+               /* either unwrapped case, or 1st chunk of wrapped case */
+               chunk_len = min_t(size_t,
+                                 copylen, conn->sndbuf_size - tx_cnt_prep);
+               chunk_len_sum = chunk_len;
+               chunk_off = tx_cnt_prep;
+               for (chunk = 0; chunk < 2; chunk++) {
+                       rc = memcpy_from_msg(sndbuf_base + chunk_off,
+                                            msg, chunk_len);
+                       if (rc) {
+                               if (send_done)
+                                       return send_done;
+                               goto out_err;
+                       }
+                       send_done += chunk_len;
+                       send_remaining -= chunk_len;
+
+                       if (chunk_len_sum == copylen)
+                               break; /* either on 1st or 2nd iteration */
+                       /* prepare next (== 2nd) iteration */
+                       chunk_len = copylen - chunk_len; /* remainder */
+                       chunk_len_sum += chunk_len;
+                       chunk_off = 0; /* modulo offset in send ring buffer */
+               }
+               /* update cursors */
+               smc_curs_add(conn->sndbuf_size, &prep, copylen);
+               smc_curs_write(&conn->tx_curs_prep,
+                              smc_curs_read(&prep, conn),
+                              conn);
+               /* increased in send tasklet smc_cdc_tx_handler() */
+               smp_mb__before_atomic();
+               atomic_sub(copylen, &conn->sndbuf_space);
+               /* guarantee 0 <= sndbuf_space <= sndbuf_size */
+               smp_mb__after_atomic();
+               /* since we just produced more new data into sndbuf,
+                * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
+                */
+               smc_tx_sndbuf_nonempty(conn);
+       } /* while (msg_data_left(msg)) */
+
+       return send_done;
+
+out_err:
+       rc = sk_stream_error(sk, msg->msg_flags, rc);
+       /* make sure we wake any epoll edge trigger waiter */
+       if (unlikely(rc == -EAGAIN))
+               sk->sk_write_space(sk);
+       return rc;
+}
+
+/***************************** sndbuf consumer *******************************/
+
+/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
+static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
+                            int num_sges, struct ib_sge sges[])
+{
+       struct smc_link_group *lgr = conn->lgr;
+       struct ib_send_wr *failed_wr = NULL;
+       struct ib_rdma_wr rdma_wr;
+       struct smc_link *link;
+       int rc;
+
+       memset(&rdma_wr, 0, sizeof(rdma_wr));
+       link = &lgr->lnk[SMC_SINGLE_LINK];
+       rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link);
+       rdma_wr.wr.sg_list = sges;
+       rdma_wr.wr.num_sge = num_sges;
+       rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
+       rdma_wr.remote_addr =
+               lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
+               /* RMBE within RMB */
+               ((conn->peer_conn_idx - 1) * conn->peer_rmbe_size) +
+               /* offset within RMBE */
+               peer_rmbe_offset;
+       rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
+       rc = ib_post_send(link->roce_qp, &rdma_wr.wr, &failed_wr);
+       if (rc)
+               conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
+       return rc;
+}
+
+/* sndbuf consumer */
+static inline void smc_tx_advance_cursors(struct smc_connection *conn,
+                                         union smc_host_cursor *prod,
+                                         union smc_host_cursor *sent,
+                                         size_t len)
+{
+       smc_curs_add(conn->peer_rmbe_size, prod, len);
+       /* increased in recv tasklet smc_cdc_msg_rcv() */
+       smp_mb__before_atomic();
+       /* data in flight reduces usable snd_wnd */
+       atomic_sub(len, &conn->peer_rmbe_space);
+       /* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
+       smp_mb__after_atomic();
+       smc_curs_add(conn->sndbuf_size, sent, len);
+}
+
+/* sndbuf consumer: prepare all necessary (src & dst) chunks of the data
+ * transfer; the usable snd_wnd limits the amount transmitted
+ */
+static int smc_tx_rdma_writes(struct smc_connection *conn)
+{
+       size_t src_off, src_len, dst_off, dst_len; /* current chunk values */
+       size_t len, dst_len_sum, src_len_sum, dstchunk, srcchunk;
+       union smc_host_cursor sent, prep, prod, cons;
+       struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
+       struct smc_link_group *lgr = conn->lgr;
+       int to_send, rmbespace;
+       struct smc_link *link;
+       int num_sges;
+       int rc;
+
+       /* source: sndbuf */
+       smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn);
+       smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn);
+       /* cf. wmem_alloc - (snd_max - snd_una) */
+       to_send = smc_curs_diff(conn->sndbuf_size, &sent, &prep);
+       if (to_send <= 0)
+               return 0;
+
+       /* destination: RMBE */
+       /* cf. snd_wnd */
+       rmbespace = atomic_read(&conn->peer_rmbe_space);
+       if (rmbespace <= 0)
+               return 0;
+       smc_curs_write(&prod,
+                      smc_curs_read(&conn->local_tx_ctrl.prod, conn),
+                      conn);
+       smc_curs_write(&cons,
+                      smc_curs_read(&conn->local_rx_ctrl.cons, conn),
+                      conn);
+
+       /* if usable snd_wnd closes ask peer to advertise once it opens again */
+       conn->local_tx_ctrl.prod_flags.write_blocked = (to_send >= rmbespace);
+       /* cf. usable snd_wnd */
+       len = min(to_send, rmbespace);
+
+       /* initialize variables for first iteration of subsequent nested loop */
+       link = &lgr->lnk[SMC_SINGLE_LINK];
+       dst_off = prod.count;
+       if (prod.wrap == cons.wrap) {
+               /* the filled destination area is unwrapped,
+                * hence the available free destination space is wrapped
+                * and we need 2 destination chunks of sum len; start with 1st
+                * which is limited by what's available in sndbuf
+                */
+               dst_len = min_t(size_t,
+                               conn->peer_rmbe_size - prod.count, len);
+       } else {
+               /* the filled destination area is wrapped,
+                * hence the available free destination space is unwrapped
+                * and we need a single destination chunk of entire len
+                */
+               dst_len = len;
+       }
+       dst_len_sum = dst_len;
+       src_off = sent.count;
+       /* dst_len determines the maximum src_len */
+       if (sent.count + dst_len <= conn->sndbuf_size) {
+               /* unwrapped src case: single chunk of entire dst_len */
+               src_len = dst_len;
+       } else {
+               /* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
+               src_len = conn->sndbuf_size - sent.count;
+       }
+       src_len_sum = src_len;
+       for (dstchunk = 0; dstchunk < 2; dstchunk++) {
+               num_sges = 0;
+               for (srcchunk = 0; srcchunk < 2; srcchunk++) {
+                       sges[srcchunk].addr =
+                               conn->sndbuf_desc->dma_addr[SMC_SINGLE_LINK] +
+                               src_off;
+                       sges[srcchunk].length = src_len;
+                       sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
+                       num_sges++;
+                       src_off += src_len;
+                       if (src_off >= conn->sndbuf_size)
+                               src_off -= conn->sndbuf_size;
+                                               /* modulo in send ring */
+                       if (src_len_sum == dst_len)
+                               break; /* either on 1st or 2nd iteration */
+                       /* prepare next (== 2nd) iteration */
+                       src_len = dst_len - src_len; /* remainder */
+                       src_len_sum += src_len;
+               }
+               rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
+               if (rc)
+                       return rc;
+               if (dst_len_sum == len)
+                       break; /* either on 1st or 2nd iteration */
+               /* prepare next (== 2nd) iteration */
+               dst_off = 0; /* modulo offset in RMBE ring buffer */
+               dst_len = len - dst_len; /* remainder */
+               dst_len_sum += dst_len;
+               src_len = min_t(int,
+                               dst_len, conn->sndbuf_size - sent.count);
+               src_len_sum = src_len;
+       }
+
+       smc_tx_advance_cursors(conn, &prod, &sent, len);
+       /* update connection's cursors with advanced local cursors */
+       smc_curs_write(&conn->local_tx_ctrl.prod,
+                      smc_curs_read(&prod, conn),
+                      conn);
+                                                       /* dst: peer RMBE */
+       smc_curs_write(&conn->tx_curs_sent,
+                      smc_curs_read(&sent, conn),
+                      conn);
+                                                       /* src: local sndbuf */
+
+       return 0;
+}
+
+/* Wake up sndbuf consumers from any context (IRQ or process)
+ * since there is more data to transmit; the usable snd_wnd limits the
+ * amount transmitted
+ */
+int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
+{
+       struct smc_cdc_tx_pend *pend;
+       struct smc_wr_buf *wr_buf;
+       int rc;
+
+       spin_lock_bh(&conn->send_lock);
+       rc = smc_cdc_get_free_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], &wr_buf,
+                                  &pend);
+       if (rc < 0) {
+               if (rc == -EBUSY) {
+                       struct smc_sock *smc =
+                               container_of(conn, struct smc_sock, conn);
+
+                       if (smc->sk.sk_err == ECONNABORTED) {
+                               rc = sock_error(&smc->sk);
+                               goto out_unlock;
+                       }
+                       rc = 0;
+                       schedule_work(&conn->tx_work);
+               }
+               goto out_unlock;
+       }
+
+       rc = smc_tx_rdma_writes(conn);
+       if (rc) {
+               smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
+                                  (struct smc_wr_tx_pend_priv *)pend);
+               goto out_unlock;
+       }
+
+       rc = smc_cdc_msg_send(conn, wr_buf, pend);
+
+out_unlock:
+       spin_unlock_bh(&conn->send_lock);
+       return rc;
+}
+
+/* Wake up sndbuf consumers from process context
+ * since there is more data to transmit
+ */
+static void smc_tx_work(struct work_struct *work)
+{
+       struct smc_connection *conn = container_of(work,
+                                                  struct smc_connection,
+                                                  tx_work);
+       struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+
+       lock_sock(&smc->sk);
+       smc_tx_sndbuf_nonempty(conn);
+       release_sock(&smc->sk);
+}
+
+void smc_tx_consumer_update(struct smc_connection *conn)
+{
+       union smc_host_cursor cfed, cons;
+       struct smc_cdc_tx_pend *pend;
+       struct smc_wr_buf *wr_buf;
+       int to_confirm, rc;
+
+       smc_curs_write(&cons,
+                      smc_curs_read(&conn->local_tx_ctrl.cons, conn),
+                      conn);
+       smc_curs_write(&cfed,
+                      smc_curs_read(&conn->rx_curs_confirmed, conn),
+                      conn);
+       to_confirm = smc_curs_diff(conn->rmbe_size, &cfed, &cons);
+
+       if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
+           ((to_confirm > conn->rmbe_update_limit) &&
+            ((to_confirm > (conn->rmbe_size / 2)) ||
+             conn->local_rx_ctrl.prod_flags.write_blocked))) {
+               rc = smc_cdc_get_free_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
+                                          &wr_buf, &pend);
+               if (!rc)
+                       rc = smc_cdc_msg_send(conn, wr_buf, pend);
+               if (rc < 0) {
+                       schedule_work(&conn->tx_work);
+                       return;
+               }
+               smc_curs_write(&conn->rx_curs_confirmed,
+                              smc_curs_read(&conn->local_tx_ctrl.cons, conn),
+                              conn);
+               conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
+       }
+       if (conn->local_rx_ctrl.prod_flags.write_blocked &&
+           !atomic_read(&conn->bytes_to_rcv))
+               conn->local_rx_ctrl.prod_flags.write_blocked = 0;
+}
+
+/***************************** send initialize *******************************/
+
+/* Initialize send properties on connection establishment. NB: not __init! */
+void smc_tx_init(struct smc_sock *smc)
+{
+       smc->sk.sk_write_space = smc_tx_write_space;
+       INIT_WORK(&smc->conn.tx_work, smc_tx_work);
+       spin_lock_init(&smc->conn.send_lock);
+}
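
The tx path's bookkeeping rests on wrap-aware cursor arithmetic: smc_curs_diff() yields the number of bytes between two cursors modulo the buffer size, with the wrap counter distinguishing a full lap from an empty one. A standalone illustration of that arithmetic (assumed semantics, sketched here rather than this patch's actual implementation):

/* bytes from cursor "old" to cursor "new" in a ring of the given size,
 * assuming "new" is at most one wrap ahead of "old"
 */
static unsigned int curs_diff(unsigned int size,
			      unsigned int old_wrap, unsigned int old_count,
			      unsigned int new_wrap, unsigned int new_count)
{
	if (old_wrap != new_wrap)	/* new cursor passed the buffer end */
		return size - old_count + new_count;
	return new_count - old_count;	/* same lap: simple difference */
}
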
diff --git a/net/smc/smc_tx.h b/net/smc/smc_tx.h
new file mode 100644 (file)
index 0000000..1d6a0dc
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Manage send buffer
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
+ */
+
+#ifndef SMC_TX_H
+#define SMC_TX_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+#include "smc.h"
+#include "smc_cdc.h"
+
+static inline int smc_tx_prepared_sends(struct smc_connection *conn)
+{
+       union smc_host_cursor sent, prep;
+
+       smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn);
+       smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn);
+       return smc_curs_diff(conn->sndbuf_size, &sent, &prep);
+}
+
+void smc_tx_init(struct smc_sock *smc);
+int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len);
+int smc_tx_sndbuf_nonempty(struct smc_connection *conn);
+void smc_tx_sndbuf_nonfull(struct smc_sock *smc);
+void smc_tx_consumer_update(struct smc_connection *conn);
+
+#endif /* SMC_TX_H */
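
smc_tx_prepared_sends() above reports how many bytes sit in the sndbuf awaiting RDMA transfer; a hypothetical shutdown-path use (assumption, not in this patch):

/* flush any copied-but-unsent data before closing the connection */
static void smc_flush_prepared(struct smc_connection *conn)
{
	if (smc_tx_prepared_sends(conn))
		smc_tx_sndbuf_nonempty(conn);
}
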
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
new file mode 100644 (file)
index 0000000..eadf157
--- /dev/null
@@ -0,0 +1,614 @@
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Work Requests exploiting Infiniband API
+ *
+ * Work requests (WR) are submitted via ib_post_send() or ib_post_recv()
+ * and go to the reliably connected send or receive queue (RC SQ/RQ)
+ * respectively, becoming work queue entries (WQEs).
+ * While an SQ WR/WQE is pending, we track it until transmission completion.
+ * Through a send or receive completion queue (CQ) respectively,
+ * we get completion queue entries (CQEs) [aka work completions (WCs)].
+ * Since the CQ callback is called from IRQ context, we split work by using
+ * bottom halves implemented by tasklets.
+ *
+ * SMC uses this to exchange LLC (link layer control)
+ * and CDC (connection data control) messages.
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * Author(s):  Steffen Maier <maier@linux.vnet.ibm.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/hashtable.h>
+#include <linux/wait.h>
+#include <rdma/ib_verbs.h>
+#include <asm/div64.h>
+
+#include "smc.h"
+#include "smc_wr.h"
+
+#define SMC_WR_MAX_POLL_CQE 10 /* max. # of compl. queue elements in 1 poll */
+
+#define SMC_WR_RX_HASH_BITS 4
+static DEFINE_HASHTABLE(smc_wr_rx_hash, SMC_WR_RX_HASH_BITS);
+static DEFINE_SPINLOCK(smc_wr_rx_hash_lock);
+
+struct smc_wr_tx_pend {        /* control data for a pending send request */
+       u64                     wr_id;          /* work request id sent */
+       smc_wr_tx_handler       handler;
+       enum ib_wc_status       wc_status;      /* CQE status */
+       struct smc_link         *link;
+       u32                     idx;
+       struct smc_wr_tx_pend_priv priv;
+};
+
+/******************************** send queue *********************************/
+
+/*------------------------------- completion --------------------------------*/
+
+static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
+{
+       u32 i;
+
+       for (i = 0; i < link->wr_tx_cnt; i++) {
+               if (link->wr_tx_pends[i].wr_id == wr_id)
+                       return i;
+       }
+       return link->wr_tx_cnt;
+}
+
+static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
+{
+       struct smc_wr_tx_pend pnd_snd;
+       struct smc_link *link;
+       u32 pnd_snd_idx;
+       int i;
+
+       link = wc->qp->qp_context;
+       pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);
+       if (pnd_snd_idx == link->wr_tx_cnt)
+               return;
+       link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status;
+       memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx], sizeof(pnd_snd));
+       /* clear the full struct smc_wr_tx_pend including .priv */
+       memset(&link->wr_tx_pends[pnd_snd_idx], 0,
+              sizeof(link->wr_tx_pends[pnd_snd_idx]));
+       memset(&link->wr_tx_bufs[pnd_snd_idx], 0,
+              sizeof(link->wr_tx_bufs[pnd_snd_idx]));
+       if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
+               return;
+       if (wc->status) {
+               struct smc_link_group *lgr;
+
+               for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
+                       /* clear full struct smc_wr_tx_pend including .priv */
+                       memset(&link->wr_tx_pends[i], 0,
+                              sizeof(link->wr_tx_pends[i]));
+                       memset(&link->wr_tx_bufs[i], 0,
+                              sizeof(link->wr_tx_bufs[i]));
+                       clear_bit(i, link->wr_tx_mask);
+               }
+               /* terminate connections of this link group abnormally */
+               lgr = container_of(link, struct smc_link_group,
+                                  lnk[SMC_SINGLE_LINK]);
+               smc_lgr_terminate(lgr);
+       }
+       if (pnd_snd.handler)
+               pnd_snd.handler(&pnd_snd.priv, link, wc->status);
+       wake_up(&link->wr_tx_wait);
+}
+
+static void smc_wr_tx_tasklet_fn(unsigned long data)
+{
+       struct smc_ib_device *dev = (struct smc_ib_device *)data;
+       struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
+       int i = 0, rc;
+       int polled = 0;
+
+again:
+       polled++;
+       do {
+               rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
+               if (polled == 1) {
+                       ib_req_notify_cq(dev->roce_cq_send,
+                                        IB_CQ_NEXT_COMP |
+                                        IB_CQ_REPORT_MISSED_EVENTS);
+               }
+               if (!rc)
+                       break;
+               for (i = 0; i < rc; i++)
+                       smc_wr_tx_process_cqe(&wc[i]);
+       } while (rc > 0);
+       if (polled == 1)
+               goto again;
+}
+
+void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
+{
+       struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;
+
+       tasklet_schedule(&dev->send_tasklet);
+}
+
+/*---------------------------- request submission ---------------------------*/
+
+static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
+{
+       *idx = link->wr_tx_cnt;
+       for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
+               if (!test_and_set_bit(*idx, link->wr_tx_mask))
+                       return 0;
+       }
+       *idx = link->wr_tx_cnt;
+       return -EBUSY;
+}
+
+/**
+ * smc_wr_tx_get_free_slot() - returns buffer for message assembly,
+ *                     and sets info for pending transmit tracking
+ * @link:              Pointer to smc_link used to later send the message.
+ * @handler:           Send completion handler function pointer.
+ * @wr_buf:            Out value returns pointer to message buffer.
+ * @wr_pend_priv:      Out value returns pointer serving as handler context.
+ *
+ * Return: 0 on success, or -errno on error.
+ */
+int smc_wr_tx_get_free_slot(struct smc_link *link,
+                           smc_wr_tx_handler handler,
+                           struct smc_wr_buf **wr_buf,
+                           struct smc_wr_tx_pend_priv **wr_pend_priv)
+{
+       struct smc_wr_tx_pend *wr_pend;
+       struct ib_send_wr *wr_ib;
+       u64 wr_id;
+       u32 idx;
+       int rc;
+
+       *wr_buf = NULL;
+       *wr_pend_priv = NULL;
+       if (in_softirq()) {
+               rc = smc_wr_tx_get_free_slot_index(link, &idx);
+               if (rc)
+                       return rc;
+       } else {
+               rc = wait_event_interruptible_timeout(
+                       link->wr_tx_wait,
+                       (smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
+                       SMC_WR_TX_WAIT_FREE_SLOT_TIME);
+               if (!rc) {
+                       /* timeout - terminate connections */
+                       struct smc_link_group *lgr;
+
+                       lgr = container_of(link, struct smc_link_group,
+                                          lnk[SMC_SINGLE_LINK]);
+                       smc_lgr_terminate(lgr);
+                       return -EPIPE;
+               }
+               if (rc == -ERESTARTSYS)
+                       return -EINTR;
+               if (idx == link->wr_tx_cnt)
+                       return -EPIPE;
+       }
+       wr_id = smc_wr_tx_get_next_wr_id(link);
+       wr_pend = &link->wr_tx_pends[idx];
+       wr_pend->wr_id = wr_id;
+       wr_pend->handler = handler;
+       wr_pend->link = link;
+       wr_pend->idx = idx;
+       wr_ib = &link->wr_tx_ibs[idx];
+       wr_ib->wr_id = wr_id;
+       *wr_buf = &link->wr_tx_bufs[idx];
+       *wr_pend_priv = &wr_pend->priv;
+       return 0;
+}
+
+int smc_wr_tx_put_slot(struct smc_link *link,
+                      struct smc_wr_tx_pend_priv *wr_pend_priv)
+{
+       struct smc_wr_tx_pend *pend;
+
+       pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
+       if (pend->idx < link->wr_tx_cnt) {
+               /* clear the full struct smc_wr_tx_pend including .priv */
+               memset(&link->wr_tx_pends[pend->idx], 0,
+                      sizeof(link->wr_tx_pends[pend->idx]));
+               memset(&link->wr_tx_bufs[pend->idx], 0,
+                      sizeof(link->wr_tx_bufs[pend->idx]));
+               test_and_clear_bit(pend->idx, link->wr_tx_mask);
+               return 1;
+       }
+
+       return 0;
+}
+
+/* Send prepared WR slot via ib_post_send.
+ * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
+ */
+int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
+{
+       struct ib_send_wr *failed_wr = NULL;
+       struct smc_wr_tx_pend *pend;
+       int rc;
+
+       ib_req_notify_cq(link->smcibdev->roce_cq_send,
+                        IB_CQ_SOLICITED_MASK | IB_CQ_REPORT_MISSED_EVENTS);
+       pend = container_of(priv, struct smc_wr_tx_pend, priv);
+       rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx],
+                         &failed_wr);
+       if (rc)
+               smc_wr_tx_put_slot(link, priv);
+       return rc;
+}
+
+void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_rx_hdr_type,
+                            smc_wr_tx_filter filter,
+                            smc_wr_tx_dismisser dismisser,
+                            unsigned long data)
+{
+       struct smc_wr_tx_pend_priv *tx_pend;
+       struct smc_wr_rx_hdr *wr_rx;
+       int i;
+
+       for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
+               wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[i];
+               if (wr_rx->type != wr_rx_hdr_type)
+                       continue;
+               tx_pend = &link->wr_tx_pends[i].priv;
+               if (filter(tx_pend, data))
+                       dismisser(tx_pend);
+       }
+}
+
+bool smc_wr_tx_has_pending(struct smc_link *link, u8 wr_rx_hdr_type,
+                          smc_wr_tx_filter filter, unsigned long data)
+{
+       struct smc_wr_tx_pend_priv *tx_pend;
+       struct smc_wr_rx_hdr *wr_rx;
+       int i;
+
+       for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
+               wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[i];
+               if (wr_rx->type != wr_rx_hdr_type)
+                       continue;
+               tx_pend = &link->wr_tx_pends[i].priv;
+               if (filter(tx_pend, data))
+                       return true;
+       }
+       return false;
+}
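[Editor's note: filter and dismisser are caller-supplied callbacks matching the typedefs in smc_wr.h below. A user embeds its own pending-transmit context in the 32-byte priv area and matches on it. A minimal sketch, assuming a hypothetical context struct and header type constant:]

    /* hypothetical context; must fit in SMC_WR_TX_PEND_PRIV_SIZE bytes */
    struct smc_example_pend {
            u32 conn_idx;
    };

    static bool smc_example_filter(struct smc_wr_tx_pend_priv *priv,
                                   unsigned long data)
    {
            struct smc_example_pend *p = (struct smc_example_pend *)priv;

            return p->conn_idx == (u32)data; /* match one connection */
    }

    static void smc_example_dismisser(struct smc_wr_tx_pend_priv *priv)
    {
            /* release per-send resources tracked in priv */
    }

    /* usage: dismiss pending sends of one type for connection 7
     * (SMC_EXAMPLE_HDR_TYPE is a made-up constant)
     */
    smc_wr_tx_dismiss_slots(link, SMC_EXAMPLE_HDR_TYPE,
                            smc_example_filter, smc_example_dismisser, 7);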
+
+/****************************** receive queue ********************************/
+
+int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
+{
+       struct smc_wr_rx_handler *h_iter;
+       int rc = 0;
+
+       spin_lock(&smc_wr_rx_hash_lock);
+       hash_for_each_possible(smc_wr_rx_hash, h_iter, list, handler->type) {
+               if (h_iter->type == handler->type) {
+                       rc = -EEXIST;
+                       goto out_unlock;
+               }
+       }
+       hash_add(smc_wr_rx_hash, &handler->list, handler->type);
+out_unlock:
+       spin_unlock(&smc_wr_rx_hash_lock);
+       return rc;
+}
+
+/* Demultiplex a received work request to its handler based on message type.
+ * Relies on smc_wr_rx_hash having been completely filled before any IB WRs
+ * are posted, and on it not being modified afterwards, so it needs no lock.
+ */
+static inline void smc_wr_rx_demultiplex(struct ib_wc *wc)
+{
+       struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
+       struct smc_wr_rx_handler *handler;
+       struct smc_wr_rx_hdr *wr_rx;
+       u64 temp_wr_id;
+       u32 index;
+
+       if (wc->byte_len < sizeof(*wr_rx))
+               return; /* short message */
+       temp_wr_id = wc->wr_id;
+       index = do_div(temp_wr_id, link->wr_rx_cnt);
+       wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[index];
+       hash_for_each_possible(smc_wr_rx_hash, handler, list, wr_rx->type) {
+               if (handler->type == wr_rx->type)
+                       handler->handler(wc, wr_rx);
+       }
+}
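[Editor's note: the receive buffer is recovered from the 64-bit work-request id. The kernel's do_div() divides its first argument in place and returns the remainder, so the id serves as a monotonically increasing sequence number while id % wr_rx_cnt indexes the wr_rx_bufs ring. A small worked illustration with hypothetical values:]

    u64 temp_wr_id = 50;    /* wr_id as assigned by smc_wr_rx_post() */
    u32 index;

    /* with wr_rx_cnt == 48: do_div() leaves the quotient 1 in
     * temp_wr_id and returns the remainder 2, the ring index of
     * the buffer this completion refers to
     */
    index = do_div(temp_wr_id, 48);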
+
+static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
+{
+       struct smc_link *link;
+       int i;
+
+       for (i = 0; i < num; i++) {
+               link = wc[i].qp->qp_context;
+               if (wc[i].status == IB_WC_SUCCESS) {
+                       smc_wr_rx_demultiplex(&wc[i]);
+                       smc_wr_rx_post(link); /* refill WR RX */
+               } else {
+                       struct smc_link_group *lgr;
+
+                       /* handle status errors */
+                       switch (wc[i].status) {
+                       case IB_WC_RETRY_EXC_ERR:
+                       case IB_WC_RNR_RETRY_EXC_ERR:
+                       case IB_WC_WR_FLUSH_ERR:
+                               /* terminate connections of this link group
+                                * abnormally
+                                */
+                               lgr = container_of(link, struct smc_link_group,
+                                                  lnk[SMC_SINGLE_LINK]);
+                               smc_lgr_terminate(lgr);
+                               break;
+                       default:
+                               smc_wr_rx_post(link); /* refill WR RX */
+                               break;
+                       }
+               }
+       }
+}
+
+static void smc_wr_rx_tasklet_fn(unsigned long data)
+{
+       struct smc_ib_device *dev = (struct smc_ib_device *)data;
+       struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
+       int polled = 0;
+       int rc;
+
+again:
+       polled++;
+       do {
+               memset(&wc, 0, sizeof(wc));
+               rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
+               if (polled == 1) {
+                       ib_req_notify_cq(dev->roce_cq_recv,
+                                        IB_CQ_SOLICITED_MASK
+                                        | IB_CQ_REPORT_MISSED_EVENTS);
+               }
+               if (!rc)
+                       break;
+               smc_wr_rx_process_cqes(&wc[0], rc);
+       } while (rc > 0);
+       if (polled == 1)
+               goto again;
+}
+
+void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
+{
+       struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;
+
+       tasklet_schedule(&dev->recv_tasklet);
+}
+
+int smc_wr_rx_post_init(struct smc_link *link)
+{
+       u32 i;
+       int rc = 0;
+
+       for (i = 0; i < link->wr_rx_cnt; i++)
+               rc = smc_wr_rx_post(link);
+       return rc;
+}
+
+/***************************** init, exit, misc ******************************/
+
+void smc_wr_remember_qp_attr(struct smc_link *lnk)
+{
+       struct ib_qp_attr *attr = &lnk->qp_attr;
+       struct ib_qp_init_attr init_attr;
+
+       memset(attr, 0, sizeof(*attr));
+       memset(&init_attr, 0, sizeof(init_attr));
+       ib_query_qp(lnk->roce_qp, attr,
+                   IB_QP_STATE |
+                   IB_QP_CUR_STATE |
+                   IB_QP_PKEY_INDEX |
+                   IB_QP_PORT |
+                   IB_QP_QKEY |
+                   IB_QP_AV |
+                   IB_QP_PATH_MTU |
+                   IB_QP_TIMEOUT |
+                   IB_QP_RETRY_CNT |
+                   IB_QP_RNR_RETRY |
+                   IB_QP_RQ_PSN |
+                   IB_QP_ALT_PATH |
+                   IB_QP_MIN_RNR_TIMER |
+                   IB_QP_SQ_PSN |
+                   IB_QP_PATH_MIG_STATE |
+                   IB_QP_CAP |
+                   IB_QP_DEST_QPN,
+                   &init_attr);
+
+       lnk->wr_tx_cnt = min_t(size_t, SMC_WR_BUF_CNT,
+                              lnk->qp_attr.cap.max_send_wr);
+       lnk->wr_rx_cnt = min_t(size_t, SMC_WR_BUF_CNT * 3,
+                              lnk->qp_attr.cap.max_recv_wr);
+}
+
+static void smc_wr_init_sge(struct smc_link *lnk)
+{
+       u32 i;
+
+       for (i = 0; i < lnk->wr_tx_cnt; i++) {
+               lnk->wr_tx_sges[i].addr =
+                       lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
+               lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
+               lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
+               lnk->wr_tx_ibs[i].next = NULL;
+               lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
+               lnk->wr_tx_ibs[i].num_sge = 1;
+               lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
+               lnk->wr_tx_ibs[i].send_flags =
+                       IB_SEND_SIGNALED | IB_SEND_SOLICITED | IB_SEND_INLINE;
+       }
+       for (i = 0; i < lnk->wr_rx_cnt; i++) {
+               lnk->wr_rx_sges[i].addr =
+                       lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE;
+               lnk->wr_rx_sges[i].length = SMC_WR_BUF_SIZE;
+               lnk->wr_rx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
+               lnk->wr_rx_ibs[i].next = NULL;
+               lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[i];
+               lnk->wr_rx_ibs[i].num_sge = 1;
+       }
+}
+
+void smc_wr_free_link(struct smc_link *lnk)
+{
+       struct ib_device *ibdev;
+
+       memset(lnk->wr_tx_mask, 0,
+              BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
+
+       if (!lnk->smcibdev)
+               return;
+       ibdev = lnk->smcibdev->ibdev;
+
+       if (lnk->wr_rx_dma_addr) {
+               ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
+                                   SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
+                                   DMA_FROM_DEVICE);
+               lnk->wr_rx_dma_addr = 0;
+       }
+       if (lnk->wr_tx_dma_addr) {
+               ib_dma_unmap_single(ibdev, lnk->wr_tx_dma_addr,
+                                   SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
+                                   DMA_TO_DEVICE);
+               lnk->wr_tx_dma_addr = 0;
+       }
+}
+
+void smc_wr_free_link_mem(struct smc_link *lnk)
+{
+       kfree(lnk->wr_tx_pends);
+       lnk->wr_tx_pends = NULL;
+       kfree(lnk->wr_tx_mask);
+       lnk->wr_tx_mask = NULL;
+       kfree(lnk->wr_tx_sges);
+       lnk->wr_tx_sges = NULL;
+       kfree(lnk->wr_rx_sges);
+       lnk->wr_rx_sges = NULL;
+       kfree(lnk->wr_rx_ibs);
+       lnk->wr_rx_ibs = NULL;
+       kfree(lnk->wr_tx_ibs);
+       lnk->wr_tx_ibs = NULL;
+       kfree(lnk->wr_tx_bufs);
+       lnk->wr_tx_bufs = NULL;
+       kfree(lnk->wr_rx_bufs);
+       lnk->wr_rx_bufs = NULL;
+}
+
+int smc_wr_alloc_link_mem(struct smc_link *link)
+{
+       /* allocate link related memory */
+       link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL);
+       if (!link->wr_tx_bufs)
+               goto no_mem;
+       link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, SMC_WR_BUF_SIZE,
+                                  GFP_KERNEL);
+       if (!link->wr_rx_bufs)
+               goto no_mem_wr_tx_bufs;
+       link->wr_tx_ibs = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_ibs[0]),
+                                 GFP_KERNEL);
+       if (!link->wr_tx_ibs)
+               goto no_mem_wr_rx_bufs;
+       link->wr_rx_ibs = kcalloc(SMC_WR_BUF_CNT * 3,
+                                 sizeof(link->wr_rx_ibs[0]),
+                                 GFP_KERNEL);
+       if (!link->wr_rx_ibs)
+               goto no_mem_wr_tx_ibs;
+       link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
+                                  GFP_KERNEL);
+       if (!link->wr_tx_sges)
+               goto no_mem_wr_rx_ibs;
+       link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
+                                  sizeof(link->wr_rx_sges[0]),
+                                  GFP_KERNEL);
+       if (!link->wr_rx_sges)
+               goto no_mem_wr_tx_sges;
+       link->wr_tx_mask = kzalloc(
+               BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*link->wr_tx_mask),
+               GFP_KERNEL);
+       if (!link->wr_tx_mask)
+               goto no_mem_wr_rx_sges;
+       link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT,
+                                   sizeof(link->wr_tx_pends[0]),
+                                   GFP_KERNEL);
+       if (!link->wr_tx_pends)
+               goto no_mem_wr_tx_mask;
+       return 0;
+
+no_mem_wr_tx_mask:
+       kfree(link->wr_tx_mask);
+no_mem_wr_rx_sges:
+       kfree(link->wr_rx_sges);
+no_mem_wr_tx_sges:
+       kfree(link->wr_tx_sges);
+no_mem_wr_rx_ibs:
+       kfree(link->wr_rx_ibs);
+no_mem_wr_tx_ibs:
+       kfree(link->wr_tx_ibs);
+no_mem_wr_rx_bufs:
+       kfree(link->wr_rx_bufs);
+no_mem_wr_tx_bufs:
+       kfree(link->wr_tx_bufs);
+no_mem:
+       return -ENOMEM;
+}
+
+void smc_wr_remove_dev(struct smc_ib_device *smcibdev)
+{
+       tasklet_kill(&smcibdev->recv_tasklet);
+       tasklet_kill(&smcibdev->send_tasklet);
+}
+
+void smc_wr_add_dev(struct smc_ib_device *smcibdev)
+{
+       tasklet_init(&smcibdev->recv_tasklet, smc_wr_rx_tasklet_fn,
+                    (unsigned long)smcibdev);
+       tasklet_init(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn,
+                    (unsigned long)smcibdev);
+}
+
+int smc_wr_create_link(struct smc_link *lnk)
+{
+       struct ib_device *ibdev = lnk->smcibdev->ibdev;
+       int rc = 0;
+
+       smc_wr_tx_set_wr_id(&lnk->wr_tx_id, 0);
+       lnk->wr_rx_id = 0;
+       lnk->wr_rx_dma_addr = ib_dma_map_single(
+               ibdev, lnk->wr_rx_bufs, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
+               DMA_FROM_DEVICE);
+       if (ib_dma_mapping_error(ibdev, lnk->wr_rx_dma_addr)) {
+               lnk->wr_rx_dma_addr = 0;
+               rc = -EIO;
+               goto out;
+       }
+       lnk->wr_tx_dma_addr = ib_dma_map_single(
+               ibdev, lnk->wr_tx_bufs, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
+               DMA_TO_DEVICE);
+       if (ib_dma_mapping_error(ibdev, lnk->wr_tx_dma_addr)) {
+               rc = -EIO;
+               goto dma_unmap;
+       }
+       smc_wr_init_sge(lnk);
+       memset(lnk->wr_tx_mask, 0,
+              BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
+       return rc;
+
+dma_unmap:
+       ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
+                           SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
+                           DMA_FROM_DEVICE);
+       lnk->wr_rx_dma_addr = 0;
+out:
+       return rc;
+}
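[Editor's note: taken together with the declarations in smc_wr.h below, the per-link lifecycle appears to be: allocate the arrays, derive the WR counts from the QP, DMA-map and build the SGEs, and tear down in reverse. A hedged ordering sketch, assuming the QP already exists; error handling abbreviated:]

    rc = smc_wr_alloc_link_mem(lnk);    /* buffers, WRs, SGEs, mask */
    if (rc)
            return rc;
    smc_wr_remember_qp_attr(lnk);       /* sets wr_tx_cnt / wr_rx_cnt */
    rc = smc_wr_create_link(lnk);       /* DMA-map buffers, init SGEs */
    if (rc)
            goto free_mem;
    /* ... link in use ... */
    smc_wr_free_link(lnk);              /* DMA-unmap */
    free_mem:
    smc_wr_free_link_mem(lnk);          /* kfree() everything */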
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
new file mode 100644 (file)
index 0000000..0b9beed
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * Shared Memory Communications over RDMA (SMC-R) and RoCE
+ *
+ * Work Requests exploiting Infiniband API
+ *
+ * Copyright IBM Corp. 2016
+ *
+ * Author(s):  Steffen Maier <maier@linux.vnet.ibm.com>
+ */
+
+#ifndef SMC_WR_H
+#define SMC_WR_H
+
+#include <linux/atomic.h>
+#include <rdma/ib_verbs.h>
+#include <asm/div64.h>
+
+#include "smc.h"
+#include "smc_core.h"
+
+#define SMC_WR_MAX_CQE 32768   /* max. # of completion queue elements */
+#define SMC_WR_BUF_CNT 16      /* # of ctrl buffers per link */
+
+#define SMC_WR_TX_WAIT_FREE_SLOT_TIME  (10 * HZ)
+#define SMC_WR_TX_WAIT_PENDING_TIME    (5 * HZ)
+
+#define SMC_WR_TX_SIZE 44 /* actual size of wr_send data (<=SMC_WR_BUF_SIZE) */
+
+#define SMC_WR_TX_PEND_PRIV_SIZE 32
+
+struct smc_wr_tx_pend_priv {
+       u8                      priv[SMC_WR_TX_PEND_PRIV_SIZE];
+};
+
+typedef void (*smc_wr_tx_handler)(struct smc_wr_tx_pend_priv *,
+                                 struct smc_link *,
+                                 enum ib_wc_status);
+
+typedef bool (*smc_wr_tx_filter)(struct smc_wr_tx_pend_priv *,
+                                unsigned long);
+
+typedef void (*smc_wr_tx_dismisser)(struct smc_wr_tx_pend_priv *);
+
+struct smc_wr_rx_handler {
+       struct hlist_node       list;   /* hash table collision resolution */
+       void                    (*handler)(struct ib_wc *, void *);
+       u8                      type;
+};
+
+/* Only used by RDMA write WRs.
+ * All other WRs (CDC/LLC) use smc_wr_tx_send(), which handles the WR ID
+ * implicitly.
+ */
+static inline long smc_wr_tx_get_next_wr_id(struct smc_link *link)
+{
+       return atomic_long_inc_return(&link->wr_tx_id);
+}
+
+static inline void smc_wr_tx_set_wr_id(atomic_long_t *wr_tx_id, long val)
+{
+       atomic_long_set(wr_tx_id, val);
+}
+
+/* post a new receive work request to fill a completed old work request entry */
+static inline int smc_wr_rx_post(struct smc_link *link)
+{
+       struct ib_recv_wr *bad_recv_wr = NULL;
+       int rc;
+       u64 wr_id, temp_wr_id;
+       u32 index;
+
+       wr_id = ++link->wr_rx_id; /* tasklet context, thus not atomic */
+       temp_wr_id = wr_id;
+       index = do_div(temp_wr_id, link->wr_rx_cnt);
+       link->wr_rx_ibs[index].wr_id = wr_id;
+       rc = ib_post_recv(link->roce_qp, &link->wr_rx_ibs[index], &bad_recv_wr);
+       return rc;
+}
+
+int smc_wr_create_link(struct smc_link *lnk);
+int smc_wr_alloc_link_mem(struct smc_link *lnk);
+void smc_wr_free_link(struct smc_link *lnk);
+void smc_wr_free_link_mem(struct smc_link *lnk);
+void smc_wr_remember_qp_attr(struct smc_link *lnk);
+void smc_wr_remove_dev(struct smc_ib_device *smcibdev);
+void smc_wr_add_dev(struct smc_ib_device *smcibdev);
+
+int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler,
+                           struct smc_wr_buf **wr_buf,
+                           struct smc_wr_tx_pend_priv **wr_pend_priv);
+int smc_wr_tx_put_slot(struct smc_link *link,
+                      struct smc_wr_tx_pend_priv *wr_pend_priv);
+int smc_wr_tx_send(struct smc_link *link,
+                  struct smc_wr_tx_pend_priv *wr_pend_priv);
+void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context);
+bool smc_wr_tx_has_pending(struct smc_link *link, u8 wr_rx_hdr_type,
+                          smc_wr_tx_filter filter, unsigned long data);
+void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type,
+                            smc_wr_tx_filter filter,
+                            smc_wr_tx_dismisser dismisser,
+                            unsigned long data);
+
+int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler);
+int smc_wr_rx_post_init(struct smc_link *link);
+void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context);
+
+#endif /* SMC_WR_H */
index 0758e13754e2faccb257d2f6ba9cca7b2da1baab..b7a63d5bc915f83350bf89717021bbb56e4ee34e 100644 (file)
@@ -287,7 +287,7 @@ static void init_once(void *foo)
        inode_init_once(&ei->vfs_inode);
 }
 
-static int init_inodecache(void)
+static void init_inodecache(void)
 {
        sock_inode_cachep = kmem_cache_create("sock_inode_cache",
                                              sizeof(struct socket_alloc),
@@ -296,9 +296,7 @@ static int init_inodecache(void)
                                               SLAB_RECLAIM_ACCOUNT |
                                               SLAB_MEM_SPREAD | SLAB_ACCOUNT),
                                              init_once);
-       if (sock_inode_cachep == NULL)
-               return -ENOMEM;
-       return 0;
+       BUG_ON(sock_inode_cachep == NULL);
 }
 
 static const struct super_operations sockfs_ops = {
@@ -1948,6 +1946,8 @@ static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg,
                ctl_buf = msg_sys->msg_control;
                ctl_len = msg_sys->msg_controllen;
        } else if (ctl_len) {
+               BUILD_BUG_ON(sizeof(struct cmsghdr) !=
+                            CMSG_ALIGN(sizeof(struct cmsghdr)));
                if (ctl_len > sizeof(ctl)) {
                        ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
                        if (ctl_buf == NULL)
index 288e35c2d8f4d7e28a5764469d6bbeceb40d0843..cb1e48e54eb1440181976a352229783f202f896d 100644 (file)
@@ -4,6 +4,7 @@
  * Support for backward direction RPCs on RPC/RDMA (server-side).
  */
 
+#include <linux/module.h>
 #include <linux/sunrpc/svc_rdma.h>
 #include "xprt_rdma.h"
 
index aa1babbea385348f1ecd4b7467079fc442653094..7d99029df342f15b28c1f8e49d43bc90c709d1b0 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/bcast.c: TIPC broadcast code
  *
- * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
+ * Copyright (c) 2004-2006, 2014-2016, Ericsson AB
  * Copyright (c) 2004, Intel Corporation.
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
@@ -39,9 +39,8 @@
 #include "socket.h"
 #include "msg.h"
 #include "bcast.h"
-#include "name_distr.h"
 #include "link.h"
-#include "node.h"
+#include "name_table.h"
 
 #define        BCLINK_WIN_DEFAULT      50      /* bcast link window size (default) */
 #define        BCLINK_WIN_MIN          32      /* bcast minimum link window size */
@@ -54,12 +53,20 @@ const char tipc_bclink_name[] = "broadcast-link";
  * @inputq: data input queue; will only carry SOCK_WAKEUP messages
  * @dest: array keeping number of reachable destinations per bearer
  * @primary_bearer: a bearer having links to all broadcast destinations, if any
+ * @bcast_support: indicates if primary bearer, if any, supports broadcast
+ * @rcast_support: indicates if all peer nodes support replicast
+ * @rc_ratio: dest count as percentage of cluster size where send method changes
+ * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
  */
 struct tipc_bc_base {
        struct tipc_link *link;
        struct sk_buff_head inputq;
        int dests[MAX_BEARERS];
        int primary_bearer;
+       bool bcast_support;
+       bool rcast_support;
+       int rc_ratio;
+       int bc_threshold;
 };
 
 static struct tipc_bc_base *tipc_bc_base(struct net *net)
@@ -69,7 +76,20 @@ static struct tipc_bc_base *tipc_bc_base(struct net *net)
 
 int tipc_bcast_get_mtu(struct net *net)
 {
-       return tipc_link_mtu(tipc_bc_sndlink(net));
+       return tipc_link_mtu(tipc_bc_sndlink(net)) - INT_H_SIZE;
+}
+
+void tipc_bcast_disable_rcast(struct net *net)
+{
+       tipc_bc_base(net)->rcast_support = false;
+}
+
+static void tipc_bcbase_calc_bc_threshold(struct net *net)
+{
+       struct tipc_bc_base *bb = tipc_bc_base(net);
+       int cluster_size = tipc_link_bc_peers(tipc_bc_sndlink(net));
+
+       bb->bc_threshold = 1 + (cluster_size * bb->rc_ratio / 100);
 }
 
 /* tipc_bcbase_select_primary(): find a bearer with links to all destinations,
@@ -79,9 +99,10 @@ static void tipc_bcbase_select_primary(struct net *net)
 {
        struct tipc_bc_base *bb = tipc_bc_base(net);
        int all_dests =  tipc_link_bc_peers(bb->link);
-       int i, mtu;
+       int i, mtu, prim;
 
        bb->primary_bearer = INVALID_BEARER_ID;
+       bb->bcast_support = true;
 
        if (!all_dests)
                return;
@@ -93,7 +114,7 @@ static void tipc_bcbase_select_primary(struct net *net)
                mtu = tipc_bearer_mtu(net, i);
                if (mtu < tipc_link_mtu(bb->link))
                        tipc_link_set_mtu(bb->link, mtu);
-
+               bb->bcast_support &= tipc_bearer_bcast_support(net, i);
                if (bb->dests[i] < all_dests)
                        continue;
 
@@ -103,6 +124,9 @@ static void tipc_bcbase_select_primary(struct net *net)
                if ((i ^ tipc_own_addr(net)) & 1)
                        break;
        }
+       prim = bb->primary_bearer;
+       if (prim != INVALID_BEARER_ID)
+               bb->bcast_support = tipc_bearer_bcast_support(net, prim);
 }
 
 void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id)
@@ -170,45 +194,131 @@ static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
        __skb_queue_purge(&_xmitq);
 }
 
-/* tipc_bcast_xmit - deliver buffer chain to all nodes in cluster
- *                    and to identified node local sockets
+static void tipc_bcast_select_xmit_method(struct net *net, int dests,
+                                         struct tipc_mc_method *method)
+{
+       struct tipc_bc_base *bb = tipc_bc_base(net);
+       unsigned long exp = method->expires;
+
+       /* Is broadcast supported by the bearer(s) in use? */
+       if (!bb->bcast_support) {
+               method->rcast = true;
+               return;
+       }
+       /* Are there destinations that don't support replicast? */
+       if (!bb->rcast_support) {
+               method->rcast = false;
+               return;
+       }
+       /* Can the current method be changed? */
+       method->expires = jiffies + TIPC_METHOD_EXPIRE;
+       if (method->mandatory || time_before(jiffies, exp))
+               return;
+
+       /* Determine method to use now */
+       method->rcast = dests <= bb->bc_threshold;
+}
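[Editor's note: a worked example of the threshold set by tipc_bcbase_calc_bc_threshold() above. With rc_ratio at its default of 25 (set in tipc_bcast_init below) and a cluster of 20 reachable peers, bc_threshold = 1 + 20 * 25 / 100 = 6. A multicast to 6 or fewer destinations is therefore replicated as unicast (method->rcast = true), while a larger one uses true broadcast, unless the bearer lacks broadcast support or a peer lacks replicast support, in which case the method is forced as shown above.]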
+
+/* tipc_bcast_xmit - broadcast the buffer chain to all external nodes
  * @net: the applicable net namespace
- * @list: chain of buffers containing message
- * Consumes the buffer chain, except when returning -ELINKCONG
- * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
+ * @pkts: chain of buffers containing message
+ * @cong_link_cnt: set to 1 if broadcast link is congested, otherwise 0
+ * Consumes the buffer chain.
+ * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE
  */
-int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list)
+static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
+                          u16 *cong_link_cnt)
 {
        struct tipc_link *l = tipc_bc_sndlink(net);
-       struct sk_buff_head xmitq, inputq, rcvq;
+       struct sk_buff_head xmitq;
        int rc = 0;
 
-       __skb_queue_head_init(&rcvq);
        __skb_queue_head_init(&xmitq);
-       skb_queue_head_init(&inputq);
-
-       /* Prepare message clone for local node */
-       if (unlikely(!tipc_msg_reassemble(list, &rcvq)))
-               return -EHOSTUNREACH;
-
        tipc_bcast_lock(net);
        if (tipc_link_bc_peers(l))
-               rc = tipc_link_xmit(l, list, &xmitq);
+               rc = tipc_link_xmit(l, pkts, &xmitq);
        tipc_bcast_unlock(net);
-
-       /* Don't send to local node if adding to link failed */
-       if (unlikely(rc)) {
-               __skb_queue_purge(&rcvq);
-               return rc;
+       tipc_bcbase_xmit(net, &xmitq);
+       __skb_queue_purge(pkts);
+       if (rc == -ELINKCONG) {
+               *cong_link_cnt = 1;
+               rc = 0;
        }
+       return rc;
+}
 
-       /* Broadcast to all nodes, inluding local node */
-       tipc_bcbase_xmit(net, &xmitq);
-       tipc_sk_mcast_rcv(net, &rcvq, &inputq);
-       __skb_queue_purge(list);
+/* tipc_rcast_xmit - replicate and send a message to given destination nodes
+ * @net: the applicable net namespace
+ * @pkts: chain of buffers containing message
+ * @dests: list of destination nodes
+ * @cong_link_cnt: returns number of congested links
+ * Returns 0 if success, otherwise errno
+ */
+static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
+                          struct tipc_nlist *dests, u16 *cong_link_cnt)
+{
+       struct sk_buff_head _pkts;
+       struct u32_item *n, *tmp;
+       u32 dst, selector;
+
+       selector = msg_link_selector(buf_msg(skb_peek(pkts)));
+       __skb_queue_head_init(&_pkts);
+
+       list_for_each_entry_safe(n, tmp, &dests->list, list) {
+               dst = n->value;
+               if (!tipc_msg_pskb_copy(dst, pkts, &_pkts))
+                       return -ENOMEM;
+
+               /* Any return value other than -ELINKCONG is ignored */
+               if (tipc_node_xmit(net, &_pkts, dst, selector) == -ELINKCONG)
+                       (*cong_link_cnt)++;
+       }
        return 0;
 }
 
+/* tipc_mcast_xmit - deliver message to indicated destination nodes
+ *                   and to identified node local sockets
+ * @net: the applicable net namespace
+ * @pkts: chain of buffers containing message
+ * @method: send method to be used
+ * @dests: destination nodes for message.
+ * @cong_link_cnt: returns number of encountered congested destination links
+ * Consumes buffer chain.
+ * Returns 0 if success, otherwise errno
+ */
+int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
+                   struct tipc_mc_method *method, struct tipc_nlist *dests,
+                   u16 *cong_link_cnt)
+{
+       struct sk_buff_head inputq, localq;
+       int rc = 0;
+
+       skb_queue_head_init(&inputq);
+       skb_queue_head_init(&localq);
+
+       /* Clone packets before they are consumed by next call */
+       if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {
+               rc = -ENOMEM;
+               goto exit;
+       }
+       /* Send according to determined transmit method */
+       if (dests->remote) {
+               tipc_bcast_select_xmit_method(net, dests->remote, method);
+               if (method->rcast)
+                       rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt);
+               else
+                       rc = tipc_bcast_xmit(net, pkts, cong_link_cnt);
+       }
+
+       if (dests->local)
+               tipc_sk_mcast_rcv(net, &localq, &inputq);
+exit:
+       /* This queue should normally be empty by now */
+       __skb_queue_purge(pkts);
+       return rc;
+}
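[Editor's note: a caller (the socket layer in this series) is expected to build the destination list and keep a per-socket tipc_mc_method cookie across sends. A hedged sketch of the calling convention; the name-table lookup parameters (type, lower, upper, domain) and the pkts queue are assumed to be set up by the caller:]

    struct tipc_mc_method *method = &tsk->mc_method;
    struct tipc_nlist dsts;
    u16 cong_links = 0;
    int rc = 0;

    tipc_nlist_init(&dsts, tipc_own_addr(net));
    tipc_nametbl_lookup_dst_nodes(net, type, lower, upper, domain, &dsts);
    if (dsts.local || dsts.remote)
            rc = tipc_mcast_xmit(net, &pkts, method, &dsts, &cong_links);
    tipc_nlist_purge(&dsts);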
+
 /* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
  *
  * RCU is locked, no other locks set
@@ -313,6 +423,7 @@ void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l,
        tipc_bcast_lock(net);
        tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
        tipc_bcbase_select_primary(net);
+       tipc_bcbase_calc_bc_threshold(net);
        tipc_bcast_unlock(net);
 }
 
@@ -331,6 +442,7 @@ void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
        tipc_bcast_lock(net);
        tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
        tipc_bcbase_select_primary(net);
+       tipc_bcbase_calc_bc_threshold(net);
        tipc_bcast_unlock(net);
 
        tipc_bcbase_xmit(net, &xmitq);
@@ -413,6 +525,8 @@ int tipc_bcast_init(struct net *net)
                goto enomem;
        bb->link = l;
        tn->bcl = l;
+       bb->rc_ratio = 25;
+       bb->rcast_support = true;
        return 0;
 enomem:
        kfree(bb);
@@ -428,3 +542,33 @@ void tipc_bcast_stop(struct net *net)
        kfree(tn->bcbase);
        kfree(tn->bcl);
 }
+
+void tipc_nlist_init(struct tipc_nlist *nl, u32 self)
+{
+       memset(nl, 0, sizeof(*nl));
+       INIT_LIST_HEAD(&nl->list);
+       nl->self = self;
+}
+
+void tipc_nlist_add(struct tipc_nlist *nl, u32 node)
+{
+       if (node == nl->self)
+               nl->local = true;
+       else if (u32_push(&nl->list, node))
+               nl->remote++;
+}
+
+void tipc_nlist_del(struct tipc_nlist *nl, u32 node)
+{
+       if (node == nl->self)
+               nl->local = false;
+       else if (u32_del(&nl->list, node))
+               nl->remote--;
+}
+
+void tipc_nlist_purge(struct tipc_nlist *nl)
+{
+       u32_list_purge(&nl->list);
+       nl->remote = 0;
+       nl->local = 0;
+}
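[Editor's note: the list distinguishes the own node from peers: adding the own address flips the local flag instead of growing the remote count, which is what lets tipc_mcast_xmit() above split local and remote delivery. A small sketch with made-up addresses (0x01001001 is TIPC address <1.1.1>):]

    struct tipc_nlist nl;

    tipc_nlist_init(&nl, 0x01001001);   /* self = <1.1.1> */
    tipc_nlist_add(&nl, 0x01001002);    /* peer: remote == 1 */
    tipc_nlist_add(&nl, 0x01001001);    /* own address: local = true */
    tipc_nlist_del(&nl, 0x01001002);    /* remote back to 0 */
    tipc_nlist_purge(&nl);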
index 855d53c64ab347ec5b37dc06a39e10748f7bf388..751530ab0c494f0b84c2c6d40fd6db20773f5367 100644 (file)
 struct tipc_node;
 struct tipc_msg;
 struct tipc_nl_msg;
-struct tipc_node_map;
+struct tipc_nlist;
+struct tipc_nitem;
 extern const char tipc_bclink_name[];
 
+#define TIPC_METHOD_EXPIRE msecs_to_jiffies(5000)
+
+struct tipc_nlist {
+       struct list_head list;
+       u32 self;
+       u16 remote;
+       bool local;
+};
+
+void tipc_nlist_init(struct tipc_nlist *nl, u32 self);
+void tipc_nlist_purge(struct tipc_nlist *nl);
+void tipc_nlist_add(struct tipc_nlist *nl, u32 node);
+void tipc_nlist_del(struct tipc_nlist *nl, u32 node);
+
+/* Cookie to be used between socket and broadcast layer
+ * @rcast: replicast (instead of broadcast) was used at previous xmit
+ * @mandatory: broadcast/replicast indication was set by user
+ * @expires: re-evaluate non-mandatory transmit method if we are past this
+ */
+struct tipc_mc_method {
+       bool rcast;
+       bool mandatory;
+       unsigned long expires;
+};
+
 int tipc_bcast_init(struct net *net);
 void tipc_bcast_stop(struct net *net);
 void tipc_bcast_add_peer(struct net *net, struct tipc_link *l,
@@ -53,7 +79,10 @@ void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_bcl);
 void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id);
 void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id);
 int  tipc_bcast_get_mtu(struct net *net);
-int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list);
+void tipc_bcast_disable_rcast(struct net *net);
+int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
+                   struct tipc_mc_method *method, struct tipc_nlist *dests,
+                   u16 *cong_link_cnt);
 int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb);
 void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
                        struct tipc_msg *hdr);
index 52d74760fb68697d508fafb213b56deaed2e8639..33a5bdfbef76c00578921198bfbbe9c735f643d2 100644 (file)
@@ -431,7 +431,7 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
        memset(&b->bcast_addr, 0, sizeof(b->bcast_addr));
        memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len);
        b->bcast_addr.media_id = b->media->type_id;
-       b->bcast_addr.broadcast = 1;
+       b->bcast_addr.broadcast = TIPC_BROADCAST_SUPPORT;
        b->mtu = dev->mtu;
        b->media->raw2addr(b, &b->addr, (char *)dev->dev_addr);
        rcu_assign_pointer(dev->tipc_ptr, b);
@@ -482,6 +482,19 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
        return 0;
 }
 
+bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id)
+{
+       bool supp = false;
+       struct tipc_bearer *b;
+
+       rcu_read_lock();
+       b = bearer_get(net, bearer_id);
+       if (b)
+               supp = (b->bcast_addr.broadcast == TIPC_BROADCAST_SUPPORT);
+       rcu_read_unlock();
+       return supp;
+}
+
 int tipc_bearer_mtu(struct net *net, u32 bearer_id)
 {
        int mtu = 0;
index 278ff7f616f9e884aca58fce761a23cfd1c55f70..635c9086e19af86b81b4dc572a7c2ee0374b02fc 100644 (file)
 #define TIPC_MEDIA_TYPE_IB     2
 #define TIPC_MEDIA_TYPE_UDP    3
 
-/* minimum bearer MTU */
+/* Minimum bearer MTU */
 #define TIPC_MIN_BEARER_MTU    (MAX_H_SIZE + INT_H_SIZE)
 
+/* Identifiers for distinguishing between broadcast/multicast and replicast
+ */
+#define TIPC_BROADCAST_SUPPORT  1
+#define TIPC_REPLICAST_SUPPORT  2
+
 /**
  * struct tipc_media_addr - destination address used by TIPC bearers
  * @value: address info (format defined by media)
@@ -210,6 +215,7 @@ int tipc_bearer_setup(void);
 void tipc_bearer_cleanup(void);
 void tipc_bearer_stop(struct net *net);
 int tipc_bearer_mtu(struct net *net, u32 bearer_id);
+bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id);
 void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
                          struct sk_buff *skb,
                          struct tipc_media_addr *dest);
index 4e8647aef01c1d070751adc6a24c63769b66e599..ddd2dd6f77aae1a5cd77c6bd86fac293db66e145 100644 (file)
@@ -515,6 +515,10 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
        if (link_is_bc_sndlink(l))
                l->state = LINK_ESTABLISHED;
 
+       /* Disable replicast if even a single peer doesn't support it */
+       if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
+               tipc_bcast_disable_rcast(net);
+
        return true;
 }
 
@@ -776,60 +780,47 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
 
 /**
  * link_schedule_user - schedule a message sender for wakeup after congestion
- * @link: congested link
- * @list: message that was attempted sent
+ * @l: congested link
+ * @hdr: header of message that is being sent
  * Create pseudo msg to send back to user when congestion abates
- * Does not consume buffer list
  */
-static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
+static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
 {
-       struct tipc_msg *msg = buf_msg(skb_peek(list));
-       int imp = msg_importance(msg);
-       u32 oport = msg_origport(msg);
-       u32 addr = tipc_own_addr(link->net);
+       u32 dnode = tipc_own_addr(l->net);
+       u32 dport = msg_origport(hdr);
        struct sk_buff *skb;
 
-       /* This really cannot happen...  */
-       if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
-               pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
-               return -ENOBUFS;
-       }
-       /* Non-blocking sender: */
-       if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
-               return -ELINKCONG;
-
        /* Create and schedule wakeup pseudo message */
        skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
-                             addr, addr, oport, 0, 0);
+                             dnode, l->addr, dport, 0, 0);
        if (!skb)
                return -ENOBUFS;
-       TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
-       TIPC_SKB_CB(skb)->chain_imp = imp;
-       skb_queue_tail(&link->wakeupq, skb);
-       link->stats.link_congs++;
+       msg_set_dest_droppable(buf_msg(skb), true);
+       TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
+       skb_queue_tail(&l->wakeupq, skb);
+       l->stats.link_congs++;
        return -ELINKCONG;
 }
 
 /**
  * link_prepare_wakeup - prepare users for wakeup after congestion
- * @link: congested link
- * Move a number of waiting users, as permitted by available space in
- * the send queue, from link wait queue to node wait queue for wakeup
+ * @l: congested link
+ * Wake up a number of waiting users, as permitted by available space
+ * in the send queue
  */
 void link_prepare_wakeup(struct tipc_link *l)
 {
-       int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
-       int imp, lim;
        struct sk_buff *skb, *tmp;
+       int imp, i = 0;
 
        skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
                imp = TIPC_SKB_CB(skb)->chain_imp;
-               lim = l->backlog[imp].limit;
-               pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
-               if ((pnd[imp] + l->backlog[imp].len) >= lim)
+               if (l->backlog[imp].len < l->backlog[imp].limit) {
+                       skb_unlink(skb, &l->wakeupq);
+                       skb_queue_tail(l->inputq, skb);
+               } else if (i++ > 10) {
                        break;
-               skb_unlink(skb, &l->wakeupq);
-               skb_queue_tail(l->inputq, skb);
+               }
        }
 }
 
@@ -869,8 +860,7 @@ void tipc_link_reset(struct tipc_link *l)
  * @list: chain of buffers containing message
  * @xmitq: returned list of packets to be sent by caller
  *
- * Consumes the buffer chain, except when returning -ELINKCONG,
- * since the caller then may want to make more send attempts.
+ * Consumes the buffer chain.
  * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
  * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
  */
@@ -879,7 +869,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 {
        struct tipc_msg *hdr = buf_msg(skb_peek(list));
        unsigned int maxwin = l->window;
-       unsigned int i, imp = msg_importance(hdr);
+       int imp = msg_importance(hdr);
        unsigned int mtu = l->mtu;
        u16 ack = l->rcv_nxt - 1;
        u16 seqno = l->snd_nxt;
@@ -888,19 +878,22 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
        struct sk_buff_head *backlogq = &l->backlogq;
        struct sk_buff *skb, *_skb, *bskb;
        int pkt_cnt = skb_queue_len(list);
+       int rc = 0;
 
-       /* Match msg importance against this and all higher backlog limits: */
-       if (!skb_queue_empty(backlogq)) {
-               for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
-                       if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
-                               return link_schedule_user(l, list);
-               }
-       }
        if (unlikely(msg_size(hdr) > mtu)) {
                skb_queue_purge(list);
                return -EMSGSIZE;
        }
 
+       /* Allow oversubscription of one data msg per source at congestion */
+       if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
+               if (imp == TIPC_SYSTEM_IMPORTANCE) {
+                       pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
+                       return -ENOBUFS;
+               }
+               rc = link_schedule_user(l, hdr);
+       }
+
        if (pkt_cnt > 1) {
                l->stats.sent_fragmented++;
                l->stats.sent_fragments += pkt_cnt;
@@ -946,7 +939,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                skb_queue_splice_tail_init(list, backlogq);
        }
        l->snd_nxt = seqno;
-       return 0;
+       return rc;
 }
 
 void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
@@ -1043,11 +1036,17 @@ int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
                            struct sk_buff_head *inputq)
 {
-       switch (msg_user(buf_msg(skb))) {
+       struct tipc_msg *hdr = buf_msg(skb);
+
+       switch (msg_user(hdr)) {
        case TIPC_LOW_IMPORTANCE:
        case TIPC_MEDIUM_IMPORTANCE:
        case TIPC_HIGH_IMPORTANCE:
        case TIPC_CRITICAL_IMPORTANCE:
+               if (unlikely(msg_type(hdr) == TIPC_MCAST_MSG)) {
+                       skb_queue_tail(l->bc_rcvlink->inputq, skb);
+                       return true;
+               }
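+               /* else: fall through to common delivery below */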
        case CONN_MANAGER:
                skb_queue_tail(inputq, skb);
                return true;
index ab02d07424764ad4b269b6ad560167f528454b80..312ef7de57d7ba27c58533df9edb2f4dd61cc864 100644 (file)
@@ -607,6 +607,23 @@ error:
        return false;
 }
 
+bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
+                       struct sk_buff_head *cpy)
+{
+       struct sk_buff *skb, *_skb;
+
+       skb_queue_walk(msg, skb) {
+               _skb = pskb_copy(skb, GFP_ATOMIC);
+               if (!_skb) {
+                       __skb_queue_purge(cpy);
+                       return false;
+               }
+               msg_set_destnode(buf_msg(_skb), dst);
+               __skb_queue_tail(cpy, _skb);
+       }
+       return true;
+}
+
 /* tipc_skb_queue_sorted(); sort pkt into list according to sequence number
  * @list: list to be appended to
  * @seqno: sequence number of buffer to add
index 2c3dc38abf9c25814c0d9e3c31711706f2bca664..c843fd2bc48d2d4a9fb4edebbffc2a114f66f89d 100644 (file)
@@ -98,8 +98,6 @@ struct tipc_skb_cb {
        u32 bytes_read;
        struct sk_buff *tail;
        bool validated;
-       bool wakeup_pending;
-       u16 chain_sz;
        u16 chain_imp;
        u16 ackers;
 };
@@ -633,14 +631,11 @@ static inline void msg_set_bc_netid(struct tipc_msg *m, u32 id)
 
 static inline u32 msg_link_selector(struct tipc_msg *m)
 {
+       if (msg_user(m) == MSG_FRAGMENTER)
+               m = (void *)msg_data(m);
        return msg_bits(m, 4, 0, 1);
 }
 
-static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
-{
-       msg_set_bits(m, 4, 0, 1, n);
-}
-
 /*
  * Word 5
  */
@@ -837,6 +832,8 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
                   int offset, int dsz, int mtu, struct sk_buff_head *list);
 bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
 bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq);
+bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
+                       struct sk_buff_head *cpy);
 void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
                             struct sk_buff *skb);
 
index e190460fe0d396e795da064c9c3fe4523177879c..9be6592e4a6fa20c78995396ffa3dfcd1f19537a 100644 (file)
@@ -608,7 +608,7 @@ not_found:
  * Returns non-zero if any off-node ports overlap
  */
 int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
-                             u32 limit, struct tipc_plist *dports)
+                             u32 limit, struct list_head *dports)
 {
        struct name_seq *seq;
        struct sub_seq *sseq;
@@ -633,7 +633,7 @@ int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
                info = sseq->info;
                list_for_each_entry(publ, &info->node_list, node_list) {
                        if (publ->scope <= limit)
-                               tipc_plist_push(dports, publ->ref);
+                               u32_push(dports, publ->ref);
                }
 
                if (info->cluster_list_size != info->node_list_size)
@@ -645,6 +645,39 @@ exit:
        return res;
 }
 
+/* tipc_nametbl_lookup_dst_nodes - find broadcast destination nodes
+ * - Creates list of nodes that overlap the given multicast address
+ * - Determines if any node local ports overlap
+ */
+void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
+                                  u32 upper, u32 domain,
+                                  struct tipc_nlist *nodes)
+{
+       struct sub_seq *sseq, *stop;
+       struct publication *publ;
+       struct name_info *info;
+       struct name_seq *seq;
+
+       rcu_read_lock();
+       seq = nametbl_find_seq(net, type);
+       if (!seq)
+               goto exit;
+
+       spin_lock_bh(&seq->lock);
+       sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
+       stop = seq->sseqs + seq->first_free;
+       for (; sseq->lower <= upper && sseq != stop; sseq++) {
+               info = sseq->info;
+               list_for_each_entry(publ, &info->zone_list, zone_list) {
+                       if (tipc_in_scope(domain, publ->node))
+                               tipc_nlist_add(nodes, publ->node);
+               }
+       }
+       spin_unlock_bh(&seq->lock);
+exit:
+       rcu_read_unlock();
+}
+
 /*
  * tipc_nametbl_publish - add name publication to network name tables
  */
@@ -1022,40 +1055,79 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
        return skb->len;
 }
 
-void tipc_plist_push(struct tipc_plist *pl, u32 port)
+bool u32_find(struct list_head *l, u32 value)
 {
-       struct tipc_plist *nl;
+       struct u32_item *item;
 
-       if (likely(!pl->port)) {
-               pl->port = port;
-               return;
+       list_for_each_entry(item, l, list) {
+               if (item->value == value)
+                       return true;
        }
-       if (pl->port == port)
-               return;
-       list_for_each_entry(nl, &pl->list, list) {
-               if (nl->port == port)
-                       return;
+       return false;
+}
+
+bool u32_push(struct list_head *l, u32 value)
+{
+       struct u32_item *item;
+
+       list_for_each_entry(item, l, list) {
+               if (item->value == value)
+                       return false;
+       }
+       item = kmalloc(sizeof(*item), GFP_ATOMIC);
+       if (unlikely(!item))
+               return false;
+
+       item->value = value;
+       list_add(&item->list, l);
+       return true;
+}
+
+u32 u32_pop(struct list_head *l)
+{
+       struct u32_item *item;
+       u32 value = 0;
+
+       if (list_empty(l))
+               return 0;
+       item = list_first_entry(l, typeof(*item), list);
+       value = item->value;
+       list_del(&item->list);
+       kfree(item);
+       return value;
+}
+
+bool u32_del(struct list_head *l, u32 value)
+{
+       struct u32_item *item, *tmp;
+
+       list_for_each_entry_safe(item, tmp, l, list) {
+               if (item->value != value)
+                       continue;
+               list_del(&item->list);
+               kfree(item);
+               return true;
        }
-       nl = kmalloc(sizeof(*nl), GFP_ATOMIC);
-       if (nl) {
-               nl->port = port;
-               list_add(&nl->list, &pl->list);
+       return false;
+}
+
+void u32_list_purge(struct list_head *l)
+{
+       struct u32_item *item, *tmp;
+
+       list_for_each_entry_safe(item, tmp, l, list) {
+               list_del(&item->list);
+               kfree(item);
        }
 }
 
-u32 tipc_plist_pop(struct tipc_plist *pl)
+int u32_list_len(struct list_head *l)
 {
-       struct tipc_plist *nl;
-       u32 port = 0;
+       struct u32_item *item;
+       int i = 0;
 
-       if (likely(list_empty(&pl->list))) {
-               port = pl->port;
-               pl->port = 0;
-               return port;
+       list_for_each_entry(item, l, list) {
+               i++;
        }
-       nl = list_first_entry(&pl->list, typeof(*nl), list);
-       port = nl->port;
-       list_del(&nl->list);
-       kfree(nl);
-       return port;
+       return i;
 }
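[Editor's note: these helpers generalize the old tipc_plist into a plain u32 list: u32_push() rejects duplicates, u32_pop() returns 0 on an empty list. A minimal usage sketch:]

    LIST_HEAD(ports);

    u32_push(&ports, 100);      /* true: added */
    u32_push(&ports, 100);      /* false: duplicate, list unchanged */
    u32_push(&ports, 200);
    /* u32_list_len(&ports) == 2, u32_find(&ports, 200) == true */
    while (!list_empty(&ports))
            pr_info("port %u\n", u32_pop(&ports));
    /* or drop everything at once: u32_list_purge(&ports); */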
index 1524a73830f7e89b078cd374d81e725daea39a3c..6ebdeb1d84a550dfdefb2f28a71d6a42ad2d5e88 100644 (file)
@@ -39,6 +39,7 @@
 
 struct tipc_subscription;
 struct tipc_plist;
+struct tipc_nlist;
 
 /*
  * TIPC name types reserved for internal TIPC use (both current and planned)
@@ -99,7 +100,10 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
 
 u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *node);
 int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
-                             u32 limit, struct tipc_plist *dports);
+                             u32 limit, struct list_head *dports);
+void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
+                                  u32 upper, u32 domain,
+                                  struct tipc_nlist *nodes);
 struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
                                         u32 upper, u32 scope, u32 port_ref,
                                         u32 key);
@@ -116,18 +120,16 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
 int tipc_nametbl_init(struct net *net);
 void tipc_nametbl_stop(struct net *net);
 
-struct tipc_plist {
+struct u32_item {
        struct list_head list;
-       u32 port;
+       u32 value;
 };
 
-static inline void tipc_plist_init(struct tipc_plist *pl)
-{
-       INIT_LIST_HEAD(&pl->list);
-       pl->port = 0;
-}
-
-void tipc_plist_push(struct tipc_plist *pl, u32 port);
-u32 tipc_plist_pop(struct tipc_plist *pl);
+bool u32_push(struct list_head *l, u32 value);
+u32 u32_pop(struct list_head *l);
+bool u32_find(struct list_head *l, u32 value);
+bool u32_del(struct list_head *l, u32 value);
+void u32_list_purge(struct list_head *l);
+int u32_list_len(struct list_head *l);
 
 #endif
index 28bf4feeb81c25d0761177dc049100989640cdc5..ab8a2d5d1e3245d31f97375e5a27deda9a15ad58 100644 (file)
@@ -110,6 +110,10 @@ int tipc_net_start(struct net *net, u32 addr)
        char addr_string[16];
 
        tn->own_addr = addr;
+
+       /* Ensure that the new address is visible before we reinit. */
+       smp_mb();
+
        tipc_named_reinit(net);
        tipc_sk_reinit(net);
 
index 27753325e06e4355d71270d8c5c8818bef3e94c5..e9295fa3a554c860120f8e22a96f16c9f7e74745 100644 (file)
@@ -1172,7 +1172,7 @@ msg_full:
  * @list: chain of buffers containing message
  * @dnode: address of destination node
  * @selector: a number used for deterministic link selection
- * Consumes the buffer chain, except when returning -ELINKCONG
+ * Consumes the buffer chain.
  * Returns 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUF
  */
 int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
@@ -1211,10 +1211,10 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
        spin_unlock_bh(&le->lock);
        tipc_node_read_unlock(n);
 
-       if (likely(rc == 0))
-               tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
-       else if (rc == -ENOBUFS)
+       if (unlikely(rc == -ENOBUFS))
                tipc_node_link_down(n, bearer_id, false);
+       else
+               tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
 
        tipc_node_put(n);
 
@@ -1226,20 +1226,15 @@ int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
  * messages, which will not be rejected
  * The only exception is datagram messages rerouted after secondary
  * lookup, which are rare and safe to dispose of anyway.
- * TODO: Return real return value, and let callers use
- * tipc_wait_for_sendpkt() where applicable
  */
 int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
                       u32 selector)
 {
        struct sk_buff_head head;
-       int rc;
 
        skb_queue_head_init(&head);
        __skb_queue_tail(&head, skb);
-       rc = tipc_node_xmit(net, &head, dnode, selector);
-       if (rc == -ELINKCONG)
-               kfree_skb(skb);
+       tipc_node_xmit(net, &head, dnode, selector);
        return 0;
 }
 
@@ -1267,6 +1262,19 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
        kfree_skb(skb);
 }
 
+static void tipc_node_mcast_rcv(struct tipc_node *n)
+{
+       struct tipc_bclink_entry *be = &n->bc_entry;
+
+       /* 'arrvq' is under inputq2's lock protection */
+       spin_lock_bh(&be->inputq2.lock);
+       spin_lock_bh(&be->inputq1.lock);
+       skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
+       spin_unlock_bh(&be->inputq1.lock);
+       spin_unlock_bh(&be->inputq2.lock);
+       tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
+}
+
 static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
                                  int bearer_id, struct sk_buff_head *xmitq)
 {
@@ -1340,15 +1348,8 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
        if (!skb_queue_empty(&xmitq))
                tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
 
-       /* Deliver. 'arrvq' is under inputq2's lock protection */
-       if (!skb_queue_empty(&be->inputq1)) {
-               spin_lock_bh(&be->inputq2.lock);
-               spin_lock_bh(&be->inputq1.lock);
-               skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
-               spin_unlock_bh(&be->inputq1.lock);
-               spin_unlock_bh(&be->inputq2.lock);
-               tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2);
-       }
+       if (!skb_queue_empty(&be->inputq1))
+               tipc_node_mcast_rcv(n);
 
        if (rc & TIPC_LINK_DOWN_EVT) {
                /* Reception reassembly failure => reset all links to peer */
@@ -1575,6 +1576,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
        if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
                tipc_named_rcv(net, &n->bc_entry.namedq);
 
+       if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
+               tipc_node_mcast_rcv(n);
+
        if (!skb_queue_empty(&le->inputq))
                tipc_sk_rcv(net, &le->inputq);
 
index 39ef54c1f2ad41ad0d32b38d2b2bef1b5f0c4e82..898c229169841448c621397853f29ac9c96fa2df 100644 (file)
 enum {
        TIPC_BCAST_SYNCH      = (1 << 1),
        TIPC_BCAST_STATE_NACK = (1 << 2),
-       TIPC_BLOCK_FLOWCTL    = (1 << 3)
+       TIPC_BLOCK_FLOWCTL    = (1 << 3),
+       TIPC_BCAST_RCAST      = (1 << 4)
 };
 
 #define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | \
                                TIPC_BCAST_STATE_NACK | \
+                               TIPC_BCAST_RCAST | \
                                TIPC_BLOCK_FLOWCTL)
 #define INVALID_BEARER_ID -1
 
index 800caaa699a1669f6efe228e51974e6e2bd19e60..6b09a778cc71faffb7672b79b2d32ab0c2f49240 100644 (file)
@@ -67,16 +67,19 @@ enum {
  * @max_pkt: maximum packet size "hint" used when building messages sent by port
  * @portid: unique port identity in TIPC socket hash table
  * @phdr: preformatted message header used when sending messages
+ * @cong_links: list of congested links
  * @publications: list of publications for port
+ * @blocking_link: address of the congested link we are currently sleeping on
  * @pub_count: total # of publications port has made during its lifetime
  * @probing_state:
  * @conn_timeout: the time we can wait for an unresponded setup request
  * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
- * @link_cong: non-zero if owner must sleep because of link congestion
+ * @cong_link_cnt: number of congested links
  * @sent_unacked: # messages sent by socket, and not yet acked by peer
  * @rcv_unacked: # messages read by user, but not yet acked back to peer
  * @peer: 'connected' peer for dgram/rdm
  * @node: hash table node
+ * @mc_method: cookie for use between socket and broadcast layer
  * @rcu: rcu struct for tipc_sock
  */
 struct tipc_sock {
@@ -87,13 +90,13 @@ struct tipc_sock {
        u32 max_pkt;
        u32 portid;
        struct tipc_msg phdr;
-       struct list_head sock_list;
+       struct list_head cong_links;
        struct list_head publications;
        u32 pub_count;
        uint conn_timeout;
        atomic_t dupl_rcvcnt;
        bool probe_unacked;
-       bool link_cong;
+       u16 cong_link_cnt;
        u16 snt_unacked;
        u16 snd_win;
        u16 peer_caps;
@@ -101,6 +104,7 @@ struct tipc_sock {
        u16 rcv_win;
        struct sockaddr_tipc peer;
        struct rhash_head node;
+       struct tipc_mc_method mc_method;
        struct rcu_head rcu;
 };
 
@@ -110,7 +114,6 @@ static void tipc_write_space(struct sock *sk);
 static void tipc_sock_destruct(struct sock *sk);
 static int tipc_release(struct socket *sock);
 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
-static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
 static void tipc_sk_timeout(unsigned long data);
 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
                           struct tipc_name_seq const *seq);
@@ -119,8 +122,7 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
 static int tipc_sk_insert(struct tipc_sock *tsk);
 static void tipc_sk_remove(struct tipc_sock *tsk);
-static int __tipc_send_stream(struct socket *sock, struct msghdr *m,
-                             size_t dsz);
+static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
 static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
 
 static const struct proto_ops packet_ops;
@@ -334,6 +336,49 @@ static int tipc_set_sk_state(struct sock *sk, int state)
        return res;
 }
 
+static int tipc_sk_sock_err(struct socket *sock, long *timeout)
+{
+       struct sock *sk = sock->sk;
+       int err = sock_error(sk);
+       int typ = sock->type;
+
+       if (err)
+               return err;
+       if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
+               if (sk->sk_state == TIPC_DISCONNECTING)
+                       return -EPIPE;
+               else if (!tipc_sk_connected(sk))
+                       return -ENOTCONN;
+       }
+       if (!*timeout)
+               return -EAGAIN;
+       if (signal_pending(current))
+               return sock_intr_errno(*timeout);
+
+       return 0;
+}
+
+#define tipc_wait_for_cond(sock_, timeout_, condition_)                        \
+({                                                                     \
+       int rc_ = 0;                                                    \
+       int done_ = 0;                                                  \
+                                                                       \
+       while (!(condition_) && !done_) {                               \
+               struct sock *sk_ = (sock_)->sk;                         \
+               DEFINE_WAIT_FUNC(wait_, woken_wake_function);           \
+                                                                       \
+               rc_ = tipc_sk_sock_err(sock_, timeout_);                \
+               if (rc_)                                                \
+                       break;                                          \
+               add_wait_queue(sk_sleep(sk_), &wait_);                  \
+               done_ = sk_wait_event(sk_, timeout_,                    \
+                                     (condition_), &wait_);            \
+               remove_wait_queue(sk_sleep(sk_), &wait_);               \
+       }                                                               \
+       rc_;                                                            \
+})
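
The macro folds socket errors, timeout expiry and pending signals into one
return code via tipc_sk_sock_err() and re-evaluates the condition after every
wakeup. A minimal sketch of a caller, mirroring how tipc_sendmcast() below
uses it (sketch only, not part of the patch):

static int example_wait_uncongested(struct socket *sock, long *timeout)
{
	struct tipc_sock *tsk = tipc_sk(sock->sk);

	/* Sleeps until no destination link is congested, or returns
	 * -EAGAIN on timeout, sock_intr_errno() on a signal, or the
	 * pending socket error.
	 */
	return tipc_wait_for_cond(sock, timeout, !tsk->cong_link_cnt);
}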
+
 /**
  * tipc_sk_create - create a TIPC socket
  * @net: network namespace (must be default network)
@@ -382,10 +427,9 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
        tsk = tipc_sk(sk);
        tsk->max_pkt = MAX_PKT_DEFAULT;
        INIT_LIST_HEAD(&tsk->publications);
+       INIT_LIST_HEAD(&tsk->cong_links);
        msg = &tsk->phdr;
        tn = net_generic(sock_net(sk), tipc_net_id);
-       tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
-                     NAMED_H_SIZE, 0);
 
        /* Finish initializing socket data structures */
        sock->ops = ops;
@@ -395,6 +439,13 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
                pr_warn("Socket create failed; port number exhausted\n");
                return -EINVAL;
        }
+
+       /* Ensure tsk is visible before we read own_addr. */
+       smp_mb();
+
+       tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
+                     NAMED_H_SIZE, 0);
+
        msg_set_origport(msg, tsk->portid);
        setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
        sk->sk_shutdown = 0;
@@ -432,9 +483,14 @@ static void __tipc_shutdown(struct socket *sock, int error)
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct net *net = sock_net(sk);
+       long timeout = CONN_TIMEOUT_DEFAULT;
        u32 dnode = tsk_peer_node(tsk);
        struct sk_buff *skb;
 
+       /* Prevent hi-prio shutdown msgs from overtaking msgs in wakeup queue */
+       tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
+                                           !tsk_conn_cong(tsk)));
+
        /* Reject all unreceived messages, except on an active connection
         * (which disconnects locally & sends a 'FIN+' to peer).
         */
@@ -505,7 +561,8 @@ static int tipc_release(struct socket *sock)
 
        /* Reject any messages that accumulated in backlog queue */
        release_sock(sk);
-
+       u32_list_purge(&tsk->cong_links);
+       tsk->cong_link_cnt = 0;
        call_rcu(&tsk->rcu, tipc_sk_callback);
        sock->sk = NULL;
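
The cong_links bookkeeping above and below leans on a family of small u32
list helpers (u32_find(), u32_push(), u32_pop(), u32_del(), u32_list_purge())
whose definitions are not part of the hunks shown here. A minimal sketch
consistent with the call sites in this file (struct u32_item and the
GFP_ATOMIC allocation are assumptions):

struct u32_item {
	struct list_head list;
	u32 value;
};

static bool u32_find(struct list_head *l, u32 value)
{
	struct u32_item *item;

	list_for_each_entry(item, l, list) {
		if (item->value == value)
			return true;
	}
	return false;
}

static bool u32_push(struct list_head *l, u32 value)
{
	struct u32_item *item;

	if (u32_find(l, value))
		return false;
	item = kmalloc(sizeof(*item), GFP_ATOMIC);
	if (unlikely(!item))
		return false;
	item->value = value;
	list_add(&item->list, l);
	return true;
}

static u32 u32_pop(struct list_head *l)
{
	struct u32_item *item;
	u32 value;

	if (list_empty(l))
		return 0;
	item = list_first_entry(l, struct u32_item, list);
	value = item->value;
	list_del(&item->list);
	kfree(item);
	return value;
}

static bool u32_del(struct list_head *l, u32 value)
{
	struct u32_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, l, list) {
		if (item->value != value)
			continue;
		list_del(&item->list);
		kfree(item);
		return true;
	}
	return false;
}

static void u32_list_purge(struct list_head *l)
{
	struct u32_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, l, list) {
		list_del(&item->list);
		kfree(item);
	}
}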
 
@@ -648,7 +705,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
 
        switch (sk->sk_state) {
        case TIPC_ESTABLISHED:
-               if (!tsk->link_cong && !tsk_conn_cong(tsk))
+               if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
                        mask |= POLLOUT;
                /* fall thru' */
        case TIPC_LISTEN:
@@ -657,7 +714,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
                        mask |= (POLLIN | POLLRDNORM);
                break;
        case TIPC_OPEN:
-               if (!tsk->link_cong)
+               if (!tsk->cong_link_cnt)
                        mask |= POLLOUT;
                if (tipc_sk_type_connectionless(sk) &&
                    (!skb_queue_empty(&sk->sk_receive_queue)))
@@ -676,63 +733,60 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
  * @sock: socket structure
  * @seq: destination address
  * @msg: message to send
- * @dsz: total length of message data
- * @timeo: timeout to wait for wakeup
+ * @dlen: length of data to send
+ * @timeout: timeout to wait for wakeup
  *
  * Called from function tipc_sendmsg(), which has done all sanity checks
  * Returns the number of bytes sent on success, or errno
  */
 static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
-                         struct msghdr *msg, size_t dsz, long timeo)
+                         struct msghdr *msg, size_t dlen, long timeout)
 {
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
+       struct tipc_msg *hdr = &tsk->phdr;
        struct net *net = sock_net(sk);
-       struct tipc_msg *mhdr = &tsk->phdr;
-       struct sk_buff_head pktchain;
-       struct iov_iter save = msg->msg_iter;
-       uint mtu;
+       int mtu = tipc_bcast_get_mtu(net);
+       struct tipc_mc_method *method = &tsk->mc_method;
+       u32 domain = addr_domain(net, TIPC_CLUSTER_SCOPE);
+       struct sk_buff_head pkts;
+       struct tipc_nlist dsts;
        int rc;
 
-       if (!timeo && tsk->link_cong)
-               return -ELINKCONG;
+       /* Block or return if any destination link is congested */
+       rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
+       if (unlikely(rc))
+               return rc;
 
-       msg_set_type(mhdr, TIPC_MCAST_MSG);
-       msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
-       msg_set_destport(mhdr, 0);
-       msg_set_destnode(mhdr, 0);
-       msg_set_nametype(mhdr, seq->type);
-       msg_set_namelower(mhdr, seq->lower);
-       msg_set_nameupper(mhdr, seq->upper);
-       msg_set_hdr_sz(mhdr, MCAST_H_SIZE);
+       /* Lookup destination nodes */
+       tipc_nlist_init(&dsts, tipc_own_addr(net));
+       tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
+                                     seq->upper, domain, &dsts);
+       if (!dsts.local && !dsts.remote)
+               return -EHOSTUNREACH;
 
-       skb_queue_head_init(&pktchain);
+       /* Build message header */
+       msg_set_type(hdr, TIPC_MCAST_MSG);
+       msg_set_hdr_sz(hdr, MCAST_H_SIZE);
+       msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
+       msg_set_destport(hdr, 0);
+       msg_set_destnode(hdr, 0);
+       msg_set_nametype(hdr, seq->type);
+       msg_set_namelower(hdr, seq->lower);
+       msg_set_nameupper(hdr, seq->upper);
 
-new_mtu:
-       mtu = tipc_bcast_get_mtu(net);
-       rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &pktchain);
-       if (unlikely(rc < 0))
-               return rc;
+       /* Build message as chain of buffers */
+       skb_queue_head_init(&pkts);
+       rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
 
-       do {
-               rc = tipc_bcast_xmit(net, &pktchain);
-               if (likely(!rc))
-                       return dsz;
-
-               if (rc == -ELINKCONG) {
-                       tsk->link_cong = 1;
-                       rc = tipc_wait_for_sndmsg(sock, &timeo);
-                       if (!rc)
-                               continue;
-               }
-               __skb_queue_purge(&pktchain);
-               if (rc == -EMSGSIZE) {
-                       msg->msg_iter = save;
-                       goto new_mtu;
-               }
-               break;
-       } while (1);
-       return rc;
+       /* Send message if build was successful */
+       if (unlikely(rc == dlen))
+               rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
+                                    &tsk->cong_link_cnt);
+
+       tipc_nlist_purge(&dsts);
+
+       return rc ? rc : dlen;
 }
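
tipc_mcast_xmit() decides per message whether to send one true broadcast or
to replicate the message as unicast to each node in dsts, steered by the
per-socket tipc_mc_method cookie (its rcast/mandatory fields are set in the
setsockopt() hunk further down). A sketch of the shape of that decision; the
adaptive threshold is an assumption, not taken from this patch:

static bool example_select_rcast(struct tipc_mc_method *method,
				 int dest_cnt, int cluster_size)
{
	if (method->mandatory)	/* user forced a method via setsockopt() */
		return method->rcast;
	/* Hypothetical adaptive policy: replicate to a few receivers,
	 * broadcast to many.
	 */
	return dest_cnt * 10 < cluster_size;
}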
 
 /**
@@ -746,7 +800,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
                       struct sk_buff_head *inputq)
 {
        struct tipc_msg *msg;
-       struct tipc_plist dports;
+       struct list_head dports;
        u32 portid;
        u32 scope = TIPC_CLUSTER_SCOPE;
        struct sk_buff_head tmpq;
@@ -754,7 +808,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
        struct sk_buff *skb, *_skb;
 
        __skb_queue_head_init(&tmpq);
-       tipc_plist_init(&dports);
+       INIT_LIST_HEAD(&dports);
 
        skb = tipc_skb_peek(arrvq, &inputq->lock);
        for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
@@ -768,8 +822,8 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
                tipc_nametbl_mc_translate(net,
                                          msg_nametype(msg), msg_namelower(msg),
                                          msg_nameupper(msg), scope, &dports);
-               portid = tipc_plist_pop(&dports);
-               for (; portid; portid = tipc_plist_pop(&dports)) {
+               portid = u32_pop(&dports);
+               for (; portid; portid = u32_pop(&dports)) {
                        _skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
                        if (_skb) {
                                msg_set_destport(buf_msg(_skb), portid);
@@ -830,31 +884,6 @@ exit:
        kfree_skb(skb);
 }
 
-static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
-{
-       DEFINE_WAIT_FUNC(wait, woken_wake_function);
-       struct sock *sk = sock->sk;
-       struct tipc_sock *tsk = tipc_sk(sk);
-       int done;
-
-       do {
-               int err = sock_error(sk);
-               if (err)
-                       return err;
-               if (sk->sk_shutdown & SEND_SHUTDOWN)
-                       return -EPIPE;
-               if (!*timeo_p)
-                       return -EAGAIN;
-               if (signal_pending(current))
-                       return sock_intr_errno(*timeo_p);
-
-               add_wait_queue(sk_sleep(sk), &wait);
-               done = sk_wait_event(sk, timeo_p, !tsk->link_cong, &wait);
-               remove_wait_queue(sk_sleep(sk), &wait);
-       } while (!done);
-       return 0;
-}
-
 /**
  * tipc_sendmsg - send message in connectionless manner
  * @sock: socket structure
@@ -881,35 +910,38 @@ static int tipc_sendmsg(struct socket *sock,
        return ret;
 }
 
-static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
+static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
 {
-       DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        struct sock *sk = sock->sk;
-       struct tipc_sock *tsk = tipc_sk(sk);
        struct net *net = sock_net(sk);
-       struct tipc_msg *mhdr = &tsk->phdr;
-       u32 dnode, dport;
-       struct sk_buff_head pktchain;
-       bool is_connectionless = tipc_sk_type_connectionless(sk);
-       struct sk_buff *skb;
+       struct tipc_sock *tsk = tipc_sk(sk);
+       DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
+       long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+       struct list_head *clinks = &tsk->cong_links;
+       bool syn = !tipc_sk_type_connectionless(sk);
+       struct tipc_msg *hdr = &tsk->phdr;
        struct tipc_name_seq *seq;
-       struct iov_iter save;
-       u32 mtu;
-       long timeo;
-       int rc;
+       struct sk_buff_head pkts;
+       u32 type, inst, domain;
+       u32 dnode, dport;
+       int mtu, rc;
 
-       if (dsz > TIPC_MAX_USER_MSG_SIZE)
+       if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
                return -EMSGSIZE;
+
        if (unlikely(!dest)) {
-               if (is_connectionless && tsk->peer.family == AF_TIPC)
-                       dest = &tsk->peer;
-               else
+               dest = &tsk->peer;
+               if (syn || dest->family != AF_TIPC)
                        return -EDESTADDRREQ;
-       } else if (unlikely(m->msg_namelen < sizeof(*dest)) ||
-                  dest->family != AF_TIPC) {
-               return -EINVAL;
+       } else if (unlikely(m->msg_namelen < sizeof(*dest) ||
+                           dest->family != AF_TIPC)) {
+               return -EINVAL;
        }
-       if (!is_connectionless) {
+
+       if (unlikely(syn)) {
                if (sk->sk_state == TIPC_LISTEN)
                        return -EPIPE;
                if (sk->sk_state != TIPC_OPEN)
@@ -921,102 +953,62 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz)
                        tsk->conn_instance = dest->addr.name.name.instance;
                }
        }
-       seq = &dest->addr.nameseq;
-       timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
 
-       if (dest->addrtype == TIPC_ADDR_MCAST) {
-               return tipc_sendmcast(sock, seq, m, dsz, timeo);
-       } else if (dest->addrtype == TIPC_ADDR_NAME) {
-               u32 type = dest->addr.name.name.type;
-               u32 inst = dest->addr.name.name.instance;
-               u32 domain = dest->addr.name.domain;
+       seq = &dest->addr.nameseq;
+       if (dest->addrtype == TIPC_ADDR_MCAST)
+               return tipc_sendmcast(sock, seq, m, dlen, timeout);
 
+       if (dest->addrtype == TIPC_ADDR_NAME) {
+               type = dest->addr.name.name.type;
+               inst = dest->addr.name.name.instance;
+               domain = dest->addr.name.domain;
                dnode = domain;
-               msg_set_type(mhdr, TIPC_NAMED_MSG);
-               msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
-               msg_set_nametype(mhdr, type);
-               msg_set_nameinst(mhdr, inst);
-               msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
+               msg_set_type(hdr, TIPC_NAMED_MSG);
+               msg_set_hdr_sz(hdr, NAMED_H_SIZE);
+               msg_set_nametype(hdr, type);
+               msg_set_nameinst(hdr, inst);
+               msg_set_lookup_scope(hdr, tipc_addr_scope(domain));
                dport = tipc_nametbl_translate(net, type, inst, &dnode);
-               msg_set_destnode(mhdr, dnode);
-               msg_set_destport(mhdr, dport);
+               msg_set_destnode(hdr, dnode);
+               msg_set_destport(hdr, dport);
                if (unlikely(!dport && !dnode))
                        return -EHOSTUNREACH;
+
        } else if (dest->addrtype == TIPC_ADDR_ID) {
                dnode = dest->addr.id.node;
-               msg_set_type(mhdr, TIPC_DIRECT_MSG);
-               msg_set_lookup_scope(mhdr, 0);
-               msg_set_destnode(mhdr, dnode);
-               msg_set_destport(mhdr, dest->addr.id.ref);
-               msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
+               msg_set_type(hdr, TIPC_DIRECT_MSG);
+               msg_set_lookup_scope(hdr, 0);
+               msg_set_destnode(hdr, dnode);
+               msg_set_destport(hdr, dest->addr.id.ref);
+               msg_set_hdr_sz(hdr, BASIC_H_SIZE);
        }
 
-       skb_queue_head_init(&pktchain);
-       save = m->msg_iter;
-new_mtu:
-       mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
-       rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &pktchain);
-       if (rc < 0)
+       /* Block or return if destination link is congested */
+       rc = tipc_wait_for_cond(sock, &timeout, !u32_find(clinks, dnode));
+       if (unlikely(rc))
                return rc;
 
-       do {
-               skb = skb_peek(&pktchain);
-               TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
-               rc = tipc_node_xmit(net, &pktchain, dnode, tsk->portid);
-               if (likely(!rc)) {
-                       if (!is_connectionless)
-                               tipc_set_sk_state(sk, TIPC_CONNECTING);
-                       return dsz;
-               }
-               if (rc == -ELINKCONG) {
-                       tsk->link_cong = 1;
-                       rc = tipc_wait_for_sndmsg(sock, &timeo);
-                       if (!rc)
-                               continue;
-               }
-               __skb_queue_purge(&pktchain);
-               if (rc == -EMSGSIZE) {
-                       m->msg_iter = save;
-                       goto new_mtu;
-               }
-               break;
-       } while (1);
-
-       return rc;
-}
+       skb_queue_head_init(&pkts);
+       mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
+       rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
+       if (unlikely(rc != dlen))
+               return rc;
 
-static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
-{
-       DEFINE_WAIT_FUNC(wait, woken_wake_function);
-       struct sock *sk = sock->sk;
-       struct tipc_sock *tsk = tipc_sk(sk);
-       int done;
+       rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
+       if (unlikely(rc == -ELINKCONG)) {
+               u32_push(clinks, dnode);
+               tsk->cong_link_cnt++;
+               rc = 0;
+       }
 
-       do {
-               int err = sock_error(sk);
-               if (err)
-                       return err;
-               if (sk->sk_state == TIPC_DISCONNECTING)
-                       return -EPIPE;
-               else if (!tipc_sk_connected(sk))
-                       return -ENOTCONN;
-               if (!*timeo_p)
-                       return -EAGAIN;
-               if (signal_pending(current))
-                       return sock_intr_errno(*timeo_p);
+       if (unlikely(syn && !rc))
+               tipc_set_sk_state(sk, TIPC_CONNECTING);
 
-               add_wait_queue(sk_sleep(sk), &wait);
-               done = sk_wait_event(sk, timeo_p,
-                                    (!tsk->link_cong &&
-                                     !tsk_conn_cong(tsk)) ||
-                                     !tipc_sk_connected(sk), &wait);
-               remove_wait_queue(sk_sleep(sk), &wait);
-       } while (!done);
-       return 0;
+       return rc ? rc : dlen;
 }
 
 /**
- * tipc_send_stream - send stream-oriented data
+ * tipc_sendstream - send stream-oriented data
  * @sock: socket structure
  * @m: data to send
  * @dsz: total length of data to be transmitted
@@ -1026,94 +1018,69 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
  * Returns the number of bytes sent on success (or partial success),
  * or errno if no data sent
  */
-static int tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
+static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
 {
        struct sock *sk = sock->sk;
        int ret;
 
        lock_sock(sk);
-       ret = __tipc_send_stream(sock, m, dsz);
+       ret = __tipc_sendstream(sock, m, dsz);
        release_sock(sk);
 
        return ret;
 }
 
-static int __tipc_send_stream(struct socket *sock, struct msghdr *m, size_t dsz)
+static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
 {
        struct sock *sk = sock->sk;
-       struct net *net = sock_net(sk);
-       struct tipc_sock *tsk = tipc_sk(sk);
-       struct tipc_msg *mhdr = &tsk->phdr;
-       struct sk_buff_head pktchain;
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
-       u32 portid = tsk->portid;
-       int rc = -EINVAL;
-       long timeo;
-       u32 dnode;
-       uint mtu, send, sent = 0;
-       struct iov_iter save;
-       int hlen = MIN_H_SIZE;
-
-       /* Handle implied connection establishment */
-       if (unlikely(dest)) {
-               rc = __tipc_sendmsg(sock, m, dsz);
-               hlen = msg_hdr_sz(mhdr);
-               if (dsz && (dsz == rc))
-                       tsk->snt_unacked = tsk_inc(tsk, dsz + hlen);
-               return rc;
-       }
-       if (dsz > (uint)INT_MAX)
-               return -EMSGSIZE;
-
-       if (unlikely(!tipc_sk_connected(sk))) {
-               if (sk->sk_state == TIPC_DISCONNECTING)
-                       return -EPIPE;
-               else
-                       return -ENOTCONN;
-       }
+       long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+       struct tipc_sock *tsk = tipc_sk(sk);
+       struct tipc_msg *hdr = &tsk->phdr;
+       struct net *net = sock_net(sk);
+       struct sk_buff_head pkts;
+       u32 dnode = tsk_peer_node(tsk);
+       int send, sent = 0;
+       int rc = 0;
 
-       timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
-       if (!timeo && tsk->link_cong)
-               return -ELINKCONG;
+       skb_queue_head_init(&pkts);
 
-       dnode = tsk_peer_node(tsk);
-       skb_queue_head_init(&pktchain);
+       if (unlikely(dlen > INT_MAX))
+               return -EMSGSIZE;
 
-next:
-       save = m->msg_iter;
-       mtu = tsk->max_pkt;
-       send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
-       rc = tipc_msg_build(mhdr, m, sent, send, mtu, &pktchain);
-       if (unlikely(rc < 0))
+       /* Handle implicit connection setup */
+       if (unlikely(dest)) {
+               rc = __tipc_sendmsg(sock, m, dlen);
+               if (dlen && (dlen == rc))
+                       tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
                return rc;
+       }
 
        do {
-               if (likely(!tsk_conn_cong(tsk))) {
-                       rc = tipc_node_xmit(net, &pktchain, dnode, portid);
-                       if (likely(!rc)) {
-                               tsk->snt_unacked += tsk_inc(tsk, send + hlen);
-                               sent += send;
-                               if (sent == dsz)
-                                       return dsz;
-                               goto next;
-                       }
-                       if (rc == -EMSGSIZE) {
-                               __skb_queue_purge(&pktchain);
-                               tsk->max_pkt = tipc_node_get_mtu(net, dnode,
-                                                                portid);
-                               m->msg_iter = save;
-                               goto next;
-                       }
-                       if (rc != -ELINKCONG)
-                               break;
+               rc = tipc_wait_for_cond(sock, &timeout,
+                                       (!tsk->cong_link_cnt &&
+                                        !tsk_conn_cong(tsk) &&
+                                        tipc_sk_connected(sk)));
+               if (unlikely(rc))
+                       break;
 
-                       tsk->link_cong = 1;
+               send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
+               rc = tipc_msg_build(hdr, m, sent, send, tsk->max_pkt, &pkts);
+               if (unlikely(rc != send))
+                       break;
+
+               rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
+               if (unlikely(rc == -ELINKCONG)) {
+                       tsk->cong_link_cnt = 1;
+                       rc = 0;
                }
-               rc = tipc_wait_for_sndpkt(sock, &timeo);
-       } while (!rc);
+               if (likely(!rc)) {
+                       tsk->snt_unacked += tsk_inc(tsk, send + MIN_H_SIZE);
+                       sent += send;
+               }
+       } while (sent < dlen && !rc);
 
-       __skb_queue_purge(&pktchain);
-       return sent ? sent : rc;
+       return rc ? rc : sent;
 }
 
 /**
@@ -1131,7 +1098,7 @@ static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
        if (dsz > TIPC_MAX_USER_MSG_SIZE)
                return -EMSGSIZE;
 
-       return tipc_send_stream(sock, m, dsz);
+       return tipc_sendstream(sock, m, dsz);
 }
 
 /* tipc_sk_finish_conn - complete the setup of a connection
@@ -1698,6 +1665,7 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
        unsigned int limit = rcvbuf_limit(sk, skb);
        int err = TIPC_OK;
        int usr = msg_user(hdr);
+       u32 onode;
 
        if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
                tipc_sk_proto_rcv(tsk, skb, xmitq);
@@ -1705,8 +1673,10 @@ static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
        }
 
        if (unlikely(usr == SOCK_WAKEUP)) {
+               onode = msg_orignode(hdr);
                kfree_skb(skb);
-               tsk->link_cong = 0;
+               u32_del(&tsk->cong_links, onode);
+               tsk->cong_link_cnt--;
                sk->sk_write_space(sk);
                return false;
        }
@@ -2114,7 +2084,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
                struct msghdr m = {NULL,};
 
                tsk_advance_rx_queue(sk);
-               __tipc_send_stream(new_sock, &m, 0);
+               __tipc_sendstream(new_sock, &m, 0);
        } else {
                __skb_dequeue(&sk->sk_receive_queue);
                __skb_queue_head(&new_sk->sk_receive_queue, buf);
@@ -2269,24 +2239,27 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
 void tipc_sk_reinit(struct net *net)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
-       const struct bucket_table *tbl;
-       struct rhash_head *pos;
+       struct rhashtable_iter iter;
        struct tipc_sock *tsk;
        struct tipc_msg *msg;
-       int i;
 
-       rcu_read_lock();
-       tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
-       for (i = 0; i < tbl->size; i++) {
-               rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
+       rhashtable_walk_enter(&tn->sk_rht, &iter);
+
+       do {
+               tsk = ERR_PTR(rhashtable_walk_start(&iter));
+               if (IS_ERR(tsk))
+                       goto walk_stop;
+
+               while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
                        spin_lock_bh(&tsk->sk.sk_lock.slock);
                        msg = &tsk->phdr;
                        msg_set_prevnode(msg, tn->own_addr);
                        msg_set_orignode(msg, tn->own_addr);
                        spin_unlock_bh(&tsk->sk.sk_lock.slock);
                }
-       }
-       rcu_read_unlock();
+
+walk_stop:
+               rhashtable_walk_stop(&iter);
+       } while (tsk == ERR_PTR(-EAGAIN));
+
+       rhashtable_walk_exit(&iter);
 }
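
For reference, the iterator contract the rewritten walk depends on,
summarized from the kernel's rhashtable API (not part of this patch):

/*
 *	rhashtable_walk_enter()	registers the iterator with the table
 *				(sleepable context)
 *	rhashtable_walk_start()	begins or resumes the walk; may return
 *				-EAGAIN if the table resized since the
 *				last stop
 *	rhashtable_walk_next()	yields entries: NULL at the end, or
 *				ERR_PTR(-EAGAIN) when a resize forces
 *				the walk to restart
 *	rhashtable_walk_stop()	pairs with each start
 *	rhashtable_walk_exit()	unregisters the iterator
 */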
 
 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
@@ -2382,18 +2355,29 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
 {
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
-       u32 value;
-       int res;
+       u32 value = 0;
+       int res = 0;
 
        if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
                return 0;
        if (lvl != SOL_TIPC)
                return -ENOPROTOOPT;
-       if (ol < sizeof(value))
-               return -EINVAL;
-       res = get_user(value, (u32 __user *)ov);
-       if (res)
-               return res;
+
+       switch (opt) {
+       case TIPC_IMPORTANCE:
+       case TIPC_SRC_DROPPABLE:
+       case TIPC_DEST_DROPPABLE:
+       case TIPC_CONN_TIMEOUT:
+               if (ol < sizeof(value))
+                       return -EINVAL;
+               res = get_user(value, (u32 __user *)ov);
+               if (res)
+                       return res;
+               break;
+       default:
+               if (ov || ol)
+                       return -EINVAL;
+       }
 
        lock_sock(sk);
 
@@ -2412,7 +2396,14 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
                break;
        case TIPC_CONN_TIMEOUT:
                tipc_sk(sk)->conn_timeout = value;
-               /* no need to set "res", since already 0 at this point */
+               break;
+       case TIPC_MCAST_BROADCAST:
+               tsk->mc_method.rcast = false;
+               tsk->mc_method.mandatory = true;
+               break;
+       case TIPC_MCAST_REPLICAST:
+               tsk->mc_method.rcast = true;
+               tsk->mc_method.mandatory = true;
                break;
        default:
                res = -EINVAL;
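
Note that the two new options are value-less: per the new default branch
above, any non-empty option buffer is rejected with -EINVAL. A userspace
sketch (TIPC_MCAST_REPLICAST comes from the TIPC UAPI header; SOL_TIPC from
linux/socket.h):

#include <sys/socket.h>
#include <linux/tipc.h>

/* Pin this socket's multicast to replicated unicast; passing NULL/0 is
 * mandatory.  TIPC_MCAST_BROADCAST works symmetrically for broadcast.
 */
static int tipc_force_replicast(int sd)
{
	return setsockopt(sd, SOL_TIPC, TIPC_MCAST_REPLICAST, NULL, 0);
}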
@@ -2575,7 +2566,7 @@ static const struct proto_ops stream_ops = {
        .shutdown       = tipc_shutdown,
        .setsockopt     = tipc_setsockopt,
        .getsockopt     = tipc_getsockopt,
-       .sendmsg        = tipc_send_stream,
+       .sendmsg        = tipc_sendstream,
        .recvmsg        = tipc_recv_stream,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage
index b58dc95f3d3535a305e529c75282360ea731f82d..46061cf48cd13506a12cb9b8fb53f64d9a56aa06 100644 (file)
@@ -113,7 +113,7 @@ static void tipc_udp_media_addr_set(struct tipc_media_addr *addr,
        memcpy(addr->value, ua, sizeof(struct udp_media_addr));
 
        if (tipc_udp_is_mcast_addr(ua))
-               addr->broadcast = 1;
+               addr->broadcast = TIPC_BROADCAST_SUPPORT;
 }
 
 /* tipc_udp_addr2str - convert ip/udp address to string */
@@ -229,7 +229,7 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
                goto out;
        }
 
-       if (!addr->broadcast || list_empty(&ub->rcast.list))
+       if (addr->broadcast != TIPC_REPLICAST_SUPPORT)
                return tipc_udp_xmit(net, skb, ub, src, dst);
 
        /* Replicast, send an skb to each configured IP address */
@@ -296,7 +296,7 @@ static int tipc_udp_rcast_add(struct tipc_bearer *b,
        else if (ntohs(addr->proto) == ETH_P_IPV6)
                pr_info("New replicast peer: %pI6\n", &rcast->addr.ipv6);
 #endif
-
+       b->bcast_addr.broadcast = TIPC_REPLICAST_SUPPORT;
        list_add_rcu(&rcast->list, &ub->rcast.list);
        return 0;
 }
@@ -681,7 +681,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
                goto err;
 
        b->bcast_addr.media_id = TIPC_MEDIA_TYPE_UDP;
-       b->bcast_addr.broadcast = 1;
+       b->bcast_addr.broadcast = TIPC_BROADCAST_SUPPORT;
        rcu_assign_pointer(b->media_ptr, ub);
        rcu_assign_pointer(ub->bearer, b);
        tipc_udp_media_addr_set(&b->addr, &local);
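
With this change addr->broadcast stops being a 0/1 flag and becomes a
capability code telling the bearer layer which distribution methods the
medium supports. A sketch of the assumed definitions (they live in a TIPC
bearer header not shown in this diff):

#define TIPC_BROADCAST_SUPPORT	1	/* medium can truly broadcast */
#define TIPC_REPLICAST_SUPPORT	2	/* replicate to each peer address */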
index cef79873b09d2051663fedf37dc52874b7f7c415..e2d18b9f910fd10050faf9571e8532c10dba98da 100644 (file)
 #include <net/checksum.h>
 #include <linux/security.h>
 #include <linux/freezer.h>
+#include <linux/file.h>
 
 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
 EXPORT_SYMBOL_GPL(unix_socket_table);
@@ -2592,6 +2593,43 @@ long unix_outq_len(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(unix_outq_len);
 
+static int unix_open_file(struct sock *sk)
+{
+       struct path path;
+       struct file *f;
+       int fd;
+
+       if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
+               return -EPERM;
+
+       unix_state_lock(sk);
+       path = unix_sk(sk)->path;
+       if (!path.dentry) {
+               unix_state_unlock(sk);
+               return -ENOENT;
+       }
+
+       path_get(&path);
+       unix_state_unlock(sk);
+
+       fd = get_unused_fd_flags(O_CLOEXEC);
+       if (fd < 0)
+               goto out;
+
+       f = dentry_open(&path, O_PATH, current_cred());
+       if (IS_ERR(f)) {
+               put_unused_fd(fd);
+               fd = PTR_ERR(f);
+               goto out;
+       }
+
+       fd_install(fd, f);
+out:
+       path_put(&path);
+
+       return fd;
+}
+
 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
        struct sock *sk = sock->sk;
@@ -2610,6 +2648,9 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                else
                        err = put_user(amount, (int __user *)arg);
                break;
+       case SIOCUNIXFILE:
+               err = unix_open_file(sk);
+               break;
        default:
                err = -ENOIOCTLCMD;
                break;
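
From userspace the new ioctl hands back a fresh O_PATH descriptor for the
inode a filesystem-bound AF_UNIX socket is attached to, so a supervisor can
fstat() or openat() relative to it without racing against renames. A sketch
(the header carrying SIOCUNIXFILE is an assumption):

#include <sys/ioctl.h>
#include <linux/un.h>	/* assumed location of the SIOCUNIXFILE define */

/* Returns a new O_PATH fd, or -1 with errno set: EPERM without
 * CAP_NET_ADMIN in the socket's netns, ENOENT if the socket is
 * unbound or abstract (no dentry), per unix_open_file() above.
 */
static int unix_backing_fd(int sd)
{
	return ioctl(sd, SIOCUNIXFILE);
}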
index 816c9331c8d2dbef32b99e0355bc68c0d15999ea..d06e5015751a4ea94dfa51fe437aa8c38f59ceaf 100644 (file)
@@ -11,6 +11,7 @@ obj-$(CONFIG_WEXT_PRIV) += wext-priv.o
 
 cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
 cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o trace.o ocb.o
+cfg80211-$(CONFIG_OF) += of.o
 cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
 cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
 cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
index 158c59ecf90a35a6af8ca353c7583e0bd0cda807..e55e05bc48053f5542696a4c2d53170dfd02577c 100644 (file)
@@ -626,7 +626,8 @@ int wiphy_register(struct wiphy *wiphy)
 
        if (WARN_ON((wiphy->interface_modes & BIT(NL80211_IFTYPE_NAN)) &&
                    (!rdev->ops->start_nan || !rdev->ops->stop_nan ||
-                    !rdev->ops->add_nan_func || !rdev->ops->del_nan_func)))
+                    !rdev->ops->add_nan_func || !rdev->ops->del_nan_func ||
+                    !(wiphy->nan_supported_bands & BIT(NL80211_BAND_2GHZ)))))
                return -EINVAL;
 
 #ifndef CONFIG_WIRELESS_WDS
@@ -1142,6 +1143,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                     wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr)
                        dev->priv_flags |= IFF_DONT_BRIDGE;
 
+               INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk);
+
                nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
                break;
        case NETDEV_GOING_DOWN:
@@ -1230,6 +1233,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
 #ifdef CONFIG_CFG80211_WEXT
                        kzfree(wdev->wext.keys);
 #endif
+                       flush_work(&wdev->disconnect_wk);
                }
                /*
                 * synchronise (so that we won't find this netdev
index af6e023020b14a09733b84560e50269317aa2537..58ca206982feafa7ddfe2c90d4483791e5057cf0 100644 (file)
@@ -228,6 +228,7 @@ struct cfg80211_event {
                        size_t resp_ie_len;
                        struct cfg80211_bss *bss;
                        int status; /* -1 = failed; 0..65535 = status code */
+                       enum nl80211_timeout_reason timeout_reason;
                } cr;
                struct {
                        const u8 *req_ie;
@@ -388,7 +389,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                               const u8 *req_ie, size_t req_ie_len,
                               const u8 *resp_ie, size_t resp_ie_len,
                               int status, bool wextev,
-                              struct cfg80211_bss *bss);
+                              struct cfg80211_bss *bss,
+                              enum nl80211_timeout_reason timeout_reason);
 void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
                             size_t ie_len, u16 reason, bool from_ap);
 int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
@@ -400,6 +402,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
                       const u8 *resp_ie, size_t resp_ie_len);
 int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
                              struct wireless_dev *wdev);
+void cfg80211_autodisconnect_wk(struct work_struct *work);
 
 /* SME implementation */
 void cfg80211_conn_work(struct work_struct *work);
@@ -430,6 +433,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
 void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
 void cfg80211_process_wdev_events(struct wireless_dev *wdev);
 
+bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
+                               u32 center_freq_khz, u32 bw_khz);
+
 /**
  * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable
  * @wiphy: the wiphy to validate against
index 5d453916a4179877ff2f7d0b45f908905b4e5d22..30fc6eb352bccdf70a9e8433af0aae82bf5157cd 100644 (file)
@@ -17,7 +17,7 @@
 static ssize_t name## _read(struct file *file, char __user *userbuf,   \
                            size_t count, loff_t *ppos)                 \
 {                                                                      \
-       struct wiphy *wiphy= file->private_data;                \
+       struct wiphy *wiphy = file->private_data;                       \
        char buf[buflen];                                               \
        int res;                                                        \
                                                                        \
@@ -29,14 +29,14 @@ static const struct file_operations name## _ops = {                 \
        .read = name## _read,                                           \
        .open = simple_open,                                            \
        .llseek = generic_file_llseek,                                  \
-};
+}
 
 DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d",
-                     wiphy->rts_threshold)
+                     wiphy->rts_threshold);
 DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d",
                      wiphy->frag_threshold);
 DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d",
-                     wiphy->retry_short)
+                     wiphy->retry_short);
 DEBUGFS_READONLY_FILE(long_retry_limit, 20, "%d",
                      wiphy->retry_long);
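
The relocated semicolons are a macro-hygiene fix: with ';' baked into the
macro body, a terminated invocation like DEBUGFS_READONLY_FILE(...); expands
to a stray empty declaration at file scope, while an unterminated one reads
as a syntax error to tools. A sketch of the pattern (name##_read is assumed
to be generated by a companion macro, as in this file):

#define DEFINE_RO_OPS(name)					\
static const struct file_operations name##_ops = {		\
	.read	= name##_read,					\
	.open	= simple_open,					\
	.llseek	= generic_file_llseek,				\
}	/* no ';' in the body ... */

DEFINE_RO_OPS(rts_threshold);	/* ... each call site supplies it */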
 
@@ -103,7 +103,7 @@ static const struct file_operations ht40allow_map_ops = {
 };
 
 #define DEBUGFS_ADD(name)                                              \
-       debugfs_create_file(#name, S_IRUGO, phyd, &rdev->wiphy, &name## _ops);
+       debugfs_create_file(#name, 0444, phyd, &rdev->wiphy, &name## _ops)
 
 void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev)
 {
index 4646cf5695b958fc461a6b884046bb944f9f13cf..22b3d999006559d7572287c2b8e6f4fad286766d 100644 (file)
@@ -48,7 +48,8 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss,
        /* update current_bss etc., consumes the bss reference */
        __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs,
                                  status_code,
-                                 status_code == WLAN_STATUS_SUCCESS, bss);
+                                 status_code == WLAN_STATUS_SUCCESS, bss,
+                                 NL80211_TIMEOUT_UNSPECIFIED);
 }
 EXPORT_SYMBOL(cfg80211_rx_assoc_resp);
 
@@ -345,6 +346,11 @@ int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
             !ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
                return 0;
 
+       if (ether_addr_equal(wdev->disconnect_bssid, bssid) ||
+           (wdev->current_bss &&
+            ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
+               wdev->conn_owner_nlportid = 0;
+
        return rdev_deauth(rdev, dev, &req);
 }
 
@@ -657,8 +663,25 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
                        return err;
        }
 
-       if (!ether_addr_equal(mgmt->sa, wdev_address(wdev)))
-               return -EINVAL;
+       if (!ether_addr_equal(mgmt->sa, wdev_address(wdev))) {
+               /* Allow random TA to be used with Public Action frames if the
+                * driver has indicated support for this. Otherwise, only allow
+                * the local address to be used.
+                */
+               if (!ieee80211_is_action(mgmt->frame_control) ||
+                   mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
+                       return -EINVAL;
+               if (!wdev->current_bss &&
+                   !wiphy_ext_feature_isset(
+                           &rdev->wiphy,
+                           NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA))
+                       return -EINVAL;
+               if (wdev->current_bss &&
+                   !wiphy_ext_feature_isset(
+                           &rdev->wiphy,
+                           NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED))
+                       return -EINVAL;
+       }
 
        /* Transmit the Action frame as requested by user space */
        return rdev_mgmt_tx(rdev, wdev, params, cookie);
index aee396b9f190bb4454844c7282fd4d3f5d85b5e7..d7f8be4e321a32eba3a615aa69a860c212511625 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright 2015-2016 Intel Deutschland GmbH
+ * Copyright 2015-2017 Intel Deutschland GmbH
  */
 
 #include <linux/if.h>
@@ -398,13 +398,18 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        },
        [NL80211_ATTR_MU_MIMO_FOLLOW_MAC_ADDR] = { .len = ETH_ALEN },
        [NL80211_ATTR_NAN_MASTER_PREF] = { .type = NLA_U8 },
-       [NL80211_ATTR_NAN_DUAL] = { .type = NLA_U8 },
+       [NL80211_ATTR_BANDS] = { .type = NLA_U32 },
        [NL80211_ATTR_NAN_FUNC] = { .type = NLA_NESTED },
        [NL80211_ATTR_FILS_KEK] = { .type = NLA_BINARY,
                                    .len = FILS_MAX_KEK_LEN },
        [NL80211_ATTR_FILS_NONCES] = { .len = 2 * FILS_NONCE_LEN },
        [NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED] = { .type = NLA_FLAG, },
        [NL80211_ATTR_BSSID] = { .len = ETH_ALEN },
+       [NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] = { .type = NLA_S8 },
+       [NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST] = {
+               .len = sizeof(struct nl80211_bss_select_rssi_adjust)
+       },
+       [NL80211_ATTR_TIMEOUT_REASON] = { .type = NLA_U32 },
 };
 
 /* policy for the key attributes */
@@ -1881,6 +1886,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
                        }
                }
 
+               if (nla_put_u32(msg, NL80211_ATTR_BANDS,
+                               rdev->wiphy.nan_supported_bands))
+                       goto nla_put_failure;
+
                /* done */
                state->split_start = 0;
                break;
@@ -3738,6 +3747,49 @@ static int nl80211_parse_beacon(struct nlattr *attrs[],
        return 0;
 }
 
+static void nl80211_check_ap_rate_selectors(struct cfg80211_ap_settings *params,
+                                           const u8 *rates)
+{
+       int i;
+
+       if (!rates)
+               return;
+
+       for (i = 0; i < rates[1]; i++) {
+               if (rates[2 + i] == BSS_MEMBERSHIP_SELECTOR_HT_PHY)
+                       params->ht_required = true;
+               if (rates[2 + i] == BSS_MEMBERSHIP_SELECTOR_VHT_PHY)
+                       params->vht_required = true;
+       }
+}
+
+/*
+ * The nl80211 API did not originally include attributes describing HT/VHT
+ * requirements and capabilities, so we parse them out of the IEs for the
+ * benefit of drivers that rebuild the IEs in firmware.
+ */
+static void nl80211_calculate_ap_params(struct cfg80211_ap_settings *params)
+{
+       const struct cfg80211_beacon_data *bcn = &params->beacon;
+       size_t ies_len = bcn->beacon_ies_len;
+       const u8 *ies = bcn->beacon_ies;
+       const u8 *rates;
+       const u8 *cap;
+
+       rates = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies, ies_len);
+       nl80211_check_ap_rate_selectors(params, rates);
+
+       rates = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, ies, ies_len);
+       nl80211_check_ap_rate_selectors(params, rates);
+
+       cap = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
+       if (cap && cap[1] >= sizeof(*params->ht_cap))
+               params->ht_cap = (void *)(cap + 2);
+       cap = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, ies, ies_len);
+       if (cap && cap[1] >= sizeof(*params->vht_cap))
+               params->vht_cap = (void *)(cap + 2);
+}
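
For reference, the IEEE 802.11 element layout that cfg80211_find_ie() returns
a pointer into, which both helpers above index (a summary, not new code):

/*
 *	ie[0]		element ID (e.g. WLAN_EID_SUPP_RATES)
 *	ie[1]		payload length in octets
 *	ie[2 + i]	payload; in the rates elements each octet is a
 *			rate or a BSS membership selector such as
 *			BSS_MEMBERSHIP_SELECTOR_HT_PHY
 *
 * Hence the rates[2 + i] indexing bounded by rates[1] in
 * nl80211_check_ap_rate_selectors(), and the cap[1] length checks
 * before casting cap + 2 to a capability struct.
 */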
+
 static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
                                   struct cfg80211_ap_settings *params)
 {
@@ -3966,6 +4018,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                        return PTR_ERR(params.acl);
        }
 
+       nl80211_calculate_ap_params(&params);
+
        wdev_lock(wdev);
        err = rdev_start_ap(rdev, dev, &params);
        if (!err) {
@@ -6791,13 +6845,10 @@ nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans,
 
                /*
                 * If scan plans are not specified,
-                * %NL80211_ATTR_SCHED_SCAN_INTERVAL must be specified. In this
+                * %NL80211_ATTR_SCHED_SCAN_INTERVAL will be specified. In this
                 * case one scan plan will be set with the specified scan
                 * interval and infinite number of iterations.
                 */
-               if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
-                       return -EINVAL;
-
                interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
                if (!interval)
                        return -EINVAL;
@@ -6866,7 +6917,7 @@ nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans,
 
 static struct cfg80211_sched_scan_request *
 nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
-                        struct nlattr **attrs)
+                        struct nlattr **attrs, int max_match_sets)
 {
        struct cfg80211_sched_scan_request *request;
        struct nlattr *attr;
@@ -6931,7 +6982,7 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
        if (!n_match_sets && default_match_rssi != NL80211_SCAN_RSSI_THOLD_OFF)
                n_match_sets = 1;
 
-       if (n_match_sets > wiphy->max_match_sets)
+       if (n_match_sets > max_match_sets)
                return ERR_PTR(-EINVAL);
 
        if (attrs[NL80211_ATTR_IE])
@@ -6969,6 +7020,12 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
        if (!n_plans || n_plans > wiphy->max_sched_scan_plans)
                return ERR_PTR(-EINVAL);
 
+       if (!wiphy_ext_feature_isset(
+                   wiphy, NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI) &&
+           (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] ||
+            attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]))
+               return ERR_PTR(-EINVAL);
+
        request = kzalloc(sizeof(*request)
                        + sizeof(*request->ssids) * n_ssids
                        + sizeof(*request->match_sets) * n_match_sets
@@ -7175,6 +7232,26 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
                request->delay =
                        nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]);
 
+       if (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]) {
+               request->relative_rssi = nla_get_s8(
+                       attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]);
+               request->relative_rssi_set = true;
+       }
+
+       if (request->relative_rssi_set &&
+           attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]) {
+               struct nl80211_bss_select_rssi_adjust *rssi_adjust;
+
+               rssi_adjust = nla_data(
+                       attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]);
+               request->rssi_adjust.band = rssi_adjust->band;
+               request->rssi_adjust.delta = rssi_adjust->delta;
+               if (!is_band_valid(wiphy, request->rssi_adjust.band)) {
+                       err = -EINVAL;
+                       goto out_free;
+               }
+       }
+
        err = nl80211_parse_sched_scan_plans(wiphy, n_plans, request, attrs);
        if (err)
                goto out_free;
@@ -7205,7 +7282,8 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
                return -EINPROGRESS;
 
        sched_scan_req = nl80211_parse_sched_scan(&rdev->wiphy, wdev,
-                                                 info->attrs);
+                                                 info->attrs,
+                                                 rdev->wiphy.max_match_sets);
 
        err = PTR_ERR_OR_ZERO(sched_scan_req);
        if (err)
@@ -8069,8 +8147,17 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
        err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
        if (!err) {
                wdev_lock(dev->ieee80211_ptr);
+
                err = cfg80211_mlme_assoc(rdev, dev, chan, bssid,
                                          ssid, ssid_len, &req);
+
+               if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
+                       dev->ieee80211_ptr->conn_owner_nlportid =
+                               info->snd_portid;
+                       memcpy(dev->ieee80211_ptr->disconnect_bssid,
+                              bssid, ETH_ALEN);
+               }
+
                wdev_unlock(dev->ieee80211_ptr);
        }
 
@@ -8549,6 +8636,12 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
                 * so we need to offset by 1.
                 */
                phy_idx = cb->args[0] - 1;
+
+               rdev = cfg80211_rdev_by_wiphy_idx(phy_idx);
+               if (!rdev) {
+                       err = -ENOENT;
+                       goto out_err;
+               }
        } else {
                struct nlattr **attrbuf = genl_family_attrbuf(&nl80211_fam);
 
@@ -8563,7 +8656,6 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
                        goto out_err;
                }
                phy_idx = rdev->wiphy_idx;
-               rdev = NULL;
 
                if (attrbuf[NL80211_ATTR_TESTDATA])
                        cb->args[1] = (long)attrbuf[NL80211_ATTR_TESTDATA];
@@ -8574,12 +8666,6 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
                data_len = nla_len((void *)cb->args[1]);
        }
 
-       rdev = cfg80211_rdev_by_wiphy_idx(phy_idx);
-       if (!rdev) {
-               err = -ENOENT;
-               goto out_err;
-       }
-
        if (!rdev->ops->testmode_dump) {
                err = -EOPNOTSUPP;
                goto out_err;
@@ -8789,11 +8875,24 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
        }
 
        wdev_lock(dev->ieee80211_ptr);
+
        err = cfg80211_connect(rdev, dev, &connect, connkeys,
                               connect.prev_bssid);
-       wdev_unlock(dev->ieee80211_ptr);
        if (err)
                kzfree(connkeys);
+
+       if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
+               dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid;
+               if (connect.bssid)
+                       memcpy(dev->ieee80211_ptr->disconnect_bssid,
+                              connect.bssid, ETH_ALEN);
+               else
+                       memset(dev->ieee80211_ptr->disconnect_bssid,
+                              0, ETH_ALEN);
+       }
+
+       wdev_unlock(dev->ieee80211_ptr);
+
        return err;
 }
 
@@ -9380,6 +9479,7 @@ nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] = {
        [NL80211_ATTR_CQM_TXE_RATE] = { .type = NLA_U32 },
        [NL80211_ATTR_CQM_TXE_PKTS] = { .type = NLA_U32 },
        [NL80211_ATTR_CQM_TXE_INTVL] = { .type = NLA_U32 },
+       [NL80211_ATTR_CQM_RSSI_LEVEL] = { .type = NLA_S32 },
 };
 
 static int nl80211_set_cqm_txe(struct genl_info *info,
@@ -9689,6 +9789,20 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg,
        if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay))
                return -ENOBUFS;
 
+       if (req->relative_rssi_set) {
+               struct nl80211_bss_select_rssi_adjust rssi_adjust;
+
+               if (nla_put_s8(msg, NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI,
+                              req->relative_rssi))
+                       return -ENOBUFS;
+
+               rssi_adjust.band = req->rssi_adjust.band;
+               rssi_adjust.delta = req->rssi_adjust.delta;
+               if (nla_put(msg, NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST,
+                           sizeof(rssi_adjust), &rssi_adjust))
+                       return -ENOBUFS;
+       }
+
        freqs = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
        if (!freqs)
                return -ENOBUFS;
@@ -9982,7 +10096,8 @@ static int nl80211_parse_wowlan_nd(struct cfg80211_registered_device *rdev,
        if (err)
                goto out;
 
-       trig->nd_config = nl80211_parse_sched_scan(&rdev->wiphy, NULL, tb);
+       trig->nd_config = nl80211_parse_sched_scan(&rdev->wiphy, NULL, tb,
+                                                  wowlan->max_nd_match_sets);
        err = PTR_ERR_OR_ZERO(trig->nd_config);
        if (err)
                trig->nd_config = NULL;
@@ -10667,15 +10782,22 @@ static int nl80211_start_nan(struct sk_buff *skb, struct genl_info *info)
        if (!info->attrs[NL80211_ATTR_NAN_MASTER_PREF])
                return -EINVAL;
 
-       if (!info->attrs[NL80211_ATTR_NAN_DUAL])
-               return -EINVAL;
-
        conf.master_pref =
                nla_get_u8(info->attrs[NL80211_ATTR_NAN_MASTER_PREF]);
        if (!conf.master_pref)
                return -EINVAL;
 
-       conf.dual = nla_get_u8(info->attrs[NL80211_ATTR_NAN_DUAL]);
+       if (info->attrs[NL80211_ATTR_BANDS]) {
+               u32 bands = nla_get_u32(info->attrs[NL80211_ATTR_BANDS]);
+
+               if (bands & ~(u32)wdev->wiphy->nan_supported_bands)
+                       return -EOPNOTSUPP;
+
+               if (bands && !(bands & BIT(NL80211_BAND_2GHZ)))
+                       return -EINVAL;
+
+               conf.bands = bands;
+       }
 
        err = rdev_start_nan(rdev, wdev, &conf);
        if (err)
@@ -11040,9 +11162,17 @@ static int nl80211_nan_change_config(struct sk_buff *skb,
                changed |= CFG80211_NAN_CONF_CHANGED_PREF;
        }
 
-       if (info->attrs[NL80211_ATTR_NAN_DUAL]) {
-               conf.dual = nla_get_u8(info->attrs[NL80211_ATTR_NAN_DUAL]);
-               changed |= CFG80211_NAN_CONF_CHANGED_DUAL;
+       if (info->attrs[NL80211_ATTR_BANDS]) {
+               u32 bands = nla_get_u32(info->attrs[NL80211_ATTR_BANDS]);
+
+               if (bands & ~(u32)wdev->wiphy->nan_supported_bands)
+                       return -EOPNOTSUPP;
+
+               if (bands && !(bands & BIT(NL80211_BAND_2GHZ)))
+                       return -EINVAL;
+
+               conf.bands = bands;
+               changed |= CFG80211_NAN_CONF_CHANGED_BANDS;
        }
 
        if (!changed)
@@ -11823,9 +11953,6 @@ static int nl80211_set_multicast_to_unicast(struct sk_buff *skb,
        const struct nlattr *nla;
        bool enabled;
 
-       if (netif_running(dev))
-               return -EBUSY;
-
        if (!rdev->ops->set_multicast_to_unicast)
                return -EOPNOTSUPP;
 
@@ -12826,7 +12953,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
        return -ENOBUFS;
 }
 
-static int nl80211_send_scan_msg(struct sk_buff *msg,
+static int nl80211_prep_scan_msg(struct sk_buff *msg,
                                 struct cfg80211_registered_device *rdev,
                                 struct wireless_dev *wdev,
                                 u32 portid, u32 seq, int flags,
@@ -12857,7 +12984,7 @@ static int nl80211_send_scan_msg(struct sk_buff *msg,
 }
 
 static int
-nl80211_send_sched_scan_msg(struct sk_buff *msg,
+nl80211_prep_sched_scan_msg(struct sk_buff *msg,
                            struct cfg80211_registered_device *rdev,
                            struct net_device *netdev,
                            u32 portid, u32 seq, int flags, u32 cmd)
@@ -12889,7 +13016,7 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
        if (!msg)
                return;
 
-       if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
+       if (nl80211_prep_scan_msg(msg, rdev, wdev, 0, 0, 0,
                                  NL80211_CMD_TRIGGER_SCAN) < 0) {
                nlmsg_free(msg);
                return;
@@ -12908,7 +13035,7 @@ struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
        if (!msg)
                return NULL;
 
-       if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
+       if (nl80211_prep_scan_msg(msg, rdev, wdev, 0, 0, 0,
                                  aborted ? NL80211_CMD_SCAN_ABORTED :
                                            NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
                nlmsg_free(msg);
@@ -12918,8 +13045,9 @@ struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
        return msg;
 }
 
-void nl80211_send_scan_result(struct cfg80211_registered_device *rdev,
-                             struct sk_buff *msg)
+/* send message created by nl80211_build_scan_msg() */
+void nl80211_send_scan_msg(struct cfg80211_registered_device *rdev,
+                          struct sk_buff *msg)
 {
        if (!msg)
                return;
@@ -12928,25 +13056,6 @@ void nl80211_send_scan_result(struct cfg80211_registered_device *rdev,
                                NL80211_MCGRP_SCAN, GFP_KERNEL);
 }
 
-void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
-                                    struct net_device *netdev)
-{
-       struct sk_buff *msg;
-
-       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-       if (!msg)
-               return;
-
-       if (nl80211_send_sched_scan_msg(msg, rdev, netdev, 0, 0, 0,
-                                       NL80211_CMD_SCHED_SCAN_RESULTS) < 0) {
-               nlmsg_free(msg);
-               return;
-       }
-
-       genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
-                               NL80211_MCGRP_SCAN, GFP_KERNEL);
-}
-
 void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
                             struct net_device *netdev, u32 cmd)
 {
@@ -12956,7 +13065,7 @@ void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
        if (!msg)
                return;
 
-       if (nl80211_send_sched_scan_msg(msg, rdev, netdev, 0, 0, 0, cmd) < 0) {
+       if (nl80211_prep_sched_scan_msg(msg, rdev, netdev, 0, 0, 0, cmd) < 0) {
                nlmsg_free(msg);
                return;
        }
@@ -13058,7 +13167,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
        struct sk_buff *msg;
        void *hdr;
 
-       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+       msg = nlmsg_new(100 + len, gfp);
        if (!msg)
                return;
 
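
This is the first of several hunks in this file that replace NLMSG_DEFAULT_SIZE (roughly a page) with an estimate of the real payload when allocating event skbs. The pattern, with the 100 bytes being the patch's assumed upper bound for the netlink/genetlink headers plus the small fixed attributes:

        /* before: ~PAGE_SIZE for every event, however small */
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);

        /* after: assumed ~100 bytes of headers and fixed attributes,
         * plus the variable-length frame or IE payload */
        msg = nlmsg_new(100 + len, gfp);
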
@@ -13205,12 +13314,14 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
                                 struct net_device *netdev, const u8 *bssid,
                                 const u8 *req_ie, size_t req_ie_len,
                                 const u8 *resp_ie, size_t resp_ie_len,
-                                int status, gfp_t gfp)
+                                int status,
+                                enum nl80211_timeout_reason timeout_reason,
+                                gfp_t gfp)
 {
        struct sk_buff *msg;
        void *hdr;
 
-       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+       msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
        if (!msg)
                return;
 
@@ -13226,7 +13337,9 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
            nla_put_u16(msg, NL80211_ATTR_STATUS_CODE,
                        status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE :
                        status) ||
-           (status < 0 && nla_put_flag(msg, NL80211_ATTR_TIMED_OUT)) ||
+           (status < 0 &&
+            (nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
+             nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON, timeout_reason))) ||
            (req_ie &&
             nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
            (resp_ie &&
@@ -13252,7 +13365,7 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
        struct sk_buff *msg;
        void *hdr;
 
-       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+       msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
        if (!msg)
                return;
 
@@ -13289,7 +13402,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
        struct sk_buff *msg;
        void *hdr;
 
-       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       msg = nlmsg_new(100 + ie_len, GFP_KERNEL);
        if (!msg)
                return;
 
@@ -13365,7 +13478,7 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr,
 
        trace_cfg80211_notify_new_peer_candidate(dev, addr);
 
-       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+       msg = nlmsg_new(100 + ie_len, gfp);
        if (!msg)
                return;
 
@@ -13736,7 +13849,7 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
        struct sk_buff *msg;
        void *hdr;
 
-       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+       msg = nlmsg_new(100 + len, gfp);
        if (!msg)
                return -ENOMEM;
 
@@ -13780,7 +13893,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
 
        trace_cfg80211_mgmt_tx_status(wdev, cookie, ack);
 
-       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+       msg = nlmsg_new(100 + len, gfp);
        if (!msg)
                return;
 
@@ -13867,11 +13980,11 @@ static void cfg80211_send_cqm(struct sk_buff *msg, gfp_t gfp)
 
 void cfg80211_cqm_rssi_notify(struct net_device *dev,
                              enum nl80211_cqm_rssi_threshold_event rssi_event,
-                             gfp_t gfp)
+                             s32 rssi_level, gfp_t gfp)
 {
        struct sk_buff *msg;
 
-       trace_cfg80211_cqm_rssi_notify(dev, rssi_event);
+       trace_cfg80211_cqm_rssi_notify(dev, rssi_event, rssi_level);
 
        if (WARN_ON(rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW &&
                    rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH))
@@ -13885,6 +13998,10 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
                        rssi_event))
                goto nla_put_failure;
 
+       if (rssi_level && nla_put_s32(msg, NL80211_ATTR_CQM_RSSI_LEVEL,
+                                     rssi_level))
+               goto nla_put_failure;
+
        cfg80211_send_cqm(msg, gfp);
 
        return;
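
The new s32 rssi_level argument lets a driver report the measured signal that crossed the threshold; a level of 0 means "unknown", and per the hunk above the NL80211_ATTR_CQM_RSSI_LEVEL attribute is then omitted. A hedged driver-side sketch (the -82 dBm reading is illustrative):

        /* report "signal fell below the configured threshold",
         * including the level that triggered the event, in dBm */
        cfg80211_cqm_rssi_notify(dev, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
                                 -82, GFP_KERNEL);
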
@@ -14535,6 +14652,8 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
 
                        if (wdev->owner_nlportid == notify->portid)
                                schedule_destroy_work = true;
+                       else if (wdev->conn_owner_nlportid == notify->portid)
+                               schedule_work(&wdev->disconnect_wk);
                }
 
                spin_lock_bh(&rdev->beacon_registrations_lock);
@@ -14589,7 +14708,7 @@ void cfg80211_ft_event(struct net_device *netdev,
        if (!ft_event->target_ap)
                return;
 
-       msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL);
        if (!msg)
                return;
 
index 7e3821d7fcc5eb185359d3b9c45bd3d526fd36c2..e488dca87423eb7c85fc80d408c3d45f91e9a97a 100644 (file)
@@ -14,12 +14,10 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
                             struct wireless_dev *wdev);
 struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
                                       struct wireless_dev *wdev, bool aborted);
-void nl80211_send_scan_result(struct cfg80211_registered_device *rdev,
-                             struct sk_buff *msg);
+void nl80211_send_scan_msg(struct cfg80211_registered_device *rdev,
+                          struct sk_buff *msg);
 void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
                             struct net_device *netdev, u32 cmd);
-void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
-                                    struct net_device *netdev);
 void nl80211_common_reg_change_event(enum nl80211_commands cmd_id,
                                     struct regulatory_request *request);
 
@@ -58,7 +56,9 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
                                 struct net_device *netdev, const u8 *bssid,
                                 const u8 *req_ie, size_t req_ie_len,
                                 const u8 *resp_ie, size_t resp_ie_len,
-                                int status, gfp_t gfp);
+                                int status,
+                                enum nl80211_timeout_reason timeout_reason,
+                                gfp_t gfp);
 void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
                         struct net_device *netdev, const u8 *bssid,
                         const u8 *req_ie, size_t req_ie_len,
diff --git a/net/wireless/of.c b/net/wireless/of.c
new file mode 100644 (file)
index 0000000..de221f0
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2017 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/of.h>
+#include <net/cfg80211.h>
+#include "core.h"
+
+static bool wiphy_freq_limits_valid_chan(struct wiphy *wiphy,
+                                        struct ieee80211_freq_range *freq_limits,
+                                        unsigned int n_freq_limits,
+                                        struct ieee80211_channel *chan)
+{
+       u32 bw = MHZ_TO_KHZ(20);
+       int i;
+
+       for (i = 0; i < n_freq_limits; i++) {
+               struct ieee80211_freq_range *limit = &freq_limits[i];
+
+               if (cfg80211_does_bw_fit_range(limit,
+                                              MHZ_TO_KHZ(chan->center_freq),
+                                              bw))
+                       return true;
+       }
+
+       return false;
+}
+
+static void wiphy_freq_limits_apply(struct wiphy *wiphy,
+                                   struct ieee80211_freq_range *freq_limits,
+                                   unsigned int n_freq_limits)
+{
+       enum nl80211_band band;
+       int i;
+
+       if (WARN_ON(!n_freq_limits))
+               return;
+
+       for (band = 0; band < NUM_NL80211_BANDS; band++) {
+               struct ieee80211_supported_band *sband = wiphy->bands[band];
+
+               if (!sband)
+                       continue;
+
+               for (i = 0; i < sband->n_channels; i++) {
+                       struct ieee80211_channel *chan = &sband->channels[i];
+
+                       if (chan->flags & IEEE80211_CHAN_DISABLED)
+                               continue;
+
+                       if (!wiphy_freq_limits_valid_chan(wiphy, freq_limits,
+                                                         n_freq_limits,
+                                                         chan)) {
+                               pr_debug("Disabling freq %d MHz as it's out of OF limits\n",
+                                        chan->center_freq);
+                               chan->flags |= IEEE80211_CHAN_DISABLED;
+                       }
+               }
+       }
+}
+
+void wiphy_read_of_freq_limits(struct wiphy *wiphy)
+{
+       struct device *dev = wiphy_dev(wiphy);
+       struct device_node *np;
+       struct property *prop;
+       struct ieee80211_freq_range *freq_limits;
+       unsigned int n_freq_limits;
+       const __be32 *p;
+       int len, i;
+       int err = 0;
+
+       if (!dev)
+               return;
+       np = dev_of_node(dev);
+       if (!np)
+               return;
+
+       prop = of_find_property(np, "ieee80211-freq-limit", &len);
+       if (!prop)
+               return;
+
+       if (!len || len % sizeof(u32) || len / sizeof(u32) % 2) {
+               dev_err(dev, "ieee80211-freq-limit wrong format\n");
+               return;
+       }
+       n_freq_limits = len / sizeof(u32) / 2;
+
+       freq_limits = kcalloc(n_freq_limits, sizeof(*freq_limits), GFP_KERNEL);
+       if (!freq_limits) {
+               err = -ENOMEM;
+               goto out_kfree;
+       }
+
+       p = NULL;
+       for (i = 0; i < n_freq_limits; i++) {
+               struct ieee80211_freq_range *limit = &freq_limits[i];
+
+               p = of_prop_next_u32(prop, p, &limit->start_freq_khz);
+               if (!p) {
+                       err = -EINVAL;
+                       goto out_kfree;
+               }
+
+               p = of_prop_next_u32(prop, p, &limit->end_freq_khz);
+               if (!p) {
+                       err = -EINVAL;
+                       goto out_kfree;
+               }
+
+               if (!limit->start_freq_khz ||
+                   !limit->end_freq_khz ||
+                   limit->start_freq_khz >= limit->end_freq_khz) {
+                       err = -EINVAL;
+                       goto out_kfree;
+               }
+       }
+
+       wiphy_freq_limits_apply(wiphy, freq_limits, n_freq_limits);
+
+out_kfree:
+       kfree(freq_limits);
+       if (err)
+               dev_err(dev, "Failed to get limits: %d\n", err);
+}
+EXPORT_SYMBOL(wiphy_read_of_freq_limits);
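
wiphy_read_of_freq_limits() parses "ieee80211-freq-limit" as start/end pairs in kHz and disables every enabled channel whose 20 MHz bandwidth does not fit any of the given ranges. It is presumably meant to be called by a driver once wiphy->bands[] is populated and before wiphy_register(); a sketch under that assumption, with the band structures being hypothetical driver data:

        /* hypothetical driver probe path */
        wiphy->bands[NL80211_BAND_2GHZ] = &drv_band_2ghz;
        wiphy->bands[NL80211_BAND_5GHZ] = &drv_band_5ghz;
        wiphy_read_of_freq_limits(wiphy);   /* no-op without the DT property */
        err = wiphy_register(wiphy);
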
index 5dbac37497386fffb23de9bf5fe384891f874eca..753efcd51fa3495c66e7063d3ce6c1aca7fd9d14 100644 (file)
@@ -748,21 +748,6 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd)
        return true;
 }
 
-static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range,
-                           u32 center_freq_khz, u32 bw_khz)
-{
-       u32 start_freq_khz, end_freq_khz;
-
-       start_freq_khz = center_freq_khz - (bw_khz/2);
-       end_freq_khz = center_freq_khz + (bw_khz/2);
-
-       if (start_freq_khz >= freq_range->start_freq_khz &&
-           end_freq_khz <= freq_range->end_freq_khz)
-               return true;
-
-       return false;
-}
-
 /**
  * freq_in_rule_band - tells us if a frequency is in a frequency band
  * @freq_range: frequency rule we want to query
@@ -1070,7 +1055,7 @@ freq_reg_info_regd(u32 center_freq,
                if (!band_rule_found)
                        band_rule_found = freq_in_rule_band(fr, center_freq);
 
-               bw_fits = reg_does_bw_fit(fr, center_freq, bw);
+               bw_fits = cfg80211_does_bw_fit_range(fr, center_freq, bw);
 
                if (band_rule_found && bw_fits)
                        return rr;
@@ -1138,11 +1123,13 @@ static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd
                max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
        /* If we get a reg_rule we can assume that at least 5 MHz fits */
-       if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
-                            MHZ_TO_KHZ(10)))
+       if (!cfg80211_does_bw_fit_range(freq_range,
+                                       MHZ_TO_KHZ(chan->center_freq),
+                                       MHZ_TO_KHZ(10)))
                bw_flags |= IEEE80211_CHAN_NO_10MHZ;
-       if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
-                            MHZ_TO_KHZ(20)))
+       if (!cfg80211_does_bw_fit_range(freq_range,
+                                       MHZ_TO_KHZ(chan->center_freq),
+                                       MHZ_TO_KHZ(20)))
                bw_flags |= IEEE80211_CHAN_NO_20MHZ;
 
        if (max_bandwidth_khz < MHZ_TO_KHZ(10))
index 35ad69fd08383a2a8392170d2a580fc4549276d7..21be56b3128ee74c4119ee67d5f0b85fecfe0b2e 100644 (file)
@@ -227,7 +227,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
        ASSERT_RTNL();
 
        if (rdev->scan_msg) {
-               nl80211_send_scan_result(rdev, rdev->scan_msg);
+               nl80211_send_scan_msg(rdev, rdev->scan_msg);
                rdev->scan_msg = NULL;
                return;
        }
@@ -273,7 +273,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
        if (!send_message)
                rdev->scan_msg = msg;
        else
-               nl80211_send_scan_result(rdev, msg);
+               nl80211_send_scan_msg(rdev, msg);
 }
 
 void __cfg80211_scan_done(struct work_struct *wk)
@@ -321,7 +321,8 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
                        spin_unlock_bh(&rdev->bss_lock);
                        request->scan_start = jiffies;
                }
-               nl80211_send_sched_scan_results(rdev, request->dev);
+               nl80211_send_sched_scan(rdev, request->dev,
+                                       NL80211_CMD_SCHED_SCAN_RESULTS);
        }
 
        rtnl_unlock();
@@ -1147,7 +1148,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
        else
                rcu_assign_pointer(tmp.pub.beacon_ies, ies);
        rcu_assign_pointer(tmp.pub.ies, ies);
-       
+
        memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN);
        tmp.pub.channel = channel;
        tmp.pub.scan_width = data->scan_width;
index 5e0d19380302c73cae700cc43b2751a12979ff2a..b347e63d7aaa6814f524d3b080a005a88966d65d 100644 (file)
@@ -34,10 +34,11 @@ struct cfg80211_conn {
                CFG80211_CONN_SCAN_AGAIN,
                CFG80211_CONN_AUTHENTICATE_NEXT,
                CFG80211_CONN_AUTHENTICATING,
-               CFG80211_CONN_AUTH_FAILED,
+               CFG80211_CONN_AUTH_FAILED_TIMEOUT,
                CFG80211_CONN_ASSOCIATE_NEXT,
                CFG80211_CONN_ASSOCIATING,
                CFG80211_CONN_ASSOC_FAILED,
+               CFG80211_CONN_ASSOC_FAILED_TIMEOUT,
                CFG80211_CONN_DEAUTH,
                CFG80211_CONN_ABANDON,
                CFG80211_CONN_CONNECTED,
@@ -140,7 +141,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
        return err;
 }
 
-static int cfg80211_conn_do_work(struct wireless_dev *wdev)
+static int cfg80211_conn_do_work(struct wireless_dev *wdev,
+                                enum nl80211_timeout_reason *treason)
 {
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        struct cfg80211_connect_params *params;
@@ -171,7 +173,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
                                          NULL, 0,
                                          params->key, params->key_len,
                                          params->key_idx, NULL, 0);
-       case CFG80211_CONN_AUTH_FAILED:
+       case CFG80211_CONN_AUTH_FAILED_TIMEOUT:
+               *treason = NL80211_TIMEOUT_AUTH;
                return -ENOTCONN;
        case CFG80211_CONN_ASSOCIATE_NEXT:
                if (WARN_ON(!rdev->ops->assoc))
@@ -198,6 +201,9 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
                                             WLAN_REASON_DEAUTH_LEAVING,
                                             false);
                return err;
+       case CFG80211_CONN_ASSOC_FAILED_TIMEOUT:
+               *treason = NL80211_TIMEOUT_ASSOC;
+               /* fall through */
        case CFG80211_CONN_ASSOC_FAILED:
                cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
                                     NULL, 0,
@@ -223,6 +229,7 @@ void cfg80211_conn_work(struct work_struct *work)
                container_of(work, struct cfg80211_registered_device, conn_work);
        struct wireless_dev *wdev;
        u8 bssid_buf[ETH_ALEN], *bssid = NULL;
+       enum nl80211_timeout_reason treason;
 
        rtnl_lock();
 
@@ -244,10 +251,12 @@ void cfg80211_conn_work(struct work_struct *work)
                        memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN);
                        bssid = bssid_buf;
                }
-               if (cfg80211_conn_do_work(wdev)) {
+               treason = NL80211_TIMEOUT_UNSPECIFIED;
+               if (cfg80211_conn_do_work(wdev, &treason)) {
                        __cfg80211_connect_result(
                                        wdev->netdev, bssid,
-                                       NULL, 0, NULL, 0, -1, false, NULL);
+                                       NULL, 0, NULL, 0, -1, false, NULL,
+                                       treason);
                }
                wdev_unlock(wdev);
        }
@@ -352,7 +361,8 @@ void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len)
        } else if (status_code != WLAN_STATUS_SUCCESS) {
                __cfg80211_connect_result(wdev->netdev, mgmt->bssid,
                                          NULL, 0, NULL, 0,
-                                         status_code, false, NULL);
+                                         status_code, false, NULL,
+                                         NL80211_TIMEOUT_UNSPECIFIED);
        } else if (wdev->conn->state == CFG80211_CONN_AUTHENTICATING) {
                wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT;
                schedule_work(&rdev->conn_work);
@@ -400,7 +410,7 @@ void cfg80211_sme_auth_timeout(struct wireless_dev *wdev)
        if (!wdev->conn)
                return;
 
-       wdev->conn->state = CFG80211_CONN_AUTH_FAILED;
+       wdev->conn->state = CFG80211_CONN_AUTH_FAILED_TIMEOUT;
        schedule_work(&rdev->conn_work);
 }
 
@@ -422,7 +432,7 @@ void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev)
        if (!wdev->conn)
                return;
 
-       wdev->conn->state = CFG80211_CONN_ASSOC_FAILED;
+       wdev->conn->state = CFG80211_CONN_ASSOC_FAILED_TIMEOUT;
        schedule_work(&rdev->conn_work);
 }
 
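
Splitting CFG80211_CONN_AUTH_FAILED into a _TIMEOUT variant (and adding CFG80211_CONN_ASSOC_FAILED_TIMEOUT) lets cfg80211_conn_do_work() say why a connect attempt died; the reason travels through __cfg80211_connect_result() into the NL80211_ATTR_TIMEOUT_REASON attribute added in the nl80211 hunks earlier. Condensed, this is the flow wired up in cfg80211_conn_work() above:

        enum nl80211_timeout_reason treason = NL80211_TIMEOUT_UNSPECIFIED;

        /* sets treason to NL80211_TIMEOUT_AUTH or NL80211_TIMEOUT_ASSOC
         * when the corresponding step timed out */
        if (cfg80211_conn_do_work(wdev, &treason))
                __cfg80211_connect_result(wdev->netdev, bssid, NULL, 0,
                                          NULL, 0, -1, false, NULL, treason);
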
@@ -564,7 +574,9 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
 
        /* we're good if we have a matching bss struct */
        if (bss) {
-               err = cfg80211_conn_do_work(wdev);
+               enum nl80211_timeout_reason treason;
+
+               err = cfg80211_conn_do_work(wdev, &treason);
                cfg80211_put_bss(wdev->wiphy, bss);
        } else {
                /* otherwise we'll need to scan for the AP first */
@@ -661,7 +673,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                               const u8 *req_ie, size_t req_ie_len,
                               const u8 *resp_ie, size_t resp_ie_len,
                               int status, bool wextev,
-                              struct cfg80211_bss *bss)
+                              struct cfg80211_bss *bss,
+                              enum nl80211_timeout_reason timeout_reason)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        const u8 *country_ie;
@@ -680,7 +693,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
        nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev,
                                    bssid, req_ie, req_ie_len,
                                    resp_ie, resp_ie_len,
-                                   status, GFP_KERNEL);
+                                   status, timeout_reason, GFP_KERNEL);
 
 #ifdef CONFIG_CFG80211_WEXT
        if (wextev) {
@@ -727,6 +740,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                kzfree(wdev->connect_keys);
                wdev->connect_keys = NULL;
                wdev->ssid_len = 0;
+               wdev->conn_owner_nlportid = 0;
                if (bss) {
                        cfg80211_unhold_bss(bss_from_pub(bss));
                        cfg80211_put_bss(wdev->wiphy, bss);
@@ -770,7 +784,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
                          struct cfg80211_bss *bss, const u8 *req_ie,
                          size_t req_ie_len, const u8 *resp_ie,
-                         size_t resp_ie_len, int status, gfp_t gfp)
+                         size_t resp_ie_len, int status, gfp_t gfp,
+                         enum nl80211_timeout_reason timeout_reason)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
@@ -810,6 +825,7 @@ void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
                cfg80211_hold_bss(bss_from_pub(bss));
        ev->cr.bss = bss;
        ev->cr.status = status;
+       ev->cr.timeout_reason = timeout_reason;
 
        spin_lock_irqsave(&wdev->event_lock, flags);
        list_add_tail(&ev->list, &wdev->event_list);
@@ -955,6 +971,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
 
        wdev->current_bss = NULL;
        wdev->ssid_len = 0;
+       wdev->conn_owner_nlportid = 0;
 
        nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
 
@@ -1098,6 +1115,8 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
        kzfree(wdev->connect_keys);
        wdev->connect_keys = NULL;
 
+       wdev->conn_owner_nlportid = 0;
+
        if (wdev->conn)
                err = cfg80211_sme_disconnect(wdev, reason);
        else if (!rdev->ops->disconnect)
@@ -1107,3 +1126,32 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
 
        return err;
 }
+
+/*
+ * Used to clean up after the connection / connection attempt owner socket
+ * disconnects
+ */
+void cfg80211_autodisconnect_wk(struct work_struct *work)
+{
+       struct wireless_dev *wdev =
+               container_of(work, struct wireless_dev, disconnect_wk);
+       struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+
+       wdev_lock(wdev);
+
+       if (wdev->conn_owner_nlportid) {
+               /*
+                * Use disconnect_bssid if still connecting and ops->disconnect
+                * not implemented.  Otherwise we can use cfg80211_disconnect.
+                */
+               if (rdev->ops->disconnect || wdev->current_bss)
+                       cfg80211_disconnect(rdev, wdev->netdev,
+                                           WLAN_REASON_DEAUTH_LEAVING, true);
+               else
+                       cfg80211_mlme_deauth(rdev, wdev->netdev,
+                                            wdev->disconnect_bssid, NULL, 0,
+                                            WLAN_REASON_DEAUTH_LEAVING, false);
+       }
+
+       wdev_unlock(wdev);
+}
index 14b3f007826d91da6c5a71aee105b735eb9a2071..16b6b5988be969299c34a9881f258a300b366e2c 100644 (file)
@@ -39,9 +39,11 @@ SHOW_FMT(address_mask, "%pM", wiphy.addr_mask);
 
 static ssize_t name_show(struct device *dev,
                         struct device_attribute *attr,
-                        char *buf) {
+                        char *buf)
+{
        struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy;
-       return sprintf(buf, "%s\n", dev_name(&wiphy->dev));
+
+       return sprintf(buf, "%s\n", wiphy_name(wiphy));
 }
 static DEVICE_ATTR_RO(name);
 
index ea1b47e04fa474b601bd34a77d1e6cc05cf0e176..776e80cef9b4ee2761681f3884427a8343e6050a 100644 (file)
@@ -1915,18 +1915,18 @@ TRACE_EVENT(rdev_start_nan,
                WIPHY_ENTRY
                WDEV_ENTRY
                __field(u8, master_pref)
-               __field(u8, dual);
+               __field(u8, bands);
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
                WDEV_ASSIGN;
                __entry->master_pref = conf->master_pref;
-               __entry->dual = conf->dual;
+               __entry->bands = conf->bands;
        ),
        TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT
-                 ", master preference: %u, dual: %d",
+                 ", master preference: %u, bands: 0x%0x",
                  WIPHY_PR_ARG, WDEV_PR_ARG, __entry->master_pref,
-                 __entry->dual)
+                 __entry->bands)
 );
 
 TRACE_EVENT(rdev_nan_change_conf,
@@ -1937,20 +1937,20 @@ TRACE_EVENT(rdev_nan_change_conf,
                WIPHY_ENTRY
                WDEV_ENTRY
                __field(u8, master_pref)
-               __field(u8, dual);
+               __field(u8, bands);
                __field(u32, changes);
        ),
        TP_fast_assign(
                WIPHY_ASSIGN;
                WDEV_ASSIGN;
                __entry->master_pref = conf->master_pref;
-               __entry->dual = conf->dual;
+               __entry->bands = conf->bands;
                __entry->changes = changes;
        ),
        TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT
-                 ", master preference: %u, dual: %d, changes: %x",
+                 ", master preference: %u, bands: 0x%0x, changes: %x",
                  WIPHY_PR_ARG, WDEV_PR_ARG, __entry->master_pref,
-                 __entry->dual, __entry->changes)
+                 __entry->bands, __entry->changes)
 );
 
 DEFINE_EVENT(wiphy_wdev_evt, rdev_stop_nan,
@@ -2490,18 +2490,21 @@ TRACE_EVENT(cfg80211_mgmt_tx_status,
 
 TRACE_EVENT(cfg80211_cqm_rssi_notify,
        TP_PROTO(struct net_device *netdev,
-                enum nl80211_cqm_rssi_threshold_event rssi_event),
-       TP_ARGS(netdev, rssi_event),
+                enum nl80211_cqm_rssi_threshold_event rssi_event,
+                s32 rssi_level),
+       TP_ARGS(netdev, rssi_event, rssi_level),
        TP_STRUCT__entry(
                NETDEV_ENTRY
                __field(enum nl80211_cqm_rssi_threshold_event, rssi_event)
+               __field(s32, rssi_level)
        ),
        TP_fast_assign(
                NETDEV_ASSIGN;
                __entry->rssi_event = rssi_event;
+               __entry->rssi_level = rssi_level;
        ),
-       TP_printk(NETDEV_PR_FMT ", rssi event: %d",
-                 NETDEV_PR_ARG, __entry->rssi_event)
+       TP_printk(NETDEV_PR_FMT ", rssi event: %d, level: %d",
+                 NETDEV_PR_ARG, __entry->rssi_event, __entry->rssi_level)
 );
 
 TRACE_EVENT(cfg80211_reg_can_beacon,
index e9d040d29846f87517c0b6b40f0ff9dbeca63a05..68e5f2ecee1aa22f17ab9a55eb566124e585740b 100644 (file)
@@ -114,8 +114,7 @@ int ieee80211_frequency_to_channel(int freq)
 }
 EXPORT_SYMBOL(ieee80211_frequency_to_channel);
 
-struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
-                                                 int freq)
+struct ieee80211_channel *ieee80211_get_channel(struct wiphy *wiphy, int freq)
 {
        enum nl80211_band band;
        struct ieee80211_supported_band *sband;
@@ -135,14 +134,13 @@ struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
 
        return NULL;
 }
-EXPORT_SYMBOL(__ieee80211_get_channel);
+EXPORT_SYMBOL(ieee80211_get_channel);
 
-static void set_mandatory_flags_band(struct ieee80211_supported_band *sband,
-                                    enum nl80211_band band)
+static void set_mandatory_flags_band(struct ieee80211_supported_band *sband)
 {
        int i, want;
 
-       switch (band) {
+       switch (sband->band) {
        case NL80211_BAND_5GHZ:
                want = 3;
                for (i = 0; i < sband->n_bitrates; i++) {
@@ -192,6 +190,7 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband,
                WARN_ON((sband->ht_cap.mcs.rx_mask[0] & 0x1e) != 0x1e);
                break;
        case NUM_NL80211_BANDS:
+       default:
                WARN_ON(1);
                break;
        }
@@ -203,7 +202,7 @@ void ieee80211_set_bitrate_flags(struct wiphy *wiphy)
 
        for (band = 0; band < NUM_NL80211_BANDS; band++)
                if (wiphy->bands[band])
-                       set_mandatory_flags_band(wiphy->bands[band], band);
+                       set_mandatory_flags_band(wiphy->bands[band]);
 }
 
 bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher)
@@ -619,8 +618,6 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
 
                if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC))
                        return -ENOMEM;
-
-               skb->truesize += head_need;
        }
 
        if (encaps_data) {
@@ -952,7 +949,7 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev)
                                ev->cr.resp_ie, ev->cr.resp_ie_len,
                                ev->cr.status,
                                ev->cr.status == WLAN_STATUS_SUCCESS,
-                               ev->cr.bss);
+                               ev->cr.bss, ev->cr.timeout_reason);
                        break;
                case EVENT_ROAMED:
                        __cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie,
@@ -1848,6 +1845,21 @@ void cfg80211_free_nan_func(struct cfg80211_nan_func *f)
 }
 EXPORT_SYMBOL(cfg80211_free_nan_func);
 
+bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
+                               u32 center_freq_khz, u32 bw_khz)
+{
+       u32 start_freq_khz, end_freq_khz;
+
+       start_freq_khz = center_freq_khz - (bw_khz / 2);
+       end_freq_khz = center_freq_khz + (bw_khz / 2);
+
+       if (start_freq_khz >= freq_range->start_freq_khz &&
+           end_freq_khz <= freq_range->end_freq_khz)
+               return true;
+
+       return false;
+}
+
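
cfg80211_does_bw_fit_range() checks that the whole bandwidth, centred on center_freq_khz, lies inside the range. For example, a 20 MHz channel at 2437 MHz (channel 6) spans 2427000-2447000 kHz and therefore fits a 2400000-2483500 kHz rule. A standalone sketch of the same arithmetic:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct freq_range { uint32_t start_khz, end_khz; };

/* same arithmetic as cfg80211_does_bw_fit_range() above */
static bool bw_fits(const struct freq_range *r, uint32_t center_khz,
                    uint32_t bw_khz)
{
        uint32_t start = center_khz - bw_khz / 2;
        uint32_t end = center_khz + bw_khz / 2;

        return start >= r->start_khz && end <= r->end_khz;
}

int main(void)
{
        struct freq_range rule = { 2400000, 2483500 };

        /* channel 6: 2437 MHz centre, 20 MHz wide */
        printf("%d\n", bw_fits(&rule, 2437000, 20000));  /* prints 1 */
        return 0;
}
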
 /* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
 /* Ethernet-II snap header (RFC1042 for most EtherTypes) */
 const unsigned char rfc1042_header[] __aligned(2) =
index 6250b1cfcde58758bb480758d1c61217d37a7cd1..1a4db6790e2077d4ea922d1c32cde5352543fa2c 100644 (file)
@@ -1119,3 +1119,70 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
        return ret;
 }
 #endif
+
+char *iwe_stream_add_event(struct iw_request_info *info, char *stream,
+                          char *ends, struct iw_event *iwe, int event_len)
+{
+       int lcp_len = iwe_stream_lcp_len(info);
+
+       event_len = iwe_stream_event_len_adjust(info, event_len);
+
+       /* Check that the full event fits before the end of the buffer */
+       if (likely((stream + event_len) < ends)) {
+               iwe->len = event_len;
+               /* Beware of alignment issues on 64-bit builds */
+               memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN);
+               memcpy(stream + lcp_len, &iwe->u,
+                      event_len - lcp_len);
+               stream += event_len;
+       }
+
+       return stream;
+}
+EXPORT_SYMBOL(iwe_stream_add_event);
+
+char *iwe_stream_add_point(struct iw_request_info *info, char *stream,
+                          char *ends, struct iw_event *iwe, char *extra)
+{
+       int event_len = iwe_stream_point_len(info) + iwe->u.data.length;
+       int point_len = iwe_stream_point_len(info);
+       int lcp_len   = iwe_stream_lcp_len(info);
+
+       /* Check that the event and its payload fit before the end of the buffer */
+       if (likely((stream + event_len) < ends)) {
+               iwe->len = event_len;
+               memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN);
+               memcpy(stream + lcp_len,
+                      ((char *) &iwe->u) + IW_EV_POINT_OFF,
+                      IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
+               if (iwe->u.data.length && extra)
+                       memcpy(stream + point_len, extra, iwe->u.data.length);
+               stream += event_len;
+       }
+
+       return stream;
+}
+EXPORT_SYMBOL(iwe_stream_add_point);
+
+char *iwe_stream_add_value(struct iw_request_info *info, char *event,
+                          char *value, char *ends, struct iw_event *iwe,
+                          int event_len)
+{
+       int lcp_len = iwe_stream_lcp_len(info);
+
+       /* Don't duplicate LCP */
+       event_len -= IW_EV_LCP_LEN;
+
+       /* Check that the value fits before the end of the buffer */
+       if (likely((value + event_len) < ends)) {
+               /* Add new value */
+               memcpy(value, &iwe->u, event_len);
+               value += event_len;
+               /* Patch LCP */
+               iwe->len = value - event;
+               memcpy(event, (char *) iwe, lcp_len);
+       }
+
+       return value;
+}
+EXPORT_SYMBOL(iwe_stream_add_value);
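
These three helpers match the static inlines from include/net/iw_handler.h, apparently moved out of line so every wext user shares a single copy. Typical use is filling a scan-results buffer; a hedged sketch, with info, current_ev and end_buf assumed to come from the ioctl handler:

        static char *add_freq_event(struct iw_request_info *info,
                                    char *current_ev, char *end_buf)
        {
                struct iw_event iwe;

                iwe.cmd = SIOCGIWFREQ;  /* channel frequency event */
                iwe.u.freq.m = 2437;    /* mantissa */
                iwe.u.freq.e = 6;       /* exponent: 2437 * 10^6 Hz */
                iwe.u.freq.i = 0;
                iwe.u.freq.flags = 0;
                return iwe_stream_add_event(info, current_ev, end_buf,
                                            &iwe, IW_EV_FREQ_LEN);
        }
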
index 995163830a61f582c11b9234b370bb2c3766f7f0..c434f193f39aa6a1215c2791e25a891faed6b573 100644 (file)
@@ -105,30 +105,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
                        goto out;
        }
 
-
        wdev->wext.connect.channel = chan;
-
-       /*
-        * SSID is not set, we just want to switch monitor channel,
-        * this is really just backward compatibility, if the SSID
-        * is set then we use the channel to select the BSS to use
-        * to connect to instead. If we were connected on another
-        * channel we disconnected above and reconnect below.
-        */
-       if (chan && !wdev->wext.connect.ssid_len) {
-               struct cfg80211_chan_def chandef = {
-                       .width = NL80211_CHAN_WIDTH_20_NOHT,
-                       .center_freq1 = freq,
-               };
-
-               chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq);
-               if (chandef.chan)
-                       err = cfg80211_set_monitor_channel(rdev, &chandef);
-               else
-                       err = -EINVAL;
-               goto out;
-       }
-
        err = cfg80211_mgd_wext_connect(rdev, wdev);
  out:
        wdev_unlock(wdev);
index bda1a13628a8143b812554d0d29fd83cac3a1e36..286ed25c1a698ae9bb2b89b110d0469475a1e2de 100644 (file)
@@ -4,6 +4,11 @@
 config XFRM
        bool
        depends on NET
+       select GRO_CELLS
+
+config XFRM_OFFLOAD
+       bool
+       depends on XFRM
 
 config XFRM_ALGO
        tristate
index 6e3f0254d8a11bcc5075915abf953fcc21008aec..46bdb4fbed0bb34a5d6ae40991b3fda6e5dff82c 100644 (file)
 static struct kmem_cache *secpath_cachep __read_mostly;
 
 static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
-static struct xfrm_input_afinfo __rcu *xfrm_input_afinfo[NPROTO];
+static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[AF_INET6 + 1];
 
-int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo)
+static struct gro_cells gro_cells;
+static struct net_device xfrm_napi_dev;
+
+int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
 {
        int err = 0;
 
-       if (unlikely(afinfo == NULL))
-               return -EINVAL;
-       if (unlikely(afinfo->family >= NPROTO))
+       if (WARN_ON(afinfo->family >= ARRAY_SIZE(xfrm_input_afinfo)))
                return -EAFNOSUPPORT;
+
        spin_lock_bh(&xfrm_input_afinfo_lock);
        if (unlikely(xfrm_input_afinfo[afinfo->family] != NULL))
                err = -EEXIST;
@@ -39,14 +41,10 @@ int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo)
 }
 EXPORT_SYMBOL(xfrm_input_register_afinfo);
 
-int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo)
+int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo)
 {
        int err = 0;
 
-       if (unlikely(afinfo == NULL))
-               return -EINVAL;
-       if (unlikely(afinfo->family >= NPROTO))
-               return -EAFNOSUPPORT;
        spin_lock_bh(&xfrm_input_afinfo_lock);
        if (likely(xfrm_input_afinfo[afinfo->family] != NULL)) {
                if (unlikely(xfrm_input_afinfo[afinfo->family] != afinfo))
@@ -60,12 +58,13 @@ int xfrm_input_unregister_afinfo(struct xfrm_input_afinfo *afinfo)
 }
 EXPORT_SYMBOL(xfrm_input_unregister_afinfo);
 
-static struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family)
+static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family)
 {
-       struct xfrm_input_afinfo *afinfo;
+       const struct xfrm_input_afinfo *afinfo;
 
-       if (unlikely(family >= NPROTO))
+       if (WARN_ON_ONCE(family >= ARRAY_SIZE(xfrm_input_afinfo)))
                return NULL;
+
        rcu_read_lock();
        afinfo = rcu_dereference(xfrm_input_afinfo[family]);
        if (unlikely(!afinfo))
@@ -73,22 +72,17 @@ static struct xfrm_input_afinfo *xfrm_input_get_afinfo(unsigned int family)
        return afinfo;
 }
 
-static void xfrm_input_put_afinfo(struct xfrm_input_afinfo *afinfo)
-{
-       rcu_read_unlock();
-}
-
 static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
                       int err)
 {
        int ret;
-       struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family);
+       const struct xfrm_input_afinfo *afinfo = xfrm_input_get_afinfo(family);
 
        if (!afinfo)
                return -EAFNOSUPPORT;
 
        ret = afinfo->callback(skb, protocol, err);
-       xfrm_input_put_afinfo(afinfo);
+       rcu_read_unlock();
 
        return ret;
 }
@@ -111,6 +105,8 @@ struct sec_path *secpath_dup(struct sec_path *src)
                return NULL;
 
        sp->len = 0;
+       sp->olen = 0;
+
        if (src) {
                int i;
 
@@ -123,6 +119,24 @@ struct sec_path *secpath_dup(struct sec_path *src)
 }
 EXPORT_SYMBOL(secpath_dup);
 
+int secpath_set(struct sk_buff *skb)
+{
+       struct sec_path *sp;
+
+       /* Allocate new secpath or COW existing one. */
+       if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
+               sp = secpath_dup(skb->sp);
+               if (!sp)
+                       return -ENOMEM;
+
+               if (skb->sp)
+                       secpath_put(skb->sp);
+               skb->sp = sp;
+       }
+       return 0;
+}
+EXPORT_SYMBOL(secpath_set);
+
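
secpath_set() factors out the copy-on-write logic xfrm_input() used to open-code (see the hunk further down): allocate a private secpath when the skb has none or shares one (refcnt != 1), so the caller may append states safely. Together with the export of xfrm_parse_spi() just below, this looks aimed at the GRO receive paths this series enables. The caller pattern, as used later in this file:

        err = secpath_set(skb);
        if (err) {
                /* the only failure mode is -ENOMEM */
                XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
                goto drop;
        }
        /* the secpath is now private to this skb */
        skb->sp->xvec[skb->sp->len++] = x;
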
 /* Fetch spi and seq from ipsec header */
 
 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
@@ -158,6 +172,7 @@ int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
        *seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
        return 0;
 }
+EXPORT_SYMBOL(xfrm_parse_spi);
 
 int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
 {
@@ -192,14 +207,23 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
        unsigned int family;
        int decaps = 0;
        int async = 0;
+       struct xfrm_offload *xo;
+       bool xfrm_gro = false;
 
-       /* A negative encap_type indicates async resumption. */
        if (encap_type < 0) {
-               async = 1;
                x = xfrm_input_state(skb);
-               seq = XFRM_SKB_CB(skb)->seq.input.low;
                family = x->outer_mode->afinfo->family;
-               goto resume;
+
+               /* An encap_type of -1 indicates async resumption. */
+               if (encap_type == -1) {
+                       async = 1;
+                       seq = XFRM_SKB_CB(skb)->seq.input.low;
+                       goto resume;
+               }
+               /* encap_type < -1 indicates a GRO call. */
+               encap_type = 0;
+               seq = XFRM_SPI_SKB_CB(skb)->seq;
+               goto lock;
        }
 
        daddr = (xfrm_address_t *)(skb_network_header(skb) +
@@ -218,18 +242,10 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                break;
        }
 
-       /* Allocate new secpath or COW existing one. */
-       if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
-               struct sec_path *sp;
-
-               sp = secpath_dup(skb->sp);
-               if (!sp) {
-                       XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
-                       goto drop;
-               }
-               if (skb->sp)
-                       secpath_put(skb->sp);
-               skb->sp = sp;
+       err = secpath_set(skb);
+       if (err) {
+               XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
+               goto drop;
        }
 
        seq = 0;
@@ -253,6 +269,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
 
                skb->sp->xvec[skb->sp->len++] = x;
 
+lock:
                spin_lock(&x->lock);
 
                if (unlikely(x->km.state != XFRM_STATE_VALID)) {
@@ -371,10 +388,21 @@ resume:
 
        if (decaps) {
                skb_dst_drop(skb);
-               netif_rx(skb);
+               gro_cells_receive(&gro_cells, skb);
                return 0;
        } else {
-               return x->inner_mode->afinfo->transport_finish(skb, async);
+               xo = xfrm_offload(skb);
+               if (xo)
+                       xfrm_gro = xo->flags & XFRM_GRO;
+
+               err = x->inner_mode->afinfo->transport_finish(skb, async);
+               if (xfrm_gro) {
+                       skb_dst_drop(skb);
+                       gro_cells_receive(&gro_cells, skb);
+                       return err;
+               }
+
+               return err;
        }
 
 drop_unlock:
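
Decapsulated packets (and GRO-flagged transport-mode packets) now re-enter the stack through gro_cells_receive() instead of netif_rx(), i.e. via per-CPU NAPI cells that allow aggregation. The setup in xfrm_input_init() below follows the usual pattern for such internal NAPI users, sketched here; if gro_cells_init() fails, gro_cells.cells stays NULL and gro_cells_receive() quietly falls back to netif_rx():

        static struct gro_cells gro_cells;
        static struct net_device xfrm_napi_dev; /* dummy netdev hosting the NAPI contexts */

        static void xfrm_gro_setup(void)        /* sketch of the init hunk below */
        {
                init_dummy_netdev(&xfrm_napi_dev);
                if (gro_cells_init(&gro_cells, &xfrm_napi_dev))
                        gro_cells.cells = NULL;
        }
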
@@ -394,6 +422,13 @@ EXPORT_SYMBOL(xfrm_input_resume);
 
 void __init xfrm_input_init(void)
 {
+       int err;
+
+       init_dummy_netdev(&xfrm_napi_dev);
+       err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
+       if (err)
+               gro_cells.cells = NULL;
+
        secpath_cachep = kmem_cache_create("secpath_cache",
                                           sizeof(struct sec_path),
                                           0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
index 637387bbaaea33f62a1a970c9a361c895c4e5f2d..8ba29fe58352abcc887235d36d42a06797d94b28 100644 (file)
@@ -246,10 +246,8 @@ void xfrm_local_error(struct sk_buff *skb, int mtu)
                return;
 
        afinfo = xfrm_state_get_afinfo(proto);
-       if (!afinfo)
-               return;
-
-       afinfo->local_error(skb, mtu);
-       xfrm_state_put_afinfo(afinfo);
+       if (afinfo)
+               afinfo->local_error(skb, mtu);
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(xfrm_local_error);
index 177e208e8ff5091f0bc45e1bf75442a071d1cb33..5f3e87866438f5320aaa1030329ff5ab78a8ba13 100644 (file)
@@ -45,7 +45,7 @@ struct xfrm_flo {
 };
 
 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
-static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
+static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
                                                __read_mostly;
 
 static struct kmem_cache *xfrm_dst_cache __read_mostly;
@@ -103,11 +103,11 @@ bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl
        return false;
 }
 
-static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
+static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
 {
-       struct xfrm_policy_afinfo *afinfo;
+       const struct xfrm_policy_afinfo *afinfo;
 
-       if (unlikely(family >= NPROTO))
+       if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
                return NULL;
        rcu_read_lock();
        afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
@@ -116,18 +116,13 @@ static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
        return afinfo;
 }
 
-static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
-{
-       rcu_read_unlock();
-}
-
 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
                                                  int tos, int oif,
                                                  const xfrm_address_t *saddr,
                                                  const xfrm_address_t *daddr,
                                                  int family)
 {
-       struct xfrm_policy_afinfo *afinfo;
+       const struct xfrm_policy_afinfo *afinfo;
        struct dst_entry *dst;
 
        afinfo = xfrm_policy_get_afinfo(family);
@@ -136,7 +131,7 @@ static inline struct dst_entry *__xfrm_dst_lookup(struct net *net,
 
        dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr);
 
-       xfrm_policy_put_afinfo(afinfo);
+       rcu_read_unlock();
 
        return dst;
 }
@@ -330,7 +325,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
 }
 EXPORT_SYMBOL(xfrm_policy_destroy);
 
-/* Rule must be locked. Release descentant resources, announce
+/* Rule must be locked. Release descendant resources, announce
  * entry dead. The rule must already be unlinked from lists at this point.
  */
 
@@ -1431,12 +1426,12 @@ xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
               xfrm_address_t *remote, unsigned short family)
 {
        int err;
-       struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
+       const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
 
        if (unlikely(afinfo == NULL))
                return -EINVAL;
        err = afinfo->get_saddr(net, oif, local, remote);
-       xfrm_policy_put_afinfo(afinfo);
+       rcu_read_unlock();
        return err;
 }
 
@@ -1538,21 +1533,15 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
 
 }
 
-/* Check that the bundle accepts the flow and its components are
- * still valid.
- */
-
-static inline int xfrm_get_tos(const struct flowi *fl, int family)
+static int xfrm_get_tos(const struct flowi *fl, int family)
 {
-       struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
-       int tos;
-
-       if (!afinfo)
-               return -EINVAL;
+       const struct xfrm_policy_afinfo *afinfo;
+       int tos = 0;
 
-       tos = afinfo->get_tos(fl);
+       afinfo = xfrm_policy_get_afinfo(family);
+       tos = afinfo ? afinfo->get_tos(fl) : 0;
 
-       xfrm_policy_put_afinfo(afinfo);
+       rcu_read_unlock();
 
        return tos;
 }
@@ -1609,7 +1598,7 @@ static const struct flow_cache_ops xfrm_bundle_fc_ops = {
 
 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
 {
-       struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
+       const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        struct dst_ops *dst_ops;
        struct xfrm_dst *xdst;
 
@@ -1638,7 +1627,7 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
        } else
                xdst = ERR_PTR(-ENOBUFS);
 
-       xfrm_policy_put_afinfo(afinfo);
+       rcu_read_unlock();
 
        return xdst;
 }
@@ -1646,7 +1635,7 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
                                 int nfheader_len)
 {
-       struct xfrm_policy_afinfo *afinfo =
+       const struct xfrm_policy_afinfo *afinfo =
                xfrm_policy_get_afinfo(dst->ops->family);
        int err;
 
@@ -1655,7 +1644,7 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
 
        err = afinfo->init_path(path, dst, nfheader_len);
 
-       xfrm_policy_put_afinfo(afinfo);
+       rcu_read_unlock();
 
        return err;
 }
@@ -1663,7 +1652,7 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
                                const struct flowi *fl)
 {
-       struct xfrm_policy_afinfo *afinfo =
+       const struct xfrm_policy_afinfo *afinfo =
                xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
        int err;
 
@@ -1672,7 +1661,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
 
        err = afinfo->fill_dst(xdst, dev, fl);
 
-       xfrm_policy_put_afinfo(afinfo);
+       rcu_read_unlock();
 
        return err;
 }
@@ -1705,9 +1694,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
        xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
 
        tos = xfrm_get_tos(fl, family);
-       err = tos;
-       if (tos < 0)
-               goto put_states;
 
        dst_hold(dst);
 
@@ -2215,7 +2201,7 @@ error:
 static struct dst_entry *make_blackhole(struct net *net, u16 family,
                                        struct dst_entry *dst_orig)
 {
-       struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
+       const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        struct dst_entry *ret;
 
        if (!afinfo) {
@@ -2224,7 +2210,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family,
        } else {
                ret = afinfo->blackhole_route(net, dst_orig);
        }
-       xfrm_policy_put_afinfo(afinfo);
+       rcu_read_unlock();
 
        return ret;
 }
@@ -2466,7 +2452,7 @@ xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int star
 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
                          unsigned int family, int reverse)
 {
-       struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
+       const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
        int err;
 
        if (unlikely(afinfo == NULL))
@@ -2474,7 +2460,7 @@ int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
 
        afinfo->decode_session(skb, fl, reverse);
        err = security_xfrm_decode_session(skb, &fl->flowi_secid);
-       xfrm_policy_put_afinfo(afinfo);
+       rcu_read_unlock();
        return err;
 }
 EXPORT_SYMBOL(__xfrm_decode_session);
@@ -2742,10 +2728,11 @@ void xfrm_garbage_collect(struct net *net)
 }
 EXPORT_SYMBOL(xfrm_garbage_collect);
 
-static void xfrm_garbage_collect_deferred(struct net *net)
+void xfrm_garbage_collect_deferred(struct net *net)
 {
        flow_cache_flush_deferred(net);
 }
+EXPORT_SYMBOL(xfrm_garbage_collect_deferred);
 
 static void xfrm_init_pmtu(struct dst_entry *dst)
 {
@@ -2856,15 +2843,32 @@ static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
        return dst->path->ops->neigh_lookup(dst, skb, daddr);
 }
 
-int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
+static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
+{
+       const struct dst_entry *path = dst->path;
+
+       for (; dst != path; dst = dst->child) {
+               const struct xfrm_state *xfrm = dst->xfrm;
+
+               if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
+                       continue;
+               if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
+                       daddr = xfrm->coaddr;
+               else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
+                       daddr = &xfrm->id.daddr;
+       }
+       path->ops->confirm_neigh(path, daddr);
+}
+
+int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
 {
        int err = 0;
-       if (unlikely(afinfo == NULL))
-               return -EINVAL;
-       if (unlikely(afinfo->family >= NPROTO))
+
+       if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
                return -EAFNOSUPPORT;
+
        spin_lock(&xfrm_policy_afinfo_lock);
-       if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
+       if (unlikely(xfrm_policy_afinfo[family] != NULL))
                err = -EEXIST;
        else {
                struct dst_ops *dst_ops = afinfo->dst_ops;
@@ -2882,9 +2886,9 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                        dst_ops->link_failure = xfrm_link_failure;
                if (likely(dst_ops->neigh_lookup == NULL))
                        dst_ops->neigh_lookup = xfrm_neigh_lookup;
-               if (likely(afinfo->garbage_collect == NULL))
-                       afinfo->garbage_collect = xfrm_garbage_collect_deferred;
-               rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
+               if (likely(!dst_ops->confirm_neigh))
+                       dst_ops->confirm_neigh = xfrm_confirm_neigh;
+               rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
        }
        spin_unlock(&xfrm_policy_afinfo_lock);
 
@@ -2892,34 +2896,24 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
 }
 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
 
-int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
+void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
 {
-       int err = 0;
-       if (unlikely(afinfo == NULL))
-               return -EINVAL;
-       if (unlikely(afinfo->family >= NPROTO))
-               return -EAFNOSUPPORT;
-       spin_lock(&xfrm_policy_afinfo_lock);
-       if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
-               if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
-                       err = -EINVAL;
-               else
-                       RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
-                                        NULL);
+       struct dst_ops *dst_ops = afinfo->dst_ops;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
+               if (xfrm_policy_afinfo[i] != afinfo)
+                       continue;
+               RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
+               break;
        }
-       spin_unlock(&xfrm_policy_afinfo_lock);
-       if (!err) {
-               struct dst_ops *dst_ops = afinfo->dst_ops;
 
-               synchronize_rcu();
+       synchronize_rcu();
 
-               dst_ops->kmem_cachep = NULL;
-               dst_ops->check = NULL;
-               dst_ops->negative_advice = NULL;
-               dst_ops->link_failure = NULL;
-               afinfo->garbage_collect = NULL;
-       }
-       return err;
+       dst_ops->kmem_cachep = NULL;
+       dst_ops->check = NULL;
+       dst_ops->negative_advice = NULL;
+       dst_ops->link_failure = NULL;
 }
 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
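The two hunks above complete the conversion of the policy-side afinfo handling from a get/put pairing to plain RCU: register publishes the per-family pointer with rcu_assign_pointer() under xfrm_policy_afinfo_lock, and unregister clears the slot and waits in synchronize_rcu() before scrubbing the dst_ops callbacks. A minimal reader-side sketch under those assumptions (illustrative only, not code from the patch):

static const struct xfrm_policy_afinfo *get_afinfo_rcu(int family)
{
	/* same bounds check as the WARN_ON() in the register path */
	if (family >= ARRAY_SIZE(xfrm_policy_afinfo))
		return NULL;

	/* result is valid only inside an rcu_read_lock() section */
	return rcu_dereference(xfrm_policy_afinfo[family]);
}

Callers such as __xfrm_decode_session() earlier in this file accordingly end their lookup with a bare rcu_read_unlock() instead of xfrm_policy_put_afinfo().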
 
index 64e3c82eedf6bbe0e6817d008fd661bdef8cca91..5a597dbbe564f09ad6b77e1f529127a0410c7fd4 100644 (file)
@@ -192,7 +192,7 @@ int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
        else
                err = -EEXIST;
        spin_unlock_bh(&xfrm_type_lock);
-       xfrm_state_put_afinfo(afinfo);
+       rcu_read_unlock();
        return err;
 }
 EXPORT_SYMBOL(xfrm_register_type);
@@ -213,7 +213,7 @@ int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
        else
                typemap[type->proto] = NULL;
        spin_unlock_bh(&xfrm_type_lock);
-       xfrm_state_put_afinfo(afinfo);
+       rcu_read_unlock();
        return err;
 }
 EXPORT_SYMBOL(xfrm_unregister_type);
@@ -231,17 +231,18 @@ retry:
                return NULL;
        typemap = afinfo->type_map;
 
-       type = typemap[proto];
+       type = READ_ONCE(typemap[proto]);
        if (unlikely(type && !try_module_get(type->owner)))
                type = NULL;
+
+       rcu_read_unlock();
+
        if (!type && !modload_attempted) {
-               xfrm_state_put_afinfo(afinfo);
                request_module("xfrm-type-%d-%d", family, proto);
                modload_attempted = 1;
                goto retry;
        }
 
-       xfrm_state_put_afinfo(afinfo);
        return type;
 }
 
@@ -280,7 +281,7 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
 
 out:
        spin_unlock_bh(&xfrm_mode_lock);
-       xfrm_state_put_afinfo(afinfo);
+       rcu_read_unlock();
        return err;
 }
 EXPORT_SYMBOL(xfrm_register_mode);
@@ -308,7 +309,7 @@ int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
        }
 
        spin_unlock_bh(&xfrm_mode_lock);
-       xfrm_state_put_afinfo(afinfo);
+       rcu_read_unlock();
        return err;
 }
 EXPORT_SYMBOL(xfrm_unregister_mode);
@@ -327,17 +328,17 @@ retry:
        if (unlikely(afinfo == NULL))
                return NULL;
 
-       mode = afinfo->mode_map[encap];
+       mode = READ_ONCE(afinfo->mode_map[encap]);
        if (unlikely(mode && !try_module_get(mode->owner)))
                mode = NULL;
+
+       rcu_read_unlock();
        if (!mode && !modload_attempted) {
-               xfrm_state_put_afinfo(afinfo);
                request_module("xfrm-mode-%d-%d", family, encap);
                modload_attempted = 1;
                goto retry;
        }
 
-       xfrm_state_put_afinfo(afinfo);
        return mode;
 }
 
@@ -409,7 +410,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
                        if (x->xflags & XFRM_SOFT_EXPIRE) {
                                /* enter hard expire without soft expire first?!
                                 * setting a new date could trigger this.
-                                * workarbound: fix x->curflt.add_time by below:
+                                * workaround: fix x->curlft.add_time as below:
                                 */
                                x->curlft.add_time = now - x->saved_tmo - 1;
                                tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
@@ -639,26 +640,25 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
 }
 EXPORT_SYMBOL(xfrm_sad_getinfo);
 
-static int
+static void
 xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
                    const struct xfrm_tmpl *tmpl,
                    const xfrm_address_t *daddr, const xfrm_address_t *saddr,
                    unsigned short family)
 {
-       struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
+       struct xfrm_state_afinfo *afinfo = xfrm_state_afinfo_get_rcu(family);
+
        if (!afinfo)
-               return -1;
+               return;
+
        afinfo->init_tempsel(&x->sel, fl);
 
        if (family != tmpl->encap_family) {
-               xfrm_state_put_afinfo(afinfo);
-               afinfo = xfrm_state_get_afinfo(tmpl->encap_family);
+               afinfo = xfrm_state_afinfo_get_rcu(tmpl->encap_family);
                if (!afinfo)
-                       return -1;
+                       return;
        }
        afinfo->init_temprop(x, tmpl, daddr, saddr);
-       xfrm_state_put_afinfo(afinfo);
-       return 0;
 }
 
 static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
@@ -1474,7 +1474,7 @@ xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
        if (afinfo->tmpl_sort)
                err = afinfo->tmpl_sort(dst, src, n);
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
-       xfrm_state_put_afinfo(afinfo);
+       rcu_read_unlock();
        return err;
 }
 EXPORT_SYMBOL(xfrm_tmpl_sort);
@@ -1494,7 +1494,7 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
        if (afinfo->state_sort)
                err = afinfo->state_sort(dst, src, n);
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
-       xfrm_state_put_afinfo(afinfo);
+       rcu_read_unlock();
        return err;
 }
 EXPORT_SYMBOL(xfrm_state_sort);
@@ -1932,10 +1932,10 @@ EXPORT_SYMBOL(xfrm_unregister_km);
 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
 {
        int err = 0;
-       if (unlikely(afinfo == NULL))
-               return -EINVAL;
-       if (unlikely(afinfo->family >= NPROTO))
+
+       if (WARN_ON(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;
+
        spin_lock_bh(&xfrm_state_afinfo_lock);
        if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
                err = -EEXIST;
@@ -1948,14 +1948,14 @@ EXPORT_SYMBOL(xfrm_state_register_afinfo);
 
 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
 {
-       int err = 0;
-       if (unlikely(afinfo == NULL))
-               return -EINVAL;
-       if (unlikely(afinfo->family >= NPROTO))
+       int err = 0, family = afinfo->family;
+
+       if (WARN_ON(family >= NPROTO))
                return -EAFNOSUPPORT;
+
        spin_lock_bh(&xfrm_state_afinfo_lock);
        if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
-               if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
+               if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
                        err = -EINVAL;
                else
                        RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
@@ -1966,6 +1966,14 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
 }
 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
 
+struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
+{
+       if (unlikely(family >= NPROTO))
+               return NULL;
+
+       return rcu_dereference(xfrm_state_afinfo[family]);
+}
+
 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
 {
        struct xfrm_state_afinfo *afinfo;
@@ -1978,11 +1986,6 @@ struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
        return afinfo;
 }
 
-void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
-{
-       rcu_read_unlock();
-}
-
 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
 void xfrm_state_delete_tunnel(struct xfrm_state *x)
 {
@@ -2000,16 +2003,13 @@ EXPORT_SYMBOL(xfrm_state_delete_tunnel);
 
 int xfrm_state_mtu(struct xfrm_state *x, int mtu)
 {
-       int res;
+       const struct xfrm_type *type = READ_ONCE(x->type);
 
-       spin_lock_bh(&x->lock);
        if (x->km.state == XFRM_STATE_VALID &&
-           x->type && x->type->get_mtu)
-               res = x->type->get_mtu(x, mtu);
-       else
-               res = mtu - x->props.header_len;
-       spin_unlock_bh(&x->lock);
-       return res;
+           type && type->get_mtu)
+               return type->get_mtu(x, mtu);
+
+       return mtu - x->props.header_len;
 }
 
 int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
@@ -2028,7 +2028,7 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
        if (afinfo->init_flags)
                err = afinfo->init_flags(x);
 
-       xfrm_state_put_afinfo(afinfo);
+       rcu_read_unlock();
 
        if (err)
                goto error;
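The state side follows the same pattern: xfrm_state_afinfo_get_rcu(), added above, is just a bounds check plus rcu_dereference(), so it is only safe while the caller already sits in an RCU read-side critical section (which the state-lookup path calling xfrm_init_tempstate() provides). A hedged caller sketch under that assumption:

rcu_read_lock();
afinfo = xfrm_state_afinfo_get_rcu(family);	/* may return NULL */
if (afinfo)
	afinfo->init_tempsel(&x->sel, fl);
rcu_read_unlock();

In the same spirit, xfrm_state_mtu() above trades the x->lock spinlock for a single READ_ONCE() load of x->type, which suffices because the type pointer is sampled only once.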
index 7ee1574c8ccff49ad7a2a301c4462f66e46d88da..a91872a97742a6413c316548c66ab8349ba1aff0 100644 (file)
@@ -57,6 +57,14 @@ struct bpf_map_def SEC("maps") percpu_hash_map_alloc = {
        .map_flags = BPF_F_NO_PREALLOC,
 };
 
+struct bpf_map_def SEC("maps") lpm_trie_map_alloc = {
+       .type = BPF_MAP_TYPE_LPM_TRIE,
+       .key_size = 8,
+       .value_size = sizeof(long),
+       .max_entries = 10000,
+       .map_flags = BPF_F_NO_PREALLOC,
+};
+
 SEC("kprobe/sys_getuid")
 int stress_hmap(struct pt_regs *ctx)
 {
@@ -135,5 +143,27 @@ int stress_percpu_lru_hmap_alloc(struct pt_regs *ctx)
        return 0;
 }
 
+SEC("kprobe/sys_gettid")
+int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
+{
+       union {
+               u32 b32[2];
+               u8 b8[8];
+       } key;
+       unsigned int i;
+
+       key.b32[0] = 32;
+       key.b8[4] = 192;
+       key.b8[5] = 168;
+       key.b8[6] = 0;
+       key.b8[7] = 1;
+
+#pragma clang loop unroll(full)
+       for (i = 0; i < 32; ++i)
+               bpf_map_lookup_elem(&lpm_trie_map_alloc, &key);
+
+       return 0;
+}
+
 char _license[] SEC("license") = "GPL";
 u32 _version SEC("version") = LINUX_VERSION_CODE;
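The 8-byte key assembled in stress_lpm_trie_map_alloc() packs a 32-bit prefix length followed by the address bytes. An equivalent struct view (hypothetical name, for illustration; it mirrors the bpf_lpm_trie_key layout introduced in the uapi hunk further below):

struct lpm_v4_key {
	__u32 prefixlen;	/* key.b32[0] = 32 above: match the full /32 */
	__u8  data[4];		/* key.b8[4..7] = 192.168.0.1 */
};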
index 9505b4d112f426790645ecd00c50263620ae68e6..680260a91f50c893dd26a1b968cc220a299c530f 100644 (file)
@@ -37,6 +37,7 @@ static __u64 time_get_ns(void)
 #define PERCPU_HASH_KMALLOC    (1 << 3)
 #define LRU_HASH_PREALLOC      (1 << 4)
 #define PERCPU_LRU_HASH_PREALLOC       (1 << 5)
+#define LPM_KMALLOC            (1 << 6)
 
 static int test_flags = ~0;
 
@@ -112,6 +113,18 @@ static void test_percpu_hash_kmalloc(int cpu)
               cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
 }
 
+static void test_lpm_kmalloc(int cpu)
+{
+       __u64 start_time;
+       int i;
+
+       start_time = time_get_ns();
+       for (i = 0; i < MAX_CNT; i++)
+               syscall(__NR_gettid);
+       printf("%d:lpm_perf kmalloc %lld events per sec\n",
+              cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+}
+
 static void loop(int cpu)
 {
        cpu_set_t cpuset;
@@ -137,6 +150,9 @@ static void loop(int cpu)
 
        if (test_flags & PERCPU_LRU_HASH_PREALLOC)
                test_percpu_lru_hash_prealloc(cpu);
+
+       if (test_flags & LPM_KMALLOC)
+               test_lpm_kmalloc(cpu);
 }
 
 static void run_perf_test(int tasks)
@@ -162,6 +178,37 @@ static void run_perf_test(int tasks)
        }
 }
 
+static void fill_lpm_trie(void)
+{
+       struct bpf_lpm_trie_key *key;
+       unsigned long value = 0;
+       unsigned int i;
+       int r;
+
+       key = alloca(sizeof(*key) + 4);
+       key->prefixlen = 32;
+
+       for (i = 0; i < 512; ++i) {
+               key->prefixlen = rand() % 33;
+               key->data[0] = rand() & 0xff;
+               key->data[1] = rand() & 0xff;
+               key->data[2] = rand() & 0xff;
+               key->data[3] = rand() & 0xff;
+               r = bpf_map_update_elem(map_fd[6], key, &value, 0);
+               assert(!r);
+       }
+
+       key->prefixlen = 32;
+       key->data[0] = 192;
+       key->data[1] = 168;
+       key->data[2] = 0;
+       key->data[3] = 1;
+       value = 128;
+
+       r = bpf_map_update_elem(map_fd[6], key, &value, 0);
+       assert(!r);
+}
+
 int main(int argc, char **argv)
 {
        struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
@@ -182,6 +229,8 @@ int main(int argc, char **argv)
                return 1;
        }
 
+       fill_lpm_trie();
+
        run_perf_test(num_cpu);
 
        return 0;
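Note that the user-space loop in test_lpm_kmalloc() only issues gettid(); the measured work happens in the sys_gettid kprobe program, which runs a fully unrolled 32-iteration lookup loop per hit. Roughly, assuming every syscall triggers the probe:

/* reported "events per sec" counts gettid() calls, so approximately:
 *   trie lookups/sec = 32 * reported events/sec
 */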
index d98550abe16d40250be4327197c48866953b5645..5eebce1af9a43f881bf0676bed54a525df68c91b 100644 (file)
@@ -4365,7 +4365,8 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
 
                        inet_get_local_port_range(sock_net(sk), &low, &high);
 
-                       if (snum < max(PROT_SOCK, low) || snum > high) {
+                       if (snum < max(inet_prot_sock(sock_net(sk)), low) ||
+                           snum > high) {
                                err = sel_netport_sid(sk->sk_protocol,
                                                      snum, &sid);
                                if (err)
index d2b0ac799d03c925a6eec2b49bdb14525331687c..0539a0ceef38155835552360667070552ebce641 100644 (file)
@@ -63,6 +63,12 @@ struct bpf_insn {
        __s32   imm;            /* signed immediate constant */
 };
 
+/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
+struct bpf_lpm_trie_key {
+       __u32   prefixlen;      /* up to 32 for AF_INET, 128 for AF_INET6 */
+       __u8    data[0];        /* Arbitrary size */
+};
+
 /* BPF syscall commands, see bpf(2) man-page for details. */
 enum bpf_cmd {
        BPF_MAP_CREATE,
@@ -89,6 +95,7 @@ enum bpf_map_type {
        BPF_MAP_TYPE_CGROUP_ARRAY,
        BPF_MAP_TYPE_LRU_HASH,
        BPF_MAP_TYPE_LRU_PERCPU_HASH,
+       BPF_MAP_TYPE_LPM_TRIE,
 };
 
 enum bpf_prog_type {
@@ -437,6 +444,18 @@ union bpf_attr {
  *     @xdp_md: pointer to xdp_md
 *     @delta: A positive/negative integer to be added to xdp_md.data
  *     Return: 0 on success or negative on error
+ *
+ * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
+ *     Copy a NUL-terminated string from an unsafe address. If the string
+ *     is shorter than size, the target is not padded with further NUL
+ *     bytes. If the string is longer than size, just size - 1 bytes are
+ *     copied and the last byte is set to NUL.
+ *     @dst: destination address
+ *     @size: maximum number of bytes to copy, including the trailing NUL
+ *     @unsafe_ptr: unsafe address
+ *     Return:
+ *       > 0 length of the string including the trailing NUL on success
+ *       < 0 error
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -483,7 +502,8 @@ union bpf_attr {
        FN(set_hash_invalid),           \
        FN(get_numa_node_id),           \
        FN(skb_change_head),            \
-       FN(xdp_adjust_head),
+       FN(xdp_adjust_head),            \
+       FN(probe_read_str),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
@@ -509,6 +529,7 @@ enum bpf_func_id {
 /* BPF_FUNC_l4_csum_replace flags. */
 #define BPF_F_PSEUDO_HDR               (1ULL << 4)
 #define BPF_F_MARK_MANGLED_0           (1ULL << 5)
+#define BPF_F_MARK_ENFORCE             (1ULL << 6)
 
 /* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
 #define BPF_F_INGRESS                  (1ULL << 0)
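A hypothetical consumer of the new bpf_probe_read_str() helper documented above; SEC(), PT_REGS_PARM1() and the helper-call wrappers are assumed to come from the samples' bpf_helpers.h, and none of this code is part of the patch:

SEC("kprobe/sys_open")
int trace_open(struct pt_regs *ctx)
{
	char fname[64];
	char fmt[] = "open: %s\n";
	int len;

	len = bpf_probe_read_str(fname, sizeof(fname),
				 (void *)PT_REGS_PARM1(ctx));
	if (len > 0)	/* len includes the trailing NUL */
		bpf_trace_printk(fmt, sizeof(fmt), fname);
	return 0;
}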
index ae752fa4eaa741989040d982772740691486a1e3..d48b70ceb25a9b0f33ee12478c5fde78de001c40 100644 (file)
 # endif
 #endif
 
-static __u64 ptr_to_u64(void *ptr)
+static inline __u64 ptr_to_u64(const void *ptr)
 {
        return (__u64) (unsigned long) ptr;
 }
 
-static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
-                  unsigned int size)
+static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
+                         unsigned int size)
 {
        return syscall(__NR_bpf, cmd, attr, size);
 }
@@ -69,8 +69,8 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size,
        return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 }
 
-int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
-                    size_t insns_cnt, char *license,
+int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
+                    size_t insns_cnt, const char *license,
                     __u32 kern_version, char *log_buf, size_t log_buf_sz)
 {
        int fd;
@@ -98,7 +98,7 @@ int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
        return sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
 }
 
-int bpf_map_update_elem(int fd, void *key, void *value,
+int bpf_map_update_elem(int fd, const void *key, const void *value,
                        __u64 flags)
 {
        union bpf_attr attr;
@@ -112,7 +112,7 @@ int bpf_map_update_elem(int fd, void *key, void *value,
        return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 }
 
-int bpf_map_lookup_elem(int fd, void *key, void *value)
+int bpf_map_lookup_elem(int fd, const void *key, void *value)
 {
        union bpf_attr attr;
 
@@ -124,7 +124,7 @@ int bpf_map_lookup_elem(int fd, void *key, void *value)
        return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 }
 
-int bpf_map_delete_elem(int fd, void *key)
+int bpf_map_delete_elem(int fd, const void *key)
 {
        union bpf_attr attr;
 
@@ -135,7 +135,7 @@ int bpf_map_delete_elem(int fd, void *key)
        return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
 }
 
-int bpf_map_get_next_key(int fd, void *key, void *next_key)
+int bpf_map_get_next_key(int fd, const void *key, void *next_key)
 {
        union bpf_attr attr;
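Taken together, the const-qualified prototypes let callers pass read-only keys and values. A minimal user-space sketch against exactly the signatures shown above (error handling trimmed):

#include <linux/bpf.h>
#include <bpf/bpf.h>

int demo(void)
{
	long long key = 1, value = 42;
	int fd;

	fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
			    2, 0);
	if (fd < 0)
		return fd;

	if (bpf_map_update_elem(fd, &key, &value, BPF_ANY))
		return -1;
	return bpf_map_lookup_elem(fd, &key, &value);	/* 0 on success */
}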
 
index 4ac6c4b841001b86a58ae5e9ad72cda60d8a5814..9f838aa6d26ee004451c7da8afa0ee847013e7ab 100644 (file)
@@ -28,17 +28,17 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
 
 /* Recommended log buffer size */
 #define BPF_LOG_BUF_SIZE 65536
-int bpf_load_program(enum bpf_prog_type type, struct bpf_insn *insns,
-                    size_t insns_cnt, char *license,
+int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
+                    size_t insns_cnt, const char *license,
                     __u32 kern_version, char *log_buf,
                     size_t log_buf_sz);
 
-int bpf_map_update_elem(int fd, void *key, void *value,
+int bpf_map_update_elem(int fd, const void *key, const void *value,
                        __u64 flags);
 
-int bpf_map_lookup_elem(int fd, void *key, void *value);
-int bpf_map_delete_elem(int fd, void *key);
-int bpf_map_get_next_key(int fd, void *key, void *next_key);
+int bpf_map_lookup_elem(int fd, const void *key, void *value);
+int bpf_map_delete_elem(int fd, const void *key);
+int bpf_map_get_next_key(int fd, const void *key, void *next_key);
 int bpf_obj_pin(int fd, const char *pathname);
 int bpf_obj_get(const char *pathname);
 int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type,
index 14a4f623c1a57353e49869974125eee71c0e9671..f2ea78021450a53390b6754a78d25851317a1d2a 100644 (file)
@@ -831,6 +831,7 @@ static void free_arg(struct print_arg *arg)
                free_flag_sym(arg->symbol.symbols);
                break;
        case PRINT_HEX:
+       case PRINT_HEX_STR:
                free_arg(arg->hex.field);
                free_arg(arg->hex.size);
                break;
@@ -2629,10 +2630,11 @@ out_free:
 }
 
 static enum event_type
-process_hex(struct event_format *event, struct print_arg *arg, char **tok)
+process_hex_common(struct event_format *event, struct print_arg *arg,
+                  char **tok, enum print_arg_type type)
 {
        memset(arg, 0, sizeof(*arg));
-       arg->type = PRINT_HEX;
+       arg->type = type;
 
        if (alloc_and_process_delim(event, ",", &arg->hex.field))
                goto out;
@@ -2650,6 +2652,19 @@ out:
        return EVENT_ERROR;
 }
 
+static enum event_type
+process_hex(struct event_format *event, struct print_arg *arg, char **tok)
+{
+       return process_hex_common(event, arg, tok, PRINT_HEX);
+}
+
+static enum event_type
+process_hex_str(struct event_format *event, struct print_arg *arg,
+               char **tok)
+{
+       return process_hex_common(event, arg, tok, PRINT_HEX_STR);
+}
+
 static enum event_type
 process_int_array(struct event_format *event, struct print_arg *arg, char **tok)
 {
@@ -3009,6 +3024,10 @@ process_function(struct event_format *event, struct print_arg *arg,
                free_token(token);
                return process_hex(event, arg, tok);
        }
+       if (strcmp(token, "__print_hex_str") == 0) {
+               free_token(token);
+               return process_hex_str(event, arg, tok);
+       }
        if (strcmp(token, "__print_array") == 0) {
                free_token(token);
                return process_int_array(event, arg, tok);
@@ -3547,6 +3566,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
        case PRINT_SYMBOL:
        case PRINT_INT_ARRAY:
        case PRINT_HEX:
+       case PRINT_HEX_STR:
                break;
        case PRINT_TYPE:
                val = eval_num_arg(data, size, event, arg->typecast.item);
@@ -3962,6 +3982,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                }
                break;
        case PRINT_HEX:
+       case PRINT_HEX_STR:
                if (arg->hex.field->type == PRINT_DYNAMIC_ARRAY) {
                        unsigned long offset;
                        offset = pevent_read_number(pevent,
@@ -3981,7 +4002,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
                }
                len = eval_num_arg(data, size, event, arg->hex.size);
                for (i = 0; i < len; i++) {
-                       if (i)
+                       if (i && arg->type == PRINT_HEX)
                                trace_seq_putc(s, ' ');
                        trace_seq_printf(s, "%02x", hex[i]);
                }
@@ -5727,6 +5748,13 @@ static void print_args(struct print_arg *args)
                print_args(args->hex.size);
                printf(")");
                break;
+       case PRINT_HEX_STR:
+               printf("__print_hex_str(");
+               print_args(args->hex.field);
+               printf(", ");
+               print_args(args->hex.size);
+               printf(")");
+               break;
        case PRINT_INT_ARRAY:
                printf("__print_array(");
                print_args(args->int_array.field);
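The one-line guard added in print_str_arg() is what separates the two output styles; for a four-byte buffer { 0xde, 0xad, 0xbe, 0xef } the rendered output would be (illustrative):

/* __print_hex(buf, 4)     -> "de ad be ef"   (space-separated bytes)
 * __print_hex_str(buf, 4) -> "deadbeef"      (contiguous hex string)
 */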
index 7aae746ec2fee7a6d1e19de8dcb720fecb5a11ef..74cecba87daaa654504a7c66154b70490c23e8bd 100644 (file)
@@ -292,6 +292,7 @@ enum print_arg_type {
        PRINT_FUNC,
        PRINT_BITMASK,
        PRINT_DYNAMIC_ARRAY_LEN,
+       PRINT_HEX_STR,
 };
 
 struct print_arg {
index e55a132f69b73e23661bf1a6e21aaac36cbb85ad..e74adfbd6a2ed7296125b0a41a6ea33d3c036dea 100644 (file)
@@ -217,6 +217,7 @@ static void define_event_symbols(struct event_format *event,
                                       cur_field_name);
                break;
        case PRINT_HEX:
+       case PRINT_HEX_STR:
                define_event_symbols(event, ev_name, args->hex.field);
                define_event_symbols(event, ev_name, args->hex.size);
                break;
index 089438da1f7f76b2318dd2f5b10bc039441e2fc4..581e0efd6356839567a9ae5303a3fa6581b8ee3b 100644 (file)
@@ -236,6 +236,7 @@ static void define_event_symbols(struct event_format *event,
                              cur_field_name);
                break;
        case PRINT_HEX:
+       case PRINT_HEX_STR:
                define_event_symbols(event, ev_name, args->hex.field);
                define_event_symbols(event, ev_name, args->hex.size);
                break;
index 071431bedde8fc150cfd5a7c22715d12dfeb78a6..541d9d7fad5a86a200d5871e31c81cce538ff975 100644 (file)
@@ -1,3 +1,5 @@
 test_verifier
 test_maps
 test_lru_map
+test_lpm_map
+test_tag
index 7a5f24543a5f06fc92e2c0c139f6af7a05f9ea37..c7816fe60feb92b2e53ab7f1c1e2a560de34c866 100644 (file)
@@ -1,12 +1,25 @@
-CFLAGS += -Wall -O2 -I../../../../usr/include
+LIBDIR := ../../../lib
+BPFOBJ := $(LIBDIR)/bpf/bpf.o
 
-test_objs = test_verifier test_maps test_lru_map
+CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR)
 
-TEST_PROGS := test_verifier test_maps test_lru_map test_kmod.sh
+test_objs = test_verifier test_tag test_maps test_lru_map test_lpm_map
+
+TEST_PROGS := $(test_objs) test_kmod.sh
 TEST_FILES := $(test_objs)
 
+.PHONY: all clean force
+
 all: $(test_objs)
 
+# force a rebuild of BPFOBJ when its dependencies are updated
+force:
+
+$(BPFOBJ): force
+       $(MAKE) -C $(dir $(BPFOBJ))
+
+$(test_objs): $(BPFOBJ)
+
 include ../lib.mk
 
 clean:
diff --git a/tools/testing/selftests/bpf/bpf_sys.h b/tools/testing/selftests/bpf/bpf_sys.h
deleted file mode 100644 (file)
index 6b4565f..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-#ifndef __BPF_SYS__
-#define __BPF_SYS__
-
-#include <stdint.h>
-#include <stdlib.h>
-
-#include <sys/syscall.h>
-
-#include <linux/bpf.h>
-
-static inline __u64 bpf_ptr_to_u64(const void *ptr)
-{
-       return (__u64)(unsigned long) ptr;
-}
-
-static inline int bpf(int cmd, union bpf_attr *attr, unsigned int size)
-{
-#ifdef __NR_bpf
-       return syscall(__NR_bpf, cmd, attr, size);
-#else
-       fprintf(stderr, "No bpf syscall, kernel headers too old?\n");
-       errno = ENOSYS;
-       return -1;
-#endif
-}
-
-static inline int bpf_map_lookup(int fd, const void *key, void *value)
-{
-       union bpf_attr attr = {};
-
-       attr.map_fd = fd;
-       attr.key = bpf_ptr_to_u64(key);
-       attr.value = bpf_ptr_to_u64(value);
-
-       return bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
-}
-
-static inline int bpf_map_update(int fd, const void *key, const void *value,
-                                uint64_t flags)
-{
-       union bpf_attr attr = {};
-
-       attr.map_fd = fd;
-       attr.key = bpf_ptr_to_u64(key);
-       attr.value = bpf_ptr_to_u64(value);
-       attr.flags = flags;
-
-       return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
-}
-
-static inline int bpf_map_delete(int fd, const void *key)
-{
-       union bpf_attr attr = {};
-
-       attr.map_fd = fd;
-       attr.key = bpf_ptr_to_u64(key);
-
-       return bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
-}
-
-static inline int bpf_map_next_key(int fd, const void *key, void *next_key)
-{
-       union bpf_attr attr = {};
-
-       attr.map_fd = fd;
-       attr.key = bpf_ptr_to_u64(key);
-       attr.next_key = bpf_ptr_to_u64(next_key);
-
-       return bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
-}
-
-static inline int bpf_map_create(enum bpf_map_type type, uint32_t size_key,
-                                uint32_t size_value, uint32_t max_elem,
-                                uint32_t flags)
-{
-       union bpf_attr attr = {};
-
-       attr.map_type = type;
-       attr.key_size = size_key;
-       attr.value_size = size_value;
-       attr.max_entries = max_elem;
-       attr.map_flags = flags;
-
-       return bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
-}
-
-static inline int bpf_prog_load(enum bpf_prog_type type,
-                               const struct bpf_insn *insns, size_t size_insns,
-                               const char *license, char *log, size_t size_log)
-{
-       union bpf_attr attr = {};
-
-       attr.prog_type = type;
-       attr.insns = bpf_ptr_to_u64(insns);
-       attr.insn_cnt = size_insns / sizeof(struct bpf_insn);
-       attr.license = bpf_ptr_to_u64(license);
-
-       if (size_log > 0) {
-               attr.log_buf = bpf_ptr_to_u64(log);
-               attr.log_size = size_log;
-               attr.log_level = 1;
-               log[0] = 0;
-       }
-
-       return bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
-}
-
-#endif /* __BPF_SYS__ */
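With bpf_sys.h gone, the selftests below are converted to the shared libbpf helpers. The hunks that follow apply this correspondence (the bpf_prog_load() replacement is presumed from the tools/lib/bpf prototypes above):

/* old local wrapper (bpf_sys.h)    -> libbpf (tools/lib/bpf/bpf.h)
 * bpf_map_create(...)              -> bpf_create_map(...)
 * bpf_map_lookup(fd, k, v)         -> bpf_map_lookup_elem(fd, k, v)
 * bpf_map_update(fd, k, v, flags)  -> bpf_map_update_elem(fd, k, v, flags)
 * bpf_map_delete(fd, k)            -> bpf_map_delete_elem(fd, k)
 * bpf_map_next_key(fd, k, nk)      -> bpf_map_get_next_key(fd, k, nk)
 * bpf_prog_load(...)               -> bpf_load_program(...)
 */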
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
new file mode 100644 (file)
index 0000000..e975652
--- /dev/null
@@ -0,0 +1,358 @@
+/*
+ * Randomized tests for eBPF longest-prefix-match maps
+ *
+ * This program runs randomized tests against the lpm-bpf-map. It implements a
+ * "Trivial Longest Prefix Match" (tlpm) based on simple, linear, singly linked
+ * lists. The implementation should be pretty straightforward.
+ *
+ * Based on tlpm, this inserts randomized data into bpf-lpm-maps and verifies
+ * the trie-based bpf-map implementation behaves the same way as tlpm.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <linux/bpf.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+#include <bpf/bpf.h>
+#include "bpf_util.h"
+
+struct tlpm_node {
+       struct tlpm_node *next;
+       size_t n_bits;
+       uint8_t key[];
+};
+
+static struct tlpm_node *tlpm_add(struct tlpm_node *list,
+                                 const uint8_t *key,
+                                 size_t n_bits)
+{
+       struct tlpm_node *node;
+       size_t n;
+
+       /* add new entry with @key/@n_bits to @list and return new head */
+
+       n = (n_bits + 7) / 8;
+       node = malloc(sizeof(*node) + n);
+       assert(node);
+
+       node->next = list;
+       node->n_bits = n_bits;
+       memcpy(node->key, key, n);
+
+       return node;
+}
+
+static void tlpm_clear(struct tlpm_node *list)
+{
+       struct tlpm_node *node;
+
+       /* free all entries in @list */
+
+       while ((node = list)) {
+               list = list->next;
+               free(node);
+       }
+}
+
+static struct tlpm_node *tlpm_match(struct tlpm_node *list,
+                                   const uint8_t *key,
+                                   size_t n_bits)
+{
+       struct tlpm_node *best = NULL;
+       size_t i;
+
+       /* Perform longest prefix-match on @key/@n_bits. That is, iterate all
+        * entries and match each prefix against @key. Remember the "best"
+        * entry we find (i.e., the longest prefix that matches) and return it
+        * to the caller when done.
+        */
+
+       for ( ; list; list = list->next) {
+               for (i = 0; i < n_bits && i < list->n_bits; ++i) {
+                       if ((key[i / 8] & (1 << (7 - i % 8))) !=
+                           (list->key[i / 8] & (1 << (7 - i % 8))))
+                               break;
+               }
+
+               if (i >= list->n_bits) {
+                       if (!best || i > best->n_bits)
+                               best = list;
+               }
+       }
+
+       return best;
+}
+
+static void test_lpm_basic(void)
+{
+       struct tlpm_node *list = NULL, *t1, *t2;
+
+       /* very basic, static tests to verify tlpm works as expected */
+
+       assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 8));
+
+       t1 = list = tlpm_add(list, (uint8_t[]){ 0xff }, 8);
+       assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8));
+       assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16));
+       assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0x00 }, 16));
+       assert(!tlpm_match(list, (uint8_t[]){ 0x7f }, 8));
+       assert(!tlpm_match(list, (uint8_t[]){ 0xfe }, 8));
+       assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 7));
+
+       t2 = list = tlpm_add(list, (uint8_t[]){ 0xff, 0xff }, 16);
+       assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8));
+       assert(t2 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16));
+       assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 15));
+       assert(!tlpm_match(list, (uint8_t[]){ 0x7f, 0xff }, 16));
+
+       tlpm_clear(list);
+}
+
+static void test_lpm_order(void)
+{
+       struct tlpm_node *t1, *t2, *l1 = NULL, *l2 = NULL;
+       size_t i, j;
+
+       /* Verify the tlpm implementation works correctly regardless of the
+        * order of entries. Insert a random set of entries into @l1, and copy
+        * the same data in reverse order into @l2. Then verify a lookup of
+        * random keys will yield the same result in both sets.
+        */
+
+       for (i = 0; i < (1 << 12); ++i)
+               l1 = tlpm_add(l1, (uint8_t[]){
+                                       rand() % 0xff,
+                                       rand() % 0xff,
+                               }, rand() % 16 + 1);
+
+       for (t1 = l1; t1; t1 = t1->next)
+               l2 = tlpm_add(l2, t1->key, t1->n_bits);
+
+       for (i = 0; i < (1 << 8); ++i) {
+               uint8_t key[] = { rand() % 0xff, rand() % 0xff };
+
+               t1 = tlpm_match(l1, key, 16);
+               t2 = tlpm_match(l2, key, 16);
+
+               assert(!t1 == !t2);
+               if (t1) {
+                       assert(t1->n_bits == t2->n_bits);
+                       for (j = 0; j < t1->n_bits; ++j)
+                               assert((t1->key[j / 8] & (1 << (7 - j % 8))) ==
+                                      (t2->key[j / 8] & (1 << (7 - j % 8))));
+               }
+       }
+
+       tlpm_clear(l1);
+       tlpm_clear(l2);
+}
+
+static void test_lpm_map(int keysize)
+{
+       size_t i, j, n_matches, n_nodes, n_lookups;
+       struct tlpm_node *t, *list = NULL;
+       struct bpf_lpm_trie_key *key;
+       uint8_t *data, *value;
+       int r, map;
+
+       /* Compare behavior of tlpm vs. bpf-lpm. Create a randomized set of
+        * prefixes and insert it into both tlpm and bpf-lpm. Then run some
+        * randomized lookups and verify both maps return the same result.
+        */
+
+       n_matches = 0;
+       n_nodes = 1 << 8;
+       n_lookups = 1 << 16;
+
+       data = alloca(keysize);
+       memset(data, 0, keysize);
+
+       value = alloca(keysize + 1);
+       memset(value, 0, keysize + 1);
+
+       key = alloca(sizeof(*key) + keysize);
+       memset(key, 0, sizeof(*key) + keysize);
+
+       map = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
+                            sizeof(*key) + keysize,
+                            keysize + 1,
+                            4096,
+                            BPF_F_NO_PREALLOC);
+       assert(map >= 0);
+
+       for (i = 0; i < n_nodes; ++i) {
+               for (j = 0; j < keysize; ++j)
+                       value[j] = rand() & 0xff;
+               value[keysize] = rand() % (8 * keysize + 1);
+
+               list = tlpm_add(list, value, value[keysize]);
+
+               key->prefixlen = value[keysize];
+               memcpy(key->data, value, keysize);
+               r = bpf_map_update_elem(map, key, value, 0);
+               assert(!r);
+       }
+
+       for (i = 0; i < n_lookups; ++i) {
+               for (j = 0; j < keysize; ++j)
+                       data[j] = rand() & 0xff;
+
+               t = tlpm_match(list, data, 8 * keysize);
+
+               key->prefixlen = 8 * keysize;
+               memcpy(key->data, data, keysize);
+               r = bpf_map_lookup_elem(map, key, value);
+               assert(!r || errno == ENOENT);
+               assert(!t == !!r);
+
+               if (t) {
+                       ++n_matches;
+                       assert(t->n_bits == value[keysize]);
+                       for (j = 0; j < t->n_bits; ++j)
+                               assert((t->key[j / 8] & (1 << (7 - j % 8))) ==
+                                      (value[j / 8] & (1 << (7 - j % 8))));
+               }
+       }
+
+       close(map);
+       tlpm_clear(list);
+
+       /* With 256 random nodes in the map, we are pretty likely to match
+        * something on every lookup. For statistics, use this:
+        *
+        *     printf("  nodes: %zu\n"
+        *            "lookups: %zu\n"
+        *            "matches: %zu\n", n_nodes, n_lookups, n_matches);
+        */
+}
+
+/* Test the implementation with some 'real world' examples */
+
+static void test_lpm_ipaddr(void)
+{
+       struct bpf_lpm_trie_key *key_ipv4;
+       struct bpf_lpm_trie_key *key_ipv6;
+       size_t key_size_ipv4;
+       size_t key_size_ipv6;
+       int map_fd_ipv4;
+       int map_fd_ipv6;
+       __u64 value;
+
+       key_size_ipv4 = sizeof(*key_ipv4) + sizeof(__u32);
+       key_size_ipv6 = sizeof(*key_ipv6) + sizeof(__u32) * 4;
+       key_ipv4 = alloca(key_size_ipv4);
+       key_ipv6 = alloca(key_size_ipv6);
+
+       map_fd_ipv4 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
+                                    key_size_ipv4, sizeof(value),
+                                    100, BPF_F_NO_PREALLOC);
+       assert(map_fd_ipv4 >= 0);
+
+       map_fd_ipv6 = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE,
+                                    key_size_ipv6, sizeof(value),
+                                    100, BPF_F_NO_PREALLOC);
+       assert(map_fd_ipv6 >= 0);
+
+       /* Fill in some IPv4 and IPv6 address ranges */
+       value = 1;
+       key_ipv4->prefixlen = 16;
+       inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
+       assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
+
+       value = 2;
+       key_ipv4->prefixlen = 24;
+       inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
+       assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
+
+       value = 3;
+       key_ipv4->prefixlen = 24;
+       inet_pton(AF_INET, "192.168.128.0", key_ipv4->data);
+       assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
+
+       value = 5;
+       key_ipv4->prefixlen = 24;
+       inet_pton(AF_INET, "192.168.1.0", key_ipv4->data);
+       assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
+
+       value = 4;
+       key_ipv4->prefixlen = 23;
+       inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
+       assert(bpf_map_update_elem(map_fd_ipv4, key_ipv4, &value, 0) == 0);
+
+       value = 0xdeadbeef;
+       key_ipv6->prefixlen = 64;
+       inet_pton(AF_INET6, "2a00:1450:4001:814::200e", key_ipv6->data);
+       assert(bpf_map_update_elem(map_fd_ipv6, key_ipv6, &value, 0) == 0);
+
+       /* Set prefixlen to maximum for lookups */
+       key_ipv4->prefixlen = 32;
+       key_ipv6->prefixlen = 128;
+
+       /* Test some lookups that should come back with a value */
+       inet_pton(AF_INET, "192.168.128.23", key_ipv4->data);
+       assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0);
+       assert(value == 3);
+
+       inet_pton(AF_INET, "192.168.0.1", key_ipv4->data);
+       assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == 0);
+       assert(value == 2);
+
+       inet_pton(AF_INET6, "2a00:1450:4001:814::", key_ipv6->data);
+       assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0);
+       assert(value == 0xdeadbeef);
+
+       inet_pton(AF_INET6, "2a00:1450:4001:814::1", key_ipv6->data);
+       assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == 0);
+       assert(value == 0xdeadbeef);
+
+       /* Test some lookups that should not match any entry */
+       inet_pton(AF_INET, "10.0.0.1", key_ipv4->data);
+       assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
+              errno == ENOENT);
+
+       inet_pton(AF_INET, "11.11.11.11", key_ipv4->data);
+       assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
+              errno == ENOENT);
+
+       inet_pton(AF_INET6, "2a00:ffff::", key_ipv6->data);
+       assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -1 &&
+              errno == ENOENT);
+
+       close(map_fd_ipv4);
+       close(map_fd_ipv6);
+}
+
+int main(void)
+{
+       struct rlimit limit  = { RLIM_INFINITY, RLIM_INFINITY };
+       int i, ret;
+
+       /* we want predictable, pseudo-random tests */
+       srand(0xf00ba1);
+
+       /* allow unlimited locked memory */
+       ret = setrlimit(RLIMIT_MEMLOCK, &limit);
+       if (ret < 0)
+               perror("Unable to lift memlock rlimit");
+
+       test_lpm_basic();
+       test_lpm_order();
+
+       /* Test with 8, 16, 24, 32, ... 128 bit prefix length */
+       for (i = 1; i <= 16; ++i)
+               test_lpm_map(i);
+
+       test_lpm_ipaddr();
+
+       printf("test_lpm: OK\n");
+       return 0;
+}
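As a sanity check of the expectations encoded in test_lpm_ipaddr(): the query 192.168.0.1, looked up with prefixlen 32, is covered by three of the stored prefixes, and the trie must return the longest one:

/* stored:  192.168.0.0/16 -> 1    (matches)
 *          192.168.0.0/23 -> 4    (matches)
 *          192.168.0.0/24 -> 2    (matches, longest: value asserted == 2)
 *          192.168.1.0/24 -> 5    (does not cover 192.168.0.1)
 *          192.168.128.0/24 -> 3  (does not cover 192.168.0.1)
 */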
index 9f7bd1915c217bedc9b2ae51bb347b9a39bd1192..00b0aff56e2e7256de150815e0da2eb0fb20d7b6 100644 (file)
@@ -18,7 +18,7 @@
 #include <sys/wait.h>
 #include <sys/resource.h>
 
-#include "bpf_sys.h"
+#include <bpf/bpf.h>
 #include "bpf_util.h"
 
 #define LOCAL_FREE_TARGET      (128)
@@ -30,11 +30,11 @@ static int create_map(int map_type, int map_flags, unsigned int size)
 {
        int map_fd;
 
-       map_fd = bpf_map_create(map_type, sizeof(unsigned long long),
+       map_fd = bpf_create_map(map_type, sizeof(unsigned long long),
                                sizeof(unsigned long long), size, map_flags);
 
        if (map_fd == -1)
-               perror("bpf_map_create");
+               perror("bpf_create_map");
 
        return map_fd;
 }
@@ -45,9 +45,9 @@ static int map_subset(int map0, int map1)
        unsigned long long value0[nr_cpus], value1[nr_cpus];
        int ret;
 
-       while (!bpf_map_next_key(map1, &next_key, &next_key)) {
-               assert(!bpf_map_lookup(map1, &next_key, value1));
-               ret = bpf_map_lookup(map0, &next_key, value0);
+       while (!bpf_map_get_next_key(map1, &next_key, &next_key)) {
+               assert(!bpf_map_lookup_elem(map1, &next_key, value1));
+               ret = bpf_map_lookup_elem(map0, &next_key, value0);
                if (ret) {
                        printf("key:%llu not found from map. %s(%d)\n",
                               next_key, strerror(errno), errno);
@@ -119,52 +119,54 @@ static void test_lru_sanity0(int map_type, int map_flags)
        /* insert key=1 element */
 
        key = 1;
-       assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
-       assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST));
+       assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+       assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+                                   BPF_NOEXIST));
 
        /* BPF_NOEXIST means: add new element if it doesn't exist */
-       assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST) == -1 &&
+       assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
               /* key=1 already exists */
-              errno == EEXIST);
+              && errno == EEXIST);
 
-       assert(bpf_map_update(lru_map_fd, &key, value, -1) == -1 &&
+       assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -1 &&
               errno == EINVAL);
 
        /* insert key=2 element */
 
        /* check that key=2 is not found */
        key = 2;
-       assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 &&
+       assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
               errno == ENOENT);
 
        /* BPF_EXIST means: update existing element */
-       assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
+       assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
               /* key=2 is not there */
               errno == ENOENT);
 
-       assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
+       assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
 
        /* insert key=3 element */
 
        /* check that key=3 is not found */
        key = 3;
-       assert(bpf_map_lookup(lru_map_fd, &key, value) == -1 &&
+       assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
               errno == ENOENT);
 
        /* check that key=1 can be found and mark the ref bit to
         * stop LRU from removing key=1
         */
        key = 1;
-       assert(!bpf_map_lookup(lru_map_fd, &key, value));
+       assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
        assert(value[0] == 1234);
 
        key = 3;
-       assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
-       assert(!bpf_map_update(expected_map_fd, &key, value, BPF_NOEXIST));
+       assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
+       assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+                                   BPF_NOEXIST));
 
        /* key=2 has been removed from the LRU */
        key = 2;
-       assert(bpf_map_lookup(lru_map_fd, &key, value) == -1);
+       assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1);
 
        assert(map_equal(lru_map_fd, expected_map_fd));
 
@@ -217,14 +219,15 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
        /* Insert 1 to tgt_free (+tgt_free keys) */
        end_key = 1 + tgt_free;
        for (key = 1; key < end_key; key++)
-               assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
+               assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+                                           BPF_NOEXIST));
 
        /* Lookup 1 to tgt_free/2 */
        end_key = 1 + batch_size;
        for (key = 1; key < end_key; key++) {
-               assert(!bpf_map_lookup(lru_map_fd, &key, value));
-               assert(!bpf_map_update(expected_map_fd, &key, value,
-                                      BPF_NOEXIST));
+               assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
+               assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+                                           BPF_NOEXIST));
        }
 
        /* Insert 1+tgt_free to 2*tgt_free
@@ -234,9 +237,10 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
        key = 1 + tgt_free;
        end_key = key + tgt_free;
        for (; key < end_key; key++) {
-               assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
-               assert(!bpf_map_update(expected_map_fd, &key, value,
-                                      BPF_NOEXIST));
+               assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+                                           BPF_NOEXIST));
+               assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+                                           BPF_NOEXIST));
        }
 
        assert(map_equal(lru_map_fd, expected_map_fd));
@@ -301,9 +305,10 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
        /* Insert 1 to tgt_free (+tgt_free keys) */
        end_key = 1 + tgt_free;
        for (key = 1; key < end_key; key++)
-               assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
+               assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+                                           BPF_NOEXIST));
 
-       /* Any bpf_map_update will require to acquire a new node
+       /* Any bpf_map_update_elem needs to acquire a new node
         * from LRU first.
         *
         * The local list is running out of free nodes.
@@ -316,10 +321,12 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
         */
        key = 1;
        if (map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
-               assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
-               assert(!bpf_map_delete(lru_map_fd, &key));
+               assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+                                           BPF_NOEXIST));
+               assert(!bpf_map_delete_elem(lru_map_fd, &key));
        } else {
-               assert(bpf_map_update(lru_map_fd, &key, value, BPF_EXIST));
+               assert(bpf_map_update_elem(lru_map_fd, &key, value,
+                                          BPF_EXIST));
        }
 
        /* Re-insert 1 to tgt_free/2 again and do a lookup
@@ -328,12 +335,13 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
        end_key = 1 + batch_size;
        value[0] = 4321;
        for (key = 1; key < end_key; key++) {
-               assert(bpf_map_lookup(lru_map_fd, &key, value));
-               assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
-               assert(!bpf_map_lookup(lru_map_fd, &key, value));
+               assert(bpf_map_lookup_elem(lru_map_fd, &key, value));
+               assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+                                           BPF_NOEXIST));
+               assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
                assert(value[0] == 4321);
-               assert(!bpf_map_update(expected_map_fd, &key, value,
-                                      BPF_NOEXIST));
+               assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+                                           BPF_NOEXIST));
        }
 
        value[0] = 1234;
@@ -344,14 +352,16 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
                /* These newly added but not referenced keys will be
                 * gone during the next LRU shrink.
                 */
-               assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
+               assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+                                           BPF_NOEXIST));
 
        /* Insert 1+tgt_free*3/2 to  tgt_free*5/2 */
        end_key = key + tgt_free;
        for (; key < end_key; key++) {
-               assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
-               assert(!bpf_map_update(expected_map_fd, &key, value,
-                                      BPF_NOEXIST));
+               assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+                                           BPF_NOEXIST));
+               assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+                                           BPF_NOEXIST));
        }
 
        assert(map_equal(lru_map_fd, expected_map_fd));
@@ -401,14 +411,15 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
        /* Insert 1 to 2*tgt_free (+2*tgt_free keys) */
        end_key = 1 + (2 * tgt_free);
        for (key = 1; key < end_key; key++)
-               assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
+               assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+                                           BPF_NOEXIST));
 
        /* Lookup key 1 to tgt_free*3/2 */
        end_key = tgt_free + batch_size;
        for (key = 1; key < end_key; key++) {
-               assert(!bpf_map_lookup(lru_map_fd, &key, value));
-               assert(!bpf_map_update(expected_map_fd, &key, value,
-                                      BPF_NOEXIST));
+               assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
+               assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+                                           BPF_NOEXIST));
        }
 
        /* Add 1+2*tgt_free to tgt_free*5/2
@@ -417,9 +428,10 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
        key = 2 * tgt_free + 1;
        end_key = key + batch_size;
        for (; key < end_key; key++) {
-               assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
-               assert(!bpf_map_update(expected_map_fd, &key, value,
-                                      BPF_NOEXIST));
+               assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+                                           BPF_NOEXIST));
+               assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+                                           BPF_NOEXIST));
        }
 
        assert(map_equal(lru_map_fd, expected_map_fd));
@@ -457,27 +469,29 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
        value[0] = 1234;
 
        for (key = 1; key <= 2 * tgt_free; key++)
-               assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
+               assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+                                           BPF_NOEXIST));
 
        key = 1;
-       assert(bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
+       assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
 
        for (key = 1; key <= tgt_free; key++) {
-               assert(!bpf_map_lookup(lru_map_fd, &key, value));
-               assert(!bpf_map_update(expected_map_fd, &key, value,
-                                      BPF_NOEXIST));
+               assert(!bpf_map_lookup_elem(lru_map_fd, &key, value));
+               assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+                                           BPF_NOEXIST));
        }
 
        for (; key <= 2 * tgt_free; key++) {
-               assert(!bpf_map_delete(lru_map_fd, &key));
-               assert(bpf_map_delete(lru_map_fd, &key));
+               assert(!bpf_map_delete_elem(lru_map_fd, &key));
+               assert(bpf_map_delete_elem(lru_map_fd, &key));
        }
 
        end_key = key + 2 * tgt_free;
        for (; key < end_key; key++) {
-               assert(!bpf_map_update(lru_map_fd, &key, value, BPF_NOEXIST));
-               assert(!bpf_map_update(expected_map_fd, &key, value,
-                                      BPF_NOEXIST));
+               assert(!bpf_map_update_elem(lru_map_fd, &key, value,
+                                           BPF_NOEXIST));
+               assert(!bpf_map_update_elem(expected_map_fd, &key, value,
+                                           BPF_NOEXIST));
        }
 
        assert(map_equal(lru_map_fd, expected_map_fd));
@@ -493,16 +507,16 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
        unsigned long long key, value[nr_cpus];
 
        /* Ensure the last key inserted by the previous CPU can be found */
-       assert(!bpf_map_lookup(map_fd, &last_key, value));
+       assert(!bpf_map_lookup_elem(map_fd, &last_key, value));
 
        value[0] = 1234;
 
        key = last_key + 1;
-       assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));
-       assert(!bpf_map_lookup(map_fd, &key, value));
+       assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
+       assert(!bpf_map_lookup_elem(map_fd, &key, value));
 
        /* Cannot find the last key because it was removed by LRU */
-       assert(bpf_map_lookup(map_fd, &last_key, value));
+       assert(bpf_map_lookup_elem(map_fd, &last_key, value));
 }
 
 /* Test map with only one element */
@@ -523,7 +537,7 @@ static void test_lru_sanity5(int map_type, int map_flags)
 
        value[0] = 1234;
        key = 0;
-       assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));
+       assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST));
 
        while (sched_next_online(0, &next_cpu) != -1) {
                pid_t pid;
index eedfef8d29469b562e8ff8f609bef5016ca44bd1..cada17ac00b8e6b5af37554ea8489be6ffc873a2 100644 (file)
@@ -21,7 +21,7 @@
 
 #include <linux/bpf.h>
 
-#include "bpf_sys.h"
+#include <bpf/bpf.h>
 #include "bpf_util.h"
 
 static int map_flags;
@@ -31,7 +31,7 @@ static void test_hashmap(int task, void *data)
        long long key, next_key, value;
        int fd;
 
-       fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
+       fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
                            2, map_flags);
        if (fd < 0) {
                printf("Failed to create hashmap '%s'!\n", strerror(errno));
@@ -41,69 +41,70 @@ static void test_hashmap(int task, void *data)
        key = 1;
        value = 1234;
        /* Insert key=1 element. */
-       assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0);
+       assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
 
        value = 0;
        /* BPF_NOEXIST means add new element if it doesn't exist. */
-       assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 &&
+       assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
               /* key=1 already exists. */
               errno == EEXIST);
 
        /* -1 is an invalid flag. */
-       assert(bpf_map_update(fd, &key, &value, -1) == -1 && errno == EINVAL);
+       assert(bpf_map_update_elem(fd, &key, &value, -1) == -1 &&
+              errno == EINVAL);
 
        /* Check that key=1 can be found. */
-       assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 1234);
+       assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 1234);
 
        key = 2;
        /* Check that key=2 is not found. */
-       assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT);
+       assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT);
 
        /* BPF_EXIST means update existing element. */
-       assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == -1 &&
+       assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == -1 &&
               /* key=2 is not there. */
               errno == ENOENT);
 
        /* Insert key=2 element. */
-       assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0);
+       assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0);
 
        /* key=1 and key=2 were inserted, check that key=0 cannot be
         * inserted due to max_entries limit.
         */
        key = 0;
-       assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 &&
+       assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
               errno == E2BIG);
 
        /* Update existing element, though the map is full. */
        key = 1;
-       assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == 0);
+       assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0);
        key = 2;
-       assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0);
+       assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
        key = 1;
-       assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0);
+       assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
 
        /* Check that key = 0 doesn't exist. */
        key = 0;
-       assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT);
+       assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);
 
        /* Iterate over two elements. */
-       assert(bpf_map_next_key(fd, &key, &next_key) == 0 &&
+       assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 &&
               (next_key == 1 || next_key == 2));
-       assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 &&
+       assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
               (next_key == 1 || next_key == 2));
-       assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 &&
+       assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 &&
               errno == ENOENT);
 
        /* Delete both elements. */
        key = 1;
-       assert(bpf_map_delete(fd, &key) == 0);
+       assert(bpf_map_delete_elem(fd, &key) == 0);
        key = 2;
-       assert(bpf_map_delete(fd, &key) == 0);
-       assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT);
+       assert(bpf_map_delete_elem(fd, &key) == 0);
+       assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);
 
        key = 0;
        /* Check that map is empty. */
-       assert(bpf_map_next_key(fd, &key, &next_key) == -1 &&
+       assert(bpf_map_get_next_key(fd, &key, &next_key) == -1 &&
               errno == ENOENT);
 
        close(fd);
@@ -117,7 +118,7 @@ static void test_hashmap_percpu(int task, void *data)
        int expected_key_mask = 0;
        int fd, i;
 
-       fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key),
+       fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key),
                            sizeof(value[0]), 2, map_flags);
        if (fd < 0) {
                printf("Failed to create hashmap '%s'!\n", strerror(errno));
@@ -130,53 +131,54 @@ static void test_hashmap_percpu(int task, void *data)
        key = 1;
        /* Insert key=1 element. */
        assert(!(expected_key_mask & key));
-       assert(bpf_map_update(fd, &key, value, BPF_ANY) == 0);
+       assert(bpf_map_update_elem(fd, &key, value, BPF_ANY) == 0);
        expected_key_mask |= key;
 
        /* BPF_NOEXIST means add new element if it doesn't exist. */
-       assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == -1 &&
+       assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == -1 &&
               /* key=1 already exists. */
               errno == EEXIST);
 
        /* -1 is an invalid flag. */
-       assert(bpf_map_update(fd, &key, value, -1) == -1 && errno == EINVAL);
+       assert(bpf_map_update_elem(fd, &key, value, -1) == -1 &&
+              errno == EINVAL);
 
        /* Check that key=1 can be found. Value could be 0 if the lookup
         * was run from a different CPU.
         */
        value[0] = 1;
-       assert(bpf_map_lookup(fd, &key, value) == 0 && value[0] == 100);
+       assert(bpf_map_lookup_elem(fd, &key, value) == 0 && value[0] == 100);
 
        key = 2;
        /* Check that key=2 is not found. */
-       assert(bpf_map_lookup(fd, &key, value) == -1 && errno == ENOENT);
+       assert(bpf_map_lookup_elem(fd, &key, value) == -1 && errno == ENOENT);
 
        /* BPF_EXIST means update existing element. */
-       assert(bpf_map_update(fd, &key, value, BPF_EXIST) == -1 &&
+       assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == -1 &&
               /* key=2 is not there. */
               errno == ENOENT);
 
        /* Insert key=2 element. */
        assert(!(expected_key_mask & key));
-       assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == 0);
+       assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == 0);
        expected_key_mask |= key;
 
        /* key=1 and key=2 were inserted, check that key=0 cannot be
         * inserted due to max_entries limit.
         */
        key = 0;
-       assert(bpf_map_update(fd, &key, value, BPF_NOEXIST) == -1 &&
+       assert(bpf_map_update_elem(fd, &key, value, BPF_NOEXIST) == -1 &&
               errno == E2BIG);
 
        /* Check that key = 0 doesn't exist. */
-       assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT);
+       assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);
 
        /* Iterate over two elements. */
-       while (!bpf_map_next_key(fd, &key, &next_key)) {
+       while (!bpf_map_get_next_key(fd, &key, &next_key)) {
                assert((expected_key_mask & next_key) == next_key);
                expected_key_mask &= ~next_key;
 
-               assert(bpf_map_lookup(fd, &next_key, value) == 0);
+               assert(bpf_map_lookup_elem(fd, &next_key, value) == 0);
 
                for (i = 0; i < nr_cpus; i++)
                        assert(value[i] == i + 100);
@@ -187,18 +189,18 @@ static void test_hashmap_percpu(int task, void *data)
 
        /* Update with BPF_EXIST. */
        key = 1;
-       assert(bpf_map_update(fd, &key, value, BPF_EXIST) == 0);
+       assert(bpf_map_update_elem(fd, &key, value, BPF_EXIST) == 0);
 
        /* Delete both elements. */
        key = 1;
-       assert(bpf_map_delete(fd, &key) == 0);
+       assert(bpf_map_delete_elem(fd, &key) == 0);
        key = 2;
-       assert(bpf_map_delete(fd, &key) == 0);
-       assert(bpf_map_delete(fd, &key) == -1 && errno == ENOENT);
+       assert(bpf_map_delete_elem(fd, &key) == 0);
+       assert(bpf_map_delete_elem(fd, &key) == -1 && errno == ENOENT);
 
        key = 0;
        /* Check that map is empty. */
-       assert(bpf_map_next_key(fd, &key, &next_key) == -1 &&
+       assert(bpf_map_get_next_key(fd, &key, &next_key) == -1 &&
               errno == ENOENT);
 
        close(fd);
@@ -209,7 +211,7 @@ static void test_arraymap(int task, void *data)
        int key, next_key, fd;
        long long value;
 
-       fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value),
+       fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(value),
                            2, 0);
        if (fd < 0) {
                printf("Failed to create arraymap '%s'!\n", strerror(errno));
@@ -219,40 +221,40 @@ static void test_arraymap(int task, void *data)
        key = 1;
        value = 1234;
        /* Insert key=1 element. */
-       assert(bpf_map_update(fd, &key, &value, BPF_ANY) == 0);
+       assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0);
 
        value = 0;
-       assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 &&
+       assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
               errno == EEXIST);
 
        /* Check that key=1 can be found. */
-       assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 1234);
+       assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 1234);
 
        key = 0;
        /* Check that key=0 is also found and zero initialized. */
-       assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 0);
+       assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 0);
 
        /* key=0 and key=1 were inserted, check that key=2 cannot be inserted
         * due to max_entries limit.
         */
        key = 2;
-       assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == -1 &&
+       assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == -1 &&
               errno == E2BIG);
 
        /* Check that key = 2 doesn't exist. */
-       assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT);
+       assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT);
 
        /* Iterate over two elements. */
-       assert(bpf_map_next_key(fd, &key, &next_key) == 0 &&
+       assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 &&
               next_key == 0);
-       assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 &&
+       assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
               next_key == 1);
-       assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 &&
+       assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 &&
               errno == ENOENT);
 
        /* Delete shouldn't succeed. */
        key = 1;
-       assert(bpf_map_delete(fd, &key) == -1 && errno == EINVAL);
+       assert(bpf_map_delete_elem(fd, &key) == -1 && errno == EINVAL);
 
        close(fd);
 }
@@ -263,7 +265,7 @@ static void test_arraymap_percpu(int task, void *data)
        int key, next_key, fd, i;
        long values[nr_cpus];
 
-       fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
+       fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
                            sizeof(values[0]), 2, 0);
        if (fd < 0) {
                printf("Failed to create arraymap '%s'!\n", strerror(errno));
@@ -275,39 +277,39 @@ static void test_arraymap_percpu(int task, void *data)
 
        key = 1;
        /* Insert key=1 element. */
-       assert(bpf_map_update(fd, &key, values, BPF_ANY) == 0);
+       assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
 
        values[0] = 0;
-       assert(bpf_map_update(fd, &key, values, BPF_NOEXIST) == -1 &&
+       assert(bpf_map_update_elem(fd, &key, values, BPF_NOEXIST) == -1 &&
               errno == EEXIST);
 
        /* Check that key=1 can be found. */
-       assert(bpf_map_lookup(fd, &key, values) == 0 && values[0] == 100);
+       assert(bpf_map_lookup_elem(fd, &key, values) == 0 && values[0] == 100);
 
        key = 0;
        /* Check that key=0 is also found and zero initialized. */
-       assert(bpf_map_lookup(fd, &key, values) == 0 &&
+       assert(bpf_map_lookup_elem(fd, &key, values) == 0 &&
               values[0] == 0 && values[nr_cpus - 1] == 0);
 
        /* Check that key=2 cannot be inserted due to max_entries limit. */
        key = 2;
-       assert(bpf_map_update(fd, &key, values, BPF_EXIST) == -1 &&
+       assert(bpf_map_update_elem(fd, &key, values, BPF_EXIST) == -1 &&
               errno == E2BIG);
 
        /* Check that key = 2 doesn't exist. */
-       assert(bpf_map_lookup(fd, &key, values) == -1 && errno == ENOENT);
+       assert(bpf_map_lookup_elem(fd, &key, values) == -1 && errno == ENOENT);
 
        /* Iterate over two elements. */
-       assert(bpf_map_next_key(fd, &key, &next_key) == 0 &&
+       assert(bpf_map_get_next_key(fd, &key, &next_key) == 0 &&
               next_key == 0);
-       assert(bpf_map_next_key(fd, &next_key, &next_key) == 0 &&
+       assert(bpf_map_get_next_key(fd, &next_key, &next_key) == 0 &&
               next_key == 1);
-       assert(bpf_map_next_key(fd, &next_key, &next_key) == -1 &&
+       assert(bpf_map_get_next_key(fd, &next_key, &next_key) == -1 &&
               errno == ENOENT);
 
        /* Delete shouldn't succeed. */
        key = 1;
-       assert(bpf_map_delete(fd, &key) == -1 && errno == EINVAL);
+       assert(bpf_map_delete_elem(fd, &key) == -1 && errno == EINVAL);
 
        close(fd);
 }
@@ -319,7 +321,7 @@ static void test_arraymap_percpu_many_keys(void)
        long values[nr_cpus];
        int key, fd, i;
 
-       fd = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
+       fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
                            sizeof(values[0]), nr_keys, 0);
        if (fd < 0) {
                printf("Failed to create per-cpu arraymap '%s'!\n",
@@ -331,13 +333,13 @@ static void test_arraymap_percpu_many_keys(void)
                values[i] = i + 10;
 
        for (key = 0; key < nr_keys; key++)
-               assert(bpf_map_update(fd, &key, values, BPF_ANY) == 0);
+               assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
 
        for (key = 0; key < nr_keys; key++) {
                for (i = 0; i < nr_cpus; i++)
                        values[i] = 0;
 
-               assert(bpf_map_lookup(fd, &key, values) == 0);
+               assert(bpf_map_lookup_elem(fd, &key, values) == 0);
 
                for (i = 0; i < nr_cpus; i++)
                        assert(values[i] == i + 10);
@@ -357,7 +359,7 @@ static void test_map_large(void)
        } key;
        int fd, i, value;
 
-       fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
+       fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
                            MAP_SIZE, map_flags);
        if (fd < 0) {
                printf("Failed to create large map '%s'!\n", strerror(errno));
@@ -368,22 +370,22 @@ static void test_map_large(void)
                key = (struct bigkey) { .c = i };
                value = i;
 
-               assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0);
+               assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == 0);
        }
 
        key.c = -1;
-       assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 &&
+       assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
               errno == E2BIG);
 
        /* Iterate through all elements. */
        for (i = 0; i < MAP_SIZE; i++)
-               assert(bpf_map_next_key(fd, &key, &key) == 0);
-       assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT);
+               assert(bpf_map_get_next_key(fd, &key, &key) == 0);
+       assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT);
 
        key.c = 0;
-       assert(bpf_map_lookup(fd, &key, &value) == 0 && value == 0);
+       assert(bpf_map_lookup_elem(fd, &key, &value) == 0 && value == 0);
        key.a = 1;
-       assert(bpf_map_lookup(fd, &key, &value) == -1 && errno == ENOENT);
+       assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT);
 
        close(fd);
 }
@@ -437,10 +439,12 @@ static void do_work(int fn, void *data)
                key = value = i;
 
                if (do_update) {
-                       assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == 0);
-                       assert(bpf_map_update(fd, &key, &value, BPF_EXIST) == 0);
+                       assert(bpf_map_update_elem(fd, &key, &value,
+                                                  BPF_NOEXIST) == 0);
+                       assert(bpf_map_update_elem(fd, &key, &value,
+                                                  BPF_EXIST) == 0);
                } else {
-                       assert(bpf_map_delete(fd, &key) == 0);
+                       assert(bpf_map_delete_elem(fd, &key) == 0);
                }
        }
 }
@@ -450,7 +454,7 @@ static void test_map_parallel(void)
        int i, fd, key = 0, value = 0;
        int data[2];
 
-       fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
+       fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value),
                            MAP_SIZE, map_flags);
        if (fd < 0) {
                printf("Failed to create map for parallel test '%s'!\n",
@@ -468,20 +472,20 @@ static void test_map_parallel(void)
        run_parallel(TASKS, do_work, data);
 
        /* Check that key=0 is already there. */
-       assert(bpf_map_update(fd, &key, &value, BPF_NOEXIST) == -1 &&
+       assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
               errno == EEXIST);
 
        /* Check that all elements were inserted. */
        key = -1;
        for (i = 0; i < MAP_SIZE; i++)
-               assert(bpf_map_next_key(fd, &key, &key) == 0);
-       assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT);
+               assert(bpf_map_get_next_key(fd, &key, &key) == 0);
+       assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT);
 
        /* Another check for all elements */
        for (i = 0; i < MAP_SIZE; i++) {
                key = MAP_SIZE - i - 1;
 
-               assert(bpf_map_lookup(fd, &key, &value) == 0 &&
+               assert(bpf_map_lookup_elem(fd, &key, &value) == 0 &&
                       value == key);
        }
 
@@ -491,7 +495,7 @@ static void test_map_parallel(void)
 
        /* Nothing should be left. */
        key = -1;
-       assert(bpf_map_next_key(fd, &key, &key) == -1 && errno == ENOENT);
+       assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT);
 }
 
 static void run_all_tests(void)
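
One pattern worth calling out in the per-CPU hunks above: from user space, a single bpf_map_update_elem()/bpf_map_lookup_elem() call on a BPF_MAP_TYPE_PERCPU_* map transfers one value slot per possible CPU, which is why the tests size their buffers with nr_cpus. A sketch of that round trip (assumes bpf_num_possible_cpus() from the selftests' local bpf_util.h; not part of the commit):

        /* Sketch: per-CPU map values travel as one slot per possible CPU. */
        #include <assert.h>
        #include <unistd.h>
        #include <linux/bpf.h>
        #include <bpf/bpf.h>
        #include "bpf_util.h"

        static void percpu_demo(void)
        {
                unsigned int nr_cpus = bpf_num_possible_cpus();
                long values[nr_cpus];
                unsigned int i;
                int key = 0, fd;

                fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
                                    sizeof(values[0]), 1, 0);
                assert(fd >= 0);

                for (i = 0; i < nr_cpus; i++)
                        values[i] = i + 100;    /* one value per possible CPU */

                /* A single update writes every CPU's slot for this key ... */
                assert(bpf_map_update_elem(fd, &key, values, BPF_ANY) == 0);
                /* ... and a single lookup reads them all back. */
                assert(bpf_map_lookup_elem(fd, &key, values) == 0);
                for (i = 0; i < nr_cpus; i++)
                        assert(values[i] == i + 100);
                close(fd);
        }
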
diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c
new file mode 100644
index 0000000..de409fc
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tag.c
@@ -0,0 +1,203 @@
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <time.h>
+#include <errno.h>
+#include <unistd.h>
+#include <string.h>
+#include <sched.h>
+#include <limits.h>
+#include <assert.h>
+
+#include <sys/socket.h>
+#include <sys/resource.h>
+
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/if_alg.h>
+
+#include <bpf/bpf.h>
+
+#include "../../../include/linux/filter.h"
+
+static struct bpf_insn prog[BPF_MAXINSNS];
+
+static void bpf_gen_imm_prog(unsigned int insns, int fd_map)
+{
+       int i;
+
+       srand(time(NULL));
+       for (i = 0; i < insns; i++)
+               prog[i] = BPF_ALU64_IMM(BPF_MOV, i % BPF_REG_10, rand());
+       prog[i - 1] = BPF_EXIT_INSN();
+}
+
+static void bpf_gen_map_prog(unsigned int insns, int fd_map)
+{
+       int i, j = 0;
+
+       for (i = 0; i + 1 < insns; i += 2) {
+               struct bpf_insn tmp[] = {
+                       BPF_LD_MAP_FD(j++ % BPF_REG_10, fd_map)
+               };
+
+               memcpy(&prog[i], tmp, sizeof(tmp));
+       }
+       if (insns % 2 == 0)
+               prog[insns - 2] = BPF_ALU64_IMM(BPF_MOV, i % BPF_REG_10, 42);
+       prog[insns - 1] = BPF_EXIT_INSN();
+}
+
+static int bpf_try_load_prog(int insns, int fd_map,
+                            void (*bpf_filler)(unsigned int insns,
+                                               int fd_map))
+{
+       int fd_prog;
+
+       bpf_filler(insns, fd_map);
+       fd_prog = bpf_load_program(BPF_PROG_TYPE_SCHED_CLS, prog, insns, "", 0,
+                                  NULL, 0);
+       assert(fd_prog > 0);
+       if (fd_map > 0)
+               bpf_filler(insns, 0);
+       return fd_prog;
+}
+
+static int __hex2bin(char ch)
+{
+       if ((ch >= '0') && (ch <= '9'))
+               return ch - '0';
+       ch = tolower(ch);
+       if ((ch >= 'a') && (ch <= 'f'))
+               return ch - 'a' + 10;
+       return -1;
+}
+
+static int hex2bin(uint8_t *dst, const char *src, size_t count)
+{
+       while (count--) {
+               int hi = __hex2bin(*src++);
+               int lo = __hex2bin(*src++);
+
+               if ((hi < 0) || (lo < 0))
+                       return -1;
+               *dst++ = (hi << 4) | lo;
+       }
+       return 0;
+}
+
+static void tag_from_fdinfo(int fd_prog, uint8_t *tag, uint32_t len)
+{
+       const int prefix_len = sizeof("prog_tag:\t") - 1;
+       char buff[256];
+       int ret = -1;
+       FILE *fp;
+
+       snprintf(buff, sizeof(buff), "/proc/%d/fdinfo/%d", getpid(),
+                fd_prog);
+       fp = fopen(buff, "r");
+       assert(fp);
+
+       while (fgets(buff, sizeof(buff), fp)) {
+               if (strncmp(buff, "prog_tag:\t", prefix_len))
+                       continue;
+               ret = hex2bin(tag, buff + prefix_len, len);
+               break;
+       }
+
+       fclose(fp);
+       assert(!ret);
+}
+
+static void tag_from_alg(int insns, uint8_t *tag, uint32_t len)
+{
+       static const struct sockaddr_alg alg = {
+               .salg_family    = AF_ALG,
+               .salg_type      = "hash",
+               .salg_name      = "sha1",
+       };
+       int fd_base, fd_alg, ret;
+       ssize_t size;
+
+       fd_base = socket(AF_ALG, SOCK_SEQPACKET, 0);
+       assert(fd_base > 0);
+
+       ret = bind(fd_base, (struct sockaddr *)&alg, sizeof(alg));
+       assert(!ret);
+
+       fd_alg = accept(fd_base, NULL, 0);
+       assert(fd_alg > 0);
+
+       insns *= sizeof(struct bpf_insn);
+       size = write(fd_alg, prog, insns);
+       assert(size == insns);
+
+       size = read(fd_alg, tag, len);
+       assert(size == len);
+
+       close(fd_alg);
+       close(fd_base);
+}
+
+static void tag_dump(const char *prefix, uint8_t *tag, uint32_t len)
+{
+       int i;
+
+       printf("%s", prefix);
+       for (i = 0; i < len; i++)
+               printf("%02x", tag[i]);
+       printf("\n");
+}
+
+static void tag_exit_report(int insns, int fd_map, uint8_t *ftag,
+                           uint8_t *atag, uint32_t len)
+{
+       printf("Program tag mismatch for %d insns%s!\n", insns,
+              fd_map < 0 ? "" : " with map");
+
+       tag_dump("  fdinfo result: ", ftag, len);
+       tag_dump("  af_alg result: ", atag, len);
+       exit(1);
+}
+
+static void do_test(uint32_t *tests, int start_insns, int fd_map,
+                   void (*bpf_filler)(unsigned int insns, int fd))
+{
+       int i, fd_prog;
+
+       for (i = start_insns; i <= BPF_MAXINSNS; i++) {
+               uint8_t ftag[8], atag[sizeof(ftag)];
+
+               fd_prog = bpf_try_load_prog(i, fd_map, bpf_filler);
+               tag_from_fdinfo(fd_prog, ftag, sizeof(ftag));
+               tag_from_alg(i, atag, sizeof(atag));
+               if (memcmp(ftag, atag, sizeof(ftag)))
+                       tag_exit_report(i, fd_map, ftag, atag, sizeof(ftag));
+
+               close(fd_prog);
+               sched_yield();
+               (*tests)++;
+       }
+}
+
+int main(void)
+{
+       struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
+       uint32_t tests = 0;
+       int i, fd_map;
+
+       setrlimit(RLIMIT_MEMLOCK, &rinf);
+       fd_map = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int),
+                               sizeof(int), 1, BPF_F_NO_PREALLOC);
+       assert(fd_map > 0);
+
+       for (i = 0; i < 5; i++) {
+               do_test(&tests, 2, -1,     bpf_gen_imm_prog);
+               do_test(&tests, 3, fd_map, bpf_gen_map_prog);
+       }
+
+       printf("test_tag: OK (%u tests)\n", tests);
+       close(fd_map);
+       return 0;
+}
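
test_tag.c above validates the kernel-computed program tag two independent ways: it parses the prog_tag line from /proc/<pid>/fdinfo/<prog-fd>, and it recomputes the digest itself by writing the instruction image to an AF_ALG "hash"/"sha1" socket, keeping the first 8 bytes. Note the bpf_filler(insns, 0) call after loading: it re-generates the image with a zeroed map fd before hashing, so the user-space digest can match what the kernel hashed. A minimal sketch of the AF_ALG side (sha1_af_alg is a hypothetical helper, not from the commit; error handling elided):

        /* Sketch: hashing a buffer via the kernel's AF_ALG interface,
         * as test_tag does for the instruction image. */
        #include <assert.h>
        #include <stdint.h>
        #include <unistd.h>
        #include <sys/socket.h>
        #include <linux/if_alg.h>

        static void sha1_af_alg(const void *buf, size_t len,
                                uint8_t *out, size_t outlen)
        {
                static const struct sockaddr_alg alg = {
                        .salg_family    = AF_ALG,
                        .salg_type      = "hash",
                        .salg_name      = "sha1",
                };
                int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
                int op;

                assert(tfm > 0);
                assert(bind(tfm, (const struct sockaddr *)&alg,
                            sizeof(alg)) == 0);
                op = accept(tfm, NULL, 0);      /* per-operation socket */
                assert(op > 0);

                assert(write(op, buf, len) == (ssize_t)len);
                /* read() may return up to the full 20-byte SHA-1;
                 * the prog tag keeps only the first 8 bytes. */
                assert(read(op, out, outlen) == (ssize_t)outlen);

                close(op);
                close(tfm);
        }
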
index 853d7e43434acaeb2eddab46c48a64e98701864e..e1f5b9eea1e874ab7f4698a1e20abc5588fe00bb 100644
@@ -8,7 +8,9 @@
  * License as published by the Free Software Foundation.
  */
 
+#include <stdint.h>
 #include <stdio.h>
+#include <stdlib.h>
 #include <unistd.h>
 #include <errno.h>
 #include <string.h>
@@ -16,6 +18,7 @@
 #include <stdbool.h>
 #include <sched.h>
 
+#include <sys/capability.h>
 #include <sys/resource.h>
 
 #include <linux/unistd.h>
@@ -23,9 +26,9 @@
 #include <linux/bpf_perf_event.h>
 #include <linux/bpf.h>
 
-#include "../../../include/linux/filter.h"
+#include <bpf/bpf.h>
 
-#include "bpf_sys.h"
+#include "../../../include/linux/filter.h"
 
 #ifndef ARRAY_SIZE
 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
@@ -859,808 +862,1008 @@ static struct bpf_test tests[] = {
                .result = REJECT,
        },
        {
-               "check non-u32 access to cb",
+               "check cb access: byte",
                .insns = {
-                       BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[0])),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[0]) + 1),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[0]) + 2),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[0]) + 3),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[1])),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[1]) + 1),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[1]) + 2),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[1]) + 3),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[2])),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[2]) + 1),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[2]) + 2),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[2]) + 3),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[3])),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[3]) + 1),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[3]) + 2),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[3]) + 3),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[4])),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[4]) + 1),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[4]) + 2),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[4]) + 3),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, cb[0])),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[0]) + 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[0]) + 2),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[0]) + 3),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[1])),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[1]) + 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[1]) + 2),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[1]) + 3),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[2])),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[2]) + 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[2]) + 2),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[2]) + 3),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[3])),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[3]) + 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[3]) + 2),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[3]) + 3),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[4])),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[4]) + 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[4]) + 2),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[4]) + 3),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
+       {
+               "check cb access: byte, oob 1",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[4]) + 4),
                        BPF_EXIT_INSN(),
                },
                .errstr = "invalid bpf_context access",
-               .errstr_unpriv = "R1 leaks addr",
                .result = REJECT,
        },
        {
-               "check out of range skb->cb access",
+               "check cb access: byte, oob 2",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-                                   offsetof(struct __sk_buff, cb[0]) + 256),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[0]) - 1),
                        BPF_EXIT_INSN(),
                },
                .errstr = "invalid bpf_context access",
-               .errstr_unpriv = "",
                .result = REJECT,
-               .prog_type = BPF_PROG_TYPE_SCHED_ACT,
        },
        {
-               "write skb fields from socket prog",
+               "check cb access: byte, oob 3",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-                                   offsetof(struct __sk_buff, cb[4])),
-                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-                                   offsetof(struct __sk_buff, mark)),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-                                   offsetof(struct __sk_buff, tc_index)),
-                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
-                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[4]) + 4),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
+       },
+       {
+               "check cb access: byte, oob 4",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[0]) - 1),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
+       },
+       {
+               "check cb access: byte, wrong type",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
                                    offsetof(struct __sk_buff, cb[0])),
-                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
-                                   offsetof(struct __sk_buff, cb[2])),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
-               .errstr_unpriv = "R1 leaks addr",
-               .result_unpriv = REJECT,
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
        },
        {
-               "write skb fields from tc_cls_act prog",
+               "check cb access: half",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
                                    offsetof(struct __sk_buff, cb[0])),
-                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
-                                   offsetof(struct __sk_buff, mark)),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-                                   offsetof(struct __sk_buff, tc_index)),
-                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
-                                   offsetof(struct __sk_buff, tc_index)),
-                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                       BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[0]) + 2),
+                       BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[1])),
+                       BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[1]) + 2),
+                       BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[2])),
+                       BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[2]) + 2),
+                       BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[3])),
+                       BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[3]) + 2),
+                       BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[4])),
+                       BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[4]) + 2),
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[0])),
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[0]) + 2),
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[1])),
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[1]) + 2),
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[2])),
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[2]) + 2),
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
                                    offsetof(struct __sk_buff, cb[3])),
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[3]) + 2),
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[4])),
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[4]) + 2),
                        BPF_EXIT_INSN(),
                },
-               .errstr_unpriv = "",
-               .result_unpriv = REJECT,
                .result = ACCEPT,
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "PTR_TO_STACK store/load",
+               "check cb access: half, unaligned",
                .insns = {
-                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[0]) + 1),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
+               .errstr = "misaligned access",
+               .result = REJECT,
        },
        {
-               "PTR_TO_STACK store/load - bad alignment on off",
+               "check cb access: half, oob 1",
                .insns = {
-                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[4]) + 4),
                        BPF_EXIT_INSN(),
                },
+               .errstr = "invalid bpf_context access",
                .result = REJECT,
-               .errstr = "misaligned access off -6 size 8",
        },
        {
-               "PTR_TO_STACK store/load - bad alignment on reg",
+               "check cb access: half, oob 2",
                .insns = {
-                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[0]) - 2),
                        BPF_EXIT_INSN(),
                },
+               .errstr = "invalid bpf_context access",
                .result = REJECT,
-               .errstr = "misaligned access off -2 size 8",
        },
        {
-               "PTR_TO_STACK store/load - out of bounds low",
+               "check cb access: half, oob 3",
                .insns = {
-                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[4]) + 4),
                        BPF_EXIT_INSN(),
                },
+               .errstr = "invalid bpf_context access",
                .result = REJECT,
-               .errstr = "invalid stack off=-79992 size=8",
        },
        {
-               "PTR_TO_STACK store/load - out of bounds high",
+               "check cb access: half, oob 4",
                .insns = {
-                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[0]) - 2),
                        BPF_EXIT_INSN(),
                },
+               .errstr = "invalid bpf_context access",
                .result = REJECT,
-               .errstr = "invalid stack off=0 size=8",
        },
        {
-               "unpriv: return pointer",
+               "check cb access: half, wrong type",
                .insns = {
-                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[0])),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
-               .result_unpriv = REJECT,
-               .errstr_unpriv = "R0 leaks addr",
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
        },
        {
-               "unpriv: add const to pointer",
+               "check cb access: word",
                .insns = {
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[0])),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[1])),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[2])),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[3])),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[4])),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[0])),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[1])),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[2])),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[3])),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[4])),
                        BPF_EXIT_INSN(),
                },
                .result = ACCEPT,
-               .result_unpriv = REJECT,
-               .errstr_unpriv = "R1 pointer arithmetic",
        },
        {
-               "unpriv: add pointer to pointer",
+               "check cb access: word, unaligned 1",
                .insns = {
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[0]) + 2),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
-               .result_unpriv = REJECT,
-               .errstr_unpriv = "R1 pointer arithmetic",
+               .errstr = "misaligned access",
+               .result = REJECT,
        },
        {
-               "unpriv: neg pointer",
+               "check cb access: word, unaligned 2",
                .insns = {
-                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[4]) + 1),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
-               .result_unpriv = REJECT,
-               .errstr_unpriv = "R1 pointer arithmetic",
+               .errstr = "misaligned access",
+               .result = REJECT,
        },
        {
-               "unpriv: cmp pointer with const",
+               "check cb access: word, unaligned 3",
                .insns = {
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[4]) + 2),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
-               .result_unpriv = REJECT,
-               .errstr_unpriv = "R1 pointer comparison",
+               .errstr = "misaligned access",
+               .result = REJECT,
        },
        {
-               "unpriv: cmp pointer with pointer",
+               "check cb access: word, unaligned 4",
                .insns = {
-                       BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[4]) + 3),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
-               .result_unpriv = REJECT,
-               .errstr_unpriv = "R10 pointer comparison",
+               .errstr = "misaligned access",
+               .result = REJECT,
        },
        {
-               "unpriv: check that printk is disallowed",
+               "check cb access: double",
                .insns = {
-                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
-                       BPF_MOV64_IMM(BPF_REG_2, 8),
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_trace_printk),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[0])),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[2])),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[0])),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[2])),
                        BPF_EXIT_INSN(),
                },
-               .errstr_unpriv = "unknown func bpf_trace_printk#6",
-               .result_unpriv = REJECT,
                .result = ACCEPT,
        },
        {
-               "unpriv: pass pointer to helper function",
+               "check cb access: double, unaligned 1",
                .insns = {
-                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-                       BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
-                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_update_elem),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[1])),
                        BPF_EXIT_INSN(),
                },
-               .fixup_map1 = { 3 },
-               .errstr_unpriv = "R4 leaks addr",
-               .result_unpriv = REJECT,
-               .result = ACCEPT,
+               .errstr = "misaligned access",
+               .result = REJECT,
        },
        {
-               "unpriv: indirectly pass pointer on stack to helper function",
+               "check cb access: double, unaligned 2",
                .insns = {
-                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-                       BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[3])),
                        BPF_EXIT_INSN(),
                },
-               .fixup_map1 = { 3 },
-               .errstr = "invalid indirect read from stack off -8+0 size 8",
+               .errstr = "misaligned access",
                .result = REJECT,
        },
        {
-               "unpriv: mangle pointer on stack 1",
+               "check cb access: double, oob 1",
                .insns = {
-                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
-                       BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[4])),
                        BPF_EXIT_INSN(),
                },
-               .errstr_unpriv = "attempt to corrupt spilled",
-               .result_unpriv = REJECT,
-               .result = ACCEPT,
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
        },
        {
-               "unpriv: mangle pointer on stack 2",
+               "check cb access: double, oob 2",
                .insns = {
-                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
-                       BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[4]) + 8),
                        BPF_EXIT_INSN(),
                },
-               .errstr_unpriv = "attempt to corrupt spilled",
-               .result_unpriv = REJECT,
-               .result = ACCEPT,
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
        },
        {
-               "unpriv: read pointer from stack in small chunks",
+               "check cb access: double, oob 3",
                .insns = {
-                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[0]) - 8),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "invalid size",
+               .errstr = "invalid bpf_context access",
                .result = REJECT,
        },
        {
-               "unpriv: write pointer into ctx",
+               "check cb access: double, oob 4",
                .insns = {
-                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[4])),
                        BPF_EXIT_INSN(),
                },
-               .errstr_unpriv = "R1 leaks addr",
-               .result_unpriv = REJECT,
                .errstr = "invalid bpf_context access",
                .result = REJECT,
        },
        {
-               "unpriv: spill/fill of ctx",
+               "check cb access: double, oob 5",
                .insns = {
-                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[4]) + 8),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
        },
        {
-               "unpriv: spill/fill of ctx 2",
+               "check cb access: double, oob 6",
                .insns = {
-                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_get_hash_recalc),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[0]) - 8),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
+       },
+       {
+               "check cb access: double, wrong type",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[0])),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+       },
+       {
+               "check out of range skb->cb access",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[0]) + 256),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access",
+               .errstr_unpriv = "",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SCHED_ACT,
+       },
+       {
+               "write skb fields from socket prog",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[4])),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, tc_index)),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[0])),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[2])),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .errstr_unpriv = "R1 leaks addr",
+               .result_unpriv = REJECT,
+       },
+       {
+               "write skb fields from tc_cls_act prog",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, cb[0])),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, tc_index)),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, tc_index)),
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                                   offsetof(struct __sk_buff, cb[3])),
                        BPF_EXIT_INSN(),
                },
+               .errstr_unpriv = "",
+               .result_unpriv = REJECT,
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "unpriv: spill/fill of ctx 3",
+               "PTR_TO_STACK store/load",
                .insns = {
-                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_get_hash_recalc),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
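+                       /* insn offset 2 below lands the 8-byte access at
+                        * fp - 8: in bounds and naturally aligned.
+                        */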
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+       },
+       {
+               "PTR_TO_STACK store/load - bad alignment on off",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
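+                       /* fp - 8 + 2 = fp - 6: not 8-byte aligned */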
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 2, 0xfaceb00c),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 2),
                        BPF_EXIT_INSN(),
                },
                .result = REJECT,
-               .errstr = "R1 type=fp expected=ctx",
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .errstr = "misaligned access off -6 size 8",
        },
        {
-               "unpriv: spill/fill of ctx 4",
+               "PTR_TO_STACK store/load - bad alignment on reg",
                .insns = {
-                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
-                       BPF_MOV64_IMM(BPF_REG_0, 1),
-                       BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
-                                    BPF_REG_0, -8, 0),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_get_hash_recalc),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -10),
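+                       /* fp - 10 + 8 = fp - 2: not 8-byte aligned */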
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
                        BPF_EXIT_INSN(),
                },
                .result = REJECT,
-               .errstr = "R1 type=inv expected=ctx",
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .errstr = "misaligned access off -2 size 8",
        },
        {
-               "unpriv: spill/fill of different pointers stx",
+               "PTR_TO_STACK store/load - out of bounds low",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_3, 42),
-                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
-                       BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
-                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
-                                   offsetof(struct __sk_buff, mark)),
-                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -80000),
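+                       /* fp - 80000 + 8 = off -79992, far below the
+                        * 512-byte BPF stack.
+                        */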
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
                        BPF_EXIT_INSN(),
                },
                .result = REJECT,
-               .errstr = "same insn cannot be used with different pointers",
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .errstr = "invalid stack off=-79992 size=8",
        },
        {
-               "unpriv: spill/fill of different pointers ldx",
+               "PTR_TO_STACK store/load - out of bounds high",
                .insns = {
-                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
-                                     -(__s32)offsetof(struct bpf_perf_event_data,
-                                                      sample_period) - 8),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
-                       BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
-                                   offsetof(struct bpf_perf_event_data,
-                                            sample_period)),
-                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
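+                       /* fp - 8 + 8 = off 0: at the frame pointer itself,
+                        * above the valid stack range.
+                        */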
+                       BPF_ST_MEM(BPF_DW, BPF_REG_1, 8, 0xfaceb00c),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
                        BPF_EXIT_INSN(),
                },
                .result = REJECT,
-               .errstr = "same insn cannot be used with different pointers",
-               .prog_type = BPF_PROG_TYPE_PERF_EVENT,
+               .errstr = "invalid stack off=0 size=8",
        },
        {
-               "unpriv: write pointer into map elem value",
+               "unpriv: return pointer",
                .insns = {
-                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-                       BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
                        BPF_EXIT_INSN(),
                },
-               .fixup_map1 = { 3 },
-               .errstr_unpriv = "R0 leaks addr",
-               .result_unpriv = REJECT,
                .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R0 leaks addr",
        },
        {
-               "unpriv: partial copy of pointer",
+               "unpriv: add const to pointer",
                .insns = {
-                       BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr_unpriv = "R10 partial copy",
-               .result_unpriv = REJECT,
                .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R1 pointer arithmetic",
        },
        {
-               "unpriv: pass pointer to tail_call",
+               "unpriv: add pointer to pointer",
                .insns = {
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
-                       BPF_LD_MAP_FD(BPF_REG_2, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_tail_call),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .fixup_prog = { 1 },
-               .errstr_unpriv = "R3 leaks addr into helper",
-               .result_unpriv = REJECT,
                .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R1 pointer arithmetic",
        },
        {
-               "unpriv: cmp map pointer with zero",
+               "unpriv: neg pointer",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_1, 0),
-                       BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+                       BPF_ALU64_IMM(BPF_NEG, BPF_REG_1, 0),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .fixup_map1 = { 1 },
-               .errstr_unpriv = "R1 pointer comparison",
-               .result_unpriv = REJECT,
                .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R1 pointer arithmetic",
        },
        {
-               "unpriv: write into frame pointer",
+               "unpriv: cmp pointer with const",
                .insns = {
-                       BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "frame pointer is read only",
-               .result = REJECT,
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R1 pointer comparison",
        },
        {
-               "unpriv: spill/fill frame pointer",
+               "unpriv: cmp pointer with pointer",
                .insns = {
-                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
+                       BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_10, 0),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "frame pointer is read only",
-               .result = REJECT,
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+               .errstr_unpriv = "R10 pointer comparison",
        },
        {
-               "unpriv: cmp of frame pointer",
+               "unpriv: check that printk is disallowed",
                .insns = {
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_2, 8),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
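+                       /* bpf_trace_printk() is presumably only offered to
+                        * privileged loaders here, hence "unknown func" for
+                        * the unprivileged case.
+                        */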
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_trace_printk),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr_unpriv = "R10 pointer comparison",
+               .errstr_unpriv = "unknown func bpf_trace_printk#6",
                .result_unpriv = REJECT,
                .result = ACCEPT,
        },
        {
-               "unpriv: cmp of stack pointer",
+               "unpriv: pass pointer to helper function",
                .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_update_elem),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr_unpriv = "R2 pointer comparison",
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R4 leaks addr",
                .result_unpriv = REJECT,
                .result = ACCEPT,
        },
        {
-               "unpriv: obfuscate stack pointer",
+               "unpriv: indirectly pass pointer on stack to helper function",
                .insns = {
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
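+                       /* the helper would read its lookup key from a slot
+                        * holding a spilled pointer, which must not be
+                        * exposed.
+                        */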
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr_unpriv = "R2 pointer arithmetic",
+               .fixup_map1 = { 3 },
+               .errstr = "invalid indirect read from stack off -8+0 size 8",
+               .result = REJECT,
+       },
+       {
+               "unpriv: mangle pointer on stack 1",
+               .insns = {
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
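+                       /* the narrower store below clobbers part of the
+                        * spilled pointer.
+                        */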
+                       BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "attempt to corrupt spilled",
                .result_unpriv = REJECT,
                .result = ACCEPT,
        },
        {
-               "raw_stack: no skb_load_bytes",
+               "unpriv: mangle pointer on stack 2",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_2, 4),
-                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
-                       BPF_MOV64_IMM(BPF_REG_4, 8),
-                       /* Call to skb_load_bytes() omitted. */
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
+                       BPF_ST_MEM(BPF_B, BPF_REG_10, -1, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .result = REJECT,
-               .errstr = "invalid read from stack off -8+0 size 8",
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .errstr_unpriv = "attempt to corrupt spilled",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
        },
        {
-               "raw_stack: skb_load_bytes, negative len",
+               "unpriv: read pointer from stack in small chunks",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_2, 4),
-                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
-                       BPF_MOV64_IMM(BPF_REG_4, -8),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_skb_load_bytes),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_10, -8),
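+                       /* per the expected error string, a spilled pointer
+                        * apparently cannot be read back in sub-8-byte chunks.
+                        */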
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
+               .errstr = "invalid size",
                .result = REJECT,
-               .errstr = "invalid stack type R3",
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "raw_stack: skb_load_bytes, negative len 2",
+               "unpriv: write pointer into ctx",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_2, 4),
-                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
-                       BPF_MOV64_IMM(BPF_REG_4, ~0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_skb_load_bytes),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
+               .errstr_unpriv = "R1 leaks addr",
+               .result_unpriv = REJECT,
+               .errstr = "invalid bpf_context access",
                .result = REJECT,
-               .errstr = "invalid stack type R3",
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "raw_stack: skb_load_bytes, zero len",
+               "unpriv: spill/fill of ctx",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_2, 4),
                        BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
-                       BPF_MOV64_IMM(BPF_REG_4, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_skb_load_bytes),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .result = REJECT,
-               .errstr = "invalid stack type R3",
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
        },
        {
-               "raw_stack: skb_load_bytes, no init",
+               "unpriv: spill/fill of ctx 2",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_2, 4),
                        BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
-                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_skb_load_bytes),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                                    BPF_FUNC_get_hash_recalc),
                        BPF_EXIT_INSN(),
                },
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "raw_stack: skb_load_bytes, init",
+               "unpriv: spill/fill of ctx 3",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_2, 4),
                        BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
-                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_skb_load_bytes),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                                    BPF_FUNC_get_hash_recalc),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
+               .result = REJECT,
+               .errstr = "R1 type=fp expected=ctx",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "raw_stack: skb_load_bytes, spilled regs around bounds",
+               "unpriv: spill/fill of ctx 4",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_2, 4),
                        BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
-                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_DW, BPF_REG_10,
+                                    BPF_REG_0, -8, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_skb_load_bytes),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
-                                   offsetof(struct __sk_buff, mark)),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
-                                   offsetof(struct __sk_buff, priority)),
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                                    BPF_FUNC_get_hash_recalc),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
+               .result = REJECT,
+               .errstr = "R1 type=inv expected=ctx",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "raw_stack: skb_load_bytes, spilled regs corruption",
+               "unpriv: spill/fill of different pointers stx",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_MOV64_IMM(BPF_REG_3, 42),
                        BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
                        BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
-                       BPF_MOV64_IMM(BPF_REG_4, 8),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_skb_load_bytes),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
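+                       /* depending on the branch, R1 is now either a stack or
+                        * a ctx pointer; one store insn cannot serve both.
+                        */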
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_3,
                                    offsetof(struct __sk_buff, mark)),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
                .result = REJECT,
-               .errstr = "R0 invalid mem access 'inv'",
+               .errstr = "same insn cannot be used with different pointers",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "raw_stack: skb_load_bytes, spilled regs corruption 2",
+               "unpriv: spill/fill of different pointers ldx",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_2, 4),
                        BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
-                       BPF_MOV64_IMM(BPF_REG_4, 8),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_skb_load_bytes),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
-                                   offsetof(struct __sk_buff, mark)),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
-                                   offsetof(struct __sk_buff, priority)),
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
-                                   offsetof(struct __sk_buff, pkt_type)),
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
+                                     -(__s32)offsetof(struct bpf_perf_event_data,
+                                                      sample_period) - 8),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_2, 0),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1,
+                                   offsetof(struct bpf_perf_event_data,
+                                            sample_period)),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
                .result = REJECT,
-               .errstr = "R3 invalid mem access 'inv'",
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .errstr = "same insn cannot be used with different pointers",
+               .prog_type = BPF_PROG_TYPE_PERF_EVENT,
        },
        {
-               "raw_stack: skb_load_bytes, spilled regs + data",
+               "unpriv: write pointer into map elem value",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_2, 4),
-                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
-                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_skb_load_bytes),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
-                                   offsetof(struct __sk_buff, mark)),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
-                                   offsetof(struct __sk_buff, priority)),
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
+               .fixup_map1 = { 3 },
+               .errstr_unpriv = "R0 leaks addr",
+               .result_unpriv = REJECT,
                .result = ACCEPT,
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "raw_stack: skb_load_bytes, invalid access 1",
+               "unpriv: partial copy of pointer",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_2, 4),
-                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
-                       BPF_MOV64_IMM(BPF_REG_4, 8),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_skb_load_bytes),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_MOV32_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .result = REJECT,
-               .errstr = "invalid stack type R3 off=-513 access_size=8",
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .errstr_unpriv = "R10 partial copy",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
        },
        {
-               "raw_stack: skb_load_bytes, invalid access 2",
+               "unpriv: pass pointer to tail_call",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_2, 4),
-                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
-                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_1),
+                       BPF_LD_MAP_FD(BPF_REG_2, 0),
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_skb_load_bytes),
-                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
-                       BPF_EXIT_INSN(),
+                                    BPF_FUNC_tail_call),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_prog = { 1 },
+               .errstr_unpriv = "R3 leaks addr into helper",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+       },
+       {
+               "unpriv: cmp map pointer with zero",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 1 },
+               .errstr_unpriv = "R1 pointer comparison",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+       },
+       {
+               "unpriv: write into frame pointer",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_10, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
                },
+               .errstr = "frame pointer is read only",
                .result = REJECT,
-               .errstr = "invalid stack type R3 off=-1 access_size=8",
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "raw_stack: skb_load_bytes, invalid access 3",
+               "unpriv: spill/fill frame pointer",
+               .insns = {
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "frame pointer is read only",
+               .result = REJECT,
+       },
+       {
+               "unpriv: cmp of frame pointer",
+               .insns = {
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_10, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "R10 pointer comparison",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+       },
+       {
+               "unpriv: cmp of stack pointer",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "R2 pointer comparison",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+       },
+       {
+               "unpriv: obfuscate stack pointer",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr_unpriv = "R2 pointer arithmetic",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+       },
+       {
+               "raw_stack: no skb_load_bytes",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_2, 4),
                        BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
                        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
-                       BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_skb_load_bytes),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       /* Call to skb_load_bytes() omitted. */
                        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
                        BPF_EXIT_INSN(),
                },
                .result = REJECT,
-               .errstr = "invalid stack type R3 off=-1 access_size=-1",
+               .errstr = "invalid read from stack off -8+0 size 8",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "raw_stack: skb_load_bytes, invalid access 4",
+               "raw_stack: skb_load_bytes, negative len",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_2, 4),
                        BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
                        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
-                       BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
+                       BPF_MOV64_IMM(BPF_REG_4, -8),
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                     BPF_FUNC_skb_load_bytes),
                        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
                        BPF_EXIT_INSN(),
                },
                .result = REJECT,
-               .errstr = "invalid stack type R3 off=-1 access_size=2147483647",
+               .errstr = "invalid stack type R3",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "raw_stack: skb_load_bytes, invalid access 5",
+               "raw_stack: skb_load_bytes, negative len 2",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_2, 4),
                        BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
                        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
-                       BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
+                       BPF_MOV64_IMM(BPF_REG_4, ~0),
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                     BPF_FUNC_skb_load_bytes),
                        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
                        BPF_EXIT_INSN(),
                },
                .result = REJECT,
-               .errstr = "invalid stack type R3 off=-512 access_size=2147483647",
+               .errstr = "invalid stack type R3",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "raw_stack: skb_load_bytes, invalid access 6",
+               "raw_stack: skb_load_bytes, zero len",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_2, 4),
                        BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
                        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
                        BPF_MOV64_IMM(BPF_REG_4, 0),
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
@@ -1669,17 +1872,17 @@ static struct bpf_test tests[] = {
                        BPF_EXIT_INSN(),
                },
                .result = REJECT,
-               .errstr = "invalid stack type R3 off=-512 access_size=0",
+               .errstr = "invalid stack type R3",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "raw_stack: skb_load_bytes, large access",
+               "raw_stack: skb_load_bytes, no init",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_2, 4),
                        BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
                        BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
-                       BPF_MOV64_IMM(BPF_REG_4, 512),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
                        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                                     BPF_FUNC_skb_load_bytes),
                        BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
@@ -1689,101 +1892,337 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "direct packet access: test1",
+               "raw_stack: skb_load_bytes, init",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data)),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data_end)),
-                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
-                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
-                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
-                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xcafe),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
                        BPF_EXIT_INSN(),
                },
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "direct packet access: test2",
+               "raw_stack: skb_load_bytes, spilled regs around bounds",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_0, 1),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data_end)),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data)),
-                       BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
-                       BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
-                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
-                       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
-                       BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data)),
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
-                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
-                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data_end)),
-                       BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
-                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
-                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+                                   offsetof(struct __sk_buff, priority)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
                        BPF_EXIT_INSN(),
                },
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "direct packet access: test3",
+               "raw_stack: skb_load_bytes, spilled regs corruption",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data)),
-                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -8),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+                                   offsetof(struct __sk_buff, mark)),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "invalid bpf_context access off=76",
                .result = REJECT,
-               .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
-       },
-       {
-               "direct packet access: test4 (write)",
-               .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data)),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data_end)),
-                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
-                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
-                       BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
-                       BPF_MOV64_IMM(BPF_REG_0, 0),
-                       BPF_EXIT_INSN(),
-               },
-               .result = ACCEPT,
+               .errstr = "R0 invalid mem access 'inv'",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "direct packet access: test5 (pkt_end >= reg, good access)",
+               "raw_stack: skb_load_bytes, spilled regs corruption 2",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data)),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data_end)),
-                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
-                       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
-                       BPF_MOV64_IMM(BPF_REG_0, 1),
-                       BPF_EXIT_INSN(),
-                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
-                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+                                   offsetof(struct __sk_buff, priority)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_3,
+                                   offsetof(struct __sk_buff, pkt_type)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
+               .result = REJECT,
+               .errstr = "R3 invalid mem access 'inv'",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, spilled regs + data",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,  8),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, -8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6,  8),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,  0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2,
+                                   offsetof(struct __sk_buff, priority)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, invalid access 1",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -513),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack type R3 off=-513 access_size=8",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, invalid access 2",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 8),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack type R3 off=-1 access_size=8",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, invalid access 3",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0xffffffff),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 0xffffffff),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack type R3 off=-1 access_size=-1",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, invalid access 4",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -1),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack type R3 off=-1 access_size=2147483647",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, invalid access 5",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 0x7fffffff),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack type R3 off=-512 access_size=2147483647",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, invalid access 6",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid stack type R3 off=-512 access_size=0",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "raw_stack: skb_load_bytes, large access",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -512),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_4, 512),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_skb_load_bytes),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
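The raw_stack group above drives bpf_skb_load_bytes() with a destination on the
BPF stack: the verifier must prove that R3 points inside the 512-byte stack
frame and that R4 holds a sane length before the call is allowed. A minimal
restricted-C sketch of the ACCEPT shape (illustrative only; the helper header
and section name are assumptions, not part of this diff):

	#include <linux/bpf.h>
	#include "bpf_helpers.h"	/* samples/bpf helper stubs, assumed */

	SEC("classifier")		/* BPF_PROG_TYPE_SCHED_CLS */
	int load_bytes(struct __sk_buff *skb)
	{
		char buf[512];		/* whole frame, as in "large access" */

		/* dst and len must stay inside the stack slot, len > 0;
		 * packet offset 4 mirrors the R2 = 4 in the tests */
		if (bpf_skb_load_bytes(skb, 4, buf, sizeof(buf)) < 0)
			return 0;
		return buf[0];
	}

The REJECT cases pair a stack offset of -1 or -512 with a length of -1,
0x7fffffff, or 0; each combination violates either the in-bounds or the
non-zero-length rule, matching the errstr strings asserted above.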
+       {
+               "direct packet access: test1",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
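"direct packet access: test1" is the canonical bounds-check idiom that the
following direct-packet tests riff on. In restricted C it is roughly (sketch
under the same assumptions as above):

	SEC("classifier")
	int dpa(struct __sk_buff *skb)
	{
		void *data     = (void *)(long)skb->data;
		void *data_end = (void *)(long)skb->data_end;

		/* the verifier demands this comparison before any access */
		if (data + 8 > data_end)
			return 0;
		return *(unsigned char *)data;	/* provably in bounds */
	}

test3 below shows the same skb->data read being rejected for
BPF_PROG_TYPE_SOCKET_FILTER, which does not get direct packet access (off=76
in its errstr is offsetof(struct __sk_buff, data) on this kernel), and test4
shows that SCHED_CLS programs may also write through the checked pointer.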
+       {
+               "direct packet access: test2",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_5, BPF_REG_4, 15),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_3, 7),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_3, 12),
+                       BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 14),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48),
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_3, 4),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "direct packet access: test3",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access off=76",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+       },
+       {
+               "direct packet access: test4 (write)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+                       BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "direct packet access: test5 (pkt_end >= reg, good access)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
@@ -1890,21 +2329,122 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "helper access to packet: test1, valid packet_ptr range",
+               "direct packet access: test11 (shift, good access)",
                .insns = {
                        BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-                                   offsetof(struct xdp_md, data)),
+                                   offsetof(struct __sk_buff, data)),
                        BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-                                   offsetof(struct xdp_md, data_end)),
-                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
-                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
-                       BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
-                       BPF_MOV64_IMM(BPF_REG_4, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_update_elem),
-                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+                       BPF_MOV64_IMM(BPF_REG_3, 144),
+                       BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "direct packet access: test12 (and, good access)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+                       BPF_MOV64_IMM(BPF_REG_3, 144),
+                       BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
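test11 and test12 check that the verifier constant-folds shifts and masks when
computing packet offsets. Both start from a pointer proven valid for 22 bytes,
then add a derived constant that stays inside that verified window:

	(144 + 23) >> 3 = 167 >> 3 = 20   /* test11: 20 <= 22, good access */
	(144 + 23) & 15 = 167 & 15 =  7   /* test12:  7 <= 22, good access */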
+       {
+               "direct packet access: test13 (branches, good access)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, mark)),
+                       BPF_MOV64_IMM(BPF_REG_4, 1),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
+                       BPF_MOV64_IMM(BPF_REG_3, 14),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+                       BPF_MOV64_IMM(BPF_REG_3, 24),
+                       BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "direct packet access: test14 (pkt_ptr += 0, CONST_IMM, good access)",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 7),
+                       BPF_MOV64_IMM(BPF_REG_5, 12),
+                       BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 4),
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "helper access to packet: test1, valid packet_ptr range",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 5),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_update_elem),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
                .fixup_map1 = { 5 },
@@ -2444,467 +2984,1473 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
        },
        {
-               "invalid map access into an array with a constant",
+               "invalid map access into an array with a constant",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
+                                  offsetof(struct test_val, foo)),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr = "invalid access to map value, value_size=48 off=48 size=8",
+               .result = REJECT,
+       },
+       {
+               "invalid map access into an array with a register",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
+                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
+                                  offsetof(struct test_val, foo)),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is outside of the array range",
+               .result_unpriv = REJECT,
+               .result = REJECT,
+       },
+       {
+               "invalid map access into an array with a variable",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
+                                  offsetof(struct test_val, foo)),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+               .result_unpriv = REJECT,
+               .result = REJECT,
+       },
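The rejects above pin down the array-indexing contract for map values: a
constant index must stay below value_size, and a value loaded from the map is
unknown, so it needs a bound on both ends before it may be used as an offset.
In the selftest, struct test_val is an unsigned int index followed by
int foo[MAX_ENTRIES] with MAX_ENTRIES = 11, which is where value_size=48 in
the errstr strings comes from (4 + 11 * 4). A sketch of the accepted shape
(map and key names are illustrative; the surrounding program is assumed):

	struct test_val *val = bpf_map_lookup_elem(&map, &key);
	unsigned int idx;

	if (!val)
		return 0;
	idx = val->index;		/* unknown scalar from the map */
	if (idx >= MAX_ENTRIES)		/* unsigned: the floor is implicit */
		return 0;
	val->foo[idx] = 1;		/* now provably in range */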
+       {
+               "invalid map access into an array with no floor check",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
+                       BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
+                       BPF_MOV32_IMM(BPF_REG_1, 0),
+                       BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
+                                  offsetof(struct test_val, foo)),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+               .result_unpriv = REJECT,
+               .result = REJECT,
+       },
+       {
+               "invalid map access into an array with a invalid max check",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
+                       BPF_MOV32_IMM(BPF_REG_1, 0),
+                       BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
+                                  offsetof(struct test_val, foo)),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "invalid access to map value, value_size=48 off=44 size=8",
+               .result_unpriv = REJECT,
+               .result = REJECT,
+       },
+       {
+               "invalid map access into an array with a invalid max check",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+                       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
+                                   offsetof(struct test_val, foo)),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3, 11 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+               .result_unpriv = REJECT,
+               .result = REJECT,
+       },
+       {
+               "multiple registers share map_lookup_elem result",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 10),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "invalid memory access with multiple map_lookup_elem calls",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 10),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .result = REJECT,
+               .errstr = "R4 !read_ok",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "valid indirect map_lookup_elem access with 2nd lookup in branch",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 10),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_IMM(BPF_REG_2, 10),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "multiple registers share map_lookup_elem bad reg type",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_1, 10),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
+                       BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       BPF_MOV64_IMM(BPF_REG_1, 1),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       BPF_MOV64_IMM(BPF_REG_1, 2),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0),
+                       BPF_MOV64_IMM(BPF_REG_1, 3),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 4 },
+               .result = REJECT,
+               .errstr = "R3 invalid mem access 'inv'",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
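The four "map_lookup_elem result" cases above exercise two separate rules.
First, registers copied from the same lookup share an id, so one NULL check
validates every copy; second, helper calls clobber R1-R5 (only R6-R9 are
preserved), so stashing the first result in R4 across a second lookup loses
it, which is what "R4 !read_ok" reports. A sketch of the accepted aliasing
pattern (names are illustrative):

	struct test_val *val, *alias;

	val = bpf_map_lookup_elem(&map, &key);
	alias = val;		/* alias inherits val's pointer id */
	if (!val)
		return 0;	/* ...so this check also covers alias */
	alias->index = 0;	/* ACCEPT */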
+       {
+               "invalid map access from else condition",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES - 1, 1),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
+                                  offsetof(struct test_val, foo)),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
+               .result = REJECT,
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .result_unpriv = REJECT,
+       },
+       {
+               "constant register |= constant should keep constant type",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+                       BPF_MOV64_IMM(BPF_REG_2, 34),
+                       BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "constant register |= constant should not bypass stack boundary checks",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+                       BPF_MOV64_IMM(BPF_REG_2, 34),
+                       BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid stack type R1 off=-48 access_size=58",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "constant register |= constant register should keep constant type",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+                       BPF_MOV64_IMM(BPF_REG_2, 34),
+                       BPF_MOV64_IMM(BPF_REG_4, 13),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "constant register |= constant register should not bypass stack boundary checks",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
+                       BPF_MOV64_IMM(BPF_REG_2, 34),
+                       BPF_MOV64_IMM(BPF_REG_4, 24),
+                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid stack type R1 off=-48 access_size=58",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
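The four "constant register" cases check that ORing known constants yields a
known constant rather than an unknown value, so stack-bounds checks on helper
arguments still apply. With R1 = fp - 48, bpf_probe_read()'s size argument
works out to:

	34 | 13 = 0b100010 | 0b001101 = 0b101111 = 47	/* 47 <= 48: ACCEPT */
	34 | 24 = 0b100010 | 0b011000 = 0b111010 = 58	/* 58 >  48: "access_size=58" */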
+       {
+               "invalid direct packet write for LWT_IN",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+                       BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "cannot write into packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_LWT_IN,
+       },
+       {
+               "invalid direct packet write for LWT_OUT",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+                       BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "cannot write into packet",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_LWT_OUT,
+       },
+       {
+               "direct packet write for LWT_XMIT",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+                       BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+       },
+       {
+               "direct packet read for LWT_IN",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_LWT_IN,
+       },
+       {
+               "direct packet read for LWT_OUT",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_LWT_OUT,
+       },
+       {
+               "direct packet read for LWT_XMIT",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct __sk_buff, data_end)),
+                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+       },
+       {
+               "invalid access of tc_classid for LWT_IN",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, tc_classid)),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid bpf_context access",
+       },
+       {
+               "invalid access of tc_classid for LWT_OUT",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, tc_classid)),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid bpf_context access",
+       },
+       {
+               "invalid access of tc_classid for LWT_XMIT",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, tc_classid)),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "invalid bpf_context access",
+       },
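The LWT cases carve out per-program-type permissions on one and the same
instruction sequence: BPF_PROG_TYPE_LWT_IN and LWT_OUT may read packet data
directly, but any store through the packet pointer is rejected with "cannot
write into packet", while LWT_XMIT may both read and write. tc_classid is a
tc-only field of struct __sk_buff, so all three LWT types reject it as an
invalid context access. The write under test is the usual checked store
(fragment, reusing the bounds-check idiom sketched earlier):

	if (data + 8 <= data_end)
		*(unsigned char *)data = 0;	/* LWT_XMIT only */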
+       {
+               "helper access to map: full range",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: partial range",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_2, 8),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: empty range",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr = "invalid access to map value, value_size=48 off=0 size=0",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: out-of-bound range",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val) + 8),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr = "invalid access to map value, value_size=48 off=0 size=56",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to map: negative range",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_2, -8),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr = "invalid access to map value, value_size=48 off=0 size=-8",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
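The "helper access to map" group feeds a map value as bpf_probe_read()'s
destination and varies the size argument: the destination range must be a
non-empty subrange of the 48-byte value. Approximately, with val from a
NULL-checked lookup (src of 0 as in the tests; a real caller would pass a
kernel address):

	bpf_probe_read(val, sizeof(*val), 0);		/* size 48: ACCEPT */
	bpf_probe_read(val, 8, 0);			/* size  8: ACCEPT */
	bpf_probe_read(val, 0, 0);			/* size  0: reject */
	bpf_probe_read(val, sizeof(*val) + 8, 0);	/* size 56: reject */
	bpf_probe_read(val, -8, 0);			/* size -8: reject */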
+       {
+               "helper access to adjusted map (via const imm): full range",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+                               offsetof(struct test_val, foo)),
+                       BPF_MOV64_IMM(BPF_REG_2,
+                               sizeof(struct test_val) -
+                               offsetof(struct test_val, foo)),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to adjusted map (via const imm): partial range",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+                               offsetof(struct test_val, foo)),
+                       BPF_MOV64_IMM(BPF_REG_2, 8),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to adjusted map (via const imm): empty range",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+                               offsetof(struct test_val, foo)),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr = "R1 min value is outside of the array range",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to adjusted map (via const imm): out-of-bound range",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+                               offsetof(struct test_val, foo)),
+                       BPF_MOV64_IMM(BPF_REG_2,
+                               sizeof(struct test_val) -
+                               offsetof(struct test_val, foo) + 8),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr = "invalid access to map value, value_size=48 off=4 size=52",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to adjusted map (via const imm): negative range (> adjustment)",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+                               offsetof(struct test_val, foo)),
+                       BPF_MOV64_IMM(BPF_REG_2, -8),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to adjusted map (via const imm): negative range (< adjustment)",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+                               offsetof(struct test_val, foo)),
+                       BPF_MOV64_IMM(BPF_REG_2, -1),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr = "R1 min value is outside of the array range",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
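The "adjusted map (via const imm)" cases rerun the same range checks after the
value pointer has been advanced by offsetof(struct test_val, foo) = 4, so the
valid window shrinks accordingly:

	valid size range: 1 .. value_size - off = 48 - 4 = 44
	size 52       -> "off=4 size=52" reject
	size -8       -> "off=4 size=-8" reject
	size 0 or -1  -> "R1 min value is outside of the array range"

The (via const reg) and (via variable) variants that follow repeat the same
matrix with the adjustment coming from a register instead of an immediate.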
+       {
+               "helper access to adjusted map (via const reg): full range",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_3,
+                               offsetof(struct test_val, foo)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_MOV64_IMM(BPF_REG_2,
+                               sizeof(struct test_val) -
+                               offsetof(struct test_val, foo)),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to adjusted map (via const reg): partial range",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_3,
+                               offsetof(struct test_val, foo)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_MOV64_IMM(BPF_REG_2, 8),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to adjusted map (via const reg): empty range",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr = "R1 min value is outside of the array range",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to adjusted map (via const reg): out-of-bound range",
                .insns = {
-                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
-                                  offsetof(struct test_val, foo)),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_3,
+                               offsetof(struct test_val, foo)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_MOV64_IMM(BPF_REG_2,
+                               sizeof(struct test_val) -
+                               offsetof(struct test_val, foo) + 8),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr = "invalid access to map value, value_size=48 off=48 size=8",
+               .errstr = "invalid access to map value, value_size=48 off=4 size=52",
                .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "invalid map access into an array with a register",
+               "helper access to adjusted map (via const reg): negative range (> adjustment)",
                .insns = {
-                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
-                       BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
-                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
-                                  offsetof(struct test_val, foo)),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_3,
+                               offsetof(struct test_val, foo)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_MOV64_IMM(BPF_REG_2, -8),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr_unpriv = "R0 pointer arithmetic prohibited",
-               .errstr = "R0 min value is outside of the array range",
-               .result_unpriv = REJECT,
+               .errstr = "invalid access to map value, value_size=48 off=4 size=-8",
                .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "invalid map access into an array with a variable",
+               "helper access to adjusted map (via const reg): negative range (< adjustment)",
                .insns = {
-                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
-                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
-                                  offsetof(struct test_val, foo)),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_3,
+                               offsetof(struct test_val, foo)),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_MOV64_IMM(BPF_REG_2, -1),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr_unpriv = "R0 pointer arithmetic prohibited",
-               .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
-               .result_unpriv = REJECT,
+               .errstr = "R1 min value is outside of the array range",
                .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "invalid map access into an array with no floor check",
+               "helper access to adjusted map (via variable): full range",
                .insns = {
-                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
                        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
-                       BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
-                       BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
-                       BPF_MOV32_IMM(BPF_REG_1, 0),
-                       BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
-                                  offsetof(struct test_val, foo)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
+                               offsetof(struct test_val, foo), 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_MOV64_IMM(BPF_REG_2,
+                               sizeof(struct test_val) -
+                               offsetof(struct test_val, foo)),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr_unpriv = "R0 pointer arithmetic prohibited",
-               .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
-               .result_unpriv = REJECT,
-               .result = REJECT,
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
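The accepted "full range" case above is the canonical counterpart: read a variable out of the map value, prove an upper bound on it with a conditional jump, and only then use it as an offset for the helper. A minimal sketch, under the same assumptions as the previous example:

        unsigned int idx = val->index;          /* unknown to the verifier */

        if (idx <= offsetof(struct test_val, foo))     /* bounded: 0..4 */
                bpf_probe_read((char *)val + idx,
                               sizeof(struct test_val) -
                                       offsetof(struct test_val, foo),
                               NULL);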
        {
-               "invalid map access into an array with a invalid max check",
+               "helper access to adjusted map (via variable): partial range",
                .insns = {
-                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
                        BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
-                       BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
-                       BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
-                       BPF_MOV32_IMM(BPF_REG_1, 0),
-                       BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
-                                  offsetof(struct test_val, foo)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
+                               offsetof(struct test_val, foo), 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_MOV64_IMM(BPF_REG_2, 8),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_EXIT_INSN(),
                },
                .fixup_map2 = { 3 },
-               .errstr_unpriv = "R0 pointer arithmetic prohibited",
-               .errstr = "invalid access to map value, value_size=48 off=44 size=8",
-               .result_unpriv = REJECT,
-               .result = REJECT,
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "invalid map access into an array with a invalid max check",
+               "helper access to adjusted map (via variable): empty range",
                .insns = {
-                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-                       BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
-                       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
-                                   offsetof(struct test_val, foo)),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
+                               offsetof(struct test_val, foo), 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_EXIT_INSN(),
                },
-               .fixup_map2 = { 3, 11 },
-               .errstr_unpriv = "R0 pointer arithmetic prohibited",
-               .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
-               .result_unpriv = REJECT,
+               .fixup_map2 = { 3 },
+               .errstr = "R1 min value is outside of the array range",
                .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "multiple registers share map_lookup_elem result",
+               "helper access to adjusted map (via variable): no max check",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_1, 10),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
-                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_EXIT_INSN(),
                },
-               .fixup_map1 = { 4 },
-               .result = ACCEPT,
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS
+               .fixup_map2 = { 3 },
+               .errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "invalid memory access with multiple map_lookup_elem calls",
+               "helper access to adjusted map (via variable): wrong max check",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_1, 10),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
-                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
-                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
-                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
+                               offsetof(struct test_val, foo), 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
+                       BPF_MOV64_IMM(BPF_REG_2,
+                               sizeof(struct test_val) -
+                               offsetof(struct test_val, foo) + 1),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_EXIT_INSN(),
                },
-               .fixup_map1 = { 4 },
+               .fixup_map2 = { 3 },
+               .errstr = "invalid access to map value, value_size=48 off=4 size=45",
                .result = REJECT,
-               .errstr = "R4 !read_ok",
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "valid indirect map_lookup_elem access with 2nd lookup in branch",
+               "map element value is preserved across register spilling",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_1, 10),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
-                       BPF_MOV64_REG(BPF_REG_7, BPF_REG_2),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
-                       BPF_MOV64_IMM(BPF_REG_2, 10),
-                       BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 3),
-                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
-                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_4, 0, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
                        BPF_EXIT_INSN(),
                },
-               .fixup_map1 = { 4 },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 leaks addr",
                .result = ACCEPT,
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS
+               .result_unpriv = REJECT,
        },
        {
-               "multiple registers share map_lookup_elem bad reg type",
+               "map element value (adjusted) is preserved across register spilling",
                .insns = {
-                       BPF_MOV64_IMM(BPF_REG_1, 10),
-                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8),
                        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
                        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
                        BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-                                    BPF_FUNC_map_lookup_elem),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
-                       BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
-                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_0),
-                       BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
-                       BPF_MOV64_IMM(BPF_REG_1, 1),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
-                       BPF_MOV64_IMM(BPF_REG_1, 2),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0),
-                       BPF_MOV64_IMM(BPF_REG_1, 3),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0,
+                               offsetof(struct test_val, foo)),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -184),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, 0),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 42),
                        BPF_EXIT_INSN(),
                },
-               .fixup_map1 = { 4 },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .result = ACCEPT,
+               .result_unpriv = REJECT,
+       },
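The two spilling tests above verify that a map-value pointer keeps its type and bounds across a store/load round trip through the stack; the second variant spills an already-adjusted pointer, which is why unprivileged mode additionally trips over "R0 pointer arithmetic prohibited". As a sketch:

        struct test_val *spill[1];              /* one stack slot */

        if (val) {
                val->index = 42;                /* direct map-value store */
                spill[0] = val;                 /* STX: spill the pointer */
                struct test_val *fill = spill[0];       /* LDX: fill it */
                fill->index = 42;               /* must still verify */
        }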
+       {
+               "helper access to variable memory: stack, bitwise AND + JMP, correct bounds",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_MOV64_IMM(BPF_REG_2, 16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
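This entry opens the variable-size helper series. The accepted pattern: once a value has round-tripped through the stack it is unknown to the verifier, but masking it with a bitwise AND establishes a [0, mask] range, and one extra jump rules out zero. A hedged sketch (read_unknown() is a stand-in for the stack round trip, not a real helper):

        char buf[64] = {};                      /* the fp-64 window above */
        unsigned long size = read_unknown();    /* hypothetical: any value */

        size &= 64;                             /* range is now [0, 64] */
        if (size > 0)                           /* and zero is excluded */
                bpf_probe_read(buf, size, NULL);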
+       {
+               "helper access to variable memory: stack, bitwise AND, zero included",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+                       BPF_MOV64_IMM(BPF_REG_2, 16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid stack type R1 off=-64 access_size=0",
                .result = REJECT,
-               .errstr = "R3 invalid mem access 'inv'",
-               .prog_type = BPF_PROG_TYPE_SCHED_CLS
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "invalid map access from else condition",
+               "helper access to variable memory: stack, bitwise AND + JMP, wrong max",
                .insns = {
-                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
-                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
-                       BPF_LD_MAP_FD(BPF_REG_1, 0),
-                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
-                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
-                       BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES-1, 1),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
-                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
-                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
-                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+                       BPF_MOV64_IMM(BPF_REG_2, 16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid stack type R1 off=-64 access_size=65",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to variable memory: stack, JMP, correct bounds",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_MOV64_IMM(BPF_REG_2, 16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 4),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .fixup_map2 = { 3 },
-               .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
-               .result = REJECT,
-               .errstr_unpriv = "R0 pointer arithmetic prohibited",
-               .result_unpriv = REJECT,
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "constant register |= constant should keep constant type",
+               "helper access to variable memory: stack, JMP (signed), correct bounds",
                .insns = {
                        BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
-                       BPF_MOV64_IMM(BPF_REG_2, 34),
-                       BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_MOV64_IMM(BPF_REG_2, 16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 4),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
                        BPF_MOV64_IMM(BPF_REG_3, 0),
                        BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "constant register |= constant should not bypass stack boundary checks",
+               "helper access to variable memory: stack, JMP, bounds + offset",
                .insns = {
                        BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
-                       BPF_MOV64_IMM(BPF_REG_2, 34),
-                       BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+                       BPF_MOV64_IMM(BPF_REG_2, 16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 3),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
                        BPF_MOV64_IMM(BPF_REG_3, 0),
                        BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "invalid stack type R1 off=-48 access_size=58",
+               .errstr = "invalid stack type R1 off=-64 access_size=65",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "constant register |= constant register should keep constant type",
+               "helper access to variable memory: stack, JMP, wrong max",
                .insns = {
                        BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
-                       BPF_MOV64_IMM(BPF_REG_2, 34),
-                       BPF_MOV64_IMM(BPF_REG_4, 13),
-                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+                       BPF_MOV64_IMM(BPF_REG_2, 16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
                        BPF_MOV64_IMM(BPF_REG_3, 0),
                        BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
+               .errstr = "invalid stack type R1 off=-64 access_size=65",
+               .result = REJECT,
                .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "constant register |= constant register should not bypass stack boundary checks",
+               "helper access to variable memory: stack, JMP, no max check",
                .insns = {
                        BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
-                       BPF_MOV64_IMM(BPF_REG_2, 34),
-                       BPF_MOV64_IMM(BPF_REG_4, 24),
-                       BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+                       BPF_MOV64_IMM(BPF_REG_2, 16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
                        BPF_MOV64_IMM(BPF_REG_3, 0),
                        BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "invalid stack type R1 off=-48 access_size=58",
+               .errstr = "R2 unbounded memory access",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
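Each rejected variant in this run drops one half of that proof. Keeping only the lower bound, as immediately above, leaves the size unbounded upward (hence "R2 unbounded memory access"); keeping only the upper bound readmits zero, and signed comparisons readmit negative sizes. The failing shape, sketched:

        unsigned long size = read_unknown();    /* hypothetical unknown */

        if (size > 0)           /* a min check alone is not enough */
                bpf_probe_read(buf, size, NULL);        /* rejected */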
        {
-               "invalid direct packet write for LWT_IN",
+               "helper access to variable memory: stack, JMP, no min check",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data)),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data_end)),
-                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
-                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
-                       BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+                       BPF_MOV64_IMM(BPF_REG_2, 16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+                       BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "cannot write into packet",
+               .errstr = "invalid stack type R1 off=-64 access_size=0",
                .result = REJECT,
-               .prog_type = BPF_PROG_TYPE_LWT_IN,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "invalid direct packet write for LWT_OUT",
+               "helper access to variable memory: stack, JMP (signed), no min check",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data)),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data_end)),
-                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
-                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
-                       BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+                       BPF_MOV64_IMM(BPF_REG_2, 16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .errstr = "cannot write into packet",
+               .errstr = "R2 min value is negative",
                .result = REJECT,
-               .prog_type = BPF_PROG_TYPE_LWT_OUT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "direct packet write for LWT_XMIT",
+               "helper access to variable memory: map, JMP, correct bounds",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data)),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data_end)),
-                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
-                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
-                       BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
+                               sizeof(struct test_val), 4),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
+               .fixup_map2 = { 3 },
                .result = ACCEPT,
-               .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
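The map-destination variant just above is the same dance with the helper writing into the map value instead of the stack, so the proven range has to fit value_size (48 bytes here). Sketch:

        int size = read_unknown();              /* hypothetical unknown */

        if (size > 0 && size <= (int)sizeof(struct test_val))
                bpf_probe_read(val, size, NULL);        /* 1..48 fits */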
        {
-               "direct packet read for LWT_IN",
+               "helper access to variable memory: map, JMP, wrong max",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data)),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data_end)),
-                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
-                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
-                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
+                               sizeof(struct test_val) + 1, 4),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
-               .prog_type = BPF_PROG_TYPE_LWT_IN,
+               .fixup_map2 = { 3 },
+               .errstr = "invalid access to map value, value_size=48 off=0 size=49",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "direct packet read for LWT_OUT",
+               "helper access to variable memory: map adjusted, JMP, correct bounds",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data)),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data_end)),
-                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
-                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
-                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
+                       BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
+                               sizeof(struct test_val) - 20, 4),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
+               .fixup_map2 = { 3 },
                .result = ACCEPT,
-               .prog_type = BPF_PROG_TYPE_LWT_OUT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "direct packet read for LWT_XMIT",
+               "helper access to variable memory: map adjusted, JMP, wrong max",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data)),
-                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
-                                   offsetof(struct __sk_buff, data_end)),
-                       BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
-                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
-                       BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
-                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
+                       BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+                       BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
+                               sizeof(struct test_val) - 19, 4),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
                        BPF_MOV64_IMM(BPF_REG_0, 0),
                        BPF_EXIT_INSN(),
                },
-               .result = ACCEPT,
-               .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+               .fixup_map2 = { 3 },
+               .errstr = "R1 min value is outside of the array range",
+               .result = REJECT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
-               "invalid access of tc_classid for LWT_IN",
+               "helper access to variable memory: size > 0 not allowed on NULL",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-                                   offsetof(struct __sk_buff, tc_classid)),
+                       BPF_MOV64_IMM(BPF_REG_1, 0),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_MOV64_IMM(BPF_REG_5, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_csum_diff),
                        BPF_EXIT_INSN(),
                },
+               .errstr = "R1 type=imm expected=fp",
                .result = REJECT,
-               .errstr = "invalid bpf_context access",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
        {
-               "invalid access of tc_classid for LWT_OUT",
+               "helper access to variable memory: size = 0 not allowed on != NULL",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-                                   offsetof(struct __sk_buff, tc_classid)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 8),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_MOV64_IMM(BPF_REG_4, 0),
+                       BPF_MOV64_IMM(BPF_REG_5, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_csum_diff),
                        BPF_EXIT_INSN(),
                },
+               .errstr = "invalid stack type R1 off=-8 access_size=0",
                .result = REJECT,
-               .errstr = "invalid bpf_context access",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        },
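These two csum_diff entries pin down the NULL/size pairing rules for variable-memory arguments: a size greater than zero demands a real pointer, and a non-NULL pointer demands a size greater than zero. Assuming the usual bpf_csum_diff(from, from_size, to, to_size, seed) argument order, the combinations look like:

        __be32 word = 0;

        bpf_csum_diff(NULL, 64, NULL, 0, 0);    /* rejected: size > 0, NULL */
        bpf_csum_diff(&word, 0, NULL, 0, 0);    /* rejected: size 0, != NULL */
        bpf_csum_diff(&word, sizeof(word), NULL, 0, 0); /* verifies */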
        {
-               "invalid access of tc_classid for LWT_XMIT",
+               "helper access to variable memory: 8 bytes leak",
                .insns = {
-                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
-                                   offsetof(struct __sk_buff, tc_classid)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
                        BPF_EXIT_INSN(),
                },
+               .errstr = "invalid indirect read from stack off -64+32 size 64",
                .result = REJECT,
-               .errstr = "invalid bpf_context access",
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
+       {
+               "helper access to variable memory: 8 bytes no leak (init memory)",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -64),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -56),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -48),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -40),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -32),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
+                       BPF_MOV64_IMM(BPF_REG_2, 0),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 32),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 32),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_probe_read),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       },
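The leak pair above closes the loop: stack memory handed to a helper counts as readable by that helper, so every byte in the window must have been written beforehand. The "8 bytes leak" case deliberately omits the store at fp-32 and is rejected with an "invalid indirect read from stack"; the fixed variant zeroes the whole window first. In C terms:

        char buf[64];

        memset(buf, 0, sizeof(buf));    /* initialize the whole window */
        bpf_probe_read(buf, size, NULL); /* size assumed bounded to 1..64 */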
+       {
+               "invalid and of negative number",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_MOV64_IMM(BPF_REG_1, 6),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
+                       BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
+                                  offsetof(struct test_val, foo)),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+               .result = REJECT,
+               .result_unpriv = REJECT,
        },
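The masking idiom from earlier has a sharp edge, which this pre-existing test covers: AND against a negative constant keeps the sign bit set in the mask, so the verifier cannot conclude the result is non-negative. Sketch:

        int idx = 6;

        idx &= -4;              /* mask 0xfffffffc: sign bit included */
        val->foo[idx] = 0;      /* rejected: min value may be negative */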
+       {
+               "invalid range check",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_9, 1),
+                       BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
+                       BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
+                       BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
+                       BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
+                       BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
+                       BPF_MOV32_IMM(BPF_REG_3, 1),
+                       BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
+                       BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
+                       BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map2 = { 3 },
+               .errstr_unpriv = "R0 pointer arithmetic prohibited",
+               .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.",
+               .result = REJECT,
+               .result_unpriv = REJECT,
+       }
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
@@ -2921,7 +4467,7 @@ static int create_map(uint32_t size_value, uint32_t max_elem)
 {
        int fd;
 
-       fd = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(long long),
+       fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
                            size_value, max_elem, BPF_F_NO_PREALLOC);
        if (fd < 0)
                printf("Failed to create hash map '%s'!\n", strerror(errno));
@@ -2933,7 +4479,7 @@ static int create_prog_array(void)
 {
        int fd;
 
-       fd = bpf_map_create(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
+       fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
                            sizeof(int), 4, 0);
        if (fd < 0)
                printf("Failed to create prog array '%s'!\n", strerror(errno));
@@ -2991,9 +4537,9 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 
        do_test_fixup(test, prog, &fd_f1, &fd_f2, &fd_f3);
 
-       fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
-                               prog, prog_len * sizeof(struct bpf_insn),
-                               "GPL", bpf_vlog, sizeof(bpf_vlog));
+       fd_prog = bpf_load_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
+                                  prog, prog_len, "GPL", 0, bpf_vlog,
+                                  sizeof(bpf_vlog));
 
        expected_ret = unpriv && test->result_unpriv != UNDEF ?
                       test->result_unpriv : test->result;
@@ -3031,6 +4577,55 @@ fail_log:
        goto close_fds;
 }
 
+static bool is_admin(void)
+{
+       cap_t caps;
+       cap_flag_value_t sysadmin = CAP_CLEAR;
+       const cap_value_t cap_val = CAP_SYS_ADMIN;
+
+       if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
+               perror("cap_get_flag");
+               return false;
+       }
+       caps = cap_get_proc();
+       if (!caps) {
+               perror("cap_get_proc");
+               return false;
+       }
+       if (cap_get_flag(caps, cap_val, CAP_EFFECTIVE, &sysadmin))
+               perror("cap_get_flag");
+       if (cap_free(caps))
+               perror("cap_free");
+       return (sysadmin == CAP_SET);
+}
+
+static int set_admin(bool admin)
+{
+       cap_t caps;
+       const cap_value_t cap_val = CAP_SYS_ADMIN;
+       int ret = -1;
+
+       caps = cap_get_proc();
+       if (!caps) {
+               perror("cap_get_proc");
+               return -1;
+       }
+       if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
+                               admin ? CAP_SET : CAP_CLEAR)) {
+               perror("cap_set_flag");
+               goto out;
+       }
+       if (cap_set_proc(caps)) {
+               perror("cap_set_proc");
+               goto out;
+       }
+       ret = 0;
+out:
+       if (cap_free(caps))
+               perror("cap_free");
+       return ret;
+}
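Together these helpers let a root run still exercise the unprivileged verifier paths: is_admin() probes the effective set, set_admin() toggles CAP_SYS_ADMIN through libcap. The do_test() change below uses them essentially like this:

        if (is_admin()) {
                set_admin(false);       /* drop CAP_SYS_ADMIN (effective) */
                /* ... run the test as an unprivileged caller would ... */
                set_admin(true);        /* restore it afterwards */
        }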
+
 static int do_test(bool unpriv, unsigned int from, unsigned int to)
 {
        int i, passes = 0, errors = 0;
@@ -3041,11 +4636,19 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to)
                /* Program types that are not supported by non-root are
                 * skipped right away.
                 */
-               if (unpriv && test->prog_type)
-                       continue;
+               if (!test->prog_type) {
+                       if (!unpriv)
+                               set_admin(false);
+                       printf("#%d/u %s ", i, test->descr);
+                       do_test_single(test, true, &passes, &errors);
+                       if (!unpriv)
+                               set_admin(true);
+               }
 
-               printf("#%d %s ", i, test->descr);
-               do_test_single(test, unpriv, &passes, &errors);
+               if (!unpriv) {
+                       printf("#%d/p %s ", i, test->descr);
+                       do_test_single(test, false, &passes, &errors);
+               }
        }
 
        printf("Summary: %d PASSED, %d FAILED\n", passes, errors);
@@ -3057,7 +4660,7 @@ int main(int argc, char **argv)
        struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
        struct rlimit rlim = { 1 << 20, 1 << 20 };
        unsigned int from = 0, to = ARRAY_SIZE(tests);
-       bool unpriv = geteuid() != 0;
+       bool unpriv = !is_admin();
 
        if (argc == 3) {
                unsigned int l = atoi(argv[argc - 2]);
index 24bc7ec1be7dab217689fbda39d9c503a1bc6bfc..a77da88bf9469d515713e1d1f0f6d318544ac458 100644 (file)
 
 static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
 {
+       /* the filter below checks for all of the following conditions,
+        * which are based on the contents of create_payload():
+        *  ether type 0x800 and
+        *  ip proto udp     and
+        *  skb->len == DATA_LEN and
+        *  udp[38] == 'a' or udp[38] == 'b'
+        * It can be generated from the following bpf_asm input:
+        *      ldh [12]
+        *      jne #0x800, drop        ; ETH_P_IP
+        *      ldb [23]
+        *      jneq #17, drop          ; IPPROTO_UDP
+        *      ld len                  ; ld skb->len
+        *      jlt #100, drop          ; DATA_LEN
+        *      ldb [80]
+        *      jeq #97, pass           ; DATA_CHAR
+        *      jne #98, drop           ; DATA_CHAR_1
+        *      pass:
+        *        ret #-1
+        *      drop:
+        *        ret #0
+        */
        struct sock_filter bpf_filter[] = {
-               { 0x80, 0, 0, 0x00000000 },  /* LD  pktlen                    */
-               { 0x35, 0, 4, DATA_LEN   },  /* JGE DATA_LEN  [f goto nomatch]*/
-               { 0x30, 0, 0, 0x00000050 },  /* LD  ip[80]                    */
-               { 0x15, 1, 0, DATA_CHAR  },  /* JEQ DATA_CHAR   [t goto match]*/
-               { 0x15, 0, 1, DATA_CHAR_1},  /* JEQ DATA_CHAR_1 [t goto match]*/
-               { 0x06, 0, 0, 0x00000060 },  /* RET match                     */
-               { 0x06, 0, 0, 0x00000000 },  /* RET no match                  */
+               { 0x28,  0,  0, 0x0000000c },
+               { 0x15,  0,  8, 0x00000800 },
+               { 0x30,  0,  0, 0x00000017 },
+               { 0x15,  0,  6, 0x00000011 },
+               { 0x80,  0,  0, 0x00000000 },
+               { 0x35,  0,  4, 0x00000064 },
+               { 0x30,  0,  0, 0x00000050 },
+               { 0x15,  1,  0, 0x00000061 },
+               { 0x15,  0,  1, 0x00000062 },
+               { 0x06,  0,  0, 0xffffffff },
+               { 0x06,  0,  0, 0x00000000 },
        };
        struct sock_fprog bpf_prog;
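For context, sock_setfilter() presumably passes this program to setsockopt(); the classic attach step would look like the sketch below, with lvl/optnum being SOL_SOCKET/SO_ATTACH_FILTER for a plain socket filter:

        bpf_prog.len = sizeof(bpf_filter) / sizeof(bpf_filter[0]);
        bpf_prog.filter = bpf_filter;
        if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
                       &bpf_prog, sizeof(bpf_prog)) < 0)
                perror("setsockopt SO_ATTACH_FILTER");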
 
index 24adf709bd9ddadfa02e6d308511340fe4eb52e1..7f6cd9fdacf3e14b6bb313b90e664778ef0005de 100644 (file)
@@ -110,7 +110,7 @@ static unsigned int total_packets, total_bytes;
 
 static int pfsocket(int ver)
 {
-       int ret, sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
+       int ret, sock = socket(PF_PACKET, SOCK_RAW, 0);
        if (sock == -1) {
                perror("socket");
                exit(1);
@@ -239,7 +239,6 @@ static void walk_v1_v2_rx(int sock, struct ring *ring)
        bug_on(ring->type != PACKET_RX_RING);
 
        pair_udp_open(udp_sock, PORT_BASE);
-       pair_udp_setfilter(sock);
 
        memset(&pfd, 0, sizeof(pfd));
        pfd.fd = sock;
@@ -311,20 +310,33 @@ static inline void __v2_tx_user_ready(struct tpacket2_hdr *hdr)
        __sync_synchronize();
 }
 
-static inline int __v1_v2_tx_kernel_ready(void *base, int version)
+static inline int __v3_tx_kernel_ready(struct tpacket3_hdr *hdr)
+{
+       return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING));
+}
+
+static inline void __v3_tx_user_ready(struct tpacket3_hdr *hdr)
+{
+       hdr->tp_status = TP_STATUS_SEND_REQUEST;
+       __sync_synchronize();
+}
+
+static inline int __tx_kernel_ready(void *base, int version)
 {
        switch (version) {
        case TPACKET_V1:
                return __v1_tx_kernel_ready(base);
        case TPACKET_V2:
                return __v2_tx_kernel_ready(base);
+       case TPACKET_V3:
+               return __v3_tx_kernel_ready(base);
        default:
                bug_on(1);
                return 0;
        }
 }
 
-static inline void __v1_v2_tx_user_ready(void *base, int version)
+static inline void __tx_user_ready(void *base, int version)
 {
        switch (version) {
        case TPACKET_V1:
@@ -333,6 +345,9 @@ static inline void __v1_v2_tx_user_ready(void *base, int version)
        case TPACKET_V2:
                __v2_tx_user_ready(base);
                break;
+       case TPACKET_V3:
+               __v3_tx_user_ready(base);
+               break;
        }
 }
 
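The new V3 helpers complete the ownership handshake shared by all three versions: a TX frame belongs to user space once the kernel has cleared both TP_STATUS_SEND_REQUEST and TP_STATUS_SENDING, and is handed back by writing TP_STATUS_SEND_REQUEST followed by a full barrier. Condensed per-frame flow using these helpers (names such as frame, pkt_len and pfd are illustrative, not from the patch):

	struct tpacket3_hdr *hdr = frame;

	/* 1. Wait until the kernel owns neither status bit. */
	while (!__v3_tx_kernel_ready(hdr))
		poll(&pfd, 1, 1);

	/* 2. Fill in the lengths and copy the payload behind the header. */
	hdr->tp_len = pkt_len;
	hdr->tp_snaplen = pkt_len;

	/* 3. Publish the frame back to the kernel and kick transmission. */
	__v3_tx_user_ready(hdr);
	send(sock, NULL, 0, 0);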
@@ -348,7 +363,22 @@ static void __v1_v2_set_packet_loss_discard(int sock)
        }
 }
 
-static void walk_v1_v2_tx(int sock, struct ring *ring)
+static inline void *get_next_frame(struct ring *ring, int n)
+{
+       uint8_t *f0 = ring->rd[0].iov_base;
+
+       switch (ring->version) {
+       case TPACKET_V1:
+       case TPACKET_V2:
+               return ring->rd[n].iov_base;
+       case TPACKET_V3:
+               return f0 + (n * ring->req3.tp_frame_size);
+       default:
+               bug_on(1);
+               return NULL;	/* not reached; matches __tx_kernel_ready() */
+       }
+}
+
+static void walk_tx(int sock, struct ring *ring)
 {
        struct pollfd pfd;
        int rcv_sock, ret;
@@ -360,9 +390,19 @@ static void walk_v1_v2_tx(int sock, struct ring *ring)
                .sll_family = PF_PACKET,
                .sll_halen = ETH_ALEN,
        };
+       int nframes;
+
+       /* TPACKET_V{1,2} sets up the ring->rd* related variables based
+        * on frames (e.g., rd_num is tp_frame_nr) whereas V3 sets these
+        * up based on blocks (e.g., rd_num is tp_block_nr).
+        */
+       if (ring->version <= TPACKET_V2)
+               nframes = ring->rd_num;
+       else
+               nframes = ring->req3.tp_frame_nr;
 
        bug_on(ring->type != PACKET_TX_RING);
-       bug_on(ring->rd_num < NUM_PACKETS);
+       bug_on(nframes < NUM_PACKETS);
 
        rcv_sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
        if (rcv_sock == -1) {
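To make the frame count concrete: with the geometry chosen in __v3_fill() below, tp_block_size is four pages and tp_frame_size is TPACKET_ALIGNMENT << 7 = 2048 bytes, so on a 4 KiB-page system each block holds 16384 / 2048 = 8 frames and tp_frame_nr is eight times tp_block_nr. Since V3 maps its blocks contiguously, frame n sits at a fixed offset from the start of the mapping, which is exactly what get_next_frame() above computes (arithmetic sketch, assuming 4 KiB pages):

	unsigned int block_sz  = getpagesize() << 2;		/* 16384 */
	unsigned int frame_sz  = TPACKET_ALIGNMENT << 7;	/*  2048 */
	unsigned int per_block = block_sz / frame_sz;		/*     8 */

	/* frame n, addressed without a per-frame iovec: */
	void *frame_n = (uint8_t *)ring->rd[0].iov_base + n * frame_sz;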
@@ -388,10 +428,11 @@ static void walk_v1_v2_tx(int sock, struct ring *ring)
        create_payload(packet, &packet_len);
 
        while (total_packets > 0) {
-               while (__v1_v2_tx_kernel_ready(ring->rd[frame_num].iov_base,
-                                              ring->version) &&
+               void *next = get_next_frame(ring, frame_num);
+
+               while (__tx_kernel_ready(next, ring->version) &&
                       total_packets > 0) {
-                       ppd.raw = ring->rd[frame_num].iov_base;
+                       ppd.raw = next;
 
                        switch (ring->version) {
                        case TPACKET_V1:
@@ -413,14 +454,27 @@ static void walk_v1_v2_tx(int sock, struct ring *ring)
                                       packet_len);
                                total_bytes += ppd.v2->tp_h.tp_snaplen;
                                break;
+                       case TPACKET_V3: {
+                               struct tpacket3_hdr *tx = next;
+
+                               tx->tp_snaplen = packet_len;
+                               tx->tp_len = packet_len;
+                               tx->tp_next_offset = 0;
+
+                               memcpy((uint8_t *)tx + TPACKET3_HDRLEN -
+                                      sizeof(struct sockaddr_ll), packet,
+                                      packet_len);
+                               total_bytes += tx->tp_snaplen;
+                               break;
+                       }
                        }
 
                        status_bar_update();
                        total_packets--;
 
-                       __v1_v2_tx_user_ready(ppd.raw, ring->version);
+                       __tx_user_ready(next, ring->version);
 
-                       frame_num = (frame_num + 1) % ring->rd_num;
+                       frame_num = (frame_num + 1) % nframes;
                }
 
                poll(&pfd, 1, 1);
@@ -460,7 +514,7 @@ static void walk_v1_v2(int sock, struct ring *ring)
        if (ring->type == PACKET_RX_RING)
                walk_v1_v2_rx(sock, ring);
        else
-               walk_v1_v2_tx(sock, ring);
+               walk_tx(sock, ring);
 }
 
 static uint64_t __v3_prev_block_seq_num = 0;
@@ -546,7 +600,6 @@ static void walk_v3_rx(int sock, struct ring *ring)
        bug_on(ring->type != PACKET_RX_RING);
 
        pair_udp_open(udp_sock, PORT_BASE);
-       pair_udp_setfilter(sock);
 
        memset(&pfd, 0, sizeof(pfd));
        pfd.fd = sock;
@@ -583,7 +636,7 @@ static void walk_v3(int sock, struct ring *ring)
        if (ring->type == PACKET_RX_RING)
                walk_v3_rx(sock, ring);
        else
-               bug_on(1);
+               walk_tx(sock, ring);
 }
 
 static void __v1_v2_fill(struct ring *ring, unsigned int blocks)
@@ -602,12 +655,13 @@ static void __v1_v2_fill(struct ring *ring, unsigned int blocks)
        ring->flen = ring->req.tp_frame_size;
 }
 
-static void __v3_fill(struct ring *ring, unsigned int blocks)
+static void __v3_fill(struct ring *ring, unsigned int blocks, int type)
 {
-       ring->req3.tp_retire_blk_tov = 64;
-       ring->req3.tp_sizeof_priv = 0;
-       ring->req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH;
-
+       if (type == PACKET_RX_RING) {
+               ring->req3.tp_retire_blk_tov = 64;
+               ring->req3.tp_sizeof_priv = 0;
+               ring->req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH;
+       }
        ring->req3.tp_block_size = getpagesize() << 2;
        ring->req3.tp_frame_size = TPACKET_ALIGNMENT << 7;
        ring->req3.tp_block_nr = blocks;
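Gating the three fields above on PACKET_RX_RING matters: the block-retire timeout, the per-block private area and TP_FT_REQ_FILL_RXHASH only make sense for RX block management, and the kernel side of TPACKET_V3 TX support is expected to reject a TX ring request that sets any of them. A TX request therefore carries only the plain geometry (hedged sketch; the block count of 256 is chosen arbitrarily):

	struct tpacket_req3 req3 = {
		.tp_block_size = getpagesize() << 2,
		.tp_frame_size = TPACKET_ALIGNMENT << 7,
		.tp_block_nr   = 256,
		.tp_frame_nr   = ((getpagesize() << 2) /
				  (TPACKET_ALIGNMENT << 7)) * 256,
	};
	setsockopt(sock, SOL_PACKET, PACKET_TX_RING, &req3, sizeof(req3));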
@@ -641,7 +695,7 @@ static void setup_ring(int sock, struct ring *ring, int version, int type)
                break;
 
        case TPACKET_V3:
-               __v3_fill(ring, blocks);
+               __v3_fill(ring, blocks, type);
                ret = setsockopt(sock, SOL_PACKET, type, &ring->req3,
                                 sizeof(ring->req3));
                break;
@@ -685,6 +739,8 @@ static void bind_ring(int sock, struct ring *ring)
 {
        int ret;
 
+       pair_udp_setfilter(sock);
+
        ring->ll.sll_family = PF_PACKET;
        ring->ll.sll_protocol = htons(ETH_P_ALL);
        ring->ll.sll_ifindex = if_nametoindex("lo");
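Attaching the filter here, rather than inside the individual rx walkers, pairs with the protocol-0 socket() change above: the socket cannot receive anything before this bind(), so the filter is guaranteed to be in place for the very first packet, and both RX and TX test setups share one attachment point. The resulting race-free order, condensed (sketch):

	int s = socket(PF_PACKET, SOCK_RAW, 0);	/* 1. no delivery yet  */
	pair_udp_setfilter(s);			/* 2. filter in place  */
	ring->ll.sll_protocol = htons(ETH_P_ALL);
	bind(s, (struct sockaddr *)&ring->ll,	/* 3. delivery starts, */
	     sizeof(ring->ll));			/*    already filtered */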
@@ -796,6 +852,7 @@ int main(void)
        ret |= test_tpacket(TPACKET_V2, PACKET_TX_RING);
 
        ret |= test_tpacket(TPACKET_V3, PACKET_RX_RING);
+       ret |= test_tpacket(TPACKET_V3, PACKET_TX_RING);
 
        if (ret)
                return 1;